hoollyzhang committed
Commit fbbb23b · Parent: 66759f2

feat: Add application file

.env.example CHANGED
@@ -1,3 +1,5 @@
- OPENAI_API_KEY= # your openai api key (required)
+ OPENAI_API_KEY= # your openai api key (required)
+ OPENAI_PROXY= # your openai proxy (optional)
+ OPENAI_API_BASE= # your openai api base url (optional)
  CODEBOX_API_KEY= # your codebox api key (optional, required for production)
- VERBOSE=False # set to True to enable verbose logging
+ VERBOSE=False # set to True to enable verbose logging
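
For context, a minimal Python sketch of how these variables flow into the app via python-dotenv; the key, proxy and base-URL values named in the comments are placeholders, not values from this commit:

# minimal sketch, assuming a local .env filled with placeholder values, e.g.
#   OPENAI_API_KEY=sk-...                (required)
#   OPENAI_PROXY=http://127.0.0.1:7890   (optional, hypothetical proxy address)
#   OPENAI_API_BASE=https://api.openai.com/v1
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the working directory
api_base = os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1")
proxy = os.getenv("OPENAI_PROXY")  # None when the optional variable is unset
print("base:", api_base, "proxy:", proxy)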
frontend/app.py → app.py RENAMED
@@ -12,12 +12,17 @@ st.title('Code Interpreter API 🚀')
  
  # This will create a sidebar
  st.sidebar.title("Code Interpreter API 🚀")
- st.sidebar.markdown("[Github Repo](https://github.com/shroominic/codeinterpreter-api)")
+
+ st.sidebar.markdown("### 给勇哥哥打call 🚀")
+
+ st.sidebar.markdown(
+     "![Code Interpreter](https://vercel.brzhang.club/_next/image?url=%2Fcoffer.jpg&w=640&q=75)")
  
  
  # This will create a textbox where you can input text
  input_text = st.text_area("Write your prompt")
- uploaded_files = st.file_uploader("Upload your files", accept_multiple_files=True)
+ uploaded_files = st.file_uploader(
+     "Upload your files", accept_multiple_files=True)
  
  uploaded_files_list = []
  for uploaded_file in uploaded_files:
@@ -30,4 +35,4 @@ button_pressed = st.button('Run code interpreter', use_container_width=True)
  
  # This will display the images only when the button is pressed
  if button_pressed and input_text != "":
-     asyncio.run(get_images(input_text, files=uploaded_files_list))
+     asyncio.run(get_images(input_text, files=uploaded_files_list))
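
The reworked app.py keeps the original flow: collect a prompt plus optional uploads, then drive the async backend with asyncio.run, since Streamlit scripts run synchronously. A minimal sketch of that pattern follows; render_images is a hypothetical stand-in for the repo's get_images helper, not its actual implementation:

# minimal sketch of the Streamlit + asyncio pattern used in app.py;
# render_images is a hypothetical stand-in for get_images
import asyncio
import streamlit as st


async def render_images(prompt: str, files: list) -> None:
    # placeholder body: a real implementation would call the code interpreter
    st.write(f"Would run the interpreter on {prompt!r} with {len(files)} file(s)")


prompt = st.text_area("Write your prompt")
uploads = st.file_uploader("Upload your files", accept_multiple_files=True) or []

if st.button("Run code interpreter", use_container_width=True) and prompt != "":
    asyncio.run(render_images(prompt, files=list(uploads)))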
codeinterpreterapi/config.py CHANGED
@@ -1,3 +1,4 @@
+ from os import path
  from pydantic import BaseSettings
  from dotenv import load_dotenv
  from typing import Optional
@@ -15,6 +16,8 @@ class CodeInterpreterAPISettings(BaseSettings):
  
      CODEBOX_API_KEY: Optional[str] = None
      OPENAI_API_KEY: Optional[str] = None
+     OPENAI_API_BASE: Optional[str] = "https://api.openai.com/v1"
+     OPENAI_PROXY: Optional[str] = None
  
  
  settings = CodeInterpreterAPISettings()
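
Because the settings class is a pydantic (v1-style) BaseSettings, the two new fields are filled from the environment automatically. A small sketch under that assumption; DemoSettings and the gateway URL are illustrative only:

# sketch of how the new fields resolve; DemoSettings mirrors the relevant
# part of CodeInterpreterAPISettings and the gateway URL is hypothetical
import os
from typing import Optional
from pydantic import BaseSettings  # pydantic<2 exposes BaseSettings directly


class DemoSettings(BaseSettings):
    OPENAI_API_BASE: Optional[str] = "https://api.openai.com/v1"
    OPENAI_PROXY: Optional[str] = None


print(DemoSettings().OPENAI_API_BASE)   # default base URL when nothing is set
os.environ["OPENAI_API_BASE"] = "https://example-gateway.local/v1"
print(DemoSettings().OPENAI_API_BASE)   # now picked up from the environment
print(DemoSettings().OPENAI_PROXY)      # stays None while OPENAI_PROXY is unset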
codeinterpreterapi/session.py CHANGED
@@ -1,4 +1,6 @@
- import uuid, base64, re
+ import uuid
+ import base64
+ import re
  from io import BytesIO
  from typing import Optional
  from codeboxapi import CodeBox  # type: ignore
@@ -24,13 +26,16 @@ class CodeInterpreterSession:
          self,
          model=None,
          openai_api_key=settings.OPENAI_API_KEY,
+         openai_proxy=settings.OPENAI_PROXY,
+         openai_api_base=settings.OPENAI_API_BASE,
          verbose=settings.VERBOSE,
          tools: list[BaseTool] = None
      ) -> None:
          self.codebox = CodeBox()
          self.verbose = verbose
          self.tools: list[BaseTool] = self._tools(tools)
-         self.llm: BaseChatModel = self._llm(model, openai_api_key)
+         self.llm: BaseChatModel = self._llm(
+             model, openai_api_key, openai_proxy, openai_api_base)
          self.agent_executor: AgentExecutor = self._agent_executor()
          self.input_files: list[File] = []
          self.output_files: list[File] = []
@@ -43,8 +48,7 @@ class CodeInterpreterSession:
          return additional_tools + [
              StructuredTool(
                  name="python",
-                 description=
-                 # TODO: variables as context to the agent
+                 description=  # TODO: variables as context to the agent
                  # TODO: current files as context to the agent
                  "Input a string of code to a python interpreter (jupyter kernel). "
                  "Variables are preserved between runs. ",
@@ -54,9 +58,14 @@ class CodeInterpreterSession:
              ),
          ]
  
-     def _llm(self, model: Optional[str] = None, openai_api_key: Optional[str] = None) -> BaseChatModel:
+     def _llm(self, model: Optional[str] = None, openai_api_key: Optional[str] = None, openai_proxy: Optional[str] = None, openai_api_base: Optional[str] = None) -> BaseChatModel:
+
+         print("OpenAI API Key:", openai_api_key)
+         print("OpenAI Proxy:", openai_proxy)
+         print("OpenAI API Base:", openai_api_base)
+         print("OpenAI Model:", model)
          if model is None:
-             model = "gpt-4"
+             model = "gpt-3.5-turbo"
  
          if openai_api_key is None:
              raise ValueError(
@@ -67,6 +76,8 @@ class CodeInterpreterSession:
              temperature=0.03,
              model=model,
              openai_api_key=openai_api_key,
+             # openai_proxy=openai_proxy,
+             openai_api_base=openai_api_base,
              max_retries=3,
              request_timeout=60 * 3,
          )  # type: ignore
@@ -76,7 +87,8 @@ class CodeInterpreterSession:
              llm=self.llm,
              tools=self.tools,
              system_message=code_interpreter_system_message,
-             extra_prompt_messages=[MessagesPlaceholder(variable_name="memory")],
+             extra_prompt_messages=[
+                 MessagesPlaceholder(variable_name="memory")],
          )
  
      def _agent_executor(self) -> AgentExecutor:
@@ -86,7 +98,8 @@ class CodeInterpreterSession:
              max_iterations=9,
              tools=self.tools,
              verbose=self.verbose,
-             memory=ConversationBufferMemory(memory_key="memory", return_messages=True),
+             memory=ConversationBufferMemory(
+                 memory_key="memory", return_messages=True),
          )
  
      async def show_code(self, code: str) -> None:
@@ -108,7 +121,8 @@ class CodeInterpreterSession:
              filename = f"image-{uuid.uuid4()}.png"
              file_buffer = BytesIO(base64.b64decode(output.content))
              file_buffer.name = filename
-             self.output_files.append(File(name=filename, content=file_buffer.read()))
+             self.output_files.append(
+                 File(name=filename, content=file_buffer.read()))
              return f"Image {filename} got send to the user."
  
          elif output.type == "error":
@@ -118,7 +132,8 @@ class CodeInterpreterSession:
              ):
                  await self.codebox.ainstall(package.group(1))
                  return f"{package.group(1)} was missing but got installed now. Please try again."
-             else: pass
+             else:
+                 pass
              # TODO: preanalyze error to optimize next code generation
              if self.verbose:
                  print("Error:", output.content)
@@ -157,7 +172,8 @@ class CodeInterpreterSession:
          for file in self.output_files:
              if str(file.name) in final_response:
                  # rm ![Any](file.name) from the response
-                 final_response = re.sub(rf"\n\n!\[.*\]\(.*\)", "", final_response)
+                 final_response = re.sub(
+                     rf"\n\n!\[.*\]\(.*\)", "", final_response)
  
          if self.output_files and re.search(rf"\n\[.*\]\(.*\)", final_response):
              final_response = await remove_download_link(final_response, self.llm)
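
Taken together, the session changes add two routing knobs to the constructor and to _llm. A minimal construction sketch follows; the key, base URL and model values are placeholders, and note that openai_proxy is currently accepted and printed but not forwarded to ChatOpenAI (the corresponding argument is commented out), so only openai_api_base actually changes where requests go:

# minimal sketch: wiring the new options explicitly instead of via .env;
# the api key and base URL below are placeholders
from codeinterpreterapi.session import CodeInterpreterSession

session = CodeInterpreterSession(
    model="gpt-3.5-turbo",                       # matches the new default in _llm
    openai_api_key="sk-your-key-here",           # placeholder
    openai_api_base="https://api.openai.com/v1",
    openai_proxy=None,                           # accepted, but not yet passed to the LLM
    verbose=True,
)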
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ python-dotenv>=1.0.0,<2.0.0
+ openai>=0.27.8,<0.28.0
+ langchain>=0.0.232,<0.1.0
+ codeboxapi>=0.0.8,<0.1.0
+ streamlit>=1.24.1,<1.25.0
+ pytest>=7.4.0,<8.0.0
+ mypy>=1.4.1,<2.0.0
+ black>=23.7.0,<24.0.0
+ Pillow>=8.0.0,<9.0.0
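
The new pins cover the runtime stack (python-dotenv, openai, langchain, codeboxapi, streamlit, Pillow) plus development tooling (pytest, mypy, black), so the Space's environment can be reproduced locally with pip install -r requirements.txt.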