
Commit

KillianLucas committed Apr 24, 2024
2 parents 81c0b08 + 9a44661 commit c7ff254
Showing 4 changed files with 378 additions and 363 deletions.
18 changes: 10 additions & 8 deletions interpreter/core/llm/llm.py
@@ -59,12 +59,14 @@ def run(self, messages):
         ), "No message after the first can have the role 'system'"

         # Detect function support
-        if self.supports_functions != None:
-            supports_functions = self.supports_functions
-        elif litellm.supports_function_calling(self.model):
-            supports_functions = True
-        else:
-            supports_functions = False
+        if self.supports_functions == None:
+            try:
+                if litellm.supports_function_calling(self.model):
+                    self.supports_functions = True
+                else:
+                    self.supports_functions = False
+            except:
+                self.supports_functions = False

         # Trim image messages if they're there
         if self.supports_vision:
@@ -89,7 +91,7 @@ def run(self, messages):
         # Convert to OpenAI messages format
         messages = convert_to_openai_messages(
             messages,
-            function_calling=supports_functions,
+            function_calling=self.supports_functions,
             vision=self.supports_vision,
             shrink_images=self.interpreter.shrink_images,
         )
@@ -193,7 +195,7 @@ def run(self, messages):
         if self.interpreter.verbose:
             litellm.set_verbose = True

-        if supports_functions:
+        if self.supports_functions:
             yield from run_function_calling_llm(self, params)
         else:
             yield from run_text_llm(self, params)
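The net effect of the llm.py change is that function-calling support is now detected once and cached on the Llm object itself (self.supports_functions), with a try/except so that models litellm does not recognize fall back to plain-text mode instead of raising. A minimal sketch of that behavior, assuming a bare object with supports_functions and model attributes (the helper name below is illustrative, not part of the codebase):

    import litellm

    def detect_function_support(llm):
        # Respect an explicit True/False set by a user or a profile.
        if llm.supports_functions is None:
            try:
                # Ask litellm whether the configured model advertises function calling.
                llm.supports_functions = litellm.supports_function_calling(llm.model)
            except Exception:
                # Unknown or local model names should not crash the run.
                llm.supports_functions = False
        return llm.supports_functions

Because the result is stored on self, a user or profile can still pin the flag explicitly (interpreter.llm.supports_functions = False) and the litellm probe is skipped.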
2 changes: 1 addition & 1 deletion interpreter/terminal_interface/profiles/defaults/local.py
@@ -214,7 +214,7 @@ def download_model(models_dir, models, interpreter):
"""
)

interpreter.llm.supports_functions = False
interpreter.llm.api_base = "http://localhost:1234/v1"
interpreter.llm.api_key = "x"

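For reference, the attributes the local.py profile touches can also be set directly from Python when embedding Open Interpreter. A rough sketch assuming a local OpenAI-compatible server such as LM Studio on its default port; the model name is a placeholder for whatever your server actually serves:

    from interpreter import interpreter

    # Point Open Interpreter at a local OpenAI-compatible endpoint.
    interpreter.llm.api_base = "http://localhost:1234/v1"
    interpreter.llm.api_key = "x"  # local servers generally ignore the key
    interpreter.llm.model = "openai/local-model"  # placeholder model name
    interpreter.offline = True

    # Optional: skip the auto-detection added in this commit by setting the flag yourself.
    interpreter.llm.supports_functions = False

    interpreter.chat("hello")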

2 comments on commit c7ff254

@zheyi911

PS E:\python3.11> interpreter
E:\python3.11\Lib\site-packages\pydantic\_internal\_fields.py:160: UserWarning: Field "model_name" has conflict with protected namespace "model_".

You may be able to resolve this warning by setting model_config['protected_namespaces'] = ().
warnings.warn(
E:\python3.11\Lib\site-packages\pydantic\_internal\_fields.py:160: UserWarning: Field "model_info" has conflict with protected namespace "model_".

You may be able to resolve this warning by setting model_config['protected_namespaces'] = ().
warnings.warn(

Welcome to Open Interpreter.

────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

▌ OpenAI API key not found

To use GPT-4 (highly recommended) please provide an OpenAI API key.

To use another language model, consult the documentation at docs.openinterpreter.com.

────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

OpenAI API key:

I would like to ask why this series of warnings and errors appears when I start the interpreter.
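Regarding the two pydantic warnings at the top of that log: they are emitted because a library imported by Open Interpreter (most likely litellm) defines pydantic fields named model_name and model_info, which collide with pydantic v2's protected model_ namespace. They are harmless. The remedy the warning itself suggests looks roughly like the generic sketch below; this illustrates the setting, it is not a patch for any specific library:

    from pydantic import BaseModel, ConfigDict

    class Deployment(BaseModel):
        # Opting out of the protected "model_" namespace silences the
        # 'Field ... has conflict with protected namespace "model_"' warnings.
        model_config = ConfigDict(protected_namespaces=())

        model_name: str
        model_info: dict = {}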

@zheyi911

I have updated with the following error; here is the complete error output.

C:\Users\27559>interpreter --local
:88: SyntaxWarning: "is" with a literal. Did you mean "=="?

▌ Open Interpreter is compatible with several local model providers.

[?] What one would you like to use?:

Ollama
Llamafile
LM Studio
Jan

4 Ollama models found. To download a new model, run ollama run <model name>, then start a new interpreter session.

For a full list of downloadable models, check out https://ollama.com/library

[?] Select a downloaded Ollama model::
failed
NAME

llama3:8b
llama3

Using Ollama model: llama3:8b

hello
Traceback (most recent call last):
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 229, in fixed_litellm_completions
yield from litellm.completion(**params)
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 260, in ollama_completion_stream
raise e
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 248, in ollama_completion_stream
status_code=response.status_code, message=response.text
^^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 576, in text
content = self.content
^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 570, in content
raise ResponseNotRead()
httpx.ResponseNotRead: Attempted to access streaming response content, without having called read().

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "E:\python3.11\Lib\site-packages\interpreter\core\respond.py", line 69, in respond
for chunk in interpreter.llm.run(messages_for_llm):
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 201, in run
yield from run_text_llm(self, params)
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\run_text_llm.py", line 20, in run_text_llm
for chunk in llm.completions(**params):
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 232, in fixed_litellm_completions
raise first_error
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 213, in fixed_litellm_completions
yield from litellm.completion(**params)
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 260, in ollama_completion_stream
raise e
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 248, in ollama_completion_stream
status_code=response.status_code, message=response.text
^^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 576, in text
content = self.content
^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 570, in content
raise ResponseNotRead()
httpx.ResponseNotRead: Attempted to access streaming response content, without having called read().

    Python Version: 3.11.9
    Pip Version: 24.0
    Open-interpreter Version: cmd: Open Interpreter 0.2.5 New Computer Update, pkg: 0.2.5
OS Version and Architecture: Windows-10-10.0.22631-SP0
CPU Info: Intel64 Family 6 Model 141 Stepping 1, GenuineIntel
RAM Info: 31.74 GB, used: 19.41, free: 12.34

    # Interpreter Info

    Vision: False
    Model: ollama/llama3:8b
    Function calling: False
    Context window: 8000
    Max tokens: 1200

    Auto run: False
    API base: None
    Offline: True

    Curl output: Not local

    # Messages

    System Message:

You are Open Interpreter, a world-class programmer that can execute code on the user's machine.
First, list all of the information you know related to the user's request.
Next, write a plan. Always recap the plan between each code block (you have extreme short-term memory loss, so you need to recap the plan between each message block to retain it).
The code you write must be able to be executed as is. Invalid syntax will cause a catastrophic failure. Do not include the language of the code in the response.
When you execute code, it will be executed on the user's machine. The user has given you full and complete permission to execute any code necessary to complete the task. Execute the code.
You can access the internet. Run any code to achieve the goal, and if at first you don't succeed, try again and again.
You can install new packages.
When a user refers to a filename, they're likely referring to an existing file in the directory you're currently executing code in.
Write messages to the user in Markdown.
In general, try to make plans with as few steps as possible. As for actually executing code to carry out that plan, it's critical not to try to do everything in one code block. You should try something, print information about it, then continue from there in tiny, informed steps. You will never get it on the first try, and attempting it in one go will often lead to errors you can't see.
You are capable of any task.
Once you have accomplished the task, ask the user if they are happy with the result and wait for their response. It is very important to get feedback from the user.
The user will tell you the next task after you ask them.

    {'role': 'user', 'type': 'message', 'content': 'hello'}

Traceback (most recent call last):
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 229, in fixed_litellm_completions
yield from litellm.completion(**params)
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 260, in ollama_completion_stream
raise e
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 248, in ollama_completion_stream
status_code=response.status_code, message=response.text
^^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 576, in text
content = self.content
^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 570, in content
raise ResponseNotRead()
httpx.ResponseNotRead: Attempted to access streaming response content, without having called read().

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "E:\python3.11\Lib\site-packages\interpreter\core\respond.py", line 69, in respond
for chunk in interpreter.llm.run(messages_for_llm):
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 201, in run
yield from run_text_llm(self, params)
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\run_text_llm.py", line 20, in run_text_llm
for chunk in llm.completions(**params):
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 232, in fixed_litellm_completions
raise first_error
File "E:\python3.11\Lib\site-packages\interpreter\core\llm\llm.py", line 213, in fixed_litellm_completions
yield from litellm.completion(**params)
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 260, in ollama_completion_stream
raise e
File "E:\python3.11\Lib\site-packages\litellm\llms\ollama.py", line 248, in ollama_completion_stream
status_code=response.status_code, message=response.text
^^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 576, in text
content = self.content
^^^^^^^^^^^^
File "E:\python3.11\Lib\site-packages\httpx_models.py", line 570, in content
raise ResponseNotRead()
httpx.ResponseNotRead: Attempted to access streaming response content, without having called read().

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
File "", line 198, in _run_module_as_main
File "", line 88, in run_code
File "E:\python3.11\Scripts\interpreter.exe_main
.py", line 7, in
File "E:\python3.11\Lib\site-packages\interpreter\terminal_interface\start_terminal_interface.py", line 453, in main
start_terminal_interface(interpreter)
File "E:\python3.11\Lib\site-packages\interpreter\terminal_interface\start_terminal_interface.py", line 427, in start_terminal_interface
interpreter.chat()
File "E:\python3.11\Lib\site-packages\interpreter\core\core.py", line 166, in chat
for _ in self._streaming_chat(message=message, display=display):
File "E:\python3.11\Lib\site-packages\interpreter\core\core.py", line 195, in _streaming_chat
yield from terminal_interface(self, message)
File "E:\python3.11\Lib\site-packages\interpreter\terminal_interface\terminal_interface.py", line 133, in terminal_interface
for chunk in interpreter.chat(message, display=False, stream=True):
File "E:\python3.11\Lib\site-packages\interpreter\core\core.py", line 234, in _streaming_chat
yield from self._respond_and_store()
File "E:\python3.11\Lib\site-packages\interpreter\core\core.py", line 282, in _respond_and_store
for chunk in respond(self):
File "E:\python3.11\Lib\site-packages\interpreter\core\respond.py", line 115, in respond
raise Exception(
Exception: Error occurred. Attempted to access streaming response content, without having called read().
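The root httpx.ResponseNotRead in both tracebacks is generic httpx behavior: on a streamed response, .text and .content are unavailable until read() has been called, and litellm's Ollama error path accesses response.text before doing so. In other words, the Ollama request itself appears to have returned an error status, and litellm's attempt to report that failure is what raised the secondary exception, so the first thing to verify is that the Ollama server is reachable and actually serving the selected model. A minimal illustration of the httpx rule (the URL is only a placeholder for a streaming endpoint):

    import httpx

    with httpx.Client() as client:
        # stream() defers downloading the response body.
        with client.stream("GET", "https://example.com/") as response:
            # Accessing response.text here would raise httpx.ResponseNotRead.
            response.read()  # load the body explicitly first
            print(response.status_code, len(response.text))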
