Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev #29

Merged
merged 7 commits into from
Feb 4, 2024
Merged

Dev #29

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .github/workflows/test_and_deploy.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,9 @@ jobs:
test:
name: ${{ matrix.platform }} py${{ matrix.python-version }}
runs-on: ${{ matrix.platform }}
timeout-minutes: 30
strategy:
fail-fast: false
matrix:
platform: [ ubuntu-latest, windows-latest, macos-latest ]
python-version: [ '3.9', '3.10', '3.11' ]
Expand Down
8 changes: 4 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -138,21 +138,21 @@ Install [napari](napari.org) in the environment using conda-forge: (very importa

conda install -c conda-forge napari pyqt

**Or**, with pip (linux, windows, or Intel Macs, not recommended on Apple M1/M2!):
**Or**, with pip (linux, windows, or Intel Macs, _not_ recommended on Apple M1/M2!):

pip install napari
python -m pip install "napari[all]"

Install napari-chatgpt in the environment:

pip install napari-chatgpt
python -m pip install napari-chatgpt

## Installation variations:

To install the latest development version (not recommended for end-users):

conda create -y -n napari-chatgpt -c conda-forge python=3.9
conda activate napari-chatgpt
pip install napari
python -m pip install "napari[all]"
git clone https://github.com/royerlab/napari-chatgpt.git
cd napari-chatgpt
pip install -e .
Expand Down
9 changes: 5 additions & 4 deletions setup.cfg
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[metadata]
name = napari-chatgpt
version = v2024.2.1
version = v2024.2.4
description = A napari plugin to process and analyse images with ChatGPT.
long_description = file: README.md
long_description_content_type = text/markdown
Expand Down Expand Up @@ -35,12 +35,13 @@ install_requires =
magicgui
scikit-image
qtpy
langchain==0.0.352
langchain==0.1.5
langchain-openai==0.0.5
openai
anthropic
fastapi
uvicorn
websockets
anthropic
openai
tiktoken
wikipedia
lxml
Expand Down
19 changes: 9 additions & 10 deletions src/napari_chatgpt/_tests/test_widget.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,13 @@ def test_omega_q_widget(make_napari_viewer, capsys):
viewer = make_napari_viewer()
viewer.add_image(np.random.random((100, 100)))

# No testing of UI elements yet!
#
# # create our widget, passing in the viewer
# my_widget = OmegaQWidget(viewer)

# create our widget, passing in the viewer
my_widget = OmegaQWidget(viewer)

# # call our widget method
# my_widget._on_click()
#
# # # call our widget method
# # my_widget._on_click()
# #
# # # read captured output and check that it's as we expected
# # captured = capsys.readouterr()
# # assert 'Omega' in captured.out
# # read captured output and check that it's as we expected
# captured = capsys.readouterr()
# assert 'Omega' in captured.out
118 changes: 67 additions & 51 deletions src/napari_chatgpt/_widget.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
Replace code below according to your needs.
"""
import sys
import traceback
from typing import TYPE_CHECKING, List

from PyQt5.QtCore import Qt
Expand All @@ -18,7 +19,7 @@
from napari_chatgpt.chat_server.chat_server import NapariChatServer
from napari_chatgpt.utils.configuration.app_configuration import \
AppConfiguration
from napari_chatgpt.utils.ollama.ollama import is_ollama_running, \
from napari_chatgpt.utils.ollama.ollama_server import is_ollama_running, \
get_ollama_models
from napari_chatgpt.utils.openai.model_list import get_openai_model_list
from napari_chatgpt.utils.python.installed_packages import \
Expand All @@ -28,7 +29,7 @@
if TYPE_CHECKING:
pass

from arbol import aprint
from arbol import aprint, asection

_creativity_mapping = {}
_creativity_mapping['normal'] = 0.0
Expand Down Expand Up @@ -100,22 +101,27 @@ def _model_selection(self):

if is_package_installed('anthropic'):
# Add Anthropic models to the combo box:
model_list.append('claude-2')
model_list.append('claude-instant-1')
model_list.append('claude-2.1')
model_list.append('claude-2.0')
model_list.append('claude-instant-1.2')


if is_ollama_running():
ollama_models = get_ollama_models()
for ollama_model in ollama_models:
model_list.append('ollama_'+ollama_model)

# Postprocess list:
# Postprocess model list:

# Special cases (common prefix):
if 'gpt-3.5-turbo' in model_list:
model_list.remove('gpt-3.5-turbo')

# get list of bad models for main LLM:
bad_models_filters = self.config.get('bad_models_filters', ['0613', 'vision'])
bad_models_filters = ['0613', 'vision', 'turbo-instruct', 'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-16k']

# get list of best models for main LLM:
best_models_filters = self.config.get('best_models_filters', ['0314', '0301', '1106', 'gpt-4'])
best_models_filters = ['0314', '0301', '1106', 'gpt-4']

# Ensure that some 'bad' or unsupported models are excluded:
bad_models = [m for m in model_list if any(bm in m for bm in bad_models_filters)]
Expand Down Expand Up @@ -372,50 +378,60 @@ def _start_omega_button(self):
self.layout.addWidget(self.start_omega_button)

def _on_click(self):
    """Start (or restart) the Omega chat server from the current UI settings.

    Reads the selected model, creativity level, memory type, agent
    personality and the various option checkboxes from the widget, warns the
    user via a modal dialog when the chosen model is not GPT4-level, then
    launches the chat server. Any exception raised during startup is caught
    and logged so that a failed start cannot crash the napari UI.
    """
    try:
        with asection("Starting Omega now!"):

            # Stop previous instance if it exists:
            if self.server:
                aprint("Server already started")
                self.server.stop()

            # Map the creativity choice to a sampling temperature:
            temperature = float(_creativity_mapping[
                self.creativity_combo_box.currentText()])
            # Tools run much 'cooler' (1%) than the main agent:
            tool_temperature = 0.01 * temperature

            # Model selected in the UI:
            main_llm_model_name = self.model_combo_box.currentText()

            # Warn users with a modal window that the selected model might be sub-optimal:
            if 'gpt-4' not in main_llm_model_name:
                aprint("Warning: you did not select a gpt-4 level model. Omega's cognitive and coding abilities will be degraded.")
                show_warning_dialog(f"You have selected this model: '{main_llm_model_name}'. "
                                    f"This is not a GPT4-level model. "
                                    f"Omega's cognitive and coding abilities will be degraded. "
                                    f"It might even completely fail or be too slow. "
                                    f"Please visit <a href='https://github.com/royerlab/napari-chatgpt/wiki/OpenAIKey'>our wiki</a> "
                                    f"for information on how to gain access to GPT4.")

            # Tool LLM model name comes from the configuration file;
            # 'same' means: reuse the main model for the tools.
            tool_llm_model_name = self.config.get('tool_llm_model_name', 'same')
            if tool_llm_model_name.strip() == 'same':
                aprint(f"Using the same model {main_llm_model_name} for the main and tool's LLM.")
                tool_llm_model_name = main_llm_model_name

            # Local import to avoid a hard dependency at widget-import time:
            from napari_chatgpt.chat_server.chat_server import start_chat_server
            self.server = start_chat_server(self.viewer,
                                            main_llm_model_name=main_llm_model_name,
                                            tool_llm_model_name=tool_llm_model_name,
                                            temperature=temperature,
                                            tool_temperature=tool_temperature,
                                            memory_type=self.memory_type_combo_box.currentText(),
                                            agent_personality=self.agent_personality_combo_box.currentText(),
                                            fix_imports=self.fix_imports_checkbox.isChecked(),
                                            install_missing_packages=self.install_missing_packages_checkbox.isChecked(),
                                            fix_bad_calls=self.fix_bad_calls_checkbox.isChecked(),
                                            autofix_mistakes=self.autofix_mistakes_checkbox.isChecked(),
                                            autofix_widget=self.autofix_widgets_checkbox.isChecked(),
                                            be_didactic=self.tutorial_mode_checkbox.isChecked(),
                                            save_chats_as_notebooks=self.save_chats_as_notebooks.isChecked(),
                                            verbose=self.verbose_checkbox.isChecked()
                                            )

    except Exception as e:
        # Never let a startup failure propagate into the Qt event loop:
        aprint(f"Error: {e}")
        aprint("Omega failed to start. Please check the console for more information.")
        traceback.print_exc()


def main():
Expand Down
130 changes: 66 additions & 64 deletions src/napari_chatgpt/chat_server/chat_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ async def websocket_endpoint(websocket: WebSocket):
_set_viewer_info(viewer_info)

# call LLM:
result = await agent_chain.acall(question)
result = await agent_chain.ainvoke(question)

aprint(
f"Agent response:\n{result['chat_history'][-1]}\n\n")
Expand Down Expand Up @@ -276,69 +276,71 @@ def start_chat_server(viewer: napari.Viewer = None,
verbose: bool = False
):

# get configuration:
config = AppConfiguration('omega')

# Set OpenAI key if necessary:
if ('gpt' in main_llm_model_name or 'gpt' in tool_llm_model_name )and is_package_installed(
'openai'):
set_api_key('OpenAI')

# Set Anthropic key if necessary:
if ('claude' in main_llm_model_name or 'claude' in tool_llm_model_name) and is_package_installed('anthropic'):
set_api_key('Anthropic')

# Instantiates napari viewer:
if not viewer:
viewer = napari.Viewer()

# Instantiates a notebook:
notebook = JupyterNotebookFile(notebook_folder_path=config.get('notebook_path')) if save_chats_as_notebooks else None

# Instantiates a napari bridge:
bridge = NapariBridge(viewer=viewer)

# Register snapshot function:
notebook.register_snapshot_function(bridge.take_snapshot)

# Instantiates server:
chat_server = NapariChatServer(notebook=notebook,
napari_bridge=bridge,
main_llm_model_name=main_llm_model_name,
tool_llm_model_name=tool_llm_model_name,
temperature=temperature,
tool_temperature=tool_temperature,
memory_type=memory_type,
agent_personality=agent_personality,
fix_imports=fix_imports,
install_missing_packages=install_missing_packages,
fix_bad_calls=fix_bad_calls,
autofix_mistakes=autofix_mistakes,
autofix_widget=autofix_widget,
be_didactic=be_didactic,
verbose=verbose
)

# Define server thread code:
def server_thread_function():
# Start Chat server:
chat_server.run()

# Create and start the thread that will run Omega:
server_thread = Thread(target=server_thread_function, args=())
server_thread.start()

# function to open browser on page:
def _open_browser():
url = f"http://127.0.0.1:{chat_server.port}"
webbrowser.open(url, new=0, autoraise=True)

# open browser after delay of a few seconds:
if config.get('open_browser', True):
QTimer.singleShot(2000, _open_browser)

# Return the server:
return chat_server
with asection("Starting chat server"):

# get configuration:
config = AppConfiguration('omega')

# Set OpenAI key if necessary:
if ('gpt' in main_llm_model_name or 'gpt' in tool_llm_model_name )and is_package_installed(
'openai'):
set_api_key('OpenAI')

# Set Anthropic key if necessary:
if ('claude' in main_llm_model_name or 'claude' in tool_llm_model_name) and is_package_installed('anthropic'):
set_api_key('Anthropic')

# Instantiates napari viewer:
if not viewer:
viewer = napari.Viewer()

# Instantiates a notebook:
notebook = JupyterNotebookFile(notebook_folder_path=config.get('notebook_path')) if save_chats_as_notebooks else None

# Instantiates a napari bridge:
bridge = NapariBridge(viewer=viewer)

# Register snapshot function:
notebook.register_snapshot_function(bridge.take_snapshot)

# Instantiates server:
chat_server = NapariChatServer(notebook=notebook,
napari_bridge=bridge,
main_llm_model_name=main_llm_model_name,
tool_llm_model_name=tool_llm_model_name,
temperature=temperature,
tool_temperature=tool_temperature,
memory_type=memory_type,
agent_personality=agent_personality,
fix_imports=fix_imports,
install_missing_packages=install_missing_packages,
fix_bad_calls=fix_bad_calls,
autofix_mistakes=autofix_mistakes,
autofix_widget=autofix_widget,
be_didactic=be_didactic,
verbose=verbose
)

# Define server thread code:
def server_thread_function():
# Start Chat server:
chat_server.run()

# Create and start the thread that will run Omega:
server_thread = Thread(target=server_thread_function, args=())
server_thread.start()

# function to open browser on page:
def _open_browser():
url = f"http://127.0.0.1:{chat_server.port}"
webbrowser.open(url, new=0, autoraise=True)

# open browser after delay of a few seconds:
if config.get('open_browser', True):
QTimer.singleShot(2000, _open_browser)

# Return the server:
return chat_server


if __name__ == "__main__":
Expand Down
Loading
Loading