Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev #28

Merged
merged 8 commits into from
Feb 1, 2024
Merged

Dev #28

Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -138,7 +138,7 @@ Install [napari](napari.org) in the environment using conda-forge: (very importa

conda install -c conda-forge napari pyqt

**Or**, with pip:
**Or**, with pip (Linux, Windows, or Intel Macs; not recommended on Apple M1/M2!):

pip install napari

Expand All @@ -156,6 +156,7 @@ To install the latest development version (not recommended for end-users):
git clone https://github.com/royerlab/napari-chatgpt.git
cd napari-chatgpt
pip install -e .
pip install -e ".[testing]"

or:

Expand Down
5 changes: 3 additions & 2 deletions setup.cfg
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[metadata]
name = napari-chatgpt
version = v2024.1.23
version = v2024.2.1
description = A napari plugin to process and analyse images with chatGPT.
long_description = file: README.md
long_description_content_type = text/markdown
Expand Down Expand Up @@ -57,7 +57,8 @@ install_requires =
tabulate
numba
imageio[ffmpeg,pyav]

notebook
nbformat


python_requires = >=3.9
Expand Down
136 changes: 105 additions & 31 deletions src/napari_chatgpt/_widget.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,26 +9,26 @@
import sys
from typing import TYPE_CHECKING, List

from napari_chatgpt.chat_server.chat_server import NapariChatServer
from napari_chatgpt.utils.api_keys.api_key import set_api_key
from napari_chatgpt.utils.ollama.ollama import is_ollama_running, \
get_ollama_models
from napari_chatgpt.utils.openai.model_list import get_openai_model_list
from napari_chatgpt.utils.python.installed_packages import \
is_package_installed
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QLabel, QCheckBox
from PyQt5.QtWidgets import QVBoxLayout, QComboBox
from napari.viewer import Viewer
from qtpy.QtWidgets import QPushButton, QWidget



from napari_chatgpt.chat_server.chat_server import NapariChatServer
from napari_chatgpt.utils.configuration.app_configuration import \
AppConfiguration
from napari_chatgpt.utils.ollama.ollama import is_ollama_running, \
get_ollama_models
from napari_chatgpt.utils.openai.model_list import get_openai_model_list
from napari_chatgpt.utils.python.installed_packages import \
is_package_installed
from napari_chatgpt.utils.qt.warning_dialog import show_warning_dialog

if TYPE_CHECKING:
pass

from arbol import aprint, asection
from arbol import aprint

_creativity_mapping = {}
_creativity_mapping['normal'] = 0.0
Expand All @@ -46,6 +46,9 @@ def __init__(self, napari_viewer):
super().__init__()
aprint("OmegaQWidget instantiated!")

# Get app configuration:
self.config = AppConfiguration('omega')

# Napari viewer instance:
self.viewer = napari_viewer

Expand All @@ -68,6 +71,8 @@ def __init__(self, napari_viewer):
self._install_missing_packages()
self._autofix_mistakes()
self._autofix_widgets()
self._tutorial_mode()
self._save_chats_as_notebooks()
self._verbose()

self._start_omega_button()
Expand All @@ -76,6 +81,7 @@ def __init__(self, napari_viewer):
self.setLayout(self.layout)

def _model_selection(self):

aprint("Setting up model selection UI.")

# Create a QLabel instance
Expand All @@ -86,17 +92,11 @@ def _model_selection(self):
self.model_combo_box = QComboBox()
# Set tooltip for the combo box
self.model_combo_box.setToolTip(
"Choose an LLM model. Best models are GPT4 and GPT3.5, \n"
"with Claude a bit behind, other models are experimental\n"
"and unfortunately barely usable. WARNING: recent GPT models\n"
"have poor coding performance (0613), avoid them!\n"
"Models at the top of list are better!")

# Model list:
model_list: List[str] = []
"Choose an LLM model. Best models are GPT4s. \n"
"other models are less competent. \n")

# Add OpenAI models to the combo box:
model_list = get_openai_model_list(verbose=True)
model_list: List[str] = list(get_openai_model_list(verbose=True))

if is_package_installed('anthropic'):
# Add Anthropic models to the combo box:
Expand All @@ -111,17 +111,21 @@ def _model_selection(self):

# Postprocess list:

# Ensure that some 'bad' or unsuported models are excluded:
bad_models = [m for m in model_list if '0613' in m or 'vision' in m]
# get list of bad models for main LLM:
bad_models_filters = self.config.get('bad_models_filters', ['0613', 'vision'])

# get list of best models for main LLM:
best_models_filters = self.config.get('best_models_filters', ['0314', '0301', '1106', 'gpt-4'])

# Ensure that some 'bad' or unsupported models are excluded:
bad_models = [m for m in model_list if any(bm in m for bm in bad_models_filters)]
for bad_model in bad_models:
if bad_model in model_list:
model_list.remove(bad_model)
# model_list.append(bad_model)



# Ensure that the best models are at the top of the list:
best_models = [m for m in model_list if '0314' in m or '0301' in m or '1106' in m or 'gpt-4' in m]
best_models = [m for m in model_list if any(bm in m for bm in best_models_filters)]
model_list = best_models + [m for m in model_list if m not in best_models]

# Ensure that the very best models are at the top of the list:
Expand Down Expand Up @@ -211,9 +215,12 @@ def _personality_selection(self):
def _fix_imports(self):
aprint("Setting up fix imports UI.")

# Get app configuration:
config = AppConfiguration('omega')

# Create a QLabel instance
self.fix_imports_checkbox = QCheckBox("Fix missing imports")
self.fix_imports_checkbox.setChecked(True)
self.fix_imports_checkbox.setChecked(config.get('fix_missing_imports', True))
self.fix_imports_checkbox.setToolTip(
"Uses LLM to check for missing imports.\n"
"This involves a LLM call which can incur additional\n"
Expand All @@ -225,9 +232,12 @@ def _fix_imports(self):
def _fix_bad_version_calls(self):
aprint("Setting up bad version imports UI.")

# Get app configuration:
config = AppConfiguration('omega')

# Create a QLabel instance
self.fix_bad_calls_checkbox = QCheckBox("Fix bad function calls")
self.fix_bad_calls_checkbox.setChecked(True)
self.fix_bad_calls_checkbox.setChecked(config.get('fix_bad_calls', True))
self.fix_bad_calls_checkbox.setToolTip("Uses LLM to fix function calls.\n"
"When turned on, this detects wrong function calls, \n"
"possibly because of library version mismatch and fixes,"
Expand All @@ -241,10 +251,13 @@ def _fix_bad_version_calls(self):
def _install_missing_packages(self):
aprint("Setting up install missing packages UI.")

# Get app configuration:
config = AppConfiguration('omega')

# Create a QLabel instance
self.install_missing_packages_checkbox = QCheckBox(
"Install missing packages")
self.install_missing_packages_checkbox.setChecked(True)
self.install_missing_packages_checkbox.setChecked(config.get('install_missing_packages', True))
self.install_missing_packages_checkbox.setToolTip(
"Uses LLM to figure out which packages to install.\n"
"This involves a LLM call which can incur additional\n"
Expand All @@ -255,10 +268,13 @@ def _install_missing_packages(self):
def _autofix_mistakes(self):
aprint("Setting up autofix mistakes UI.")

# Get app configuration:
config = AppConfiguration('omega')

# Create a QLabel instance
self.autofix_mistakes_checkbox = QCheckBox(
"Autofix coding mistakes")
self.autofix_mistakes_checkbox.setChecked(False)
self.autofix_mistakes_checkbox.setChecked(config.get('autofix_mistakes', True))
self.autofix_mistakes_checkbox.setToolTip(
"When checked Omega will try to fix on its own coding mistakes\n"
"when processing data and interacting with the napari viewer.\n"
Expand All @@ -272,10 +288,13 @@ def _autofix_mistakes(self):
def _autofix_widgets(self):
aprint("Setting up autofix widgets UI.")

# Get app configuration:
config = AppConfiguration('omega')

# Create a QLabel instance
self.autofix_widgets_checkbox = QCheckBox(
"Autofix widget coding mistakes")
self.autofix_widgets_checkbox.setChecked(False)
self.autofix_widgets_checkbox.setChecked(config.get('autofix_widgets', True))
self.autofix_widgets_checkbox.setToolTip(
"When checked Omega will try to fix its own \n"
"coding mistakes when making widgets. \n"
Expand All @@ -285,13 +304,49 @@ def _autofix_widgets(self):
# Add the install_missing_packages checkbox to the layout:
self.layout.addWidget(self.autofix_widgets_checkbox)

def _tutorial_mode(self):
    """Build the 'Tutorial/Didactic mode' checkbox and add it to the layout.

    When enabled, Omega asks clarifying questions, proposes multiple
    options, and behaves didactically (passed to the chat server as
    ``be_didactic``).
    """
    aprint("Setting up tutorial mode UI.")

    # Get app configuration:
    config = AppConfiguration('omega')

    # Checkbox controlling tutorial/didactic behaviour:
    self.tutorial_mode_checkbox = QCheckBox(
        "Tutorial/Didactic mode")
    # NOTE(review): the config key 'tutorial_mode_checkbox' looks like a
    # copy-paste of the widget attribute name; sibling options use plain
    # keys (e.g. 'verbose', 'fix_missing_imports') — confirm intended key.
    self.tutorial_mode_checkbox.setChecked(config.get('tutorial_mode_checkbox', False))
    # Fixed grammar in the user-facing tooltip ("will actively asks" -> "will actively ask"):
    self.tutorial_mode_checkbox.setToolTip(
        "When checked Omega will actively ask questions \n"
        "to clarify and disambiguate the request, and \n"
        "will propose multiple options and be didactic. ")
    # Add the tutorial mode checkbox to the layout:
    self.layout.addWidget(self.tutorial_mode_checkbox)

def _save_chats_as_notebooks(self):
    """Build the 'Save chats as Jupyter notebooks' checkbox and add it to the layout.

    The checkbox state is read in ``_on_click`` and forwarded to the chat
    server as ``save_chats_as_notebooks``. Default comes from the 'omega'
    app configuration (True when unset).
    """
    aprint("Setting up save notebooks UI.")

    # Get app configuration:
    config = AppConfiguration('omega')

    # Checkbox controlling whether chats are saved as Jupyter notebooks
    # (original comment said "QLabel" — it is a QCheckBox):
    self.save_chats_as_notebooks = QCheckBox(
        "Save chats as Jupyter notebooks")
    self.save_chats_as_notebooks.setChecked(config.get('save_chats_as_notebooks', True))
    self.save_chats_as_notebooks.setToolTip(
        "When checked Omega will save the chats as Jupyter notebooks \n"
        "by default in a folder on the user's desktop.")
    # Add the save-chats-as-notebooks checkbox to the layout:
    self.layout.addWidget(self.save_chats_as_notebooks)

def _verbose(self):
aprint("Setting up verbose UI.")

# Get app configuration:
config = AppConfiguration('omega')

# Create a QLabel instance
self.verbose_checkbox = QCheckBox(
"High console verbosity")
self.verbose_checkbox.setChecked(False)
self.verbose_checkbox.setChecked(config.get('verbose', False))
self.verbose_checkbox.setToolTip(
"High level of verbosity in the console\n"
"This includes a lot of internal logging\n"
Expand Down Expand Up @@ -328,9 +383,26 @@ def _on_click(self):
self.creativity_combo_box.currentText()])
tool_temperature = 0.01*temperature

# Model selected:
main_llm_model_name = self.model_combo_box.currentText()

# Warn users with a modal window that the selected model might be sub-optimal:
if 'gpt-4' not in main_llm_model_name:
show_warning_dialog(f"You have selected this model: "
f"'{main_llm_model_name}'This is not a GPT4-level model. "
f"Omega's cognitive and coding abilities will be degraded. "
f"Please visit <a href='https://github.com/royerlab/napari-chatgpt/wiki/OpenAIKey'>our wiki</a> "
f"for information on how to gain access to GPT4.")

# Set tool LLM model name via configuration file.
tool_llm_model_name = self.config.get('tool_llm_model_name', 'same')
if tool_llm_model_name.strip() == 'same':
tool_llm_model_name = main_llm_model_name

from napari_chatgpt.chat_server.chat_server import start_chat_server
self.server = start_chat_server(self.viewer,
llm_model_name=self.model_combo_box.currentText(),
main_llm_model_name=main_llm_model_name,
tool_llm_model_name=tool_llm_model_name,
temperature=temperature,
tool_temperature=tool_temperature,
memory_type=self.memory_type_combo_box.currentText(),
Expand All @@ -340,6 +412,8 @@ def _on_click(self):
fix_bad_calls=self.fix_bad_calls_checkbox.isChecked(),
autofix_mistakes=self.autofix_mistakes_checkbox.isChecked(),
autofix_widget=self.autofix_widgets_checkbox.isChecked(),
be_didactic=self.tutorial_mode_checkbox.isChecked(),
save_chats_as_notebooks=self.save_chats_as_notebooks.isChecked(),
verbose=self.verbose_checkbox.isChecked()
)

Expand Down
19 changes: 17 additions & 2 deletions src/napari_chatgpt/chat_server/callbacks/callbacks_handle_chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,23 @@
from arbol import aprint
from langchain.callbacks.base import AsyncCallbackHandler
from langchain.schema import AgentFinish, AgentAction, LLMResult, BaseMessage
from starlette.websockets import WebSocket

from napari_chatgpt.chat_server.chat_response import ChatResponse
from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile
from napari_chatgpt.utils.strings.camel_case_to_normal import \
camel_case_to_lower_case


class ChatCallbackHandler(AsyncCallbackHandler):
"""Callback handler for chat responses."""

def __init__(self, websocket, verbose: bool = False):
self.websocket = websocket
def __init__(self,
websocket: WebSocket,
notebook: JupyterNotebookFile,
verbose: bool = False):
self.websocket: WebSocket = websocket
self.notebook: JupyterNotebookFile = notebook
self.verbose = verbose
self.last_tool_used = ''
self.last_tool_input = ''
Expand Down Expand Up @@ -100,6 +106,11 @@ async def on_tool_error(
resp = ChatResponse(sender="agent", message=message, type="error")
asyncio.run(self.websocket.send_json(resp.dict()))

if self.notebook:
self.notebook.add_markdown_cell("### Omega:\n"+
"Error:\n"+
message)

async def on_text(self, text: str, **kwargs: Any) -> Any:
"""Run on arbitrary text."""
if self.verbose:
Expand All @@ -121,6 +132,10 @@ async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
resp = ChatResponse(sender="agent", message=message, type="action")
await self.websocket.send_json(resp.dict())

if self.notebook:
self.notebook.add_markdown_cell("### Omega:\n"+
message)

async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run on agent end."""
if self.verbose:
Expand Down
Loading
Loading