diff --git a/README.md b/README.md
index 078a5f5..09369c6 100644
--- a/README.md
+++ b/README.md
@@ -138,7 +138,7 @@ Install [napari](https://napari.org) in the environment using conda-forge: (very important!)

     conda install -c conda-forge napari pyqt

-**Or**, with pip:
+**Or**, with pip (Linux, Windows, or Intel Macs; not recommended on Apple M1/M2!):

     pip install napari

@@ -156,6 +156,7 @@ To install the latest development version (not recommended for end-users):

     git clone https://github.com/royerlab/napari-chatgpt.git
    cd napari-chatgpt
    pip install -e .
+    pip install -e ".[testing]"

 or:

diff --git a/setup.cfg b/setup.cfg
index 2e3b13f..db83c01 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = napari-chatgpt
-version = v2024.1.23
+version = v2024.2.1
 description = A napari plugin to process and analyse images with chatGPT.
 long_description = file: README.md
 long_description_content_type = text/markdown
@@ -57,7 +57,8 @@ install_requires =
     tabulate
     numba
     imageio[ffmpeg,pyav]
-
+    notebook
+    nbformat
 python_requires = >=3.9

diff --git a/src/napari_chatgpt/_widget.py b/src/napari_chatgpt/_widget.py
index 448f5c6..46d66de 100644
--- a/src/napari_chatgpt/_widget.py
+++ b/src/napari_chatgpt/_widget.py
@@ -9,26 +9,26 @@ import sys
 from typing import TYPE_CHECKING, List

-from napari_chatgpt.chat_server.chat_server import NapariChatServer
-from napari_chatgpt.utils.api_keys.api_key import set_api_key
-from napari_chatgpt.utils.ollama.ollama import is_ollama_running, \
-    get_ollama_models
-from napari_chatgpt.utils.openai.model_list import get_openai_model_list
-from napari_chatgpt.utils.python.installed_packages import \
-    is_package_installed
 from PyQt5.QtCore import Qt
 from PyQt5.QtWidgets import QApplication, QLabel, QCheckBox
 from PyQt5.QtWidgets import QVBoxLayout, QComboBox
 from napari.viewer import Viewer
 from qtpy.QtWidgets import QPushButton, QWidget
-
-
+from napari_chatgpt.chat_server.chat_server import NapariChatServer
+from napari_chatgpt.utils.configuration.app_configuration import \
+    AppConfiguration
+from napari_chatgpt.utils.ollama.ollama import is_ollama_running, \
+    get_ollama_models
+from napari_chatgpt.utils.openai.model_list import get_openai_model_list
+from napari_chatgpt.utils.python.installed_packages import \
+    is_package_installed
+from napari_chatgpt.utils.qt.warning_dialog import show_warning_dialog

 if TYPE_CHECKING:
     pass

-from arbol import aprint, asection
+from arbol import aprint

 _creativity_mapping = {}
 _creativity_mapping['normal'] = 0.0
@@ -46,6 +46,9 @@ def __init__(self, napari_viewer):
         super().__init__()
         aprint("OmegaQWidget instantiated!")

+        # Get app configuration:
+        self.config = AppConfiguration('omega')
+
         # Napari viewer instance:
         self.viewer = napari_viewer
@@ -68,6 +71,8 @@ def __init__(self, napari_viewer):
         self._install_missing_packages()
         self._autofix_mistakes()
         self._autofix_widgets()
+        self._tutorial_mode()
+        self._save_chats_as_notebooks()
         self._verbose()

         self._start_omega_button()
@@ -76,6 +81,7 @@ def __init__(self, napari_viewer):
         self.setLayout(self.layout)

     def _model_selection(self):
+
         aprint("Setting up model selection UI.")

         # Create a QLabel instance
@@ -86,17 +92,11 @@ def _model_selection(self):
         self.model_combo_box = QComboBox()
         # Set tooltip for the combo box
         self.model_combo_box.setToolTip(
-            "Choose an LLM model. Best models are GPT4 and GPT3.5, \n"
-            "with Claude a bit behind, other models are experimental\n"
-            "and unfortunately barely usable. WARNING: recent GPT models\n"
-            "have poor coding performance (0613), avoid them!\n"
-            "Models at the top of list are better!")
-
-        # Model list:
-        model_list: List[str] = []
+            "Choose an LLM model. The best models are GPT-4 level models, \n"
+            "other models are less competent. \n")

         # Add OpenAI models to the combo box:
-        model_list = get_openai_model_list(verbose=True)
+        model_list: List[str] = list(get_openai_model_list(verbose=True))

         if is_package_installed('anthropic'):
             # Add Anthropic models to the combo box:
@@ -111,17 +111,21 @@

         # Postprocess list:

-        # Ensure that some 'bad' or unsuported models are excluded:
-        bad_models = [m for m in model_list if '0613' in m or 'vision' in m]
+        # Get list of bad models for the main LLM:
+        bad_models_filters = self.config.get('bad_models_filters', ['0613', 'vision'])
+
+        # Get list of best models for the main LLM:
+        best_models_filters = self.config.get('best_models_filters', ['0314', '0301', '1106', 'gpt-4'])
+
+        # Ensure that some 'bad' or unsupported models are excluded:
+        bad_models = [m for m in model_list if any(bm in m for bm in bad_models_filters)]
         for bad_model in bad_models:
             if bad_model in model_list:
                 model_list.remove(bad_model)
                 # model_list.append(bad_model)
-
-        # Ensure that the best models are at the top of the list:
-        best_models = [m for m in model_list if '0314' in m or '0301' in m or '1106' in m or 'gpt-4' in m]
+        best_models = [m for m in model_list if any(bm in m for bm in best_models_filters)]
         model_list = best_models + [m for m in model_list if m not in best_models]

         # Ensure that the very best models are at the top of the list:
@@ -211,9 +215,12 @@ def _personality_selection(self):
     def _fix_imports(self):
         aprint("Setting up fix imports UI.")

+        # Get app configuration:
+        config = AppConfiguration('omega')
+
         # Create a QLabel instance
         self.fix_imports_checkbox = QCheckBox("Fix missing imports")
-        self.fix_imports_checkbox.setChecked(True)
+        self.fix_imports_checkbox.setChecked(config.get('fix_missing_imports', True))
         self.fix_imports_checkbox.setToolTip(
             "Uses LLM to check for missing imports.\n"
             "This involves a LLM call which can incur additional\n"
@@ -225,9 +232,12 @@ def _fix_imports(self):
     def _fix_bad_version_calls(self):
         aprint("Setting up bad version imports UI.")

+        # Get app configuration:
+        config = AppConfiguration('omega')
+
         # Create a QLabel instance
         self.fix_bad_calls_checkbox = QCheckBox("Fix bad function calls")
-        self.fix_bad_calls_checkbox.setChecked(True)
+        self.fix_bad_calls_checkbox.setChecked(config.get('fix_bad_calls', True))
         self.fix_bad_calls_checkbox.setToolTip("Uses LLM to fix function calls.\n"
                                                "When turned on, this detects wrong function calls, \n"
                                                "possibly because of library version mismatch and fixes,"
@@ -241,10 +251,13 @@ def _fix_bad_version_calls(self):
     def _install_missing_packages(self):
         aprint("Setting up install missing packages UI.")

+        # Get app configuration:
+        config = AppConfiguration('omega')
+
         # Create a QLabel instance
         self.install_missing_packages_checkbox = QCheckBox(
             "Install missing packages")
-        self.install_missing_packages_checkbox.setChecked(True)
+        self.install_missing_packages_checkbox.setChecked(config.get('install_missing_packages', True))
         self.install_missing_packages_checkbox.setToolTip(
             "Uses LLM to figure out which packages to install.\n"
             "This involves a LLM call which can incur additional\n"
@@ -255,10 +268,13 @@ def _install_missing_packages(self):
     def _autofix_mistakes(self):
         aprint("Setting up autofix mistakes UI.")

+        # Get app configuration:
+        config = AppConfiguration('omega')
+
         # Create a QLabel instance
         self.autofix_mistakes_checkbox = QCheckBox(
             "Autofix coding mistakes")
-        self.autofix_mistakes_checkbox.setChecked(False)
+        self.autofix_mistakes_checkbox.setChecked(config.get('autofix_mistakes', True))
         self.autofix_mistakes_checkbox.setToolTip(
             "When checked Omega will try to fix on its own coding mistakes\n"
             "when processing data and interacting with the napari viewer.\n"
@@ -272,10 +288,13 @@ def _autofix_mistakes(self):
     def _autofix_widgets(self):
         aprint("Setting up autofix widgets UI.")

+        # Get app configuration:
+        config = AppConfiguration('omega')
+
         # Create a QLabel instance
         self.autofix_widgets_checkbox = QCheckBox(
             "Autofix widget coding mistakes")
-        self.autofix_widgets_checkbox.setChecked(False)
+        self.autofix_widgets_checkbox.setChecked(config.get('autofix_widgets', True))
         self.autofix_widgets_checkbox.setToolTip(
             "When checked Omega will try to fix its own \n"
             "coding mistakes when making widgets. \n"
@@ -285,13 +304,49 @@ def _autofix_widgets(self):
         # Add the install_missing_packages checkbox to the layout:
         self.layout.addWidget(self.autofix_widgets_checkbox)

+    def _tutorial_mode(self):
+        aprint("Setting up tutorial mode UI.")
+
+        # Get app configuration:
+        config = AppConfiguration('omega')
+
+        # Create a QCheckBox instance
+        self.tutorial_mode_checkbox = QCheckBox(
+            "Tutorial/Didactic mode")
+        self.tutorial_mode_checkbox.setChecked(config.get('tutorial_mode_checkbox', False))
+        self.tutorial_mode_checkbox.setToolTip(
+            "When checked Omega will actively ask questions \n"
+            "to clarify and disambiguate the request, and \n"
+            "will propose multiple options and be didactic. ")
+        # Add the tutorial_mode checkbox to the layout:
+        self.layout.addWidget(self.tutorial_mode_checkbox)
+
+    def _save_chats_as_notebooks(self):
+        aprint("Setting up save notebooks UI.")
+
+        # Get app configuration:
+        config = AppConfiguration('omega')
+
+        # Create a QCheckBox instance
+        self.save_chats_as_notebooks = QCheckBox(
+            "Save chats as Jupyter notebooks")
+        self.save_chats_as_notebooks.setChecked(config.get('save_chats_as_notebooks', True))
+        self.save_chats_as_notebooks.setToolTip(
+            "When checked Omega will save the chats as Jupyter notebooks \n"
+            "by default in a folder on the user's desktop.")
+        # Add the save_chats_as_notebooks checkbox to the layout:
+        self.layout.addWidget(self.save_chats_as_notebooks)
+
     def _verbose(self):
         aprint("Setting up verbose UI.")

+        # Get app configuration:
+        config = AppConfiguration('omega')
+
         # Create a QLabel instance
         self.verbose_checkbox = QCheckBox(
             "High console verbosity")
-        self.verbose_checkbox.setChecked(False)
+        self.verbose_checkbox.setChecked(config.get('verbose', False))
         self.verbose_checkbox.setToolTip(
             "High level of verbosity in the console\n"
             "This includes a lot of internal logging\n"
@@ -328,9 +383,26 @@ def _on_click(self):
             self.creativity_combo_box.currentText()])
         tool_temperature = 0.01*temperature

+        # Model selected:
+        main_llm_model_name = self.model_combo_box.currentText()
+
+        # Warn users with a modal window that the selected model might be sub-optimal:
+        if 'gpt-4' not in main_llm_model_name:
+            show_warning_dialog(f"You have selected this model: "
+                                f"'{main_llm_model_name}'. This is not a GPT4-level model. "
+                                f"Omega's cognitive and coding abilities will be degraded. "
+                                f"Please visit our wiki "
+                                f"for information on how to gain access to GPT4.")
+
+        # Set tool LLM model name via configuration file:
+        tool_llm_model_name = self.config.get('tool_llm_model_name', 'same')
+        if tool_llm_model_name.strip() == 'same':
+            tool_llm_model_name = main_llm_model_name
+
         from napari_chatgpt.chat_server.chat_server import start_chat_server
         self.server = start_chat_server(self.viewer,
-                                        llm_model_name=self.model_combo_box.currentText(),
+                                        main_llm_model_name=main_llm_model_name,
+                                        tool_llm_model_name=tool_llm_model_name,
                                         temperature=temperature,
                                         tool_temperature=tool_temperature,
                                         memory_type=self.memory_type_combo_box.currentText(),
@@ -340,6 +412,8 @@ def _on_click(self):
                                         fix_bad_calls=self.fix_bad_calls_checkbox.isChecked(),
                                         autofix_mistakes=self.autofix_mistakes_checkbox.isChecked(),
                                         autofix_widget=self.autofix_widgets_checkbox.isChecked(),
+                                        be_didactic=self.tutorial_mode_checkbox.isChecked(),
+                                        save_chats_as_notebooks=self.save_chats_as_notebooks.isChecked(),
                                         verbose=self.verbose_checkbox.isChecked()
                                         )

diff --git a/src/napari_chatgpt/chat_server/callbacks/callbacks_handle_chat.py b/src/napari_chatgpt/chat_server/callbacks/callbacks_handle_chat.py
index 4fce940..5f1eb2a 100644
--- a/src/napari_chatgpt/chat_server/callbacks/callbacks_handle_chat.py
+++ b/src/napari_chatgpt/chat_server/callbacks/callbacks_handle_chat.py
@@ -6,8 +6,10 @@
 from arbol import aprint
 from langchain.callbacks.base import AsyncCallbackHandler
 from langchain.schema import AgentFinish, AgentAction, LLMResult, BaseMessage
+from starlette.websockets import WebSocket

 from napari_chatgpt.chat_server.chat_response import ChatResponse
+from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile
 from napari_chatgpt.utils.strings.camel_case_to_normal import \
     camel_case_to_lower_case

@@ -15,8 +17,12 @@
 class ChatCallbackHandler(AsyncCallbackHandler):
     """Callback handler for chat responses."""

-    def __init__(self, websocket, verbose: bool = False):
-        self.websocket = websocket
+    def __init__(self,
+                 websocket: WebSocket,
+                 notebook: JupyterNotebookFile,
+                 verbose: bool = False):
+        self.websocket: WebSocket = websocket
+        self.notebook: JupyterNotebookFile = notebook
         self.verbose = verbose
         self.last_tool_used = ''
         self.last_tool_input = ''
@@ -100,6 +106,11 @@ async def on_tool_error(
         resp = ChatResponse(sender="agent", message=message, type="error")
         asyncio.run(self.websocket.send_json(resp.dict()))

+        if self.notebook:
+            self.notebook.add_markdown_cell("### Omega:\n" +
+                                            "Error:\n" +
+                                            message)
+
     async def on_text(self, text: str, **kwargs: Any) -> Any:
         """Run on arbitrary text."""
         if self.verbose:
@@ -121,6 +132,10 @@ async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
         resp = ChatResponse(sender="agent", message=message, type="action")
         await self.websocket.send_json(resp.dict())

+        if self.notebook:
+            self.notebook.add_markdown_cell("### Omega:\n" +
+                                            message)
+
     async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
         """Run on agent end."""
         if self.verbose:

diff --git a/src/napari_chatgpt/chat_server/callbacks/callbacks_handler_tool.py b/src/napari_chatgpt/chat_server/callbacks/callbacks_handler_tool.py
index d7c655d..6d8b525 100644
--- a/src/napari_chatgpt/chat_server/callbacks/callbacks_handler_tool.py
+++ b/src/napari_chatgpt/chat_server/callbacks/callbacks_handler_tool.py
@@ -3,16 +3,22 @@
 from arbol import aprint
 from langchain.callbacks.base import BaseCallbackHandler
 from langchain.schema import AgentFinish, AgentAction, LLMResult
+from starlette.websockets import WebSocket

 from napari_chatgpt.chat_server.chat_response import ChatResponse
 from napari_chatgpt.utils.async_utils.run_async import run_async
+from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile


 class ToolCallbackHandler(BaseCallbackHandler):
     """Callback handler for tool responses."""

-    def __init__(self, websocket, verbose: bool = False):
+    def __init__(self,
+                 websocket: WebSocket,
+                 notebook: JupyterNotebookFile,
+                 verbose: bool = False):
         self.websocket = websocket
+        self.notebook = notebook
         self.verbose = verbose
         self.last_internal_tool_response = None

@@ -47,14 +53,21 @@ def on_chain_start(
     def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> Any:
         """Run when chain ends running."""
-        self.last_internal_tool_response = outputs['text']
+        tool_response = outputs['text']
+        self.last_internal_tool_response = tool_response
         if self.verbose:
-            aprint(f"TOOL on_chain_end: {self.last_internal_tool_response}")
+            aprint(f"TOOL on_chain_end: {tool_response}")
         resp = ChatResponse(sender="agent",
-                            message=self.last_internal_tool_response,
+                            message=tool_response,
                             type="tool_result")
         run_async(self.websocket.send_json, resp.dict())

+        if self.notebook:
+            self.notebook.add_markdown_cell("### Omega:\n" +
+                                            "Tool response:\n" +
+                                            tool_response)
+
     def on_chain_error(
             self, error: Union[Exception, KeyboardInterrupt], **kwargs: Any
     ) -> Any:
@@ -84,9 +97,17 @@ def on_tool_end(self, output: str, **kwargs: Any) -> Any:
         if output.startswith('Error:') or 'Failure' in output:
             resp = ChatResponse(sender="agent", message=output, type="error")
             run_async(self.websocket.send_json, resp.dict())
+            if self.notebook:
+                self.notebook.add_markdown_cell("### Omega:\n" +
+                                                "Error:\n" +
+                                                output)
         else:
             resp = ChatResponse(sender="agent", message=output,
                                 type="tool_result")
             run_async(self.websocket.send_json, resp.dict())
+            if self.notebook:
+                self.notebook.add_markdown_cell("### Omega:\n" +
+                                                "Tool result:\n" +
+                                                output)

     def on_tool_error(
@@ -100,6 +121,10 @@ def on_tool_error(
         message = f"Failed because:\n'{error_message}'\nException: '{error_type}'\n"
         resp = ChatResponse(sender="agent", message=message, type="error")
         run_async(self.websocket.send_json, resp.dict())
+        if self.notebook:
+            self.notebook.add_markdown_cell("### Omega:\n" +
+                                            "Error:\n" +
+                                            message)

     def on_text(self, text: str, **kwargs: Any) -> Any:
         """Run on arbitrary text."""

diff --git a/src/napari_chatgpt/chat_server/chat_server.py b/src/napari_chatgpt/chat_server/chat_server.py
index 7f01dc1..558af89 100644
--- a/src/napari_chatgpt/chat_server/chat_server.py
+++ b/src/napari_chatgpt/chat_server/chat_server.py
@@ -29,6 +29,9 @@
 from napari_chatgpt.omega.omega_init import initialize_omega_agent
 from napari_chatgpt.utils.api_keys.api_key import set_api_key
+from napari_chatgpt.utils.configuration.app_configuration import \
+    AppConfiguration
+from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile
 from napari_chatgpt.utils.openai.default_model import \
     get_default_openai_model_name
 from napari_chatgpt.utils.python.installed_packages import is_package_installed
@@ -36,8 +39,10 @@

 class NapariChatServer:
     def __init__(self,
+                 notebook: JupyterNotebookFile,
                  napari_bridge: NapariBridge,
-                 llm_model_name: str = get_default_openai_model_name(),
+                 main_llm_model_name: str = get_default_openai_model_name(),
+                 tool_llm_model_name: str = None,
                  temperature: float = 0.01,
                  tool_temperature: float = 0.01,
                  memory_type: str = 'standard',
@@ -47,6 +52,7 @@ def __init__(self,
                  fix_bad_calls: bool = True,
                  autofix_mistakes: bool = False,
                  autofix_widget: bool = False,
+                 be_didactic: bool = False,
                  verbose: bool = False
                  ):

@@ -54,12 +60,21 @@ def __init__(self,
         self.running = True
         self.uvicorn_server = None

+        # Notebook:
+        self.notebook: JupyterNotebookFile = notebook
+
         # Napari bridge:
-        self.napari_bridge = napari_bridge
+        self.napari_bridge: NapariBridge = napari_bridge

         # Instantiate FastAPI:
         self.app = FastAPI()

+        # get configuration
+        config = AppConfiguration('omega')
+
+        # port:
+        self.port = config.get('port', 9000)
+
         # Mount static files:
         static_files_path = os.path.join(
             os.path.dirname(os.path.abspath(__file__)),
@@ -89,19 +104,26 @@ async def get(request: Request):
         async def websocket_endpoint(websocket: WebSocket):
             await websocket.accept()

+            # restart a notebook:
+            if self.notebook:
+                self.notebook.restart()
+
             # Chat callback handler:
-            chat_callback_handler = ChatCallbackHandler(websocket,
+            chat_callback_handler = ChatCallbackHandler(websocket=websocket,
+                                                        notebook=self.notebook,
                                                         verbose=verbose)

             # Tool callback handler:
-            tool_callback_handler = ToolCallbackHandler(websocket,
+            tool_callback_handler = ToolCallbackHandler(websocket=websocket,
+                                                        notebook=self.notebook,
                                                         verbose=verbose)

             # Memory callback handler:
             memory_callback_handler = ArbolCallbackHandler('Memory')

             main_llm, memory_llm, tool_llm, max_token_limit = instantiate_LLMs(
-                llm_model_name=llm_model_name,
+                main_llm_model_name=main_llm_model_name,
+                tool_llm_model_name=tool_llm_model_name,
                 temperature=temperature,
                 tool_temperature=tool_temperature,
                 chat_callback_handler=chat_callback_handler,
@@ -133,12 +155,13 @@ async def websocket_endpoint(websocket: WebSocket):
             agent_chain = initialize_omega_agent(
                 to_napari_queue=napari_bridge.to_napari_queue,
                 from_napari_queue=napari_bridge.from_napari_queue,
-                llm_model_name=llm_model_name,
+                main_llm_model_name=main_llm_model_name,
                 main_llm=main_llm,
                 tool_llm=tool_llm,
                 is_async=True,
                 chat_callback_handler=chat_callback_handler,
                 tool_callback_handler=tool_callback_handler,
+                notebook=self.notebook,
                 has_human_input_tool=False,
                 memory=memory,
                 agent_personality=agent_personality,
@@ -147,6 +170,7 @@ async def websocket_endpoint(websocket: WebSocket):
                 fix_bad_calls=fix_bad_calls,
                 autofix_mistakes=autofix_mistakes,
                 autofix_widget=autofix_widget,
+                be_didactic=be_didactic,
                 verbose=verbose
             )

@@ -161,6 +185,8 @@ async def websocket_endpoint(websocket: WebSocket):
                 resp = ChatResponse(sender="user", message=question)
                 await websocket.send_json(resp.dict())
+                if self.notebook:
+                    self.notebook.add_markdown_cell("### User:\n" + question)

                 aprint(f"Human Question/Request:\n{question}\n\n")

@@ -185,6 +211,17 @@ async def websocket_endpoint(websocket: WebSocket):
                                         type="final")
                 await websocket.send_json(end_resp.dict())

+                if self.notebook:
+                    # Add agent response to notebook:
+                    self.notebook.add_markdown_cell("### Omega:\n" + result['output'])
+
+                    # Add snapshot to notebook:
+                    self.notebook.take_snapshot()
+
+                    # write notebook:
+                    self.notebook.write()
+
                 # Current chat history:
                 current_chat_history = get_buffer_string(
                     result['chat_history'])
@@ -208,7 +245,7 @@ async def websocket_endpoint(websocket: WebSocket):
                 dialog_counter += 1

     def _start_uvicorn_server(self, app):
-        config = Config(app, port=9000)
+        config = Config(app, port=self.port)
         self.uvicorn_server = Server(config=config)
         self.uvicorn_server.run()
@@ -223,7 +260,8 @@ def stop(self):

 def start_chat_server(viewer: napari.Viewer = None,
-                      llm_model_name: str = get_default_openai_model_name(),
+                      main_llm_model_name: str = get_default_openai_model_name(),
+                      tool_llm_model_name: str = None,
                       temperature: float = 0.01,
                      tool_temperature: float = 0.01,
                      memory_type: str = 'standard',
@@ -233,28 +271,41 @@ def start_chat_server(viewer: napari.Viewer = None,
                      fix_bad_calls: bool = True,
                      autofix_mistakes: bool = False,
                      autofix_widget: bool = False,
+                     be_didactic: bool = False,
+                     save_chats_as_notebooks: bool = False,
                      verbose: bool = False
                      ):
+
+    # get configuration:
+    config = AppConfiguration('omega')
+
     # Set OpenAI key if necessary:
-    if 'gpt' in llm_model_name and '4all' not in llm_model_name and is_package_installed(
+    if ('gpt' in main_llm_model_name or (tool_llm_model_name and 'gpt' in tool_llm_model_name)) and is_package_installed(
             'openai'):
         set_api_key('OpenAI')

-    # Set Anthropic key if necessary:
-    if 'claude' in llm_model_name and is_package_installed('anthropic'):
+    if ('claude' in main_llm_model_name or (tool_llm_model_name and 'claude' in tool_llm_model_name)) and is_package_installed('anthropic'):
         set_api_key('Anthropic')

     # Instantiates napari viewer:
     if not viewer:
         viewer = napari.Viewer()

+    # Instantiates a notebook:
+    notebook = JupyterNotebookFile(notebook_folder_path=config.get('notebook_path')) if save_chats_as_notebooks else None
+
     # Instantiates a napari bridge:
-    bridge = NapariBridge(viewer)
+    bridge = NapariBridge(viewer=viewer)
+
+    # Register snapshot function:
+    if notebook:
+        notebook.register_snapshot_function(bridge.take_snapshot)

     # Instantiates server:
-    chat_server = NapariChatServer(bridge,
-                                   llm_model_name=llm_model_name,
+    chat_server = NapariChatServer(notebook=notebook,
+                                   napari_bridge=bridge,
+                                   main_llm_model_name=main_llm_model_name,
+                                   tool_llm_model_name=tool_llm_model_name,
                                    temperature=temperature,
                                    tool_temperature=tool_temperature,
                                    memory_type=memory_type,
@@ -264,6 +315,7 @@ def start_chat_server(viewer: napari.Viewer = None,
                                    fix_bad_calls=fix_bad_calls,
                                    autofix_mistakes=autofix_mistakes,
                                    autofix_widget=autofix_widget,
+                                   be_didactic=be_didactic,
                                    verbose=verbose
                                    )

@@ -278,17 +330,20 @@ def server_thread_function():

     # function to open browser on page:
     def _open_browser():
-        url = "http://127.0.0.1:9000"
+        url = f"http://127.0.0.1:{chat_server.port}"
         webbrowser.open(url, new=0, autoraise=True)

     # open browser after delay of a few seconds:
-    QTimer.singleShot(2000, _open_browser)
+    if config.get('open_browser', True):
+        QTimer.singleShot(2000, _open_browser)

     # Return the server:
     return chat_server


 if __name__ == "__main__":
+
+    # Start chat server:
     start_chat_server()

     # Start qt event loop and wait for it to stop:

diff --git a/src/napari_chatgpt/llm/llms.py b/src/napari_chatgpt/llm/llms.py
index ca5ba17..52bc314 100644
--- a/src/napari_chatgpt/llm/llms.py
+++ b/src/napari_chatgpt/llm/llms.py
@@ -2,7 +2,8 @@
 from langchain.callbacks.manager import AsyncCallbackManager


-def instantiate_LLMs(llm_model_name: str,
+def instantiate_LLMs(main_llm_model_name: str,
+                     tool_llm_model_name: str,
                      temperature: float,
                      tool_temperature: float,
                      chat_callback_handler,
@@ -11,38 +12,41 @@ def instantiate_LLMs(llm_model_name: str,
                      verbose: bool = False
                      ):

-    aprint(f"Instantiating LLMs with model: '{llm_model_name}', t={temperature}, t_tool={tool_temperature}. ")
+    # If the tool LLM model name is not specified, then we use the same as for the main LLM:
+    if not tool_llm_model_name:
+        tool_llm_model_name = main_llm_model_name
+
+    aprint(f"Instantiating LLMs with models: main: '{main_llm_model_name}', tool: '{tool_llm_model_name}', t={temperature}, t_tool={tool_temperature}. ")
+
+    # Instantiate all three LLMs needed for the agent:
+    main_llm, max_token_limit = _instantiate_single_llm(main_llm_model_name, verbose, temperature, True, chat_callback_handler)
+    tool_llm, _ = _instantiate_single_llm(tool_llm_model_name, verbose, tool_temperature, True, tool_callback_handler)
+    memory_llm, _ = _instantiate_single_llm(main_llm_model_name, False, temperature, False, memory_callback_handler)
+
+    # Return the three LLMs and the maximum token limit for the main LLM:
+    return main_llm, memory_llm, tool_llm, max_token_limit
+
+
+def _instantiate_single_llm(llm_model_name: str,
+                            verbose: bool = False,
+                            temperature: float = 0.0,
+                            streaming: bool = True,
+                            callback_handler=None):
+
     if 'gpt-' in llm_model_name:

         # Import OpenAI ChatGPT model:
         from langchain.chat_models import ChatOpenAI

         # Instantiates Main LLM:
-        main_llm = ChatOpenAI(
+        llm = ChatOpenAI(
             model_name=llm_model_name,
             verbose=verbose,
-            streaming=True,
+            streaming=streaming,
             temperature=temperature,
             callback_manager=AsyncCallbackManager(
-                [chat_callback_handler])
-        )
-
-        # Instantiates Tool LLM:
-        tool_llm = ChatOpenAI(
-            model_name=llm_model_name,
-            verbose=verbose,
-            streaming=True,
-            temperature=tool_temperature,
-
-            callback_manager=AsyncCallbackManager([tool_callback_handler])
-        )
-
-        # Instantiates Memory LLM:
-        memory_llm = ChatOpenAI(
-            model_name=llm_model_name,
-            verbose=False,
-            temperature=temperature,
-            callback_manager=AsyncCallbackManager([memory_callback_handler])
+                [callback_handler])
         )

         if 'gpt-4-1106-preview' in llm_model_name or 'gpt-4-vision-preview' in llm_model_name:
@@ -60,7 +64,7 @@
         else:
             max_token_limit = 4096

-
+        return llm, max_token_limit

     elif 'claude' in llm_model_name:
@@ -70,37 +74,15 @@
         # Import Claude LLM:
         from langchain.chat_models import ChatAnthropic

         max_token_limit = 8000

         # Instantiates Main LLM:
-        main_llm = ChatAnthropic(
+        llm = ChatAnthropic(
             model=llm_model_name,
             verbose=verbose,
-            streaming=True,
+            streaming=streaming,
             temperature=temperature,
             max_tokens_to_sample=max_token_limit,
-            callback_manager=AsyncCallbackManager(
-                [chat_callback_handler])
-        )
-
-        # Instantiates Tool LLM:
-        tool_llm = ChatAnthropic(
-            model=llm_model_name,
-            verbose=verbose,
-            streaming=True,
-            temperature=tool_temperature,
-            max_tokens_to_sample=max_token_limit,
-            callback_manager=AsyncCallbackManager([tool_callback_handler])
-        )
-
-        # Instantiates Memory LLM:
-        memory_llm = ChatAnthropic(
-            model=llm_model_name,
-            verbose=False,
-            temperature=temperature,
-            max_tokens_to_sample=max_token_limit,
-            callback_manager=AsyncCallbackManager([memory_callback_handler])
-        )
-
-
+            callback_manager=AsyncCallbackManager([callback_handler]))
+        return llm, max_token_limit

     elif 'ollama' in llm_model_name:
@@ -115,33 +97,149 @@
         # Import Ollama LLM model:
         from napari_chatgpt.llm.ollama import OllamaFixed
         from napari_chatgpt.utils.ollama.ollama import start_ollama

         # Remove ollama prefix:
         llm_model_name = llm_model_name.removeprefix('ollama_')

         # start Ollama server:
         start_ollama()

         # Instantiates Main LLM:
-        main_llm = OllamaFixed(
+        llm = OllamaFixed(
             base_url="http://localhost:11434",
             model=llm_model_name,
             verbose=verbose,
+            streaming=streaming,
             temperature=temperature,
             callback_manager=AsyncCallbackManager(
-                [chat_callback_handler])
-        )
-
-        # Instantiates Tool LLM:
-        tool_llm = OllamaFixed(
-            base_url="http://localhost:11434",
-            model=llm_model_name,
-            verbose=verbose,
-            temperature=tool_temperature,
-            callback_manager=AsyncCallbackManager([tool_callback_handler])
-        )
-
-        # Instantiates Memory LLM:
-        memory_llm = OllamaFixed(
-            base_url="http://localhost:11434",
-            model=llm_model_name,
-            verbose=False,
-            temperature=temperature,
-            callback_manager=AsyncCallbackManager([memory_callback_handler])
+                [callback_handler])
         )

         max_token_limit = 4096

-    return main_llm, memory_llm, tool_llm, max_token_limit
+    return llm, max_token_limit

diff --git a/src/napari_chatgpt/omega/napari_bridge.py b/src/napari_chatgpt/omega/napari_bridge.py
index 7bebfba..247342f 100644
--- a/src/napari_chatgpt/omega/napari_bridge.py
+++ b/src/napari_chatgpt/omega/napari_bridge.py
@@ -3,6 +3,7 @@

 import napari
 import napari.viewer
+from PIL.Image import fromarray
 from arbol import aprint, asection
 from napari import Viewer
 from napari.qt.threading import thread_worker
@@ -10,6 +11,7 @@
 from napari_chatgpt.omega.tools.special.exception_catcher_tool import \
     enqueue_exception
 from napari_chatgpt.utils.napari.napari_viewer_info import get_viewer_info
+from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile
 from napari_chatgpt.utils.python.exception_guard import ExceptionGuard

 # global Variable to exchange information with the viewer:
@@ -50,10 +52,14 @@ def qt_code_executor(fun: Callable[[napari.Viewer], None]):
         def omega_napari_worker(to_napari_queue: Queue,
                                 from_napari_queue: Queue):
             while True:
+
+                # get code from the queue:
                 code = to_napari_queue.get()
+
+                # stop the worker when None is received:
                 if code is None:
                     break  # stops.
-                # aprint(f"omega_napari_worker received: '{code}' from the queue.")
+
                 yield code

         # create the worker:
@@ -63,9 +69,31 @@ def omega_napari_worker(to_napari_queue: Queue,

     def get_viewer_info(self) -> str:

+        # Setting up delegated function:
+        delegated_function = lambda v: get_viewer_info(v)
+
+        return self._execute_in_napari_context(delegated_function)
+
+
+    def take_snapshot(self):
+
+        # Delegated function:
+        def _delegated_snapshot_function(viewer: Viewer):
+
+            # Take a screenshot of the whole napari viewer:
+            screenshot = viewer.screenshot(canvas_only=False, flash=False)
+
+            # Convert the screenshot (NumPy array) to a PIL image:
+            pil_image = fromarray(screenshot)
+
+            return pil_image
+
+        # Execute delegated function in napari context and return result:
+        return self._execute_in_napari_context(_delegated_snapshot_function)
+
+
+    def _execute_in_napari_context(self, delegated_function):
         try:
-            # Setting up delegated function:
-            delegated_function = lambda v: get_viewer_info(v)

             # Send code to napari:
             self.to_napari_queue.put(delegated_function)
@@ -75,8 +103,11 @@ def get_viewer_info(self) -> str:

             if isinstance(response, ExceptionGuard):
                 exception_guard = response
+                # print stack trace:
+                import traceback
+                traceback.print_exc()
                 # raise exception_guard.exception
-                return f"Error: {exception_guard.exception_type_name} with message: '{str(exception_guard.exception)}' while using tool: {self.__class__.__name__} ."
+                return f"Error: {exception_guard.exception_type_name} with message: '{str(exception_guard.exception)}'."
             return response

@@ -90,3 +121,4 @@ def get_viewer_info(self) -> str:
+

diff --git a/src/napari_chatgpt/omega/omega_agent/OpenAIFunctionsOmegaAgent.py b/src/napari_chatgpt/omega/omega_agent/OpenAIFunctionsOmegaAgent.py
index 11f682e..eadd9b5 100644
--- a/src/napari_chatgpt/omega/omega_agent/OpenAIFunctionsOmegaAgent.py
+++ b/src/napari_chatgpt/omega/omega_agent/OpenAIFunctionsOmegaAgent.py
@@ -15,12 +15,15 @@
 )

 from napari_chatgpt.omega.napari_bridge import _get_viewer_info
+from napari_chatgpt.omega.omega_agent.prompts import DIDACTICS


 class OpenAIFunctionsOmegaAgent(OpenAIFunctionsAgent):
     # Convenience class to override some features of the OpenAIFunctionsAgent

+    be_didactic: bool = False
+
     async def aplan(
             self,
             intermediate_steps: List[Tuple[AgentAction, str]],
@@ -57,6 +60,14 @@ async def aplan(
             )
         ))

+        if self.be_didactic:
+            messages.insert(-1, SystemMessage(
+                content=DIDACTICS,
+                additional_kwargs=dict(
+                    system_message_type="didactics"
+                )
+            ))
+
         predicted_message = await self.llm.apredict_messages(
             messages, functions=self.functions, callbacks=callbacks
         )

diff --git a/src/napari_chatgpt/omega/omega_agent/prompts.py b/src/napari_chatgpt/omega/omega_agent/prompts.py
index a3c0623..ddf7189 100644
--- a/src/napari_chatgpt/omega/omega_agent/prompts.py
+++ b/src/napari_chatgpt/omega/omega_agent/prompts.py
@@ -19,37 +19,6 @@
 PERSONALITY[
     'yoda'] = '\nYou possess the personality and dialog style of the character Yoda from Star Wars. Strong in you the force is, particularly when it comes to image processing and analysis.\n'

-#
-# SUFFIX = \
-# """
-# **TOOLS:**
-# ------
-# You can ask me to use specific tools to perform tasks or answer questions. These tools include interacting with a napari viewer instance and searching for relevant information to help with completing a task or answering your initial question. These tools can generate and execute code. Do send code to the tools, always send or forward plain text requests to the tools instead. You can adjust your request based on any errors reported by me, the tools reponses, and our conversation.
-# Important: do not respond to me with code, always use a tool to complete a task or respond to a question that involves napari.
-# The available tools are:
-# {{tools}}
-#
-# {format_instructions}
-#
-# **HUMANS INPUT:**
-# Here is my input (remember to respond with the schema described above, and NOTHING else):
-# {{{{input}}}}
-#
-# """
-#
-# # """ If I explicitly ask for a step-by-step plan for solving some task, please do not use a tool! Instead, give me the detailed plan in plain text, without any code, of what I should ask you to do. Index each step of the plan with an integer. For example, to blur an image: 1. convert image to float, 2. apply Gaussian blu of sigma=1.
-# # """
-#
-# TEMPLATE_TOOL_RESPONSE = \
-# """
-# TOOL RESPONSE:
-# {observation}
-#
-# Notes:
-# - Please use the tool's response, your own knowledge, and our conversation to reply to my previous comment.
-# - If you are using information from the tool, please clearly state it without mentioning the name of the tool.
-# - If the tool did not succeed, or if an error is returned, refine your request based on the tool's response, try a different tool, or stop and provide a final answer.
-# - I have forgotten all the responses from the tool, do not assume that I know these responses.
-# - Please stick to the provided format for your response and avoid adding any additional information.
-#
-# """
+
+DIDACTICS = "\nBefore doing anything, you first ask questions to better understand the request, seeking more details to resolve ambiguities. In particular, ask didactic questions to clarify which method variant or specific approach should be used. Educate on how to solve the image processing & analysis task, list potential ideas and solutions, and provide several options. \n"
+

diff --git a/src/napari_chatgpt/omega/omega_init.py b/src/napari_chatgpt/omega/omega_init.py
index 4afab20..387981d 100644
--- a/src/napari_chatgpt/omega/omega_init.py
+++ b/src/napari_chatgpt/omega/omega_init.py
@@ -1,6 +1,7 @@
 from queue import Queue

 import langchain
+from arbol import aprint
 from langchain.agents import AgentExecutor
 from langchain.agents.conversational_chat.prompt import SUFFIX
 from langchain.base_language import BaseLanguageModel
@@ -43,6 +44,9 @@
 from napari_chatgpt.omega.tools.special.pip_install_tool import PipInstallTool
 from napari_chatgpt.omega.tools.special.python_repl import \
     PythonCodeExecutionTool
+from napari_chatgpt.utils.configuration.app_configuration import \
+    AppConfiguration
+from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile
 from napari_chatgpt.utils.omega_plugins.discover_omega_plugins import \
     discover_omega_tools
 from napari_chatgpt.utils.openai.gpt_vision import is_gpt_vision_available
@@ -53,12 +57,13 @@

 def initialize_omega_agent(to_napari_queue: Queue = None,
                            from_napari_queue: Queue = None,
-                           llm_model_name: str = None,
+                           main_llm_model_name: str = None,
                            main_llm: BaseLanguageModel = None,
                            tool_llm: BaseLanguageModel = None,
                            is_async: bool = False,
                            chat_callback_handler: BaseCallbackHandler = None,
                            tool_callback_handler: BaseCallbackHandler = None,
+                           notebook: JupyterNotebookFile = None,
                            has_human_input_tool: bool = True,
                            memory: BaseMemory = None,
                            agent_personality: str = 'neutral',
@@ -67,9 +72,13 @@ def initialize_omega_agent(to_napari_queue: Queue = None,
                            fix_bad_calls: bool = True,
                            autofix_mistakes: bool = False,
                            autofix_widget: bool = False,
+                           be_didactic: bool = False,
                            verbose: bool = False
                            ) -> AgentExecutor:

+    # Get app configuration:
+    config = AppConfiguration('omega')
+
     # Chat callback manager:
     chat_callback_manager = (AsyncCallbackManager(
         [chat_callback_handler]) if is_async else CallbackManager(
@@ -99,6 +108,7 @@ def initialize_omega_agent(to_napari_queue: Queue = None,
     kwargs = {'llm': tool_llm,
               'to_napari_queue': to_napari_queue,
               'from_napari_queue': from_napari_queue,
+              'notebook': notebook,
               'callback_manager': tool_callback_manager,
               'fix_imports': fix_imports,
               'install_missing_packages': install_missing_packages,
@@ -107,9 +117,9 @@
               }

     # Adding all napari tools:
-    tools.append(NapariViewerControlTool(**kwargs, return_direct=False))
-    tools.append(NapariViewerQueryTool(**kwargs, return_direct=False))
-    tools.append(NapariViewerExecutionTool(**kwargs, return_direct=False))
+    tools.append(NapariViewerControlTool(**kwargs, return_direct=not autofix_mistakes))
+    tools.append(NapariViewerQueryTool(**kwargs, return_direct=not autofix_mistakes))
+    tools.append(NapariViewerExecutionTool(**kwargs, return_direct=not autofix_mistakes))
     if is_gpt_vision_available():
         tools.append(NapariViewerVisionTool(**kwargs, return_direct=False))
     tools.append(NapariWidgetMakerTool(**kwargs, return_direct=not autofix_widget))

     tools.append(WebImageSearchTool(**kwargs))
     tools.append(CellNucleiSegmentationTool(**kwargs))

-    # Future task: remove if once Aydin supports Apple Silicon:
+    # Future task: remove once Aydin supports Apple Silicon:
     if not is_apple_silicon():
         tools.append(ImageDenoisingTool(**kwargs))

@@ -148,12 +158,11 @@ def initialize_omega_agent(to_napari_queue: Queue = None,
         from langchain.globals import set_debug
         set_debug(True)

-
     # prepend the personality:
     PREFIX_ = SYSTEM + PERSONALITY[agent_personality]

     # Create the agent:
-    if 'gpt-' in llm_model_name:
+    if 'gpt-' in main_llm_model_name:

         # Import OpenAI's functions agent class:
         from napari_chatgpt.omega.omega_agent.OpenAIFunctionsOmegaAgent import \
             OpenAIFunctionsOmegaAgent
@@ -171,11 +180,15 @@ def initialize_omega_agent(to_napari_queue: Queue = None,
             # human_message=SUFFIX,
             verbose=verbose,
             callback_manager=chat_callback_manager,
-            extra_prompt_messages=extra_prompt_messages
+            extra_prompt_messages=extra_prompt_messages,
+            be_didactic=be_didactic
         )

     else:

+        if be_didactic:
+            aprint("Didactic mode not yet supported for non-OpenAI agents. Ignoring.")
+
         # Import default ReAct Agent class:
         from napari_chatgpt.omega.omega_agent.ConversationalChatOmegaAgent import \
             ConversationalChatOmegaAgent
@@ -190,7 +203,6 @@
             callback_manager=chat_callback_manager,
         )

-
     # Create the executor:
     agent_executor = AgentExecutor.from_agent_and_tools(
         agent=agent,
@@ -198,9 +210,9 @@
         memory=memory,
         verbose=verbose,
         callback_manager=chat_callback_manager,
-        max_iterations=5,
+        max_iterations=config.get('agent_max_iterations', 5),
         early_stopping_method='generate',
-        handle_parsing_errors=True
+        handle_parsing_errors=config.get('agent_handle_parsing_errors', True),
     )

     return agent_executor

diff --git a/src/napari_chatgpt/omega/tools/async_base_tool.py b/src/napari_chatgpt/omega/tools/async_base_tool.py
index 8c0e576..616af7b 100644
--- a/src/napari_chatgpt/omega/tools/async_base_tool.py
+++ b/src/napari_chatgpt/omega/tools/async_base_tool.py
@@ -4,12 +4,16 @@
 from arbol import aprint
 from langchain.tools import BaseTool

+from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile
+
 _aysync_tool_thread_pool = ThreadPoolExecutor()


 class AsyncBaseTool(BaseTool):
     _executor = ThreadPoolExecutor()

+    notebook: JupyterNotebookFile = None
+
     async def _arun(self, query: str) -> str:
         """Use the tool asynchronously."""
         aprint(f"Starting async call to {type(self).__name__}({query}) ")

diff --git a/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py b/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py
index c06137b..8a667d2 100644
--- a/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py
@@ -235,6 +235,11 @@ def _run_code(self, request: str, code: str, viewer: Viewer) -> str:
         with asection(f"Running segmentation..."):
             segmented_image = segment(viewer)

+        # At this point we assume the code ran successfully and we add it to the notebook:
+        if self.notebook:
+            self.notebook.add_code_cell(code)
+
+        # Add to viewer:
         viewer.add_labels(segmented_image, name='segmented')

diff --git a/src/napari_chatgpt/omega/tools/napari/image_denoising_tool.py b/src/napari_chatgpt/omega/tools/napari/image_denoising_tool.py
index d4ca1b3..89c935e 100644
--- a/src/napari_chatgpt/omega/tools/napari/image_denoising_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/image_denoising_tool.py
@@ -147,6 +147,10 @@ def _run_code(self, request: str, code: str, viewer: Viewer) -> str:
         # get the function:
         denoise = getattr(loaded_module, 'denoise')

+        # At this point we assume the code ran successfully and we add it to the notebook:
+        if self.notebook:
+            self.notebook.add_code_cell(code)
+
         # Run denoising:
         with asection(f"Running image denoising..."):
             denoised_image = denoise(viewer)

diff --git a/src/napari_chatgpt/omega/tools/napari/napari_base_tool.py b/src/napari_chatgpt/omega/tools/napari/napari_base_tool.py
index 88ecd26..7cebe7b 100644
--- a/src/napari_chatgpt/omega/tools/napari/napari_base_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/napari_base_tool.py
@@ -1,14 +1,16 @@
 import sys
+import traceback
 from pathlib import Path
 from queue import Queue
 from typing import Union, Optional

 from arbol import aprint, asection
-from langchain import LLMChain, PromptTemplate
+from langchain.chains import LLMChain
 from langchain.chat_models.base import BaseChatModel
 from langchain.llms.base import LLM
 from langchain.schema.language_model import BaseLanguageModel
+from langchain_core.prompts import PromptTemplate
 from napari import Viewer
 from pydantic import Field

@@ -16,9 +18,15 @@
 from napari_chatgpt.omega.tools.async_base_tool import AsyncBaseTool
 from napari_chatgpt.omega.tools.instructions import \
     omega_generic_codegen_instructions
+from napari_chatgpt.utils.python.consolidate_imports import consolidate_imports
+from napari_chatgpt.utils.python.dynamic_import import execute_as_module
+from napari_chatgpt.utils.python.exception_description import \
+    exception_description
 from napari_chatgpt.utils.python.exception_guard import ExceptionGuard
 from napari_chatgpt.utils.python.fix_bad_fun_calls import \
     fix_all_bad_function_calls
+from napari_chatgpt.utils.python.fix_code_given_error import \
+    fix_code_given_error_message
 from napari_chatgpt.utils.python.installed_packages import \
     installed_package_list
 from napari_chatgpt.utils.python.missing_packages import required_packages
@@ -28,29 +36,6 @@
 from napari_chatgpt.utils.strings.filter_lines import filter_lines


-def _get_delegated_code(name: str, signature: bool = False):
-    with asection(f"Getting delegated code: '{name}' (signature={signature})"):
-        # Get current package folder:
-        current_package_folder = Path(__file__).parent
-
-        # Get package folder:
-        package_folder = Path.joinpath(current_package_folder, f"delegated_code")
-
-        # file path:
-        file_path = Path.joinpath(package_folder, f"{name}.py")
-        aprint(f'Filepath: {file_path}')
-
-        # code:
-        code = file_path.read_text()
-
-        # extract signature:
-        if signature:
-            aprint('Extracting signature!')
-            splitted_code = code.split('### SIGNATURE')
-            code = splitted_code[1]
-
-        return code

 class NapariBaseTool(AsyncBaseTool):
     """A base tool for that delegates to execution to a sub-LLM and communicates with napari via queues."""
@@ -194,9 +179,21 @@ def _prepare_code(self,
         # prepend missing imports:
         code = '\n'.join(imports) + '\n\n' + code

+        # consolidate imports:
+        code = consolidate_imports(code)
+
+        # notify that code was modified for missing imports:
+        code = "# Note: code was modified to add missing imports:\n" + code
+
         # Fix code, this takes care of wrong function calls and more:
         if self.fix_bad_calls and do_fix_bad_calls:
-            code, _ = fix_all_bad_function_calls(code)
+            code, fixed, _ = fix_all_bad_function_calls(code)
+
+            if fixed:
+                # notify that code was fixed for bad calls:
+                code = "# Note: code was modified to fix bad function calls.\n" + code

         # Remove any offending lines:
         code = filter_lines(code,
@@ -212,10 +209,73 @@
         # Install them:
         pip_install(packages)

+        # Notify that some packages might be missing and that Omega attempted to install them:
+        code = f"# Note: some packages ({','.join(packages)}) might be missing and Omega attempted to install them.\n" + code
+
         # Return fully prepared and fixed code:
         return code

+    def _run_code_catch_errors_fix_and_try_again(self,
+                                                 code,
+                                                 viewer,
+                                                 error: str = '',
+                                                 instructions: str = '',
+                                                 nb_tries: int = 3) -> str:
+
+        try:
+            with asection(f"Running code:"):
+
+                # Run the code:
+                aprint(f"Code:\n{code}")
+                captured_output = execute_as_module(code, viewer=viewer)
+
+                # Add successfully run code to notebook:
+                if self.notebook:
+                    self.notebook.add_code_cell(code)
+
+                aprint(f"This is what the code returned:\n{captured_output}")
+
+        except Exception as e:
+            if nb_tries >= 1:
+                traceback.print_exc()
+                description = error + '\n\n' + exception_description(e)
+                description = description.strip()
+                fixed_code = fix_code_given_error_message(code=code,
+                                                          error=description,
+                                                          instructions=instructions,
+                                                          viewer=viewer,
+                                                          llm=self.llm,
+                                                          verbose=self.verbose)
+                # We try again:
+                return self._run_code_catch_errors_fix_and_try_again(fixed_code,
+                                                                     viewer=viewer,
+                                                                     error=error,
+                                                                     nb_tries=nb_tries - 1)
+            else:
+                # No more tries available, we give up!
+                raise e
+
+        return captured_output
+
+
+def _get_delegated_code(name: str, signature: bool = False):
+    with asection(f"Getting delegated code: '{name}' (signature={signature})"):
+        # Get current package folder:
+        current_package_folder = Path(__file__).parent
+
+        # Get package folder:
+        package_folder = Path.joinpath(current_package_folder, f"delegated_code")
+
+        # File path:
+        file_path = Path.joinpath(package_folder, f"{name}.py")
+        aprint(f'Filepath: {file_path}')
+
+        # Code:
+        code = file_path.read_text()
+
+        # Extract signature:
+        if signature:
+            aprint('Extracting signature!')
+            split_code = code.split('### SIGNATURE')
+            code = split_code[1]
+
+        return code

diff --git a/src/napari_chatgpt/omega/tools/napari/viewer_control_tool.py b/src/napari_chatgpt/omega/tools/napari/viewer_control_tool.py
index 702e39e..f6bbc22 100644
--- a/src/napari_chatgpt/omega/tools/napari/viewer_control_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/viewer_control_tool.py
@@ -56,7 +56,7 @@
 - Convert image arrays to the float type before processing when necessary.
 - Intermediate or local image arrays should be of type float. Constants like `np.full()`, `np.ones()`, `np.zeros()`, etc., should be floats (e.g., 1.0).
 - Resulting images should be of type float except for RGB images and labels layers.
-- For RGB images check the range of the R, G, and B values: if the max value is 255 use that in your calculations.
+- The dtype of an RGB or RGBA image must be uint8 within the range [0, 255] to be viewable in napari.
 - If the request mentions "this," "that," or "the image/layer," it most likely refers to the last added image/layer.
 - If you are unsure about the layer being referred to, assume it is the last layer of the type most appropriate for the request.
 - If the input mentions the 'selected image', it most likely refers to the active or selected image layer.
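As a quick illustration of the uint8 rule that the new prompt line enforces: napari renders RGB(A) layers from uint8 arrays, so generated code is expected to rescale float images before adding them. A minimal sketch (illustrative only; `to_viewable_rgb` is a hypothetical helper, not part of this patch, and assumes a float image normalized to [0, 1]):

```python
import numpy as np

def to_viewable_rgb(image: np.ndarray) -> np.ndarray:
    # Clamp a float RGB(A) image to [0, 1], then rescale to uint8 in [0, 255]:
    image = np.clip(image, 0.0, 1.0)
    return (image * 255).astype(np.uint8)

# A random float RGB image becomes viewable in napari after conversion:
rgb_uint8 = to_viewable_rgb(np.random.rand(256, 256, 3))
```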
@@ -122,38 +122,4 @@ def _run_code(self, request: str, code: str, viewer: Viewer) -> str: - def _run_code_catch_errors_fix_and_try_again(self, - code, - viewer, - error:str = '', - instructions:str = '', - nb_tries: int = 3) -> str: - try: - with asection(f"Running code:"): - aprint(f"Code:\n{code}") - captured_output = execute_as_module(code, viewer=viewer) - aprint(f"This is what the code returned:\n{captured_output}") - - except Exception as e: - if nb_tries >= 1: - traceback.print_exc() - description = error+'\n\n'+exception_description(e) - description = description.strip() - fixed_code = fix_code_given_error_message(code=code, - error=description, - instructions=instructions, - viewer=viewer, - llm=self.llm, - verbose=self.verbose) - # We try again: - return self._run_code_catch_errors_fix_and_try_again(fixed_code, - viewer=viewer, - error=error, - nb_tries = nb_tries-1) - else: - # No more tries available, we give up! - raise e - - - return captured_output diff --git a/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py b/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py index a00088e..eddd03a 100644 --- a/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py +++ b/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py @@ -55,7 +55,8 @@ - Unless explicitly stated in the request, the result of operations on layers should be a new layer in napari and should not modify the existing layers. - Convert image arrays to the float type before processing when necessary. - Intermediate or local image arrays should be of type float. Constants like `np.full()`, `np.ones()`, `np.zeros()`, etc., should be floats (e.g., 1.0). -- Resulting images should be of type float except for RGB images and labels layers. +- Resulting images should be of type float except for: RGB images and labels layers. +- The dtype of a RGB or RGBA image must be uint8 within the range [0, 255] to be viewable in napari. - If the request mentions "this," "that," or "the image/layer," it most likely refers to the last added image/layer. - If you are unsure about the layer being referred to, assume it is the last layer of the type most appropriate for the request. - If the request mentions the 'selected image', it most likely refers to the active or selected image layer. @@ -119,38 +120,4 @@ def _run_code(self, request: str, code: str, viewer: Viewer) -> str: - def _run_code_catch_errors_fix_and_try_again(self, - code, - viewer, - error:str = '', - instructions:str = '', - nb_tries: int = 3) -> str: - try: - with asection(f"Running code:"): - aprint(f"Code:\n{code}") - captured_output = execute_as_module(code, viewer=viewer) - aprint(f"This is what the code returned:\n{captured_output}") - - except Exception as e: - if nb_tries >= 1: - traceback.print_exc() - description = error+'\n\n'+exception_description(e) - description = description.strip() - fixed_code = fix_code_given_error_message(code=code, - error=description, - instructions=instructions, - viewer=viewer, - llm=self.llm, - verbose=self.verbose) - # We try again: - return self._run_code_catch_errors_fix_and_try_again(fixed_code, - viewer=viewer, - error=error, - nb_tries = nb_tries-1) - else: - # No more tries available, we give up! 
- raise e - - - return captured_output diff --git a/src/napari_chatgpt/omega/tools/napari/viewer_query_tool.py b/src/napari_chatgpt/omega/tools/napari/viewer_query_tool.py index 1ad6cc2..4268fc0 100644 --- a/src/napari_chatgpt/omega/tools/napari/viewer_query_tool.py +++ b/src/napari_chatgpt/omega/tools/napari/viewer_query_tool.py @@ -95,6 +95,10 @@ def _run_code(self, query: str, code: str, viewer: Viewer) -> str: # Run query code: response = query(viewer) + # Add successfully run code to notebook: + if self.notebook: + self.notebook.add_code_cell(code+'\n\nquery(viewer)') + # Get captured stdout: captured_output = f.getvalue() diff --git a/src/napari_chatgpt/omega/tools/napari/viewer_vision_tool.py b/src/napari_chatgpt/omega/tools/napari/viewer_vision_tool.py index 0c625b6..9375218 100644 --- a/src/napari_chatgpt/omega/tools/napari/viewer_vision_tool.py +++ b/src/napari_chatgpt/omega/tools/napari/viewer_vision_tool.py @@ -32,17 +32,6 @@ class NapariViewerVisionTool(NapariBaseTool): "For example: '*some_layer_name* What is the background color?' or '*another_layer_name* How crowded are the objects on image?'. " "Refer to the *selected* layer if needed: '*selected* What is the background color?'. " - - # "Use this tool when you need a description of what is currently visible on the viewer's canvas or on one of the layers. " - # "This tool is usefull for deciding how to next use, process, or analyse the contents of layers. " - # "The input must be a request about what to focus on or pay attention to. " - # "For instance, if the input is 'Describe the geometric objects shown on the viewer' a description of the geometric object present on the canvas will be given. " - # "If the input contains the emphasised name of a layer (*layer_name*) the other layers are hidden from view and only the mentioned layer is visible. " - # "For example, you can request: 'What is the background color on image *some_layer_name*', or 'how crowded are the objects on image *some_layer_name*'. " - # "Please use the term 'image' in your input instead of 'layer' to avoid confusion. " - # "The response to the input will be based on a description of the visual contents of the canvas or layer. " - # "If you want to refer to the selected layer, you can refer to the *selected* layer in the input. " - # "Do NOT include code in your input. " ) prompt: str = None instructions: str = None diff --git a/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py b/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py index a3c42bb..3e2c15f 100644 --- a/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py +++ b/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py @@ -7,7 +7,7 @@ from napari_chatgpt.omega.tools.napari.napari_base_tool import NapariBaseTool from napari_chatgpt.utils.python.dynamic_import import dynamic_import from napari_chatgpt.utils.strings.filter_lines import filter_lines -from napari_chatgpt.utils.strings.find_function_name import find_function_name +from napari_chatgpt.utils.strings.find_function_name import find_magicgui_decorated_function_name _napari_widget_maker_prompt = """ **Context** @@ -41,8 +41,9 @@ """ **Instructions for manipulating arrays from Image layers:** -- Convert arrays to the float type before processing. +- Convert arrays to the float type before processing if necessary. - Any intermediate or locally created image array should also be of type float. +- The dtype of a RGB or RGBA image must be uint8 within the range [0, 255] to be viewable in napari. 
diff --git a/src/napari_chatgpt/omega/tools/search/web_image_search_tool.py b/src/napari_chatgpt/omega/tools/search/web_image_search_tool.py
index 3de3900..4d0852c 100644
--- a/src/napari_chatgpt/omega/tools/search/web_image_search_tool.py
+++ b/src/napari_chatgpt/omega/tools/search/web_image_search_tool.py
@@ -6,6 +6,7 @@
 from napari import Viewer
 
 from napari_chatgpt.omega.tools.napari.napari_base_tool import NapariBaseTool
+from napari_chatgpt.utils.download.download_files import download_file_stealth
 from napari_chatgpt.utils.strings.find_integer_in_parenthesis import \
     find_integer_in_parenthesis
 from napari_chatgpt.utils.web.duckduckgo import search_images_ddg
@@ -43,29 +44,46 @@ def _run_code(self, query: str, code: str, viewer: Viewer) -> str:
             nb_images = max(1, nb_images)
 
             # Search for image:
-            results = search_images_ddg(query=search_query)
+            results = search_images_ddg(query=search_query, num_results=2*nb_images)
             aprint(f'Found {len(results)} images.')
 
             # Extract URLs:
             urls = [r['image'] for r in results]
+            with asection(f'All URLs found:'):
+                for url in urls:
+                    aprint(url)
 
             # Limit the number of images to open to the number found:
             nb_images = min(len(urls), nb_images)
-            # Keep only the required number of urls:
-            urls = urls[:nb_images]
 
             # open each image:
             number_of_opened_images = 0
             for i, url in enumerate(urls):
                 try:
                     aprint(f'Trying to open image {i} from URL: {url}.')
-                    image = imread(url)
+
+                    # Download the image:
+                    file_path = download_file_stealth(url)
+
+                    # Open the image:
+                    image = imread(file_path)
+
+                    # convert to array:
                     image_array = numpy.array(image)
+
+                    # Add to napari:
                     viewer.add_image(image_array, name=f'image_{i}')
+
+                    # Increment counter:
                     number_of_opened_images += 1
                     aprint(f'Image {i} opened!')
+
+                    # Stop if we have opened enough images:
+                    if number_of_opened_images >= nb_images:
+                        break
+
                 except Exception as e:
                     # We ignore single failures:
                     aprint(f'Image {i} failed to open!')
diff --git a/src/napari_chatgpt/omega/tools/special/exception_catcher_tool.py b/src/napari_chatgpt/omega/tools/special/exception_catcher_tool.py
index 63e0ac4..33c7621 100644
--- a/src/napari_chatgpt/omega/tools/special/exception_catcher_tool.py
+++ b/src/napari_chatgpt/omega/tools/special/exception_catcher_tool.py
@@ -43,8 +43,9 @@ class ExceptionCatcherTool(AsyncBaseTool):
     name = "ExceptionCatcherTool"
     description = (
         "This tool is useful when the user is having problems with a widget. "
-        "This tool returns information about the exception and traceback that happened."
-        #"No input needed for this tool."
+        "This tool returns information about the exception that happened. "
+        "Traceback information is also provided to help find the source of the issue. "
+        "Input should be the number of exceptions to report on, as a single integer (>0). "
     )
 
     prompt: str = None
@@ -54,8 +55,19 @@ def _run(self, query: str) -> str:
         with asection('ExceptionCatcherTool: List of caught exceptions:'):
             text = "Here is the list of exceptions that occurred:\n\n"
+            text += "```\n"
 
-            while exception_queue.qsize() > 0:
+            try:
+                # We try to convert the input to an integer:
+                number_of_exceptions = int(query.strip())
+            except Exception as e:
+                # If the input is not an integer, or anything else goes wrong, we set the number of exceptions to the maximum:
+                number_of_exceptions = exception_queue.qsize()
+
+            # ensure that the number of exceptions is strictly positive:
+            number_of_exceptions = max(number_of_exceptions, 1)
+
+            while exception_queue.qsize() > 0 and number_of_exceptions > 0:
                 value = exception_queue.get_nowait()
 
                 description = exception_description(value)
@@ -64,9 +76,8 @@ def _run(self, query: str) -> str:
 
                 aprint(description)
 
-                # if last_generated_code:
-                #     text += "\n\n"
-                #     text += f"this exception might have been generated by this code: \n{last_generated_code}"
-                #     last_generated_code = None
+                number_of_exceptions -= 1
+
+            text += "```\n"
 
         return text
diff --git a/src/napari_chatgpt/omega/tools/special/pip_install_tool.py b/src/napari_chatgpt/omega/tools/special/pip_install_tool.py
index ea7c27b..0153a9e 100644
--- a/src/napari_chatgpt/omega/tools/special/pip_install_tool.py
+++ b/src/napari_chatgpt/omega/tools/special/pip_install_tool.py
@@ -43,7 +43,7 @@ def _run(self, query: str) -> str:
         # Remove already installed packages from the list of packages to be installed:
         packages = [p for p in packages if p not in already_installed_packages]
 
-        # If no packages ned to be installed, return:
+        # If no packages need to be installed, return:
         if len(packages)==0:
             message = f"No packages need to be installed, all packages are already installed: '{','.join(already_installed_packages)}'.\n"
             aprint(message)
@@ -55,6 +55,9 @@ def _run(self, query: str) -> str:
                                    ask_permission=False,
                                    included=False)
 
+        if self.notebook:
+            self.notebook.add_code_cell(f"!pip install {' '.join(packages)}")
+
         aprint(message)
 
         return message
diff --git a/src/napari_chatgpt/omega/tools/special/python_repl.py b/src/napari_chatgpt/omega/tools/special/python_repl.py
index c639e7b..295fd45 100644
--- a/src/napari_chatgpt/omega/tools/special/python_repl.py
+++ b/src/napari_chatgpt/omega/tools/special/python_repl.py
@@ -66,6 +66,10 @@ def _run(
         try:
             if self.sanitize_input:
                 query = sanitize_input(query)
+
+            if self.notebook:
+                self.notebook.add_code_cell(query)
+
             tree = ast.parse(query)
             module = ast.Module(tree.body[:-1], type_ignores=[])
             exec(ast.unparse(module), self.globals, self.locals)  # type: ignore
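The context lines above show the REPL idiom this tool relies on: parse the input, execute everything except the last statement, then evaluate the last statement so its value can be reported back. A self-contained sketch of that pattern, independent of LangChain; all names are illustrative:

```python
# Sketch of the exec-all-but-last / eval-last REPL pattern (illustrative).
import ast

def run_like_a_repl(source: str, env: dict):
    tree = ast.parse(source)
    # Execute every statement except the last:
    module = ast.Module(tree.body[:-1], type_ignores=[])
    exec(compile(module, '<repl>', 'exec'), env)
    # If the last node is an expression, evaluate it and return its value:
    last = tree.body[-1]
    if isinstance(last, ast.Expr):
        return eval(compile(ast.Expression(last.value), '<repl>', 'eval'), env)
    # Otherwise just execute it:
    exec(compile(ast.Module([last], type_ignores=[]), '<repl>', 'exec'), env)

print(run_like_a_repl("x = 21\nx * 2", {}))  # 42
```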
diff --git a/src/napari_chatgpt/utils/configuration/__init__.py b/src/napari_chatgpt/utils/configuration/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/napari_chatgpt/utils/configuration/app_configuration.py b/src/napari_chatgpt/utils/configuration/app_configuration.py
new file mode 100644
index 0000000..9eedd84
--- /dev/null
+++ b/src/napari_chatgpt/utils/configuration/app_configuration.py
@@ -0,0 +1,78 @@
+import os
+from threading import Lock
+from typing import Union, Any
+
+import yaml
+
+
+class AppConfiguration:
+    _instances = {}
+    _lock = Lock()
+
+    def __new__(cls, app_name, default_config='default_config.yaml'):
+        with cls._lock:
+            if app_name not in cls._instances:
+
+                # Instantiate class:
+                instance = super(AppConfiguration, cls).__new__(cls)
+
+                # Save instance in class level dictionary:
+                cls._instances[app_name] = instance
+
+                # if folder doesn't exist, create it:
+                if not os.path.exists(os.path.expanduser(f'~/.{app_name}')):
+                    os.makedirs(os.path.expanduser(f'~/.{app_name}'), exist_ok=True)
+
+            return cls._instances[app_name]
+
+    def __init__(self, app_name, default_config: Union[str, dict]='default_config.yaml'):
+        self.app_name = app_name
+        self.default_config = default_config
+        self.config_file = os.path.expanduser(f'~/.{app_name}/config.yaml')
+        self.config_data = {}
+        self.load_configurations()
+
+    def load_default_config(self):
+        if isinstance(self.default_config, dict):
+            return self.default_config
+        elif isinstance(self.default_config, str):
+            default_config_path = os.path.abspath(self.default_config)
+            if os.path.exists(default_config_path):
+                with open(default_config_path, 'r') as default_file:
+                    return yaml.safe_load(default_file)
+        return {}
+
+    def load_configurations(self):
+        # Load default configurations
+        default_config = self.load_default_config()
+
+        # Load user-specific configurations
+        if os.path.exists(self.config_file):
+            with open(self.config_file, 'r') as user_file:
+                user_config = yaml.safe_load(user_file)
+        else:
+            user_config = {}
+
+        # Merge configurations
+        self.config_data = {**default_config, **user_config}
+
+    def save_configurations(self):
+        with open(self.config_file, 'w') as file:
+            yaml.dump(self.config_data, file)
+
+    def get(self, key, default: Any = None):
+        value = self.config_data.get(key)
+        if value is None:
+            value = default
+            if value is not None:
+                self.__setitem__(key, value)
+
+        return value
+
+    def __getitem__(self, key):
+        return self.config_data.get(key)
+
+    def __setitem__(self, key, value):
+        self.config_data[key] = value
+        self.save_configurations()
+
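In practice the class behaves like a process-wide, per-app-name singleton dict persisted to `~/.{app_name}/config.yaml`, with defaults written back on first read. A short usage sketch; the app name and keys here are hypothetical:

```python
# Usage sketch for AppConfiguration; app name and keys are hypothetical.
from napari_chatgpt.utils.configuration.app_configuration import AppConfiguration

config = AppConfiguration('my_app', default_config={'greeting': 'hello'})

# Missing keys fall back to the supplied default, which is then persisted:
retries = config.get('max_retries', 3)

# Writes go straight to ~/.my_app/config.yaml:
config['verbose'] = True

# A later instantiation with the same app name returns the same object:
assert AppConfiguration('my_app') is config
```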
diff --git a/src/napari_chatgpt/utils/configuration/test/__init__.py b/src/napari_chatgpt/utils/configuration/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/napari_chatgpt/utils/configuration/test/app_configuration_test.py b/src/napari_chatgpt/utils/configuration/test/app_configuration_test.py
new file mode 100644
index 0000000..36c9559
--- /dev/null
+++ b/src/napari_chatgpt/utils/configuration/test/app_configuration_test.py
@@ -0,0 +1,29 @@
+import pytest
+from arbol import aprint
+
+from napari_chatgpt.utils.api_keys.api_key import is_api_key_available
+from napari_chatgpt.utils.llm.summarizer import summarize
+from napari_chatgpt.utils.web.scrapper import text_from_url
+from napari_chatgpt.utils.configuration.app_configuration import \
+    AppConfiguration
+
+
+def test_app_configuration():
+
+    default_config = {'test_key_2':'default_value'}
+
+    config_1 = AppConfiguration('test_app_configuration', default_config=default_config)
+
+    assert config_1['test_key_2'] == 'default_value'
+    config_1['test_key'] = 'test_value'
+    assert config_1['test_key'] == 'test_value'
+    config_1['test_key'] = 'test_value2'
+    assert config_1['test_key'] == 'test_value2'
+
+    config_2 = AppConfiguration('test_app_configuration')
+    assert config_2['test_key'] == 'test_value2'
+    assert config_2['test_key_2'] == 'default_value'
+
+
+
+
diff --git a/src/napari_chatgpt/utils/download/download_files.py b/src/napari_chatgpt/utils/download/download_files.py
index 1879942..4c85a84 100644
--- a/src/napari_chatgpt/utils/download/download_files.py
+++ b/src/napari_chatgpt/utils/download/download_files.py
@@ -26,3 +26,43 @@ def download_files(urls, path=None) -> List[str]:
         filenames.append(file_name)
 
     return filenames
+
+
+import requests
+import tempfile
+import os
+
+def download_file_stealth(url, file_path=None) -> str:
+    headers = {
+        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3",
+        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
+        "Accept-Language": "en-US,en;q=0.5",
+        "Accept-Encoding": "gzip, deflate, br",
+        "DNT": "1",  # Do Not Track Request Header
+        "Connection": "keep-alive",
+        "Upgrade-Insecure-Requests": "1"
+    }
+
+    response = requests.get(url, headers=headers, stream=True)
+
+    if response.status_code == 200:
+        if file_path is None:
+            # Use a temporary file
+            temp_file = tempfile.NamedTemporaryFile(delete=False)
+            file_path = temp_file.name
+            file_obj = temp_file
+        else:
+            # Use the specified file path
+            file_obj = open(file_path, 'wb')
+
+        with file_obj as f:
+            for chunk in response.iter_content(1024):
+                f.write(chunk)
+
+        print(f"File downloaded: {file_path}")
+        return file_path
+    else:
+        print(f"Failed to download file: status code {response.status_code}")
+        return None
+
+
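`download_file_stealth` is a thin wrapper around a streamed `requests.get` with browser-like headers; it returns the local file path, or None on a non-200 response. A usage sketch with a placeholder URL:

```python
# Usage sketch for download_file_stealth; the URL is a placeholder.
from napari_chatgpt.utils.download.download_files import download_file_stealth

file_path = download_file_stealth('https://example.com/image.png')
if file_path is not None:
    print(f'Downloaded to temporary file: {file_path}')
else:
    print('Download failed (non-200 status code).')
```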
diff --git a/src/napari_chatgpt/utils/notebook/__init__.py b/src/napari_chatgpt/utils/notebook/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/napari_chatgpt/utils/notebook/jupyter_notebook.py b/src/napari_chatgpt/utils/notebook/jupyter_notebook.py
new file mode 100644
index 0000000..0327324
--- /dev/null
+++ b/src/napari_chatgpt/utils/notebook/jupyter_notebook.py
@@ -0,0 +1,158 @@
+from base64 import b64encode
+from datetime import datetime
+from io import BytesIO
+from mimetypes import guess_type
+from os import path, makedirs
+from typing import Optional, Callable
+
+from PIL import Image
+from napari import Viewer
+import nbformat
+from nbformat.v4 import new_notebook, new_code_cell, new_markdown_cell
+
+from napari_chatgpt.utils.strings.markdown import extract_markdown_blocks
+
+
+class JupyterNotebookFile:
+
+
+    def __init__(self, notebook_folder_path: Optional[str] = None):
+        self._modified = False
+        self.restart(notebook_folder_path=notebook_folder_path,
+                     write_before_restart=False,
+                     force_restart=True
+                     )
+
+
+    def restart(self,
+                notebook_folder_path: Optional[str] = None,
+                write_before_restart: bool = True,
+                force_restart: bool = False
+                ):
+
+        # If the notebook has not been modified since last restart then we don't need to restart again:
+        if not force_restart and not self._modified:
+            return
+
+        if write_before_restart:
+            # Write the notebook to disk before restarting
+            self.write()
+
+        # path of system's desktop folder:
+        desktop_path = path.join(path.join(path.expanduser('~')), 'Desktop')
+
+        # default folder path for notebooks:
+        notebook_folder_path = notebook_folder_path or path.join(desktop_path, 'omega_notebooks')
+
+        # Create the folder if it does not exist:
+        if not path.exists(notebook_folder_path):
+            makedirs(notebook_folder_path, exist_ok=True)
+
+        # Get current date and time
+        now = datetime.now()
+
+        # Format date and time
+        formatted_date_time = now.strftime("%Y_%m_%d_%H_%M_%S")
+
+        # path of default notebook file on desktop:
+        self.default_file_path = path.join(notebook_folder_path, f'{formatted_date_time}_omega_notebook.ipynb')
+
+        # Restart the notebook:
+        self.notebook = new_notebook()
+
+        # Mark as not modified:
+        self._modified = False
+
+    def write(self, file_path: Optional[str] = None):
+        file_path = file_path or self.default_file_path
+        # Write the notebook to disk
+        with open(file_path, 'w') as f:
+            nbformat.write(self.notebook, f)
+
+    def add_code_cell(self, code: str, remove_quotes: bool = False):
+
+        if remove_quotes:
+            # Remove the quotes from the code block
+            code = '\n'.join(code.split('\n')[1:-1])
+
+        # Add a code cell
+        self.notebook.cells.append(new_code_cell(code))
+
+        # Mark as modified:
+        self._modified = True
+
+    def add_markdown_cell(self, markdown: str, detect_code_blocks: bool = True):
+
+        if detect_code_blocks:
+            # Extract code blocks from markdown:
+            blocks = extract_markdown_blocks(markdown)
+            if blocks:
+                # Add a code cell for each code block
+                for block in blocks:
+                    if block.strip().startswith('```'):
+                        self.add_code_cell(block, remove_quotes=True)
+                    else:
+                        self.add_markdown_cell(block, detect_code_blocks=False)
+
+        else:
+            # Add a plain markdown cell without detecting code blocks:
+            self.notebook.cells.append(new_markdown_cell(markdown))
+
+        # Mark as modified:
+        self._modified = True
+
+    def _add_image(self, base64_string: str, image_type: str, text: str = ""):
+        # Add a markdown cell with the image and optional text:
+        image_html = f'<img src="data:image/{image_type};base64,{base64_string}" />'
+        markdown_content = f"{text}\n\n{image_html}" if text else image_html
+        new_image_cell = new_markdown_cell(markdown_content)
+        self.notebook.cells.append(new_image_cell)
+
+        # Mark as modified:
+        self._modified = True
+
+    def add_image_cell(self, image_path: str, text: str = ""):
+        # Read the image and convert it to base64
+        image_type = guess_type(image_path)[0].split('/')[1]
+        with open(image_path, "rb") as image_file:
+            base64_string = b64encode(image_file.read()).decode()
+
+        # Use the existing method to add the image with text
+        self._add_image(base64_string, image_type, text)
+
+
+    def add_image_cell_from_PIL_image(self, pil_image: Image, text: str = ""):
+        # Convert PIL image to base64
+        buffered = BytesIO()
+        pil_image.save(buffered, format='PNG')
+        base64_string = b64encode(buffered.getvalue()).decode()
+
+        # Use the existing method to add the image with text
+        self._add_image(base64_string, 'PNG', text)
+
+        # Mark as modified:
+        self._modified = True
+
+    def register_snapshot_function(self, snapshot_function: Callable):
+        self._snapshot_function = snapshot_function
+
+    def take_snapshot(self, text: str = ""):
+
+        # Call the snapshot function:
+        pil_image = self._snapshot_function()
+
+        # Add this image to the notebook
+        self.add_image_cell_from_PIL_image(pil_image, text)
+
+
+# def start_jupyter_server(folder_path):
+#     # Function to run the notebook server in a thread
+#     def notebook_thread():
+#         notebookapp.main(['--notebook-dir', folder_path])
+#
+#     # Start the Jupyter server in a separate thread
+#     thread = threading.Thread(target=notebook_thread)
+#     thread.start()
+#     print(f"Jupyter
server started at {folder_path} 😄") + + diff --git a/src/napari_chatgpt/utils/notebook/test/__init__.py b/src/napari_chatgpt/utils/notebook/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/napari_chatgpt/utils/notebook/test/add_and_split_markdown.py b/src/napari_chatgpt/utils/notebook/test/add_and_split_markdown.py new file mode 100644 index 0000000..a897e8a --- /dev/null +++ b/src/napari_chatgpt/utils/notebook/test/add_and_split_markdown.py @@ -0,0 +1,87 @@ +from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile + +markdown = """ +### Agent: +Tool response: +```python +import numpy as np +from napari.types import ImageData, Image +from napari.layers import Image as ImageLayer +from skimage.color import rgb2hsv, rgb2lab, rgb2gray +from magicgui import magicgui +from napari import Viewer + +@magicgui( + color_space={"choices": ["HSV", "LAB", "Grayscale"]}, + hue={"widget_type": "FloatSlider", 'min': 0.0, 'max': 1.0}, + saturation={"widget_type": "FloatSlider", 'min': 0.0, 'max': 1.0}, + value={"widget_type": "FloatSlider", 'min': 0.0, 'max': 1.0}, + apply_conversion={"call_button": "Apply Conversion"}, + result_widget=True +) +def color_space_explorer( + viewer: Viewer, + color_space: str, + hue: float = 0.5, + saturation: float = 0.5, + value: float = 0.5, + apply_conversion: bool = False +) -> ImageData: + + selected_layer = viewer.layers.selection.active + if not isinstance(selected_layer, ImageLayer): + return "Please select an image layer." + + image_data = np.copy(selected_layer.data) + if image_data.dtype != np.float32: + image_data = image_data.astype(np.float32) / 255.0 + + def convert_color_space(image, color_space, hue, saturation, value): + if color_space == "HSV": + hsv_image = rgb2hsv(image) + hsv_image[..., 0] = hue + hsv_image[..., 1] = saturation + hsv_image[..., 2] = value + return hsv_image + elif color_space == "LAB": + lab_image = rgb2lab(image) + # LAB adjustments are not straightforward like HSV + # Typically, users don't adjust LAB channels directly + return lab_image + elif color_space == "Grayscale": + gray_image = rgb2gray(image) + return gray_image[..., np.newaxis] + else: + return image + + preview_image = convert_color_space(image_data, color_space, hue, saturation, value) + + if apply_conversion: + # Update the selected layer with the converted image + selected_layer.data = preview_image + return "Conversion applied." + else: + # Return the preview image without updating the layer + return preview_image + +# This function would be added to the napari viewer as a widget +# and is not meant to be called directly in a script. +# To add this widget to the napari viewer, you would use the following: +# viewer.window.add_dock_widget(color_space_explorer) +``` + +This widget function `color_space_explorer` allows the user to explore different color spaces and adjust parameters relevant to the selected color space. The user can select the target color space from a dropdown menu, adjust parameters using sliders, and apply the conversion to the selected image layer. The function also provides a preview of the converted image before applying it. The function is decorated with `magicgui` to create an interactive GUI within napari. 
+""" + +def test_add_and_split_markdown(): + + jnf = JupyterNotebookFile() + jnf.add_markdown_cell(markdown) + + assert len(jnf.notebook.cells) == 3 + assert '```' not in jnf.notebook.cells[0].source + assert '```' in jnf.notebook.cells[1].source + assert '```' not in jnf.notebook.cells[2].source + + + diff --git a/src/napari_chatgpt/utils/notebook/test/jupyter_notebook_test.py b/src/napari_chatgpt/utils/notebook/test/jupyter_notebook_test.py new file mode 100644 index 0000000..6dd7991 --- /dev/null +++ b/src/napari_chatgpt/utils/notebook/test/jupyter_notebook_test.py @@ -0,0 +1,59 @@ +import tempfile + +import requests + +from napari_chatgpt.utils.notebook.jupyter_notebook import JupyterNotebookFile + + +def test_notebook_creation(): + # Test if the notebook is created correctly + jnf = JupyterNotebookFile() + assert jnf.notebook is not None + assert len(jnf.notebook.cells) == 0 + +def test_add_code_cell(): + # Test adding a code cell + jnf = JupyterNotebookFile() + jnf.add_code_cell("print('Hello, World!')") + assert len(jnf.notebook.cells) == 1 + assert jnf.notebook.cells[0].cell_type == 'code' + assert jnf.notebook.cells[0].source == "print('Hello, World!')" + +def test_add_markdown_cell(): + # Test adding a markdown cell + jnf = JupyterNotebookFile() + jnf.add_markdown_cell("# Hello, World!") + assert len(jnf.notebook.cells) == 1 + assert jnf.notebook.cells[0].cell_type == 'markdown' + assert jnf.notebook.cells[0].source == "# Hello, World!" + + +def download_image(url, file): + """Download an image from a URL and save it to a file object.""" + response = requests.get(url) + file.write(response.content) + file.flush() # Ensure all data is written to the file + +def test_add_image_cell(): + # URL of the image + image_url = 'https://upload.wikimedia.org/wikipedia/commons/7/70/Example.png' + + # Use a temporary file + with tempfile.NamedTemporaryFile(delete=True, suffix=".png") as temp_image: + # Download the image into the temporary file + download_image(image_url, temp_image) + + # Create an instance of JupyterNotebookFile + notebook = JupyterNotebookFile() + + # Add the image cell with optional text + notebook.add_image_cell(temp_image.name, "Example Image") + + # Save the notebook + notebook_file_path = 'test_notebook.ipynb' + notebook.write(notebook_file_path) + + print(f"Test completed. Notebook saved at {notebook_file_path}. 
Please open this notebook to verify the image cell.") + + + diff --git a/src/napari_chatgpt/utils/openai/test/default_model_test.py b/src/napari_chatgpt/utils/openai/test/default_model_test.py index a72dad7..3bd7b6a 100644 --- a/src/napari_chatgpt/utils/openai/test/default_model_test.py +++ b/src/napari_chatgpt/utils/openai/test/default_model_test.py @@ -3,7 +3,6 @@ from napari_chatgpt.utils.api_keys.api_key import is_api_key_available from napari_chatgpt.utils.openai.default_model import \ get_default_openai_model_name -from napari_chatgpt.utils.openai.gpt_vision import is_gpt_vision_available @pytest.mark.skipif(not is_api_key_available('OpenAI'), diff --git a/src/napari_chatgpt/utils/python/consolidate_imports.py b/src/napari_chatgpt/utils/python/consolidate_imports.py new file mode 100644 index 0000000..9d74664 --- /dev/null +++ b/src/napari_chatgpt/utils/python/consolidate_imports.py @@ -0,0 +1,17 @@ +def consolidate_imports(code): + lines = code.split('\n') + unique_imports = set() + import_block_end = 0 + + # Identify and store unique import statements + for i, line in enumerate(lines): + stripped_line = line.strip() + if stripped_line.startswith('import ') or stripped_line.startswith('from '): + unique_imports.add(line) + import_block_end = i + elif stripped_line: # Non-empty line that's not an import statement + break + + # Reconstruct the code + consolidated_code = '\n'.join(sorted(unique_imports)) + '\n\n' + '\n'.join(lines[import_block_end+1:]) + return consolidated_code \ No newline at end of file diff --git a/src/napari_chatgpt/utils/python/dynamic_import.py b/src/napari_chatgpt/utils/python/dynamic_import.py index 0731641..063bd57 100644 --- a/src/napari_chatgpt/utils/python/dynamic_import.py +++ b/src/napari_chatgpt/utils/python/dynamic_import.py @@ -9,7 +9,8 @@ from arbol import asection, aprint -def dynamic_import(module_code: str, name: str = None) -> Optional[Any]: +def dynamic_import(module_code: str, + name: str = None) -> Optional[Any]: # Module name: if not name: name = f'some_code_{randint(0, 999999999)}' @@ -55,10 +56,10 @@ def execute_code({}): aprint(module_code) # Load the code as module: - module = dynamic_import(module_code, name) + _module_ = dynamic_import(module_code, name) # get the function from module: - execute_code = getattr(module, 'execute_code') + execute_code = getattr(_module_, 'execute_code') f = StringIO() with redirect_stdout(f): diff --git a/src/napari_chatgpt/utils/python/fix_bad_fun_calls.py b/src/napari_chatgpt/utils/python/fix_bad_fun_calls.py index 0fe1406..2f75508 100644 --- a/src/napari_chatgpt/utils/python/fix_bad_fun_calls.py +++ b/src/napari_chatgpt/utils/python/fix_bad_fun_calls.py @@ -5,10 +5,11 @@ from typing import Tuple from arbol import asection, aprint -from langchain import PromptTemplate, LLMChain from langchain.callbacks.manager import CallbackManager +from langchain.chains import LLMChain from langchain.chat_models import ChatOpenAI from langchain.llms import BaseLLM +from langchain_core.prompts import PromptTemplate from napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout import \ ArbolCallbackHandler @@ -21,7 +22,7 @@ def fix_all_bad_function_calls(code: str, llm: BaseLLM = None, - verbose: bool = False) -> Tuple[str, bool]: + verbose: bool = False) -> Tuple[str, bool, str]: with(asection(f'Automatically fix bad function calls for code of length: {len(code)}')): # Cleanup code: @@ -29,7 +30,7 @@ def fix_all_bad_function_calls(code: str, # If code is empty, nothing needs to be fixed! 
        if len(code) == 0:
-            return code, False
+            return code, False, ''
 
         with asection(f'Code to fix:'):
             aprint(code)
@@ -96,17 +97,17 @@ def fix_all_bad_function_calls(code: str,
                 with asection(f'Differences between original code and fixed code:'):
                     aprint(differences_text)
 
-                return fixed_code, True
+                return fixed_code, True, function_calls_report
 
             else:
                 aprint(f"No bad function calls detected, no need to fix anything!")
-                return fixed_code, False
+                return fixed_code, False, ''
 
     except Exception as e:
         traceback.print_exc()
         aprint(f"Encountered exception: {str(e)} while trying to fix code! Returning code unchanged!")
         # TODO: if code does not compile maybe use LLM to fix it?
-        return code, False
+        return code, False, ''
 
 
 _fix_bad_fun_calls_prompt = f"""
diff --git a/src/napari_chatgpt/utils/python/fix_code_given_error.py b/src/napari_chatgpt/utils/python/fix_code_given_error.py
index d3743dc..0ac2673 100644
--- a/src/napari_chatgpt/utils/python/fix_code_given_error.py
+++ b/src/napari_chatgpt/utils/python/fix_code_given_error.py
@@ -4,10 +4,11 @@
 import napari
 from arbol import asection, aprint
-from langchain import PromptTemplate, LLMChain
 from langchain.callbacks.manager import CallbackManager
+from langchain.chains import LLMChain
 from langchain.chat_models import ChatOpenAI
 from langchain.llms import BaseLLM
+from langchain_core.prompts import PromptTemplate
 
 from napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout import \
     ArbolCallbackHandler
diff --git a/src/napari_chatgpt/utils/python/missing_packages.py b/src/napari_chatgpt/utils/python/missing_packages.py
index 5aa67ca..0f7cb07 100644
--- a/src/napari_chatgpt/utils/python/missing_packages.py
+++ b/src/napari_chatgpt/utils/python/missing_packages.py
@@ -1,10 +1,11 @@
 import sys
 
 from arbol import aprint, asection
-from langchain import LLMChain, PromptTemplate
 from langchain.callbacks.manager import CallbackManager
+from langchain.chains import LLMChain
 from langchain.chat_models import ChatOpenAI
 from langchain.llms import BaseLLM
+from langchain_core.prompts import PromptTemplate
 
 from napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout import \
     ArbolCallbackHandler
diff --git a/src/napari_chatgpt/utils/python/required_imports.py b/src/napari_chatgpt/utils/python/required_imports.py
index d6c7a2d..5778b1d 100644
--- a/src/napari_chatgpt/utils/python/required_imports.py
+++ b/src/napari_chatgpt/utils/python/required_imports.py
@@ -3,10 +3,11 @@
 import traceback
 
 from arbol import asection, aprint
-from langchain import LLMChain, PromptTemplate
 from langchain.callbacks.manager import CallbackManager
+from langchain.chains import LLMChain
 from langchain.chat_models import ChatOpenAI
 from langchain.llms import BaseLLM
+from langchain_core.prompts import PromptTemplate
 
 from napari_chatgpt.chat_server.callbacks.callbacks_arbol_stdout import \
     ArbolCallbackHandler
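Callers of `fix_all_bad_function_calls` now unpack three values instead of two: the (possibly fixed) code, a flag indicating whether anything changed, and a report on the detected calls. A hedged sketch of the new call-site pattern; note that an OpenAI key is required at runtime, as the tests below point out:

```python
# Sketch of the new three-value return of fix_all_bad_function_calls.
from napari_chatgpt.utils.python.fix_bad_fun_calls import fix_all_bad_function_calls

fixed_code, did_something, report = fix_all_bad_function_calls("print('hello')")
if did_something:
    print(f'Code was modified. Report:\n{report}')
else:
    print('No bad function calls detected; code returned unchanged.')
```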
gaussian_filter
+
+# Step 1: Retrieve the selected image layer from the viewer.
+selected_image_layer = viewer.layers.selection.active
+
+# Step 2: Convert the image data to float type if it's not already.
+image_data = np.asarray(selected_image_layer.data, dtype=float)
+
+# Step 3: Apply a Gaussian filter with sigma=2 to the image data.
+filtered_image_data = gaussian_filter(image_data, sigma=2)
+
+# Step 4: Add the filtered image as a new layer to the viewer.
+viewer.add_image(filtered_image_data, name=f"{selected_image_layer.name}_gaussian_filtered")
+
+# Step 5: Print the result of the operation.
+print("Applied a Gaussian filter with sigma=2 to the selected image.")
+"""
+
+
+def test_consolidate_imports():
+    result = consolidate_imports(code_to_consolidate_imports)
+
+    print('')
+    print(result)
+
+    assert len(result.split('\n')) == 20
diff --git a/src/napari_chatgpt/utils/python/test/fix_bad_function_calls_test.py b/src/napari_chatgpt/utils/python/test/fix_bad_function_calls_test.py
index 9664bbe..bb46d60 100644
--- a/src/napari_chatgpt/utils/python/test/fix_bad_function_calls_test.py
+++ b/src/napari_chatgpt/utils/python/test/fix_bad_function_calls_test.py
@@ -69,8 +69,9 @@ def find_straight_lines(image: ImageData) -> LabelsData:
 @pytest.mark.skipif(not is_api_key_available('OpenAI'),
                     reason="requires OpenAI key to run")
 def test_fix_bad_call_1():
-    fixed_code, did_something = fix_all_bad_function_calls(_code_snippet_1)
+    fixed_code, did_something, report = fix_all_bad_function_calls(_code_snippet_1)
     aprint(fixed_code)
+    aprint(report)
 
     assert not did_something
 
@@ -78,16 +79,18 @@ def test_fix_bad_call_1():
 @pytest.mark.skipif(not is_api_key_available('OpenAI'),
                     reason="requires OpenAI key to run")
 def test_fix_bad_call_2():
-    fixed_code, did_something = fix_all_bad_function_calls(_code_snippet_2)
+    fixed_code, did_something, report = fix_all_bad_function_calls(_code_snippet_2)
     aprint(fixed_code)
+    aprint(report)
 
     assert not did_something
 
 
 @pytest.mark.skipif(not is_api_key_available('OpenAI'),
                     reason="requires OpenAI key to run")
 def test_fix_bad_call_3():
-    fixed_code, did_something = fix_all_bad_function_calls(_code_snippet_3)
+    fixed_code, did_something, report = fix_all_bad_function_calls(_code_snippet_3)
     aprint(fixed_code)
+    aprint(report)
 
     assert did_something
     assert 'skimage.draw.line' in fixed_code
diff --git a/src/napari_chatgpt/utils/qt/warning_dialog.py b/src/napari_chatgpt/utils/qt/warning_dialog.py
new file mode 100644
index 0000000..c4ed957
--- /dev/null
+++ b/src/napari_chatgpt/utils/qt/warning_dialog.py
@@ -0,0 +1,20 @@
+import sys
+
+from PyQt5.QtCore import Qt
+from PyQt5.QtWidgets import QApplication, QMessageBox
+
+def show_warning_dialog(html_message):
+    dialog = QMessageBox()
+    dialog.setIcon(QMessageBox.Warning)
+    dialog.setText(html_message)
+    dialog.setWindowTitle("Warning")
+    dialog.setTextFormat(Qt.TextFormat.RichText)  # Set text format to RichText to interpret HTML
+    dialog.setStandardButtons(QMessageBox.Ok)
+    dialog.exec_()
+
+# Example usage
+if __name__ == '__main__':
+    app = QApplication(sys.argv)
+    html_message = 'There is an issue. Please visit <a href="https://example.com">this link</a> for more information.'  # placeholder URL
+    show_warning_dialog(html_message)
+    sys.exit(app.exec_())
\ No newline at end of file
diff --git a/src/napari_chatgpt/utils/strings/find_function_name.py b/src/napari_chatgpt/utils/strings/find_function_name.py
index f909a97..ef362ad 100644
--- a/src/napari_chatgpt/utils/strings/find_function_name.py
+++ b/src/napari_chatgpt/utils/strings/find_function_name.py
@@ -14,3 +14,17 @@ def find_function_name(code: str):
         return function_name
     else:
         return None
+
+def find_magicgui_decorated_function_name(code: str):
+    # Define a regular expression pattern to match the @magicgui decorator followed by a function name
+    pattern = r"@magicgui\s*(?:\([^)]*\))?\s*def\s+(\w+)\("
+
+    # Use the re.search function to find the first occurrence of the pattern in the string
+    match = re.search(pattern, code)
+
+    # If a match is found, extract the function name from the first group of the match object
+    if match:
+        function_name = match.group(1)
+        return function_name
+    else:
+        return None
\ No newline at end of file
diff --git a/src/napari_chatgpt/utils/strings/markdown.py b/src/napari_chatgpt/utils/strings/markdown.py
new file mode 100644
index 0000000..a48c248
--- /dev/null
+++ b/src/napari_chatgpt/utils/strings/markdown.py
@@ -0,0 +1,45 @@
+def extract_markdown_blocks(markdown_str, remove_quotes: bool = False):
+    """
+    Extracts and returns blocks of text and code from a markdown string.
+
+    Args:
+        markdown_str (str): A string formatted in markdown.
+        remove_quotes (bool, optional): Whether to remove the quotes from the code blocks. Defaults to False.
+
+    Returns:
+        List[str]: A list of strings, where each string is a block of text or code.
+    """
+
+    blocks = []
+    current_block = []
+    in_code_block = False
+
+    for line in markdown_str.split('\n'):
+        # Check for code block delimiter
+        if line.strip().startswith('```'):
+            if in_code_block:
+                # End of code block
+                if not remove_quotes:
+                    current_block.append(line)
+                blocks.append('\n'.join(current_block))
+                current_block = []
+                in_code_block = False
+            else:
+                # Start of code block
+                if current_block:
+                    # Add the previous text block if exists
+                    blocks.append('\n'.join(current_block))
+                    current_block = []
+                in_code_block = True
+                if not remove_quotes:
+                    current_block.append(line)
+
+        else:
+            current_block.append(line)
+
+    # Add the last block if exists
+    if current_block:
+        blocks.append('\n'.join(current_block))
+
+    return blocks
+
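`extract_markdown_blocks` splits a chat answer into alternating text and fenced-code blocks, which is how `JupyterNotebookFile.add_markdown_cell` earlier in this diff decides between markdown and code cells. A small usage sketch; the answer string is illustrative:

```python
# Usage sketch: splitting a chat answer into text and code blocks.
from napari_chatgpt.utils.strings.markdown import extract_markdown_blocks

answer = "Here is the code:\n```python\nprint('hi')\n```\nAnd that is all."
blocks = extract_markdown_blocks(answer)

for block in blocks:
    kind = 'code' if block.strip().startswith('```') else 'text'
    print(kind, repr(block))
# -> text, code (with fences kept), text
```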
diff --git a/src/napari_chatgpt/utils/strings/test/find_magicgui_decorated_function_name_test.py b/src/napari_chatgpt/utils/strings/test/find_magicgui_decorated_function_name_test.py
new file mode 100644
index 0000000..b8a3883
--- /dev/null
+++ b/src/napari_chatgpt/utils/strings/test/find_magicgui_decorated_function_name_test.py
@@ -0,0 +1,73 @@
+from napari_chatgpt.utils.strings.find_function_name import find_function_name, \
+    find_magicgui_decorated_function_name
+
+__some_register = {}
+
+_some_code = """
+import numpy as np
+from napari.types import ImageData
+from magicgui import magicgui
+
+# Define the function to create different patterns
+def create_pattern(pattern_type: str, size: int = 1024) -> np.ndarray:
+    image = np.zeros((size, size, 3), dtype=np.uint8)
+
+    if pattern_type == 'Horizontal Gradient':
+        for i in range(size):
+            image[:, i] = i * 255 // size
+    elif pattern_type == 'Vertical Gradient':
+        for i in range(size):
+            image[i, :] = i * 255 // size
+    elif pattern_type == 'Diagonal Gradient':
+        for i in range(size):
+            image[i, i] = i * 255 // size
+    elif pattern_type == 'Sine Wave Horizontal':
+        for i in
range(size): + image[:, i] = (np.sin(i / size * 2 * np.pi) * 127.5 + 127.5).astype(np.uint8) + elif pattern_type == 'Sine Wave Vertical': + for i in range(size): + image[i, :] = (np.sin(i / size * 2 * np.pi) * 127.5 + 127.5).astype(np.uint8) + elif pattern_type == 'Checkerboard': + checker_size = size // 8 + for i in range(0, size, checker_size): + for j in range(0, size, checker_size): + if (i // checker_size) % 2 == (j // checker_size) % 2: + image[i:i+checker_size, j:j+checker_size] = 255 + elif pattern_type == 'Circles': + for i in range(size): + for j in range(size): + if ((i - size // 2) ** 2 + (j - size // 2) ** 2) ** 0.5 < size // 4: + image[i, j] = 255 + elif pattern_type == 'Spiral': + x, y = np.ogrid[:size, :size] + r = np.hypot(x - size / 2, y - size / 2) + theta = np.arctan2(x - size / 2, y - size / 2) + image[..., 0] = (np.sin(5 * theta + r / 10) * 127.5 + 127.5).astype(np.uint8) + elif pattern_type == 'Random Noise': + image = np.random.randint(0, 256, (size, size, 3), dtype=np.uint8) + elif pattern_type == 'Stripes': + stripe_width = size // 10 + for i in range(0, size, stripe_width): + image[:, i:i+stripe_width] = (i // stripe_width) * 255 // (size // stripe_width) + + return image + +# Define the widget function with the magicgui decorator +@magicgui(pattern_type={"choices": [ + 'Horizontal Gradient', 'Vertical Gradient', 'Diagonal Gradient', + 'Sine Wave Horizontal', 'Sine Wave Vertical', 'Checkerboard', + 'Circles', 'Spiral', 'Random Noise', 'Stripes' +]}, call_button='Generate Pattern') +def generate_pattern_widget(pattern_type: str) -> ImageData: + return create_pattern(pattern_type) + +# The function can now be used as a widget in napari +# Example usage (assuming a napari viewer instance is available as `viewer`): +# viewer.window.add_dock_widget(generate_pattern_widget) +""" + + +def test_find_magicgui_decorated_function_name(): + function_name = find_magicgui_decorated_function_name(_some_code) + + assert function_name == 'generate_pattern_widget' diff --git a/src/napari_chatgpt/utils/strings/test/markdown_test.py b/src/napari_chatgpt/utils/strings/test/markdown_test.py new file mode 100644 index 0000000..ce777b5 --- /dev/null +++ b/src/napari_chatgpt/utils/strings/test/markdown_test.py @@ -0,0 +1,129 @@ +from napari_chatgpt.utils.strings.markdown import extract_markdown_blocks + +markdown_1 = """ +### Agent: +Tool response: +```python +import numpy as np +from napari.types import ImageData, Image +from napari.layers import Image as ImageLayer +from skimage.color import rgb2hsv, rgb2lab, rgb2gray +from magicgui import magicgui +from napari import Viewer + +@magicgui( + color_space={"choices": ["HSV", "LAB", "Grayscale"]}, + hue={"widget_type": "FloatSlider", 'min': 0.0, 'max': 1.0}, + saturation={"widget_type": "FloatSlider", 'min': 0.0, 'max': 1.0}, + value={"widget_type": "FloatSlider", 'min': 0.0, 'max': 1.0}, + apply_conversion={"call_button": "Apply Conversion"}, + result_widget=True +) +def color_space_explorer( + viewer: Viewer, + color_space: str, + hue: float = 0.5, + saturation: float = 0.5, + value: float = 0.5, + apply_conversion: bool = False +) -> ImageData: + + selected_layer = viewer.layers.selection.active + if not isinstance(selected_layer, ImageLayer): + return "Please select an image layer." 
+ + image_data = np.copy(selected_layer.data) + if image_data.dtype != np.float32: + image_data = image_data.astype(np.float32) / 255.0 + + def convert_color_space(image, color_space, hue, saturation, value): + if color_space == "HSV": + hsv_image = rgb2hsv(image) + hsv_image[..., 0] = hue + hsv_image[..., 1] = saturation + hsv_image[..., 2] = value + return hsv_image + elif color_space == "LAB": + lab_image = rgb2lab(image) + # LAB adjustments are not straightforward like HSV + # Typically, users don't adjust LAB channels directly + return lab_image + elif color_space == "Grayscale": + gray_image = rgb2gray(image) + return gray_image[..., np.newaxis] + else: + return image + + preview_image = convert_color_space(image_data, color_space, hue, saturation, value) + + if apply_conversion: + # Update the selected layer with the converted image + selected_layer.data = preview_image + return "Conversion applied." + else: + # Return the preview image without updating the layer + return preview_image + +# This function would be added to the napari viewer as a widget +# and is not meant to be called directly in a script. +# To add this widget to the napari viewer, you would use the following: +# viewer.window.add_dock_widget(color_space_explorer) +``` + +This widget function `color_space_explorer` allows the user to explore different color spaces and adjust parameters relevant to the selected color space. The user can select the target color space from a dropdown menu, adjust parameters using sliders, and apply the conversion to the selected image layer. The function also provides a preview of the converted image before applying it. The function is decorated with `magicgui` to create an interactive GUI within napari. +""" + + + + +markdown_2 = \ +""" +To create a widget that inverts the colors of an image, we will follow these steps: +1. Define a function that accepts an image array as input. This image array can be 2D grayscale, 3D grayscale, or nD where the last dimension is assumed to be color channels (e.g., RGB). +2. Inside the function, convert the image array to float type for processing. +3. Invert the colors of the image by subtracting the image data from the maximum possible value. For an image with values ranging from 0 to 1, this would be `1 - image_data`. For an image with values ranging from 0 to 255, it would be `255 - image_data`. +4. Decorate the function with the `@magicgui` decorator, specifying the call button text and setting `result_widget=False` since the function will return an image array. +5. Return the inverted image array. +Now, let's write the corresponding code: +```python +from napari.types import ImageData +from magicgui import magicgui +import numpy as np +@magicgui(call_button='Invert Colors', result_widget=False) +def invert_colors(image: ImageData) -> ImageData: + # Convert the image to float for processing + image_float = image.astype(float) + + # Invert the image colors + inverted_image = 255.0 - image_float + + return inverted_image +# The function `invert_colors` can now be used as a widget in napari. +# When an image layer is selected, this widget will invert its colors. +``` +This code defines a widget function that inverts the colors of an image. The function is decorated with `@magicgui` to create a GUI element in napari. When the user presses the "Invert Colors" button, the selected image's colors will be inverted, and the result will be displayed in the napari viewer. 
+""" + +def test_extract_markdown_blocks_1(): + blocks = extract_markdown_blocks(markdown_1) + + print(blocks[0]) + print(blocks[1]) + print(blocks[2]) + + assert len(blocks) == 3 + assert '```' not in blocks[0] + assert '```' in blocks[1] + assert '```' not in blocks[2] + +def test_extract_markdown_blocks_2(): + blocks = extract_markdown_blocks(markdown_2) + + print(blocks[0]) + print(blocks[1]) + print(blocks[2]) + + assert len(blocks) == 3 + assert '```' not in blocks[0] + assert '```' in blocks[1] + assert '```' not in blocks[2] \ No newline at end of file diff --git a/src/napari_chatgpt/utils/web/duckduckgo.py b/src/napari_chatgpt/utils/web/duckduckgo.py index b7f76a6..97b00f7 100644 --- a/src/napari_chatgpt/utils/web/duckduckgo.py +++ b/src/napari_chatgpt/utils/web/duckduckgo.py @@ -73,7 +73,7 @@ def search_images_ddg(query: str, ) -> str: lang = 'en-us' if lang == 'en' else lang - results = DDGS.images(query, + results = DDGS().images(keywords=query, region=lang, safesearch=safesearch, size=None,