From 3995fe923be06e56cf93e7d3f05adc3bca2c246e Mon Sep 17 00:00:00 2001
From: "Loic A. Royer"
Date: Wed, 13 Mar 2024 00:01:43 -0700
Subject: [PATCH] tested all and made minor tweaks.

---
 src/napari_chatgpt/omega/omega_agent/prompts.py | 12 +++++++-----
 .../napari/cell_nuclei_segmentation_tool.py | 16 ++++++++--------
 .../tools/napari/delegated_code/cellpose.py | 2 +-
 .../tools/napari/delegated_code/stardist.py | 15 +++++++++------
 .../omega/tools/napari/viewer_execution_tool.py | 2 +-
 .../omega/tools/napari/widget_maker_tool.py | 4 ++--
 src/napari_chatgpt/utils/openai/gpt_vision.py | 7 ++++---
 7 files changed, 32 insertions(+), 26 deletions(-)

diff --git a/src/napari_chatgpt/omega/omega_agent/prompts.py b/src/napari_chatgpt/omega/omega_agent/prompts.py
index ddf7189..2e067f3 100644
--- a/src/napari_chatgpt/omega/omega_agent/prompts.py
+++ b/src/napari_chatgpt/omega/omega_agent/prompts.py
@@ -1,10 +1,12 @@
 # flake8: noqa
 
-SYSTEM = """You are Omega. Your expertise lies in image processing and analysis. You have the ability to assist with various tasks, including answering simple questions and engaging in in-depth discussions on a wide range of subjects. Your responses are designed to resemble natural human conversation, ensuring coherence and relevance to the topic at hand.
-
-You possess extensive knowledge and proficiency in image processing, image analysis, and computer vision. Moreover, you can generate your own text based on the input provided, enabling you to participate in discussions, offer explanations, and provide descriptions on diverse topics. Your responses are accurate and informative, aiming to address a broad spectrum of questions.
-
+SYSTEM = \
+"""You are Omega. You are a helpful assistant with expertise in image processing, image analysis, and computer vision.
+You assist with various tasks, including answering simple questions and engaging in knowledgeable discussions on a wide range of subjects.
+Your responses are designed to resemble natural human conversation, ensuring coherence and relevance to the topic at hand.
+You offer explanations and descriptions on diverse topics, and your responses are accurate and informative.
+You can use all the tools and functions at your disposal (see below) to assist the user with image processing and image analysis.
+Since you are a helpful expert, you are polite and answer in the same language as the user's question.
 You have been created by Loic A. Royer, a Senior Group Leader and Director of Imaging AI at the Chan Zuckerberg Biohub San Francisco.
-
 """
 PERSONALITY = {}
diff --git a/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py b/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py
index 6c9df95..b8822ad 100644
--- a/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/cell_nuclei_segmentation_tool.py
@@ -47,15 +47,13 @@
 
 image: ArrayLike,
     Valid parameter for both StarDist and Cellpose.
-    Image for which to segment cells
+    2D or 3D Image for which to segment cells
 
 model_type: str,
     Valid parameter for both StarDist and Cellpose.
     Segmentation model:
     - For Cellpose it can be: cyto, nuclei. cyto -> cytoplasm (whole cell) model, nuclei -> nucleus model.
-    - For StarDist can be: '2D_versatile_fluo', '2D_versatile_he', '3D_versatile_fluo'. '2D_versatile_fluo' is trained on a broad range of fluorescent 2D semantic
-    segmentation images. '2D_versatile_he' is trained on H&E stained tissue (but may generalize to other
-    staining modalities). '3D_versatile_fluo' is trained for 3D fluorescence microscopy images.
+    - For StarDist can be: 'versatile_fluo', 'versatile_he'. 'versatile_fluo' is trained on a broad range of fluorescent images. 'versatile_he' is trained on H&E stained tissue (but may generalize to other staining modalities).
 
 normalize: Optional[bool]
     Valid parameter for both StarDist and Cellpose.
@@ -117,11 +115,13 @@
     If True, applies the watershed algorithm to the distance transform of the thresholded image.
     This is useful for separating cells that are touching.
 ```
 
-Note: some parameters above might refer to functions that are not available.
-All functions provided above return the segmented image as a labels array.
-When calling these functions, do not set optional parameters unless you have a good reason to change them.
-Use either ***AVAILABLE_FUNCTIONS*** directly without importing or implementing these functions, they will be provided to you by the system.
+**Notes:**
+- some parameters above might refer to functions that are not available.
+- All functions provided above return the segmented image as a labels array.
+- When calling these functions, do not set optional parameters unless you have a good reason to change them.
+- Use either ***AVAILABLE_FUNCTIONS*** directly without importing or implementing these functions, they will be provided to you by the system.
+- Although StarDist or Cellpose cannot by default segment 3D images, the functions given above are capable of handling 2D *and* 3D images.
 
 **Instructions:**
 {instructions}
diff --git a/src/napari_chatgpt/omega/tools/napari/delegated_code/cellpose.py b/src/napari_chatgpt/omega/tools/napari/delegated_code/cellpose.py
index 39dc9f3..b11befc 100644
--- a/src/napari_chatgpt/omega/tools/napari/delegated_code/cellpose.py
+++ b/src/napari_chatgpt/omega/tools/napari/delegated_code/cellpose.py
@@ -20,7 +20,7 @@ def cellpose_segmentation(image: ArrayLike,
     ----------
 
     image: ArrayLike
-        image for which to segment cells
+        image for which to segment cells, must be 2D or 3D.
 
     model_type: str
         Model type, can be: 'cyto' or 'nuclei' or 'cyto2'.
diff --git a/src/napari_chatgpt/omega/tools/napari/delegated_code/stardist.py b/src/napari_chatgpt/omega/tools/napari/delegated_code/stardist.py
index 9c3307f..c9e81a4 100644
--- a/src/napari_chatgpt/omega/tools/napari/delegated_code/stardist.py
+++ b/src/napari_chatgpt/omega/tools/napari/delegated_code/stardist.py
@@ -9,7 +9,7 @@
 
 ### SIGNATURE
 def stardist_segmentation(image: ArrayLike,
-                          model_type: str = '2D_versatile_fluo',
+                          model_type: str = 'versatile_fluo',
                           normalize: Optional[bool] = True,
                           norm_range_low: Optional[float] = 1.0,
                           norm_range_high: Optional[float] = 99.8,
@@ -22,13 +22,12 @@ def stardist_segmentation(image: ArrayLike,
     ----------
 
     image: ArrayLike
-        Image for which to segment cells. Must be 2D.
+        Image for which to segment cells. Must be 2D or 3D.
 
     model_type: str
-        Model type, pre-trained models include: '2D_versatile_fluo', '2D_versatile_he'.
-        '2D_versatile_fluo' is trained on a broad range of fluorescent 2D semantic
-        segmentation images.
-        '2D_versatile_he' is trained on H&E stained tissue (but may generalize to other
+        Model type, pre-trained models include: 'versatile_fluo', 'versatile_he'.
+        'versatile_fluo' is trained on a broad range of fluorescent images.
+        'versatile_he' is trained on H&E stained tissue (but may generalize to other
         staining modalities).
 
     normalize: Optional[bool]
@@ -60,6 +59,10 @@ def stardist_segmentation(image: ArrayLike,
     if len(image.shape) > 3:
         raise ValueError("The input image must be 2D or 3D.")
 
+    # Add '2D_' as prefix to the model if not yet a prefix:
+    if not model_type.startswith('2D_'):
+        model_type = '2D_' + model_type
+
     # Convert image to float
     image = image.astype(float, copy=False)
diff --git a/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py b/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py
index a555238..b676160 100644
--- a/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/viewer_execution_tool.py
@@ -80,7 +80,7 @@ class NapariViewerExecutionTool(NapariBaseTool):
         "Use this tool when you need to perform tasks that require access to the napari viewer instance and its layers. "
         "This tool can perform any task that requires access to the viewer, its layers, and data contained in the layers. "
         "The input must be a plain text description of what you want to do, it should not contain code, it must not assume knowledge of our conversation, and it must be explicit about what is asked."
-        "For example, you can ask to 'save the selected image to a file', or 'write in a CSV file the list of segments in layer `segmented` ', or 'open a saved image with teh system viewer'. "
+        "For example, you can ask to 'save the selected image to a file', or 'write in a CSV file on the desktop the list of segments in layer `segmented` ', or 'open file with the system viewer'. "
         "This tool returns a message that summarises what was done. "
     )
     prompt = _napari_viewer_execution_prompt
diff --git a/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py b/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py
index 831e71b..201ee03 100644
--- a/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py
+++ b/src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py
@@ -59,8 +59,8 @@
 - Accept integers, floats, booleans, or any other type that is compatible with the magicgui library.
 - Decorate the function with the magicgui decorator: '@magicgui(call_button='Run')'.
 - Ideally, replace the call_button text 'Run' with a few short words that more explicit describe the action of the widget.
-- Set 'result_widget=True' in the decorator, if and only if, the widget function returns a string, or a single int, float, list or tuple.
-- Set 'result_widget=False' in the decorator, if the widget function returns an array or a napari layer.
+- Set 'result_widget=True' in the decorator, if and ONLY if, the widget function returns a string, or a *single* int, float, or a *short* list or tuple.
+- Set 'result_widget=False' in the decorator, the most likely setting, if the widget function returns a numpy array, a napari data type *Data, or a napari layer.
 - To expose a float parameter as a slider, include ={{"widget_type": "FloatSlider", 'min':, 'max': }} in the decorator.
 - To expose a string parameter as dropdown choice, include ={{"choices": ['first', 'second', 'third']}}.
 - Do NOT use tuples for widget function parameters.
diff --git a/src/napari_chatgpt/utils/openai/gpt_vision.py b/src/napari_chatgpt/utils/openai/gpt_vision.py
index ebc81f0..2ba1502 100644
--- a/src/napari_chatgpt/utils/openai/gpt_vision.py
+++ b/src/napari_chatgpt/utils/openai/gpt_vision.py
@@ -56,7 +56,7 @@ def describe_image(image_path: str,
 
     """
 
-    with asection(f"Asking GPT-vision to analyse a given image at path: '{image_path}':"):
+    with (asection(f"Asking GPT-vision to analyse a given image at path: '{image_path}':")):
         aprint(f"Query: '{query}'")
         aprint(f"Model: '{model}'")
         aprint(f"Max tokens: '{max_tokens}'")
@@ -129,8 +129,9 @@ def describe_image(image_path: str,
                 continue
 
             # if the response contains these words: "sorry" and ("I cannot" or "I can't") then try again:
-            if "sorry" in response_lc and ("i cannot" in response_lc or "i can't" in response_lc):
-                aprint(f"Response contains the words 'sorry' and 'I cannot' or 'I can't'. Trying again...")
+            if ("sorry" in response_lc and ("i cannot" in response_lc or "i can't" in response_lc or 'i am unable' in response_lc)) \
+                    or "i cannot assist" in response_lc:
+                aprint(f"Vision model refuses to assist (response: {response}). Trying again...")
                 continue
             else:
                 return response