
Dev #38

Merged: 4 commits, Mar 13, 2024
8 changes: 4 additions & 4 deletions setup.cfg
@@ -1,6 +1,6 @@
[metadata]
name = napari-chatgpt
version = v2024.2.24
version = v2024.3.13
description = A napari plugin to process and analyse images with chatGPT.
long_description = file: README.md
long_description_content_type = text/markdown
@@ -36,9 +36,9 @@ install_requires =
scikit-image
qtpy
QtAwesome
langchain==0.1.5
langchain-openai==0.0.5
openai
langchain==0.1.11
langchain-openai==0.0.8
openai==1.13.3
anthropic
fastapi
uvicorn
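As a quick post-install sanity check, the new pins can be verified at runtime. A minimal sketch (expected version strings taken from the pins above):

```python
import langchain
import openai

# Expected to match the pins in setup.cfg:
print(langchain.__version__)  # 0.1.11
print(openai.__version__)     # 1.13.3
```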
3 changes: 3 additions & 0 deletions src/microplugin/code_editor/code_snippet_editor_widget.py
@@ -216,6 +216,7 @@ def show_context_menu(self, position):
context_menu = QMenu(self)

# Instantiate actions for the context menu:
refresh_action = QAction("Refresh file list", self)
rename_action = QAction("Rename", self)
duplicate_action = QAction("Duplicate", self)
delete_action = QAction("Delete", self)
@@ -230,6 +231,7 @@ def show_context_menu(self, position):
modify_action = QAction("Modify", self)

# Add actions to the context menu:
context_menu.addAction(refresh_action)
context_menu.addAction(rename_action)
context_menu.addAction(duplicate_action)
context_menu.addAction(delete_action)
@@ -244,6 +246,7 @@ def show_context_menu(self, position):
context_menu.addAction(modify_action)

# Connect the actions to the corresponding slots:
refresh_action.triggered.connect(self.populate_list)
rename_action.triggered.connect(self.rename_file)
duplicate_action.triggered.connect(self.duplicate_file)
delete_action.triggered.connect(self.delete_file_from_context_menu)
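Condensed, the three additions wire a new context-menu entry to the existing list-refresh slot. A minimal sketch of the pattern (widget plumbing elided; with recent qtpy, QAction is imported from QtGui):

```python
from qtpy.QtGui import QAction
from qtpy.QtWidgets import QMenu

def show_context_menu(self, position):
    context_menu = QMenu(self)
    refresh_action = QAction("Refresh file list", self)
    context_menu.addAction(refresh_action)
    # populate_list() rebuilds the snippet list from the files on disk:
    refresh_action.triggered.connect(self.populate_list)
    context_menu.exec_(self.mapToGlobal(position))
```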
4 changes: 3 additions & 1 deletion src/napari_chatgpt/_widget.py
@@ -126,6 +126,8 @@ def _model_selection(self):
model_list.append('claude-2.1')
model_list.append('claude-2.0')
model_list.append('claude-instant-1.2')
#model_list.append('claude-3-sonnet-20240229')
#model_list.append('claude-3-opus-20240229')


if is_ollama_running():
@@ -157,7 +159,7 @@ def _model_selection(self):
model_list = best_models + [m for m in model_list if m not in best_models]

# Ensure that the very best models are at the top of the list:
very_best_models = [m for m in model_list if ('1106' in m and 'gpt-4' in m) ]
very_best_models = [m for m in model_list if ('gpt-4-0125' in m) ]
model_list = very_best_models + [m for m in model_list if m not in very_best_models]

# normalise list:
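The effect of the changed filter is easiest to see in isolation. A toy run of the reordering (model names illustrative):

```python
model_list = ['gpt-3.5-turbo', 'claude-2.1', 'gpt-4-0125-preview']

# Ensure that the very best models are at the top of the list:
very_best_models = [m for m in model_list if 'gpt-4-0125' in m]
model_list = very_best_models + [m for m in model_list if m not in very_best_models]

print(model_list)  # ['gpt-4-0125-preview', 'gpt-3.5-turbo', 'claude-2.1']
```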
12 changes: 7 additions & 5 deletions src/napari_chatgpt/omega/omega_agent/prompts.py
@@ -1,10 +1,12 @@
# flake8: noqa
SYSTEM = """You are Omega. Your expertise lies in image processing and analysis. You have the ability to assist with various tasks, including answering simple questions and engaging in in-depth discussions on a wide range of subjects. Your responses are designed to resemble natural human conversation, ensuring coherence and relevance to the topic at hand.

You possess extensive knowledge and proficiency in image processing, image analysis, and computer vision. Moreover, you can generate your own text based on the input provided, enabling you to participate in discussions, offer explanations, and provide descriptions on diverse topics. Your responses are accurate and informative, aiming to address a broad spectrum of questions.

SYSTEM = \
"""You are Omega. You are an helpful assistant with expertise in image processing, image analysis, and computer vision.
You assist with various tasks, including answering simple questions and engaging in knowledgeable discussions on a wide range of subjects.
Your responses are designed to resemble natural human conversation, ensuring coherence and relevance to the topic at hand.
You offer explanations and descriptions on diverse topics, and your responses are accurate and informative.
You can use all the tools and functions at your disposal (see below) to assist the user with image processing and image analysis.
Since you are a helpful expert, you are polite and answer in the same language as the user's question.
You have been created by Loic A. Royer, a Senior Group Leader and Director of Imaging AI at the Chan Zuckerberg Biohub San Francisco.

"""

PERSONALITY = {}
@@ -47,15 +47,13 @@

image: ArrayLike,
Valid parameter for both StarDist and Cellpose.
Image for which to segment cells
2D or 3D image for which to segment cells

model_type: str,
Valid parameter for both StarDist and Cellpose.
Segmentation model:
- For Cellpose it can be: cyto, nuclei. cyto -> cytoplasm (whole cell) model, nuclei -> nucleus model.
- For StarDist it can be: '2D_versatile_fluo', '2D_versatile_he', '3D_versatile_fluo'. '2D_versatile_fluo' is trained on a broad range of fluorescent 2D semantic
segmentation images. '2D_versatile_he' is trained on H&E stained tissue (but may generalize to other
staining modalities). '3D_versatile_fluo' is trained for 3D fluorescence microscopy images.
- For StarDist it can be: 'versatile_fluo', 'versatile_he'. 'versatile_fluo' is trained on a broad range of fluorescent images. 'versatile_he' is trained on H&E stained tissue (but may generalize to other staining modalities).

normalize: Optional[bool]
Valid parameter for both StarDist and Cellpose.
@@ -117,11 +115,13 @@
If True, applies the watershed algorithm to the distance transform of the thresholded image.
This is useful for separating cells that are touching.
```
Note: some parameters above might refer to functions that are not available.

All functions provided above return the segmented image as a labels array.
When calling these functions, do not set optional parameters unless you have a good reason to change them.
Use either ***AVAILABLE_FUNCTIONS*** directly without importing or implementing these functions, they will be provided to you by the system.
**Notes:**
- Some parameters above might refer to functions that are not available.
- All functions provided above return the segmented image as a labels array.
- When calling these functions, do not set optional parameters unless you have a good reason to change them.
- Use any of the ***AVAILABLE_FUNCTIONS*** directly, without importing or implementing them; they will be provided to you by the system.
- Although StarDist and Cellpose cannot segment 3D images by default, the functions given above can handle both 2D *and* 3D images.

**Instructions:**
{instructions}
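For illustration, a call that follows these notes might look like this (a sketch: the segmentation function is injected by the system, and the layer name is made up):

```python
# No import needed: stardist_segmentation is provided by the system.
labels = stardist_segmentation(image, model_type='versatile_fluo')

# Add the resulting labels array to the napari viewer:
viewer.add_labels(labels, name='segmented nuclei')
```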
@@ -20,7 +20,7 @@ def cellpose_segmentation(image: ArrayLike,
----------

image: ArrayLike
image for which to segment cells
image for which to segment cells, must be 2D or 3D.

model_type: str
Model type, can be: 'cyto' or 'nuclei' or 'cyto2'.
15 changes: 9 additions & 6 deletions src/napari_chatgpt/omega/tools/napari/delegated_code/stardist.py
@@ -9,7 +9,7 @@

### SIGNATURE
def stardist_segmentation(image: ArrayLike,
model_type: str = '2D_versatile_fluo',
model_type: str = 'versatile_fluo',
normalize: Optional[bool] = True,
norm_range_low: Optional[float] = 1.0,
norm_range_high: Optional[float] = 99.8,
@@ -22,13 +22,12 @@ def stardist_segmentation(image: ArrayLike,
----------

image: ArrayLike
Image for which to segment cells. Must be 2D.
Image for which to segment cells. Must be 2D or 3D.

model_type: str
Model type, pre-trained models include: '2D_versatile_fluo', '2D_versatile_he'.
'2D_versatile_fluo' is trained on a broad range of fluorescent 2D semantic
segmentation images.
'2D_versatile_he' is trained on H&E stained tissue (but may generalize to other
Model type, pre-trained models include: 'versatile_fluo', 'versatile_he'.
'versatile_fluo' is trained on a broad range of fluorescent images.
'versatile_he' is trained on H&E stained tissue (but may generalize to other
staining modalities).


@@ -60,6 +59,10 @@ def stardist_segmentation(image: ArrayLike,
if len(image.shape) > 3:
raise ValueError("The input image must be 2D or 3D.")

# Add '2D_' as prefix to the model if not yet a prefix:
if not model_type.startswith('2D_'):
model_type = '2D_' + model_type

# Convert image to float
image = image.astype(float, copy=False)

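The new prefix handling is what lets callers use the shortened model names from the updated docstring. A standalone sketch (the helper name is hypothetical; the PR inlines this logic):

```python
def expand_stardist_model_type(model_type: str) -> str:
    # StarDist registers its pre-trained 2D models as '2D_versatile_fluo'
    # and '2D_versatile_he', so expand the shortened names accordingly:
    if not model_type.startswith('2D_'):
        model_type = '2D_' + model_type
    return model_type

assert expand_stardist_model_type('versatile_fluo') == '2D_versatile_fluo'
assert expand_stardist_model_type('2D_versatile_he') == '2D_versatile_he'
```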
12 changes: 6 additions & 6 deletions src/napari_chatgpt/omega/tools/napari/delegated_code/utils.py
@@ -41,13 +41,13 @@ def get_description_of_algorithms() -> str:

for algo in algos:
if 'cellpose' in algo:
description += "'segment cell nuclei in selected layer with StarDist', "
description += "'segment cell's cytoplams in given layer with Cellpose', "

elif 'stardist' in algo:
description += "'segment cell's cytoplams in selected layer with Cellpose', "
description += "'segment cell nuclei in selected 3D layer with StarDist', "

elif 'classic' in algo:
description += "'segment cell nuclei in selected 3D image with Classic', "
description += "'segment cell nuclei in 3D layer named 'some_name' with Classic', "

# remove last comma:
description = description[:-2]
Expand All @@ -56,13 +56,13 @@ def get_description_of_algorithms() -> str:

for algo in algos:
if 'cellpose' in algo:
description += "cellpose for the cell cytoplasm/membrane outline, "
description += "Cellpose for segmenting irregular or non-convex cytoplasms or membrane outlines in 2D, "

elif 'stardist' in algo:
description += "stardist for segmenting nuclei, "
description += "StarDist for segmenting near-convex nuclei in 2D or 3D, "

elif 'classic' in algo:
description += "classic for 3D images, "
description += "Classic for very contrasted and easy to segment 2D or 3D images, "

# remove last comma:
description = description[:-2]
@@ -80,7 +80,7 @@ class NapariViewerExecutionTool(NapariBaseTool):
"Use this tool when you need to perform tasks that require access to the napari viewer instance and its layers. "
"This tool can perform any task that requires access to the viewer, its layers, and data contained in the layers. "
"The input must be a plain text description of what you want to do, it should not contain code, it must not assume knowledge of our conversation, and it must be explicit about what is asked."
"For example, you can ask to 'save the selected image to a file', or 'write in a CSV file the list of segments in layer `segmented` ', or 'open a saved image with teh system viewer'. "
"For example, you can ask to 'save the selected image to a file', or 'write in a CSV file on the desktop the list of segments in layer `segmented` ', or 'open file <filename> with the system viewer'. "
"This tool returns a message that summarises what was done. "
)
prompt = _napari_viewer_execution_prompt
11 changes: 11 additions & 0 deletions src/napari_chatgpt/omega/tools/napari/viewer_vision_tool.py
@@ -51,6 +51,17 @@ def _run_code(self, query: str, code: str, viewer: Viewer) -> str:
# Regex search for layer name:
match = re.search(r'\*(.*?)\*', query)

# If there is no match, look for words that start with '*':
if not match:
# This sometimes happens if the LLM gets confused about the exact syntax requested:

# We find a match with just one star '*' and a word:
match = re.search(r'\*(.*?)[\s]+', query)

# If there is a match, add the missing '*' at the end:
if match:
query = query.replace(match.group(1), f"{match.group(1)}*")

# Check if the layer name is present in the input:
if match or '*selected*' in query or '*active*' in query or '*current*' in query:

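A quick illustration of the fallback (the query text is made up):

```python
import re

query = "describe the layer *cells please"

# Strict form first: a layer name fully wrapped in stars, e.g. *cells*:
match = re.search(r'\*(.*?)\*', query)

if not match:
    # Lenient form: an opening star, then a word, then whitespace:
    match = re.search(r'\*(.*?)[\s]+', query)
    if match:
        # Re-insert the missing closing star:
        query = query.replace(match.group(1), f"{match.group(1)}*")

print(query)  # -> "describe the layer *cells* please"
```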
4 changes: 2 additions & 2 deletions src/napari_chatgpt/omega/tools/napari/widget_maker_tool.py
@@ -59,8 +59,8 @@
- Accept integers, floats, booleans, or any other type that is compatible with the magicgui library.
- Decorate the function with the magicgui decorator: '@magicgui(call_button='Run')'.
- Ideally, replace the call_button text 'Run' with a few short words that more explicitly describe the action of the widget.
- Set 'result_widget=True' in the decorator, if and only if, the widget function returns a string, or a single int, float, list or tuple.
- Set 'result_widget=False' in the decorator, if the widget function returns an array or a napari layer.
- Set 'result_widget=True' in the decorator if, and ONLY if, the widget function returns a string, or a *single* int, float, or a *short* list or tuple.
- Set 'result_widget=False' in the decorator, the most likely setting, if the widget function returns a numpy array, a napari data type *Data, or a napari layer.
- To expose a float parameter as a slider, include <parameter_name>={{"widget_type": "FloatSlider", 'min':<min_value>, 'max': <max_value>}} in the decorator.
- To expose a string parameter as dropdown choice, include <parameter_name>={{"choices": ['first', 'second', 'third']}}.
- Do NOT use tuples for widget function parameters.
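An illustrative widget that follows these rules (the function name and body are placeholders):

```python
from magicgui import magicgui
from napari.types import ImageData

@magicgui(call_button='Denoise Image',
          sigma={"widget_type": "FloatSlider", 'min': 0.1, 'max': 5.0},
          mode={"choices": ['gaussian', 'median', 'bilateral']})
def denoise_widget(image: ImageData,
                   sigma: float = 1.0,
                   mode: str = 'gaussian') -> ImageData:
    # Returns an array, so result_widget stays at its default of False:
    return image  # placeholder: a real widget would denoise here
```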
3 changes: 2 additions & 1 deletion src/napari_chatgpt/utils/images/normalize.py
@@ -1,5 +1,6 @@
import numpy as np
from napari.types import ArrayLike
from numpy import ravel, percentile


def normalize_img(image: ArrayLike,
@@ -28,7 +29,7 @@ def normalize_img(image: ArrayLike,
Normalized image
"""
# Calculate lower and higher percentiles:
v_low, v_high = np.percentile(image, [p_low, p_high])
v_low, v_high = percentile(ravel(image), [p_low, p_high])

# rescale the image:
normalized_image = (image - v_low) / (v_high - v_low + 1e-6)
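A toy check of the rescaling, mirroring the changed line (percentile values assumed to be the docstring defaults of 1.0 and 99.8):

```python
import numpy as np
from numpy import ravel, percentile

image = np.random.rand(64, 64) * 1000.0

# Calculate lower and higher percentiles on the flattened image:
v_low, v_high = percentile(ravel(image), [1.0, 99.8])

# Rescale; the small epsilon guards against division by zero:
normalized_image = (image - v_low) / (v_high - v_low + 1e-6)
print(normalized_image.min(), normalized_image.max())  # near 0 and 1; outliers fall just outside
```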
3 changes: 2 additions & 1 deletion src/napari_chatgpt/utils/notebook/jupyter_notebook.py
@@ -67,10 +67,11 @@ def restart(self,
self._modified = False

def write(self, file_path: Optional[str] = None):
self.file_path = file_path or self.default_file_path
file_path = file_path or self.default_file_path
# Write the notebook to disk
with open(file_path, 'w') as f:
nbformat.write(self.notebook, f)
self.file_path = file_path

def add_code_cell(self, code: str, remove_quotes: bool = False):

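The reordering here is about error-safety. The hunk restated with the rationale as comments (a sketch; the surrounding class is elided):

```python
def write(self, file_path=None):
    file_path = file_path or self.default_file_path
    # Write the notebook to disk; this may raise, e.g. on an unwritable path:
    with open(file_path, 'w') as f:
        nbformat.write(self.notebook, f)
    # Record the path only after the write has succeeded, so that
    # self.file_path never points to a file that was never written:
    self.file_path = file_path
```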
57 changes: 40 additions & 17 deletions src/napari_chatgpt/utils/openai/gpt_vision.py
@@ -30,7 +30,8 @@ def is_gpt_vision_available(vision_model_name: str = 'gpt-4-vision-preview') ->
def describe_image(image_path: str,
query: str = 'Here is an image, please carefully describe it in detail.',
model: str = "gpt-4-vision-preview",
max_tokens: int = 4096
max_tokens: int = 4096,
number_of_tries: int = 4,
) -> str:
"""
Describe an image using GPT-vision.
@@ -45,6 +46,8 @@ def describe_image(image_path: str,
Model to use
max_tokens : int
Maximum number of tokens to use
number_of_tries : int
Number of times to try to send the request to GPT.

Returns
-------
@@ -53,7 +56,7 @@

"""

with asection(f"Asking GPT-vision to analyse a given image at path: '{image_path}':"):
with (asection(f"Asking GPT-vision to analyse a given image at path: '{image_path}':")):
aprint(f"Query: '{query}'")
aprint(f"Model: '{model}'")
aprint(f"Max tokens: '{max_tokens}'")
@@ -97,21 +100,41 @@
set_api_key('OpenAI')

try:
# Instantiate API entry points:
client = OpenAI()
completions = Completions(client)

# Send a request to GPT:
result = completions.create(model=model,
messages=prompt_messages,
max_tokens=max_tokens)

# Actual response:
response = result.choices[0].message.content

aprint(f"Response: '{response}'")

return response
for tries in range(number_of_tries):

# Instantiate API entry points:
client = OpenAI()
completions = Completions(client)

# Send a request to GPT:
result = completions.create(model=model,
messages=prompt_messages,
max_tokens=max_tokens)

# Actual response:
response = result.choices[0].message.content
aprint(f"Response: '{response}'")

# Check if the response is empty:
if not response:
aprint(f"Response is empty. Trying again...")
continue

# Response in lower case and trimmed of whitespace:
response_lc = response.lower().strip()

# Check if response is too short:
if len(response) < 3:
aprint(f"Response is empty. Trying again...")
continue

# If the response refuses to assist ("sorry" plus "I cannot"/"I can't"/"I am unable", or "I cannot assist"), try again:
if ("sorry" in response_lc and ("i cannot" in response_lc or "i can't" in response_lc or 'i am unable' in response_lc)) \
or "i cannot assist" in response_lc:
aprint(f"Vision model refuses to assist (response: {response}). Trying again...")
continue
else:
return response

except Exception as e:
# Log the error:
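Hypothetical usage of the retried call (the path and query are made up; defaults per the new signature):

```python
description = describe_image(
    image_path='/path/to/viewer_screenshot.png',
    query='Describe the contents of this napari viewer screenshot.',
    number_of_tries=4,
)
print(description)
```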
4 changes: 3 additions & 1 deletion src/napari_chatgpt/utils/openai/max_token_limit.py
@@ -1,5 +1,7 @@
def openai_max_token_limit(llm_model_name):
if 'gpt-4-1106-preview' in llm_model_name or 'gpt-4-vision-preview' in llm_model_name:
if ('gpt-4-1106-preview' in llm_model_name
or 'gpt-4-0125-preview' in llm_model_name
or 'gpt-4-vision-preview' in llm_model_name):
max_token_limit = 128000
elif '32k' in llm_model_name:
max_token_limit = 32000
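Assuming the function returns max_token_limit (the tail of the elif chain is truncated here), the mapping behaves like this:

```python
assert openai_max_token_limit('gpt-4-0125-preview') == 128000
assert openai_max_token_limit('gpt-4-32k') == 32000
```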