Skip to content

Commit

Permalink
improved Jordao's napari reader plugin in file open tool,
Browse files Browse the repository at this point in the history
and OpenAI and Anthropic model list code. Also made web search tests more robust to rate limits.
  • Loading branch information
royerloic committed Aug 28, 2024
1 parent 49af4ea commit 7513324
Show file tree
Hide file tree
Showing 10 changed files with 239 additions and 84 deletions.
39 changes: 6 additions & 33 deletions src/napari_chatgpt/_widget.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,13 @@
from qtpy.QtWidgets import QVBoxLayout, QComboBox

from microplugin.microplugin_window import MicroPluginMainWindow
from napari_chatgpt.utils.anthropic.model_list import get_anthropic_model_list
from napari_chatgpt.utils.configuration.app_configuration import \
AppConfiguration
from napari_chatgpt.utils.ollama.ollama_server import is_ollama_running, \
get_ollama_models
from napari_chatgpt.utils.openai.model_list import get_openai_model_list
from napari_chatgpt.utils.openai.model_list import get_openai_model_list, \
postprocess_openai_model_list
from napari_chatgpt.utils.python.installed_packages import \
is_package_installed
from napari_chatgpt.utils.qt.one_time_disclaimer_dialog import \
Expand Down Expand Up @@ -123,44 +125,15 @@ def _model_selection(self):

if is_package_installed('anthropic'):
# Add Anthropic models to the combo box:
model_list.append('claude-2.1')
model_list.append('claude-2.0')
model_list.append('claude-instant-1.2')
model_list.append('claude-3-sonnet-20240229')
model_list.append('claude-3-opus-20240229')

model_list.extend(get_anthropic_model_list())

if is_ollama_running():
ollama_models = get_ollama_models()
for ollama_model in ollama_models:
model_list.append('ollama_'+ollama_model)

# Postprocess model list:

# Special cases (common prefix):
if 'gpt-3.5-turbo' in model_list:
model_list.remove('gpt-3.5-turbo')

# get list of bad models for main LLM:
bad_models_filters = ['0613', 'vision', 'turbo-instruct', 'gpt-3.5-turbo-0301', 'gpt-3.5-turbo-16k']

# get list of best models for main LLM:
best_models_filters = ['0314', '0301', '1106', 'gpt-4']

# Ensure that some 'bad' or unsupported models are excluded:
bad_models = [m for m in model_list if any(bm in m for bm in bad_models_filters)]
for bad_model in bad_models:
if bad_model in model_list:
model_list.remove(bad_model)
# model_list.append(bad_model)

# Ensure that the best models are at the top of the list:
best_models = [m for m in model_list if any(bm in m for bm in best_models_filters)]
model_list = best_models + [m for m in model_list if m not in best_models]

# Ensure that the very best models are at the top of the list:
very_best_models = [m for m in model_list if ('gpt-4-turbo-2024-04-09' in m) ]
model_list = very_best_models + [m for m in model_list if m not in very_best_models]
# Postprocess OpenAI model list:
model_list = postprocess_openai_model_list(model_list)

# normalise list:
model_list = list(model_list)
Expand Down
33 changes: 21 additions & 12 deletions src/napari_chatgpt/omega/tools/napari/file_open_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ class NapariFileOpenTool(NapariBaseTool):
"Use this tool when you need to open image files in napari. "
"Input must be a plain text list of local file paths or URLs to be opened. "
"The list must be \\n delimited, i.e one entry per line. "
"The first item on the list must be the requested 'napari-plugin', if none is provided, use 'napari'."
"For for each file a specific napari reader plugin can be specified within brackets: 'file_path_or_url [reader_plugin_name]'. "
"This tool can only open image files with these extensions: .tif, .png, .jpg, .zarr, and more... "
"For example, if the input is: 'file1.tif\\nfile2.tif\\nfile3.tif' then this tool will open three images in napari. "
"This tool cannot open text files or other non-image files. "
Expand All @@ -28,26 +28,34 @@ def _run_code(self, query: str, code: str, viewer: Viewer) -> str:

with asection(f"NapariFileOpenTool: query= {query} "):

# Split lines:
lines = query.splitlines()

# Files opened:
opened_files = []

# Errors encountered:
encountered_errors = []

plugin = lines[0]
# Split lines:
lines = query.splitlines()

# Remove any whitespace from the list entries:
lines = [line.strip() for line in lines]

for line in lines[1:]:
for line in lines:

# Remove whitespaces:
line = line.strip()

aprint(f"Trying to open file: '{line}' ")
# Check if a plugin is specified:
if '[' in line and ']' in line:
plugin = line[line.index('[') + 1:line.index(']')].strip()
line = line[:line.index('[')].strip()
else:
plugin = None

# Try to open file:
try:
aprint(f"Trying to open file: '{line}' with plugin '{plugin}'")

success = open_in_napari(viewer, line, plugin=plugin)

if success:
Expand All @@ -62,16 +70,17 @@ def _run_code(self, query: str, code: str, viewer: Viewer) -> str:
# Encountered errors string:
encountered_errors_str = '\n'.join(encountered_errors)

aprint(
f"Encountered the following errors while trying to open the files:\n" \
f"{encountered_errors_str}\n")
if encountered_errors:
aprint(
f"Encountered the following errors while trying to open the files:\n" \
f"{encountered_errors_str}\n")

# Return outcome:
if len(opened_files) == len(lines):
if len(opened_files) == len(lines) and len(encountered_errors) == 0:
result = f"All of the image files: '{', '.join(opened_files)}' could be successfully opened in napari. "
aprint(result)
return result
elif len(opened_files) > 0:
elif len(opened_files) > 0 and len(encountered_errors) > 0:
result = f"Some of the image files: '{', '.join(opened_files)}' could be successfully opened in napari.\n" \
f"Here are the exceptions, if any, that occurred:\n" \
f"{encountered_errors_str}.\n"
Expand Down
Empty file.
36 changes: 36 additions & 0 deletions src/napari_chatgpt/utils/anthropic/model_list.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,36 @@
import traceback

from arbol import asection, aprint

from napari_chatgpt.utils.api_keys.api_key import set_api_key




def get_anthropic_model_list() -> list:
    """
    Get the list of all Anthropic models usable as the main LLM.

    The list is hardcoded because, at the time of writing, Anthropic's
    API does not expose a model-listing endpoint. Models are listed in
    a fixed, curated order.

    Returns
    -------
    list
        List of Anthropic model names.
    """

    with asection("Enumerating all Anthropic models:"):
        # Hardcoded list -- keep in sync with Anthropic's published models:
        model_list = [
            'claude-3-opus-20240229',
            'claude-3-sonnet-20240229',
            'claude-3-haiku-20240307',
            'claude-3-5-sonnet-20240620',
        ]

        return model_list



63 changes: 63 additions & 0 deletions src/napari_chatgpt/utils/openai/model_list.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import traceback

from arbol import asection, aprint
from exceptiongroup import catch

from napari_chatgpt.utils.api_keys.api_key import set_api_key

Expand Down Expand Up @@ -60,3 +61,65 @@ def get_openai_model_list(filter: str = 'gpt', verbose: bool = False) -> list:

return []


def postprocess_openai_model_list(model_list: list) -> list:
    """
    Postprocess the list of OpenAI models.

    This is useful to remove problematic models from the list and to
    sort models in decreasing order of quality. The input list is not
    mutated; a new list is returned.

    Parameters
    ----------
    model_list : list
        List of OpenAI model names.

    Returns
    -------
    list
        Postprocessed list of model names. If postprocessing fails for
        any reason, the list is returned as-is.
    """

    try:
        # Substring filters for models that are unsupported or perform
        # poorly as the main LLM. Note: 'gpt-3.5-turbo' also matches all
        # of its dated variants (e.g. '-0613', '-0301', '-1106', '-0125',
        # '-16k'), so they need not be listed separately:
        bad_models_filters = ['0613', 'vision',
                              'turbo-instruct',
                              'gpt-3.5-turbo',
                              'chatgpt-4o-latest']

        # Substring filters for the models preferred as the main LLM:
        best_models_filters = ['0314', '0301', '1106', 'gpt-4', 'gpt-4o']

        # Exclude 'bad' or unsupported models:
        model_list = [m for m in model_list
                      if not any(bm in m for bm in bad_models_filters)]

        # Move the best models to the top of the list (stable order):
        best_models = [m for m in model_list
                       if any(bm in m for bm in best_models_filters)]
        model_list = best_models + [m for m in model_list
                                    if m not in best_models]

        # Move the very best models to the very top of the list:
        very_best_models = [m for m in model_list if 'gpt-4o' in m]
        model_list = very_best_models + [m for m in model_list
                                         if m not in very_best_models]

    except Exception as e:
        # Was a plain string containing a literal '{e}' (missing f-prefix):
        aprint(f"Error {e} occurred while postprocessing the list of OpenAI models!")

        # print stacktrace:
        traceback.print_exc()

    # Return outside of a 'finally' clause so that BaseExceptions such as
    # KeyboardInterrupt are not silently swallowed (the old 'return' in
    # 'finally' suppressed any in-flight exception):
    return model_list
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,8 @@ def test_get_function_signature():

signature = get_function_signature('numpy.zeros_like', include_docstring=True)
aprint(signature)
assert 'zeros_like(a, dtype, order, subok, shape)' in signature

assert 'zeros_like(a, dtype, order, subok, shape, device)' in signature or 'zeros_like(a, dtype, order, subok, shape)' in signature
assert 'shape : int or sequence of ints, optional.' in signature

print('\n\n')
Expand Down
37 changes: 27 additions & 10 deletions src/napari_chatgpt/utils/web/test/duckduckgo_test.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import pytest
from arbol import aprint
from duckduckgo_search.exceptions import RatelimitException

from napari_chatgpt.utils.api_keys.api_key import is_api_key_available
from napari_chatgpt.utils.web.duckduckgo import summary_ddg
Expand All @@ -8,16 +9,32 @@
@pytest.mark.skipif(not is_api_key_available('OpenAI'),
reason="requires OpenAI key to run")
def test_duckduckgo_search_overview_summary():
query = 'Mickey Mouse'
text = summary_ddg(query, do_summarize=True)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

try:
query = 'Mickey Mouse'
text = summary_ddg(query, do_summarize=True)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

except RatelimitException as e:
aprint(f"RatelimitException: {e}")
aprint(f"RatelimitException: {e.response}")
aprint(f"RatelimitException: {e.response.text}")




def test_duckduckgo_search_overview():
query = 'Mickey Mouse'
text = summary_ddg(query, do_summarize=False)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

try:
query = 'Mickey Mouse'
text = summary_ddg(query, do_summarize=False)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

except RatelimitException as e:
aprint(f"RatelimitException: {e}")
aprint(f"RatelimitException: {e.response}")
aprint(f"RatelimitException: {e.response.text}")
16 changes: 13 additions & 3 deletions src/napari_chatgpt/utils/web/test/google_test.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,20 @@
from arbol import aprint
from duckduckgo_search.exceptions import RatelimitException

from napari_chatgpt.utils.web.google import search_overview


def test_google_search_overview():
term = 'wiki Mickey Mouse'
text = search_overview(term)

aprint(text)
try:
term = 'wiki Mickey Mouse'
text = search_overview(term)

aprint(text)

except RatelimitException as e:
aprint(f"RatelimitException: {e}")
aprint(f"RatelimitException: {e.response}")
aprint(f"RatelimitException: {e.response.text}")


41 changes: 31 additions & 10 deletions src/napari_chatgpt/utils/web/test/metasearch_test.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
import pytest
from arbol import aprint
from duckduckgo_search.exceptions import RatelimitException

from napari_chatgpt.utils.api_keys.api_key import is_api_key_available
from napari_chatgpt.utils.web.metasearch import metasearch
Expand All @@ -8,16 +9,36 @@
@pytest.mark.skipif(not is_api_key_available('OpenAI'),
reason="requires OpenAI key to run")
def test_metasearch_summary():
query = 'Mickey Mouse'
text = metasearch(query, do_summarize=True)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

try:
query = 'Mickey Mouse'
text = metasearch(query, do_summarize=True)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

except RatelimitException as e:
aprint(f"RatelimitException: {e}")
aprint(f"RatelimitException: {e.response}")
aprint(f"RatelimitException: {e.response.text}")





def test_metasearch():
query = 'Mickey Mouse'
text = metasearch(query, do_summarize=False)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

try:
query = 'Mickey Mouse'
text = metasearch(query, do_summarize=False)
aprint(text)
assert 'Mickey' in text
assert 'Web search failed' not in text

except RatelimitException as e:
aprint(f"RatelimitException: {e}")
aprint(f"RatelimitException: {e.response}")
aprint(f"RatelimitException: {e.response.text}")



Loading

0 comments on commit 7513324

Please sign in to comment.