Skip to content

Commit

Permalink
Merge branch 'main' into pr-40
Browse files Browse the repository at this point in the history
  • Loading branch information
royerloic committed Aug 21, 2024
2 parents a525a39 + 2efbb5d commit 5c83fd5
Show file tree
Hide file tree
Showing 25 changed files with 294 additions and 37 deletions.
93 changes: 93 additions & 0 deletions .github/workflows/codeql.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,93 @@
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"

on:
push:
branches: [ "main" ]
pull_request:
branches: [ "main" ]
schedule:
- cron: '23 2 * * 6'

jobs:
analyze:
name: Analyze (${{ matrix.language }})
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners (GitHub.com only)
# Consider using larger runners or machines with greater resources for possible analysis time improvements.
runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
permissions:
# required for all workflows
security-events: write

# required to fetch internal or private CodeQL packs
packages: read

# only required for workflows in private repositories
actions: read
contents: read

strategy:
fail-fast: false
matrix:
include:
- language: python
build-mode: none
# CodeQL supports the following values for 'language': 'c-cpp', 'csharp', 'go', 'java-kotlin', 'javascript-typescript', 'python', 'ruby', 'swift'
# Use `c-cpp` to analyze code written in C, C++ or both
# Use 'java-kotlin' to analyze code written in Java, Kotlin or both
# Use 'javascript-typescript' to analyze code written in JavaScript, TypeScript or both
# To learn more about changing the languages that are analyzed or customizing the build mode for your analysis,
# see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/customizing-your-advanced-setup-for-code-scanning.
# If you are analyzing a compiled language, you can modify the 'build-mode' for that language to customize how
# your codebase is analyzed, see https://docs.github.com/en/code-security/code-scanning/creating-an-advanced-setup-for-code-scanning/codeql-code-scanning-for-compiled-languages
steps:
- name: Checkout repository
uses: actions/checkout@v4

# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3
with:
languages: ${{ matrix.language }}
build-mode: ${{ matrix.build-mode }}
# If you wish to specify custom queries, you can do so here or in a config file.
# By default, queries listed here will override any specified in a config file.
# Prefix the list here with "+" to use these queries and those in the config file.

# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
# queries: security-extended,security-and-quality

# If the analyze step fails for one of the languages you are analyzing with
# "We were unable to automatically build your code", modify the matrix above
# to set the build mode to "manual" for that language. Then modify this step
# to build your code.
# ℹ️ Command-line programs to run using the OS shell.
# 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun
- if: matrix.build-mode == 'manual'
shell: bash
run: |
echo 'If you are using a "manual" build mode for one or more of the' \
'languages you are analyzing, replace this with the commands to build' \
'your code, for example:'
echo ' make bootstrap'
echo ' make release'
exit 1
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3
with:
category: "/language:${{matrix.language}}"
48 changes: 48 additions & 0 deletions .github/workflows/just_deploy.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
# This workflow will upload a Python Package using Twine when a release is created
# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries

name: tests

on:
push:
branches:
- main
- npe2
tags:
- "v*" # Push events to matching v*, i.e. v1.0, v20.15.10
pull_request:
branches:
- main
- npe2
workflow_dispatch:

env:
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}

jobs:

deploy:
# this will run when you have tagged a commit, starting with "v*"
# and requires that you have put your twine API key in your
# github secrets (see readme for details)
needs: [ test ]
runs-on: ubuntu-latest
if: contains(github.ref, 'tags')
steps:
- uses: actions/checkout@v3
- name: Set up Python
uses: actions/setup-python@v4
with:
python-version: "3.x"
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -U setuptools setuptools_scm wheel twine build
- name: Build and publish
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.TWINE_API_KEY }}
run: |
git tag
python -m build .
twine upload dist/*
18 changes: 16 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,14 @@
[![License BSD-3](https://img.shields.io/pypi/l/napari-chatgpt.svg?color=green)](https://github.com/royerlab/napari-chatgpt/raw/main/LICENSE)
[![PyPI](https://img.shields.io/pypi/v/napari-chatgpt.svg?color=green)](https://pypi.org/project/napari-chatgpt)
[![Python Version](https://img.shields.io/pypi/pyversions/napari-chatgpt.svg?color=green)](https://python.org)
[![tests](https://github.com/royerlab/napari-chatgpt/workflows/tests/badge.svg)](https://github.com/royerlab/napari-chatgpt/actions)
[![tests](https://github.com/royerlab/napari-chatgpt/actions/workflows/test_and_deploy.yml/badge.svg)](https://github.com/royerlab/napari-chatgpt/actions/workflows/test_and_deploy.yml)
[![codecov](https://codecov.io/gh/royerlab/napari-chatgpt/branch/main/graph/badge.svg)](https://codecov.io/gh/royerlab/napari-chatgpt)
[![Downloads](https://pepy.tech/badge/napari-chatgpt)](https://pepy.tech/project/napari-chatgpt)
[![Downloads](https://pepy.tech/badge/napari-chatgpt/month)](https://pepy.tech/project/napari-chatgpt)
[![napari hub](https://img.shields.io/endpoint?url=https://api.napari-hub.org/shields/napari-chatgpt)](https://napari-hub.org/plugins/napari-chatgpt)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.10828225.svg)](https://doi.org/10.5281/zenodo.10828225)
[![GitHub stars](https://img.shields.io/github/stars/royerlab/napari-chatgpt?style=social)](https://github.com/royerlab/napari-chatgpt/)
[![GitHub forks](https://img.shields.io/github/forks/royerlab/napari-chatgpt?style=social)](https://github.com/royerlab/napari-chatgpt/)

<img src='https://github.com/royerlab/napari-chatgpt/assets/1870994/c85185d2-6d16-472d-a2c8-5680ea869bf2' height='300'>
<img height="300" alt="image" src="https://github.com/royerlab/napari-chatgpt/assets/1870994/f3ea245e-dd86-4ff2-802e-48c2073cb6f9">
Expand All @@ -20,7 +24,7 @@ in a conversational manner.
This repository started as a 'week-end project'
by [Loic A. Royer](https://twitter.com/loicaroyer)
who leads a [research group](https://royerlab.org) at
the [Chan Zuckerberg Biohub](https://czbiohub.org/sf/). It
the [Chan Zuckerberg Biohub](https://royerlab.org). It
leverages [OpenAI](https://openai.com)'s ChatGPT API via
the [LangChain](https://python.langchain.com/en/latest/index.html) Python
library, as well as [napari](https://napari.org), a fast, interactive,
Expand Down Expand Up @@ -146,6 +150,7 @@ your request.
Omega is generally safe as long as you do not make dangerous requests. To be 100% safe, and
if your experiments with Omega could be potentially problematic, I recommend using this
software from within a sandboxed virtual machine.
API keys are only as safe as the overall machine is, see the section below on API key hygiene.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
Expand All @@ -154,6 +159,15 @@ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CON
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
THE USE OR OTHER DEALINGS IN THE SOFTWARE.

## API key hygiene:

Best Practices for Managing Your API Keys:
- **Host Computer Hygiene:** Ensure that the machine you’re installing napari-chatgpt/Omega on is secure, free of malware and viruses, and otherwise not compromised. Make sure to install antivirus software on Windows.
- **Security:** Treat your API key like a password. Do not share it with others or expose it in public repositories or forums.
- **Cost Control:** Set spending limits on your OpenAI account (see [here](https://platform.openai.com/account/limits)).
- **Regenerate Keys:** If you believe your API key has been compromised, cancel and regenerate it from the OpenAI API dashboard immediately.
- **Key Storage:** Omega has a built-in 'API Key Vault' that encrypts keys using a password; this is the preferred approach. You can also store the key in an environment variable, but that is not encrypted and could compromise the key.

## Contributing

Contributions are extremely welcome. Tests can be run with [tox], please ensure
Expand Down
Binary file not shown.
File renamed without changes.
File renamed without changes.
18 changes: 11 additions & 7 deletions setup.cfg
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
[metadata]
name = napari-chatgpt
version = v2024.3.13.3
version = v2024.5.15
description = A napari plugin to process and analyse images with chatGPT.
long_description = file: README.md
long_description_content_type = text/markdown
url = https://github.com/royerlab/napari-chatgpt
author = Loic A. Royer
author = Loic A. Royer and contributors
author_email = [email protected]
license = BSD-3-Clause
license_files = LICENSE
Expand Down Expand Up @@ -36,9 +36,11 @@ install_requires =
scikit-image
qtpy
QtAwesome
langchain==0.1.11
langchain-openai==0.0.8
openai==1.13.3
langchain==0.2.0rc2
langchain-community==0.2.0rc1
langchain-openai==0.1.6
langchain-anthropic==0.1.11
openai==1.29.0
anthropic
fastapi
uvicorn
Expand All @@ -52,7 +54,7 @@ install_requires =
xarray
arbol
playwright
duckduckgo_search
duckduckgo_search==5.3.0b4
ome-zarr
transformers
cryptography
Expand All @@ -64,6 +66,9 @@ install_requires =
jedi
black

# needed because lxml has spun out this code out of its main repo:
lxml_html_clean


python_requires = >=3.9
include_package_data = True
Expand All @@ -90,6 +95,5 @@ testing =
napari
pyqt5


[options.package_data]
* = *.yaml
29 changes: 29 additions & 0 deletions src/microplugin/code_editor/clickable_icon.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
from typing import Union

from qtpy.QtCore import QSize
from qtpy.QtCore import QRect, QPoint
from qtpy.QtGui import QPainter
from qtpy.QtCore import Qt, Signal
from qtpy.QtGui import QIcon, QPixmap, QColor, QImage
from qtpy.QtWidgets import QLabel
Expand Down Expand Up @@ -58,10 +61,36 @@ def __init__(
# Change cursor to hand pointer when hovering over the label:
self.setCursor(Qt.PointingHandCursor)

# Highlight color when hovering over the label:
self.highlight_color = QColor(200, 200, 200,
50) # Semi-transparent gray color

# Flag to indicate if the mouse is hovering over the label:
self.is_hovered = False


def mousePressEvent(self, event):
if event.button() == Qt.LeftButton:
self.clicked.emit()

def enterEvent(self, event):
self.is_hovered = True
self.update()

def leaveEvent(self, event):
self.is_hovered = False
self.update()

def paintEvent(self, event):
super().paintEvent(event)

if self.is_hovered:
painter = QPainter(self)
painter.setCompositionMode(QPainter.CompositionMode_SourceAtop)
painter.fillRect(self.rect(),
self.highlight_color)
painter.end()

@staticmethod
def _modify_pixmap_for_dark_ui(pixmap):
# Convert QPixmap to QImage
Expand Down
12 changes: 6 additions & 6 deletions src/napari_chatgpt/_widget.py
Original file line number Diff line number Diff line change
Expand Up @@ -126,8 +126,8 @@ def _model_selection(self):
model_list.append('claude-2.1')
model_list.append('claude-2.0')
model_list.append('claude-instant-1.2')
#model_list.append('claude-3-sonnet-20240229')
#model_list.append('claude-3-opus-20240229')
model_list.append('claude-3-sonnet-20240229')
model_list.append('claude-3-opus-20240229')


if is_ollama_running():
Expand Down Expand Up @@ -159,7 +159,7 @@ def _model_selection(self):
model_list = best_models + [m for m in model_list if m not in best_models]

# Ensure that the very best models are at the top of the list:
very_best_models = [m for m in model_list if ('gpt-4-0125' in m) ]
very_best_models = [m for m in model_list if ('gpt-4-turbo-2024-04-09' in m) ]
model_list = very_best_models + [m for m in model_list if m not in very_best_models]

# normalise list:
Expand Down Expand Up @@ -450,14 +450,14 @@ def _start_omega(self):
main_llm_model_name = self.model_combo_box.currentText()

# Warn users with a modal window that the selected model might be sub-optimal:
if 'gpt-4' not in main_llm_model_name:
if 'gpt-4' not in main_llm_model_name and 'claude-3-opus' not in main_llm_model_name:
aprint("Warning: you did not select a gpt-4 level model. Omega's cognitive and coding abilities will be degraded.")
show_warning_dialog(f"You have selected this model: '{main_llm_model_name}'. "
f"This is not a GPT4-level model. "
f"This is not a GPT4 or Claude-3-opus level model. "
f"Omega's cognitive and coding abilities will be degraded. "
f"It might even completely fail or be too slow. "
f"Please visit <a href='https://github.com/royerlab/napari-chatgpt/wiki/OpenAIKey'>our wiki</a> "
f"for information on how to gain access to GPT4.")
f"for information on how to gain access to GPT4 (or Claude-3).")

# Set tool LLM model name via configuration file.
tool_llm_model_name = self.config.get('tool_llm_model_name', 'same')
Expand Down
3 changes: 2 additions & 1 deletion src/napari_chatgpt/chat_server/chat_server.py
Original file line number Diff line number Diff line change
Expand Up @@ -300,7 +300,8 @@ def start_chat_server(viewer: napari.Viewer = None,
bridge = NapariBridge(viewer=viewer)

# Register snapshot function:
notebook.register_snapshot_function(bridge.take_snapshot)
if notebook:
notebook.register_snapshot_function(bridge.take_snapshot)

# Instantiates server:
chat_server = NapariChatServer(notebook=notebook,
Expand Down
15 changes: 11 additions & 4 deletions src/napari_chatgpt/llm/llms.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,17 +63,24 @@ def _instantiate_single_llm(llm_model_name: str,
elif 'claude' in llm_model_name:

# Import Claude LLM:
from langchain.chat_models import ChatAnthropic
from langchain_anthropic import ChatAnthropic

max_token_limit = 8000
llm_model_name_lc = llm_model_name.lower()

if 'opus' in llm_model_name_lc or 'sonnet' in llm_model_name_lc or 'hiaku' in llm_model_name_lc or '2.1':
max_tokens_to_sample = 4096
max_token_limit = 200000
else:
max_tokens_to_sample = 4096
max_token_limit = 8000

# Instantiates Main LLM:
llm = ChatAnthropic(
model=llm_model_name,
verbose=verbose,
streaming=streaming,
temperature=temperature,
max_tokens_to_sample=max_token_limit,
max_tokens_to_sample=max_tokens_to_sample,
callbacks=[callback_handler])

return llm, max_token_limit
Expand Down Expand Up @@ -103,7 +110,7 @@ def _instantiate_single_llm(llm_model_name: str,
# Wait a bit:
sleep(3)

# Make ure that Ollama is running
# Make sure that Ollama is running
if not is_ollama_running(ollama_host, ollama_port):
aprint(f"Ollama server is not running on '{ollama_host}'. Please start the Ollama server on this machine and make sure the port '{ollama_port}' is open. ")
raise Exception("Ollama server is not running!")
Expand Down
Loading

0 comments on commit 5c83fd5

Please sign in to comment.