From 361d1eabf873442d36583ca79c3144853f02330e Mon Sep 17 00:00:00 2001 From: Farkhod Sadykov Date: Fri, 19 Jan 2024 02:16:46 +0100 Subject: [PATCH] Improved tests, REPL stdin, documentation (#452) * Improved mocked response tests. * REPL mode can accept stdin as initial prompt. * Readme and Contributing documentation improvements. --- .github/workflows/lint_test.yml | 2 +- CONTRIBUITNG.md | 37 ------- CONTRIBUTING.md | 38 +++++++ README.md | 4 +- pyproject.toml | 2 +- scripts/test.sh | 2 +- sgpt/app.py | 30 ++++-- sgpt/handlers/repl_handler.py | 10 +- .../__init__.py | 0 .../common/execute_shell.py | 0 .../init_functions.py | 0 .../mac/apple_script.py | 0 tests/conftest.py | 29 +---- tests/test_code.py | 56 +++++----- tests/test_default.py | 102 ++++++++++++------ tests/test_roles.py | 27 +++-- tests/test_shell.py | 89 +++++++-------- tests/utils.py | 4 +- 18 files changed, 229 insertions(+), 203 deletions(-) delete mode 100644 CONTRIBUITNG.md create mode 100644 CONTRIBUTING.md rename sgpt/{default_functions => llm_functions}/__init__.py (100%) rename sgpt/{default_functions => llm_functions}/common/execute_shell.py (100%) rename sgpt/{default_functions => llm_functions}/init_functions.py (100%) rename sgpt/{default_functions => llm_functions}/mac/apple_script.py (100%) diff --git a/.github/workflows/lint_test.yml b/.github/workflows/lint_test.yml index b6421af1..5aee8d53 100644 --- a/.github/workflows/lint_test.yml +++ b/.github/workflows/lint_test.yml @@ -31,7 +31,7 @@ jobs: - name: ruff run: ruff sgpt tests scripts # - name: mypy -# run: mypy sgpt --exclude function.py --exclude handler.py --exclude default_functions +# run: mypy sgpt --exclude function.py --exclude handler.py --exclude llm_functions - name: tests run: | export OPENAI_API_KEY=test_api_key diff --git a/CONTRIBUITNG.md b/CONTRIBUITNG.md deleted file mode 100644 index 117ca3bf..00000000 --- a/CONTRIBUITNG.md +++ /dev/null @@ -1,37 +0,0 @@ -# Contributing to ShellGPT -Thank you for considering 
contributing to ShellGPT (sgpt)! In order to ensure a smooth and enjoyable experience for everyone, please follow the steps outlined below. - -## Find an issue to work on -* First, browse the existing issues to find one that interests you. If you find an issue you would like to work on, assign it to yourself and leave a comment expressing your interest in working on it soon. -* If you have a new feature in mind that doesn't have an existing issue, kindly create a discussion in the "ideas" category using GitHub Discussions. Gather feedback from the community, and if you receive approval from at least a couple of people, create an issue and assign it to yourself. -* If there is an urgent issue, such as a critical bug causing the app to crash, create a pull request right away. - -## Developing -> ShellGPT is written using strict types, which means you will need to define types. The project utilizes several linting and testing tools: ruff, mypy, isort, black, and pytest. - -### Virtual environment -Create a virtual environment using Python venv and activate it: - -```shell -python -m venv env && source ./env/bin/activate -``` - -### Install dependencies -Install the necessary dependencies, in this case you will need to install the development and test dependencies: - -```shell -pip install -e ."[dev,test]" -``` -### Start coding -With your environment set up and the issue assigned, you can begin working on your solution. Familiarize yourself with the existing codebase, and follow the project's coding style and conventions. Remember to write clean, modular, and maintainable code, which will make it easier for others to understand and review. As you make progress, commit your changes frequently to keep track of your work. - -### Testing -This is very important step. Every changes that implements a new feature or modifies the logic of existing features should include "integration" tests. 
These are tests that call `sgpt` with defined arguments, capture the output, and verify that the feature works as expected. See `test_integration.py` for examples. The tests should be easy to read and understand. - -### Pull request -Before creating a pull request, ensure that you run `scripts/lint.sh` and `scripts/tests.sh`. All linters and tests should pass. In the pull request, provide a high-level description of your changes and step-by-step instructions on how to test them. Include any necessary commands. - -### Code review -Once you've submitted your pull request, it's time for code review. Be patient and open to feedback from the reviewers. Address any concerns they may have and work collaboratively to improve the code. Together, we can make ShellGPT an even better project. - -Thank you once again for your contribution! We're excited to have you on board. \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 00000000..e86dcb7d --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,38 @@ +# Contributing to ShellGPT +Thank you for considering contributing to ShellGPT! To ensure a smooth and enjoyable experience for everyone, please follow the steps outlined below. + +## Find an Issue to Work On +- First, browse the existing issues to find one that interests you. If you find an issue you'd like to work on, assign it to yourself and leave a comment expressing your interest. +- If you have a new feature idea that doesn't have an existing issue, please create a discussion in the "ideas" category using GitHub Discussions. Gather feedback from the community, and if you receive approval from at least a couple of people, create an issue and assign it to yourself. +- If there is an urgent issue, such as a critical bug causing the app to crash, create a pull request immediately. + +## Development +ShellGPT is written with strict types, so you'll need to define types. 
The project uses several linting and testing tools: ruff, mypy, isort, black, and pytest. + +### Virtual Environment +Create and activate a virtual environment using Python venv: + +```shell +python -m venv env && source ./env/bin/activate +``` + +### Install Dependencies +Install the necessary dependencies, including development and test dependencies: + +```shell +pip install -e ."[dev,test]" +``` + +### Start Coding +With your environment set up and the issue assigned, you can start working on your solution. Get to know the existing codebase and adhere to the project's coding style and conventions. Write clean, modular, and maintainable code to facilitate understanding and review. Commit your changes frequently to document your progress. + +### Testing +**This is a crucial step.** Any changes that implement a new feature or modify existing features should include tests. **Unverified code will not be merged.** These tests should call `sgpt` with defined arguments, capture the output, and verify that the feature works as expected. Refer to the `tests` folder for examples. + +### Pull Request +Before creating a pull request, run `scripts/lint.sh` and `scripts/tests.sh` to ensure all linters and tests pass. In your pull request, provide a high-level description of your changes and detailed instructions for testing them, including any necessary commands. + +### Code Review +After submitting your pull request, be patient and receptive to feedback from reviewers. Address any concerns they raise and collaborate to refine the code. Together, we can enhance the ShellGPT project. + +Thank you once again for your contribution! We're excited to have you join us. 
\ No newline at end of file diff --git a/README.md b/README.md index cc6bd61c..b44b5a55 100644 --- a/README.md +++ b/README.md @@ -191,7 +191,7 @@ sgpt --chat conversation_3 "Convert the resulting file into an MP3" # -> ffmpeg -i output.mp4 -vn -acodec libmp3lame -ac 2 -ab 160k -ar 48000 final_output.mp3 ``` -To list all the sessions from either conversational mode, use the `--list-chats` or `lc` option: +To list all the sessions from either conversational mode, use the `--list-chats` or `-lc` option: ```shell sgpt --list-chats # .../shell_gpt/chat_cache/conversation_1 @@ -313,7 +313,7 @@ ShellGPT allows you to create custom roles, which can be utilized to generate co ```shell sgpt --create-role json_generator # Enter role description: Provide only valid json as response. -sgpt --role json "random: user, password, email, address" +sgpt --role json_generator "random: user, password, email, address" ``` ```json { diff --git a/pyproject.toml b/pyproject.toml index 1fae7c08..3411e68c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -82,7 +82,7 @@ skip = "__init__.py" [tool.mypy] strict = true -exclude = ["function.py", "handler.py", "default_functions"] +exclude = ["function.py", "handler.py", "llm_functions"] [tool.ruff] select = [ diff --git a/scripts/test.sh b/scripts/test.sh index c30bbb50..151866ac 100644 --- a/scripts/test.sh +++ b/scripts/test.sh @@ -4,4 +4,4 @@ set -e set -x # shellcheck disable=SC2068 -pytest tests ${@} +pytest tests ${@} -p no:warnings diff --git a/sgpt/app.py b/sgpt/app.py index 023189a3..a9a2eaf1 100644 --- a/sgpt/app.py +++ b/sgpt/app.py @@ -8,11 +8,11 @@ from click.types import Choice from sgpt.config import cfg -from sgpt.default_functions.init_functions import install_functions as inst_funcs from sgpt.function import get_openai_schemas from sgpt.handlers.chat_handler import ChatHandler from sgpt.handlers.default_handler import DefaultHandler from sgpt.handlers.repl_handler import ReplHandler +from sgpt.llm_functions.init_functions 
import install_functions as inst_funcs from sgpt.role import DefaultRoles, SystemRole from sgpt.utils import ( get_edited_prompt, @@ -148,17 +148,25 @@ def main( ) -> None: stdin_passed = not sys.stdin.isatty() - if stdin_passed and not repl: - prompt = f"{sys.stdin.read()}\n\n{prompt or ''}" + if stdin_passed: + stdin = "" + # TODO: This is very hacky. + # In some cases, we need to pass stdin along with inputs. + # When we want part of stdin to be used as a init prompt, + # but rest of the stdin to be used as a inputs. For example: + # echo "hello\n__sgpt__eof__\nThis is input" | sgpt --repl temp + # In this case, "hello" will be used as a init prompt, and + # "This is input" will be used as a input to the REPL. + for line in sys.stdin: + if "__sgpt__eof__" in line: + break + stdin += line + prompt = f"{stdin}\n\n{prompt}" if prompt else stdin # Switch to stdin for interactive input. - try: - if os.name == "posix": - sys.stdin = open("/dev/tty", "r") - elif os.name == "nt": - sys.stdin = open("CON", "r") - except OSError: - # Non-interactive shell. 
- pass + if os.name == "posix": + sys.stdin = open("/dev/tty", "r") + elif os.name == "nt": + sys.stdin = open("CON", "r") if not prompt and not editor and not repl: raise MissingParameter(param_hint="PROMPT", param_type="string") diff --git a/sgpt/handlers/repl_handler.py b/sgpt/handlers/repl_handler.py index 7334a97a..2fd6f1f0 100644 --- a/sgpt/handlers/repl_handler.py +++ b/sgpt/handlers/repl_handler.py @@ -21,7 +21,7 @@ def _get_multiline_input(cls) -> str: multiline_input += user_input + "\n" return multiline_input - def handle(self, prompt: str, **kwargs: Any) -> None: # type: ignore + def handle(self, init_prompt: str, **kwargs: Any) -> None: # type: ignore if self.initiated: rich_print(Rule(title="Chat History", style="bold magenta")) self.show_messages(self.chat_id) @@ -37,6 +37,11 @@ def handle(self, prompt: str, **kwargs: Any) -> None: # type: ignore ) typer.secho(info_message, fg="yellow") + if init_prompt: + rich_print(Rule(title="Input", style="bold purple")) + typer.echo(init_prompt) + rich_print(Rule(style="bold purple")) + full_completion = "" while True: # Infinite loop until user exits with Ctrl+C. @@ -46,6 +51,9 @@ def handle(self, prompt: str, **kwargs: Any) -> None: # type: ignore if prompt == "exit()": # This is also useful during tests. 
raise typer.Exit() + if init_prompt: + prompt = f"{init_prompt}\n\n\n{prompt}" + init_prompt = "" if self.role.name == DefaultRoles.SHELL.value and prompt == "e": typer.echo() run_command(full_completion) diff --git a/sgpt/default_functions/__init__.py b/sgpt/llm_functions/__init__.py similarity index 100% rename from sgpt/default_functions/__init__.py rename to sgpt/llm_functions/__init__.py diff --git a/sgpt/default_functions/common/execute_shell.py b/sgpt/llm_functions/common/execute_shell.py similarity index 100% rename from sgpt/default_functions/common/execute_shell.py rename to sgpt/llm_functions/common/execute_shell.py diff --git a/sgpt/default_functions/init_functions.py b/sgpt/llm_functions/init_functions.py similarity index 100% rename from sgpt/default_functions/init_functions.py rename to sgpt/llm_functions/init_functions.py diff --git a/sgpt/default_functions/mac/apple_script.py b/sgpt/llm_functions/mac/apple_script.py similarity index 100% rename from sgpt/default_functions/mac/apple_script.py rename to sgpt/llm_functions/mac/apple_script.py diff --git a/tests/conftest.py b/tests/conftest.py index 27ff8eef..bf0903e1 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,29 +1,8 @@ -import datetime +import os import pytest -from openai.types.chat.chat_completion_chunk import ChatCompletionChunk -from openai.types.chat.chat_completion_chunk import Choice as StreamChoice -from openai.types.chat.chat_completion_chunk import ChoiceDelta -from sgpt.config import cfg - -@pytest.fixture -def completion(request): - tokens_string = request.param - return [ - ChatCompletionChunk( - id="foo", - model=cfg.get("DEFAULT_MODEL"), - object="chat.completion.chunk", - choices=[ - StreamChoice( - index=0, - finish_reason=None, - delta=ChoiceDelta(content=token, role="assistant"), - ), - ], - created=int(datetime.datetime.now().timestamp()), - ) - for token in tokens_string - ] +@pytest.fixture(autouse=True) +def mock_os_name(monkeypatch): + monkeypatch.setattr(os, 
"name", "test") diff --git a/tests/test_code.py b/tests/test_code.py index 2bc127c3..5df2c421 100644 --- a/tests/test_code.py +++ b/tests/test_code.py @@ -4,43 +4,41 @@ from sgpt.config import cfg from sgpt.role import DefaultRoles, SystemRole -from .utils import app, comp_args, comp_chunks, make_args, parametrize, runner +from .utils import app, cmd_args, comp_args, comp_chunks, runner role = SystemRole.get(DefaultRoles.CODE.value) -@parametrize("completion", ["print('Hello World')"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_code_generation(mock, completion): - mock.return_value = completion +def test_code_generation(mock): + mock.return_value = comp_chunks("print('Hello World')") args = {"prompt": "hello world python", "--code": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) mock.assert_called_once_with(**comp_args(role, args["prompt"])) assert result.exit_code == 0 assert "print('Hello World')" in result.stdout -@parametrize("completion", ["# Hello\nprint('Hello')"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_code_generation_stdin(mock, completion): - mock.return_value = completion +def test_code_generation_stdin(completion): + completion.return_value = comp_chunks("# Hello\nprint('Hello')") args = {"prompt": "make comments for code", "--code": True} stdin = "print('Hello')" - result = runner.invoke(app, make_args(**args), input=stdin) + result = runner.invoke(app, cmd_args(**args), input=stdin) expected_prompt = f"{stdin}\n\n{args['prompt']}" - mock.assert_called_once_with(**comp_args(role, expected_prompt)) + completion.assert_called_once_with(**comp_args(role, expected_prompt)) assert result.exit_code == 0 assert "# Hello" in result.stdout assert "print('Hello')" in result.stdout @patch("openai.resources.chat.Completions.create") -def test_code_chat(mock): - mock.side_effect = [ +def test_code_chat(completion): + completion.side_effect = 
[ comp_chunks("print('hello')"), comp_chunks("print('hello')\nprint('world')"), ] @@ -49,13 +47,13 @@ def test_code_chat(mock): chat_path.unlink(missing_ok=True) args = {"prompt": "print hello", "--code": True, "--chat": chat_name} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 0 assert "print('hello')" in result.stdout assert chat_path.exists() args["prompt"] = "also print world" - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 0 assert "print('hello')" in result.stdout assert "print('world')" in result.stdout @@ -68,11 +66,11 @@ def test_code_chat(mock): {"role": "assistant", "content": "print('hello')\nprint('world')"}, ] expected_args = comp_args(role, "", messages=expected_messages) - mock.assert_called_with(**expected_args) - assert mock.call_count == 2 + completion.assert_called_with(**expected_args) + assert completion.call_count == 2 args["--shell"] = True - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 2 assert "Error" in result.stdout chat_path.unlink() @@ -80,8 +78,8 @@ def test_code_chat(mock): @patch("openai.resources.chat.Completions.create") -def test_code_repl(mock_completion): - mock_completion.side_effect = [ +def test_code_repl(completion): + completion.side_effect = [ comp_chunks("print('hello')"), comp_chunks("print('hello')\nprint('world')"), ] @@ -90,8 +88,8 @@ def test_code_repl(mock_completion): chat_path.unlink(missing_ok=True) args = {"--repl": chat_name, "--code": True} - inputs = ["print hello", "also print world", "exit()"] - result = runner.invoke(app, make_args(**args), input="\n".join(inputs)) + inputs = ["__sgpt__eof__", "print hello", "also print world", "exit()"] + result = runner.invoke(app, cmd_args(**args), input="\n".join(inputs)) expected_messages = [ {"role": "system", "content": role.role}, @@ 
-101,8 +99,8 @@ def test_code_repl(mock_completion): {"role": "assistant", "content": "print('hello')\nprint('world')"}, ] expected_args = comp_args(role, "", messages=expected_messages) - mock_completion.assert_called_with(**expected_args) - assert mock_completion.call_count == 2 + completion.assert_called_with(**expected_args) + assert completion.call_count == 2 assert result.exit_code == 0 assert ">>> print hello" in result.stdout @@ -112,20 +110,20 @@ def test_code_repl(mock_completion): @patch("openai.resources.chat.Completions.create") -def test_code_and_shell(mock): +def test_code_and_shell(completion): args = {"--code": True, "--shell": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - mock.assert_not_called() + completion.assert_not_called() assert result.exit_code == 2 assert "Error" in result.stdout @patch("openai.resources.chat.Completions.create") -def test_code_and_describe_shell(mock): +def test_code_and_describe_shell(completion): args = {"--code": True, "--describe-shell": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - mock.assert_not_called() + completion.assert_not_called() assert result.exit_code == 2 assert "Error" in result.stdout diff --git a/tests/test_default.py b/tests/test_default.py index 246af719..418fb470 100644 --- a/tests/test_default.py +++ b/tests/test_default.py @@ -1,57 +1,58 @@ from pathlib import Path from unittest.mock import patch -from sgpt import config +import typer +from typer.testing import CliRunner + +from sgpt import config, main from sgpt.__version__ import __version__ from sgpt.role import DefaultRoles, SystemRole -from .utils import app, comp_args, comp_chunks, make_args, parametrize, runner +from .utils import app, cmd_args, comp_args, comp_chunks, runner role = SystemRole.get(DefaultRoles.DEFAULT.value) cfg = config.cfg -@parametrize("completion", ["Prague"], indirect=True) 
@patch("openai.resources.chat.Completions.create") -def test_default(mock, completion): - mock.return_value = completion +def test_default(completion): + completion.return_value = comp_chunks("Prague") args = {"prompt": "capital of the Czech Republic?"} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - mock.assert_called_once_with(**comp_args(role, **args)) + completion.assert_called_once_with(**comp_args(role, **args)) assert result.exit_code == 0 assert "Prague" in result.stdout -@parametrize("completion", ["Prague"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_default_stdin(mock, completion): - mock.return_value = completion +def test_default_stdin(completion): + completion.return_value = comp_chunks("Prague") stdin = "capital of the Czech Republic?" - result = runner.invoke(app, make_args(), input=stdin) + result = runner.invoke(app, cmd_args(), input=stdin) - mock.assert_called_once_with(**comp_args(role, stdin)) + completion.assert_called_once_with(**comp_args(role, stdin)) assert result.exit_code == 0 assert "Prague" in result.stdout @patch("openai.resources.chat.Completions.create") -def test_default_chat(mock): - mock.side_effect = [comp_chunks("ok"), comp_chunks("4")] +def test_default_chat(completion): + completion.side_effect = [comp_chunks("ok"), comp_chunks("4")] chat_name = "_test" chat_path = Path(cfg.get("CHAT_CACHE_PATH")) / chat_name chat_path.unlink(missing_ok=True) args = {"prompt": "my number is 2", "--chat": chat_name} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 0 assert "ok" in result.stdout assert chat_path.exists() args["prompt"] = "my number + 2?" 
- result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 0 assert "4" in result.stdout @@ -63,8 +64,8 @@ def test_default_chat(mock): {"role": "assistant", "content": "4"}, ] expected_args = comp_args(role, "", messages=expected_messages) - mock.assert_called_with(**expected_args) - assert mock.call_count == 2 + completion.assert_called_with(**expected_args) + assert completion.call_count == 2 result = runner.invoke(app, ["--list-chats"]) assert result.exit_code == 0 @@ -78,27 +79,27 @@ def test_default_chat(mock): assert "4" in result.stdout args["--shell"] = True - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 2 assert "Error" in result.stdout args["--code"] = True - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 2 assert "Error" in result.stdout chat_path.unlink() @patch("openai.resources.chat.Completions.create") -def test_default_repl(mock): - mock.side_effect = [comp_chunks("ok"), comp_chunks("8")] +def test_default_repl(completion): + completion.side_effect = [comp_chunks("ok"), comp_chunks("8")] chat_name = "_test" chat_path = Path(cfg.get("CHAT_CACHE_PATH")) / chat_name chat_path.unlink(missing_ok=True) args = {"--repl": chat_name} - inputs = ["my number is 6", "my number + 2?", "exit()"] - result = runner.invoke(app, make_args(**args), input="\n".join(inputs)) + inputs = ["__sgpt__eof__", "my number is 6", "my number + 2?", "exit()"] + result = runner.invoke(app, cmd_args(**args), input="\n".join(inputs)) expected_messages = [ {"role": "system", "content": role.role}, @@ -108,8 +109,8 @@ def test_default_repl(mock): {"role": "assistant", "content": "8"}, ] expected_args = comp_args(role, "", messages=expected_messages) - mock.assert_called_with(**expected_args) - assert mock.call_count == 2 + completion.assert_called_with(**expected_args) + 
assert completion.call_count == 2 assert result.exit_code == 0 assert ">>> my number is 6" in result.stdout @@ -118,10 +119,43 @@ def test_default_repl(mock): assert "8" in result.stdout -@parametrize("completion", ["Berlin"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_llm_options(mock, completion): - mock.return_value = completion +def test_default_repl_stdin(completion): + completion.side_effect = [comp_chunks("ok init"), comp_chunks("ok another")] + chat_name = "_test" + chat_path = Path(cfg.get("CHAT_CACHE_PATH")) / chat_name + chat_path.unlink(missing_ok=True) + + my_runner = CliRunner() + my_app = typer.Typer() + my_app.command()(main) + + args = {"--repl": chat_name} + inputs = ["this is stdin", "__sgpt__eof__", "prompt", "another", "exit()"] + result = my_runner.invoke(my_app, cmd_args(**args), input="\n".join(inputs)) + + expected_messages = [ + {"role": "system", "content": role.role}, + {"role": "user", "content": "this is stdin\n\n\n\nprompt"}, + {"role": "assistant", "content": "ok init"}, + {"role": "user", "content": "another"}, + {"role": "assistant", "content": "ok another"}, + ] + expected_args = comp_args(role, "", messages=expected_messages) + completion.assert_called_with(**expected_args) + assert completion.call_count == 2 + + assert result.exit_code == 0 + assert "this is stdin" in result.stdout + assert ">>> prompt" in result.stdout + assert "ok init" in result.stdout + assert ">>> another" in result.stdout + assert "ok another" in result.stdout + + +@patch("openai.resources.chat.Completions.create") +def test_llm_options(completion): + completion.return_value = comp_chunks("Berlin") args = { "prompt": "capital of the Germany?", @@ -130,7 +164,7 @@ def test_llm_options(mock, completion): "--top-p": 0.5, "--no-functions": True, } - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) expected_args = comp_args( role=role, @@ -140,15 +174,15 @@ def test_llm_options(mock, 
completion): top_p=args["--top-p"], functions=None, ) - mock.assert_called_once_with(**expected_args) + completion.assert_called_once_with(**expected_args) assert result.exit_code == 0 assert "Berlin" in result.stdout @patch("openai.resources.chat.Completions.create") -def test_version(mock): +def test_version(completion): args = {"--version": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - mock.assert_not_called() + completion.assert_not_called() assert __version__ in result.stdout diff --git a/tests/test_roles.py b/tests/test_roles.py index b70ba456..d184bfcd 100644 --- a/tests/test_roles.py +++ b/tests/test_roles.py @@ -5,30 +5,29 @@ from sgpt.config import cfg from sgpt.role import SystemRole -from .utils import app, comp_args, make_args, parametrize, runner +from .utils import app, cmd_args, comp_args, comp_chunks, runner -@parametrize("completion", ['{"foo": "bar"}'], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_role(mock, completion): - mock.return_value = completion +def test_role(completion): + completion.return_value = comp_chunks('{"foo": "bar"}') path = Path(cfg.get("ROLE_STORAGE_PATH")) / "json_gen_test.json" path.unlink(missing_ok=True) args = {"--create-role": "json_gen_test"} stdin = "you are a JSON generator" - result = runner.invoke(app, make_args(**args), input=stdin) - mock.assert_not_called() + result = runner.invoke(app, cmd_args(**args), input=stdin) + completion.assert_not_called() assert result.exit_code == 0 args = {"--list-roles": True} - result = runner.invoke(app, make_args(**args)) - mock.assert_not_called() + result = runner.invoke(app, cmd_args(**args)) + completion.assert_not_called() assert result.exit_code == 0 assert "json_gen_test" in result.stdout args = {"--show-role": "json_gen_test"} - result = runner.invoke(app, make_args(**args)) - mock.assert_not_called() + result = runner.invoke(app, cmd_args(**args)) + completion.assert_not_called() 
assert result.exit_code == 0 assert "you are a JSON generator" in result.stdout @@ -37,9 +36,9 @@ def test_role(mock, completion): "prompt": "generate foo, bar", "--role": "json_gen_test", } - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) role = SystemRole.get("json_gen_test") - mock.assert_called_once_with(**comp_args(role, args["prompt"])) + completion.assert_called_once_with(**comp_args(role, args["prompt"])) assert result.exit_code == 0 generated_json = json.loads(result.stdout) assert "foo" in generated_json @@ -47,8 +46,8 @@ def test_role(mock, completion): # Test with stdin prompt. args = {"--role": "json_gen_test"} stdin = "generate foo, bar" - result = runner.invoke(app, make_args(**args), input=stdin) - mock.assert_called_with(**comp_args(role, stdin)) + result = runner.invoke(app, cmd_args(**args), input=stdin) + completion.assert_called_with(**comp_args(role, stdin)) assert result.exit_code == 0 generated_json = json.loads(result.stdout) assert "foo" in generated_json diff --git a/tests/test_shell.py b/tests/test_shell.py index 3ce53fe1..43afde80 100644 --- a/tests/test_shell.py +++ b/tests/test_shell.py @@ -1,101 +1,101 @@ +import os from pathlib import Path from unittest.mock import patch from sgpt.config import cfg from sgpt.role import DefaultRoles, SystemRole -from .utils import app, comp_args, comp_chunks, make_args, parametrize, runner +from .utils import app, cmd_args, comp_args, comp_chunks, runner -@parametrize("completion", ["git commit -m test"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_shell(mock, completion): +def test_shell(completion): role = SystemRole.get(DefaultRoles.SHELL.value) - mock.return_value = completion + completion.return_value = comp_chunks("git commit -m test") args = {"prompt": "make a commit using git", "--shell": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - 
mock.assert_called_once_with(**comp_args(role, args["prompt"])) + completion.assert_called_once_with(**comp_args(role, args["prompt"])) assert result.exit_code == 0 assert "git commit" in result.stdout assert "[E]xecute, [D]escribe, [A]bort:" in result.stdout -@parametrize("completion", ["ls -l | sort"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_shell_stdin(mock, completion): +def test_shell_stdin(completion): + completion.return_value = comp_chunks("ls -l | sort") role = SystemRole.get(DefaultRoles.SHELL.value) - mock.return_value = completion args = {"prompt": "Sort by name", "--shell": True} stdin = "What is in current folder" - result = runner.invoke(app, make_args(**args), input=stdin) + result = runner.invoke(app, cmd_args(**args), input=stdin) expected_prompt = f"{stdin}\n\n{args['prompt']}" - mock.assert_called_once_with(**comp_args(role, expected_prompt)) + completion.assert_called_once_with(**comp_args(role, expected_prompt)) assert result.exit_code == 0 assert "ls -l | sort" in result.stdout assert "[E]xecute, [D]escribe, [A]bort:" in result.stdout -@parametrize("completion", ["lists the contents of a folder"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_describe_shell(mock, completion): +def test_describe_shell(completion): + completion.return_value = comp_chunks("lists the contents of a folder") role = SystemRole.get(DefaultRoles.DESCRIBE_SHELL.value) - mock.return_value = completion args = {"prompt": "ls", "--describe-shell": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - mock.assert_called_once_with(**comp_args(role, args["prompt"])) + completion.assert_called_once_with(**comp_args(role, args["prompt"])) assert result.exit_code == 0 assert "lists" in result.stdout -@parametrize("completion", ["lists the contents of a folder"], indirect=True) @patch("openai.resources.chat.Completions.create") -def test_describe_shell_stdin(mock, 
completion): +def test_describe_shell_stdin(completion): + completion.return_value = comp_chunks("lists the contents of a folder") role = SystemRole.get(DefaultRoles.DESCRIBE_SHELL.value) - mock.return_value = completion args = {"--describe-shell": True} stdin = "What is in current folder" - result = runner.invoke(app, make_args(**args), input=stdin) + result = runner.invoke(app, cmd_args(**args), input=stdin) expected_prompt = f"{stdin}" - mock.assert_called_once_with(**comp_args(role, expected_prompt)) + completion.assert_called_once_with(**comp_args(role, expected_prompt)) assert result.exit_code == 0 assert "lists" in result.stdout -@parametrize("completion", ["echo hello"], indirect=True) +@patch("os.system") @patch("openai.resources.chat.Completions.create") -def test_shell_run_description(mock, completion): - mock.return_value = completion +def test_shell_run_description(completion, system): + completion.side_effect = [comp_chunks("echo hello"), comp_chunks("prints hello")] args = {"prompt": "echo hello", "--shell": True} - result = runner.invoke(app, make_args(**args), input="d\n") - # TODO: Doesn't input "d" automatically. 
+ inputs = "__sgpt__eof__\nd\ne\n" + result = runner.invoke(app, cmd_args(**args), input=inputs) + shell = os.environ.get("SHELL", "/bin/sh") + system.assert_called_once_with(f"{shell} -c 'echo hello'") assert result.exit_code == 0 - # assert "prints hello" in result.stdout + assert "echo hello" in result.stdout + assert "prints hello" in result.stdout @patch("openai.resources.chat.Completions.create") -def test_shell_chat(mock): - mock.side_effect = [comp_chunks("ls"), comp_chunks("ls | sort")] +def test_shell_chat(completion): + completion.side_effect = [comp_chunks("ls"), comp_chunks("ls | sort")] role = SystemRole.get(DefaultRoles.SHELL.value) chat_name = "_test" chat_path = Path(cfg.get("CHAT_CACHE_PATH")) / chat_name chat_path.unlink(missing_ok=True) args = {"prompt": "list folder", "--shell": True, "--chat": chat_name} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 0 assert "ls" in result.stdout assert chat_path.exists() args["prompt"] = "sort by name" - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 0 assert "ls | sort" in result.stdout @@ -107,11 +107,11 @@ def test_shell_chat(mock): {"role": "assistant", "content": "ls | sort"}, ] expected_args = comp_args(role, "", messages=expected_messages) - mock.assert_called_with(**expected_args) - assert mock.call_count == 2 + completion.assert_called_with(**expected_args) + assert completion.call_count == 2 args["--code"] = True - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) assert result.exit_code == 2 assert "Error" in result.stdout chat_path.unlink() @@ -120,17 +120,18 @@ def test_shell_chat(mock): @patch("os.system") @patch("openai.resources.chat.Completions.create") -def test_shell_repl(mock_completion, mock_system): - mock_completion.side_effect = [comp_chunks("ls"), comp_chunks("ls | sort")] +def 
test_shell_repl(completion, mock_system): + completion.side_effect = [comp_chunks("ls"), comp_chunks("ls | sort")] role = SystemRole.get(DefaultRoles.SHELL.value) chat_name = "_test" chat_path = Path(cfg.get("CHAT_CACHE_PATH")) / chat_name chat_path.unlink(missing_ok=True) args = {"--repl": chat_name, "--shell": True} - inputs = ["list folder", "sort by name", "e", "exit()"] - result = runner.invoke(app, make_args(**args), input="\n".join(inputs)) - mock_system.called_once() + inputs = ["__sgpt__eof__", "list folder", "sort by name", "e", "exit()"] + result = runner.invoke(app, cmd_args(**args), input="\n".join(inputs)) + shell = os.environ.get("SHELL", "/bin/sh") + mock_system.assert_called_once_with(f"{shell} -c 'ls | sort'") expected_messages = [ {"role": "system", "content": role.role}, @@ -140,8 +141,8 @@ def test_shell_repl(mock_completion, mock_system): {"role": "assistant", "content": "ls | sort"}, ] expected_args = comp_args(role, "", messages=expected_messages) - mock_completion.assert_called_with(**expected_args) - assert mock_completion.call_count == 2 + completion.assert_called_with(**expected_args) + assert completion.call_count == 2 assert result.exit_code == 0 assert ">>> list folder" in result.stdout @@ -151,10 +152,10 @@ @patch("openai.resources.chat.Completions.create") -def test_shell_and_describe_shell(mock): +def test_shell_and_describe_shell(completion): args = {"prompt": "ls", "--describe-shell": True, "--shell": True} - result = runner.invoke(app, make_args(**args)) + result = runner.invoke(app, cmd_args(**args)) - mock.assert_not_called() + completion.assert_not_called() assert result.exit_code == 2 assert "Error" in result.stdout diff --git a/tests/utils.py b/tests/utils.py index 8cccb4d6..13a7ce2f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,6 +1,5 @@ import datetime -import pytest import typer from openai.types.chat.chat_completion_chunk import ChatCompletionChunk from 
openai.types.chat.chat_completion_chunk import Choice as StreamChoice @@ -13,7 +12,6 @@ runner = CliRunner() app = typer.Typer() app.command()(main) -parametrize = pytest.mark.parametrize def comp_chunks(tokens_string): @@ -35,7 +33,7 @@ def comp_chunks(tokens_string): ] -def make_args(prompt="", **kwargs): +def cmd_args(prompt="", **kwargs): arguments = [prompt] for key, value in kwargs.items(): arguments.append(key)