Improved mocked tests, REPL bug fix
TheR1D committed Jan 19, 2024
1 parent 0794868 commit 8327bb5
Showing 17 changed files with 191 additions and 166 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/lint_test.yml
@@ -31,7 +31,7 @@ jobs:
- name: ruff
run: ruff sgpt tests scripts
# - name: mypy
# run: mypy sgpt --exclude function.py --exclude handler.py --exclude default_functions
# run: mypy sgpt --exclude function.py --exclude handler.py --exclude llm_functions
- name: tests
run: |
export OPENAI_API_KEY=test_api_key
File renamed without changes.
4 changes: 2 additions & 2 deletions README.md
@@ -191,7 +191,7 @@ sgpt --chat conversation_3 "Convert the resulting file into an MP3"
# -> ffmpeg -i output.mp4 -vn -acodec libmp3lame -ac 2 -ab 160k -ar 48000 final_output.mp3
```

To list all the sessions from either conversational mode, use the `--list-chats` or `lc` option:
To list all the sessions from either conversational mode, use the `--list-chats` or `-lc` option:
```shell
sgpt --list-chats
# .../shell_gpt/chat_cache/conversation_1
@@ -313,7 +313,7 @@ ShellGPT allows you to create custom roles, which can be utilized to generate co
```shell
sgpt --create-role json_generator
# Enter role description: Provide only valid json as response.
sgpt --role json "random: user, password, email, address"
sgpt --role json_generator "random: user, password, email, address"
```
```json
{
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -82,7 +82,7 @@ skip = "__init__.py"

[tool.mypy]
strict = true
exclude = ["function.py", "handler.py", "default_functions"]
exclude = ["function.py", "handler.py", "llm_functions"]

[tool.ruff]
select = [
2 changes: 1 addition & 1 deletion scripts/test.sh
@@ -4,4 +4,4 @@ set -e
set -x

# shellcheck disable=SC2068
pytest tests ${@}
pytest tests ${@} -p no:warnings
30 changes: 19 additions & 11 deletions sgpt/app.py
@@ -8,11 +8,11 @@
from click.types import Choice

from sgpt.config import cfg
from sgpt.default_functions.init_functions import install_functions as inst_funcs
from sgpt.function import get_openai_schemas
from sgpt.handlers.chat_handler import ChatHandler
from sgpt.handlers.default_handler import DefaultHandler
from sgpt.handlers.repl_handler import ReplHandler
from sgpt.llm_functions.init_functions import install_functions as inst_funcs
from sgpt.role import DefaultRoles, SystemRole
from sgpt.utils import (
get_edited_prompt,
@@ -148,17 +148,25 @@ def main(
) -> None:
stdin_passed = not sys.stdin.isatty()

if stdin_passed and not repl:
prompt = f"{sys.stdin.read()}\n\n{prompt or ''}"
if stdin_passed:
stdin = ""
# TODO: This is very hacky.
# In some cases, we need to pass stdin along with inputs.
# When we want part of stdin to be used as a init prompt,
# but rest of the stdin to be used as a inputs. For example:
# echo "hello\n__sgpt__eof__\nThis is input" | sgpt --repl temp
# In this case, "hello" will be used as a init prompt, and
# "This is input" will be used as a input to the REPL.
for line in sys.stdin:
if "__sgpt__eof__" in line:
break
stdin += line
prompt = f"{stdin}\n\n{prompt}" if prompt else stdin
# Switch to stdin for interactive input.
try:
if os.name == "posix":
sys.stdin = open("/dev/tty", "r")
elif os.name == "nt":
sys.stdin = open("CON", "r")
except OSError:
# Non-interactive shell.
pass
if os.name == "posix":
sys.stdin = open("/dev/tty", "r")
elif os.name == "nt":
sys.stdin = open("CON", "r")

if not prompt and not editor and not repl:
raise MissingParameter(param_hint="PROMPT", param_type="string")
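The reworked stdin handling above is the REPL fix: piped input is now read line by line up to an optional `__sgpt__eof__` sentinel, the text before the sentinel becomes the initial prompt, and anything after it is left on stdin for the REPL to consume. A minimal, self-contained sketch of that splitting behaviour (the function name and the `StringIO` stand-in for piped stdin are illustrative, not part of the app):

```python
import io


def read_init_prompt(stdin: io.TextIOBase, prompt: str = "") -> str:
    # Collect piped lines until the sentinel; the rest stays unread on the stream.
    collected = ""
    for line in stdin:
        if "__sgpt__eof__" in line:
            break
        collected += line
    return f"{collected}\n\n{prompt}" if prompt else collected


# Mimics: echo "hello\n__sgpt__eof__\nThis is input" | sgpt --repl temp
piped = io.StringIO("hello\n__sgpt__eof__\nThis is input\n")
print(repr(read_init_prompt(piped)))  # 'hello\n' -> becomes the REPL init prompt
print(repr(piped.read()))             # 'This is input\n' -> left for the REPL to read
```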
10 changes: 9 additions & 1 deletion sgpt/handlers/repl_handler.py
@@ -21,7 +21,7 @@ def _get_multiline_input(cls) -> str:
multiline_input += user_input + "\n"
return multiline_input

def handle(self, prompt: str, **kwargs: Any) -> None: # type: ignore
def handle(self, init_prompt: str, **kwargs: Any) -> None: # type: ignore
if self.initiated:
rich_print(Rule(title="Chat History", style="bold magenta"))
self.show_messages(self.chat_id)
@@ -37,6 +37,11 @@ def handle(self, prompt: str, **kwargs: Any) -> None: # type: ignore
)
typer.secho(info_message, fg="yellow")

if init_prompt:
rich_print(Rule(title="Input", style="bold purple"))
typer.echo(init_prompt)
rich_print(Rule(style="bold purple"))

full_completion = ""
while True:
# Infinite loop until user exits with Ctrl+C.
@@ -46,6 +51,9 @@ def handle(self, prompt: str, **kwargs: Any) -> None: # type: ignore
if prompt == "exit()":
# This is also useful during tests.
raise typer.Exit()
if init_prompt:
prompt = f"{init_prompt}\n\n\n{prompt}"
init_prompt = ""
if self.role.name == DefaultRoles.SHELL.value and prompt == "e":
typer.echo()
run_command(full_completion)
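The handler change pairs with that: the init prompt is echoed once under an "Input" rule, prepended only to the first user message, and then cleared. A tiny sketch of the consume-once pattern in isolation (the loop and names below are illustrative, not the real `ReplHandler`):

```python
def first_prompts(init_prompt: str, user_inputs: list[str]) -> list[str]:
    # Mirrors the consume-once behaviour: the init prompt is glued to the
    # first user input only, then blanked so later turns are sent as-is.
    sent = []
    for prompt in user_inputs:
        if prompt == "exit()":
            break
        if init_prompt:
            prompt = f"{init_prompt}\n\n\n{prompt}"
            init_prompt = ""
        sent.append(prompt)
    return sent


print(first_prompts("print hello", ["also print world", "and bye", "exit()"]))
# ['print hello\n\n\nalso print world', 'and bye']
```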
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
29 changes: 4 additions & 25 deletions tests/conftest.py
@@ -1,29 +1,8 @@
import datetime
import os

import pytest
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as StreamChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta

from sgpt.config import cfg


@pytest.fixture
def completion(request):
tokens_string = request.param
return [
ChatCompletionChunk(
id="foo",
model=cfg.get("DEFAULT_MODEL"),
object="chat.completion.chunk",
choices=[
StreamChoice(
index=0,
finish_reason=None,
delta=ChoiceDelta(content=token, role="assistant"),
),
],
created=int(datetime.datetime.now().timestamp()),
)
for token in tokens_string
]
@pytest.fixture(autouse=True)
def mock_os_name(monkeypatch):
monkeypatch.setattr(os, "name", "test")
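The new autouse `mock_os_name` fixture patches `os.name` to a dummy value, so the posix/nt branches that reopen `/dev/tty` or `CON` are skipped under test. The chunk-building fixture removed here apparently moves into the shared test utilities, since the updated tests import `comp_chunks` from `.utils`. A sketch of what that helper presumably looks like, reconstructed from the deleted fixture body; the exact code in `tests/utils.py` is not shown in this diff:

```python
# tests/utils.py (hypothetical reconstruction)
import datetime

from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
from openai.types.chat.chat_completion_chunk import Choice as StreamChoice
from openai.types.chat.chat_completion_chunk import ChoiceDelta

from sgpt.config import cfg


def comp_chunks(tokens_string):
    # One streaming chunk per character of the mocked completion, as the
    # old conftest.py fixture built for its parametrized token string.
    return [
        ChatCompletionChunk(
            id="foo",
            model=cfg.get("DEFAULT_MODEL"),
            object="chat.completion.chunk",
            choices=[
                StreamChoice(
                    index=0,
                    finish_reason=None,
                    delta=ChoiceDelta(content=token, role="assistant"),
                ),
            ],
            created=int(datetime.datetime.now().timestamp()),
        )
        for token in tokens_string
    ]
```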
56 changes: 27 additions & 29 deletions tests/test_code.py
@@ -4,43 +4,41 @@
from sgpt.config import cfg
from sgpt.role import DefaultRoles, SystemRole

from .utils import app, comp_args, comp_chunks, make_args, parametrize, runner
from .utils import app, cmd_args, comp_args, comp_chunks, runner

role = SystemRole.get(DefaultRoles.CODE.value)


@parametrize("completion", ["print('Hello World')"], indirect=True)
@patch("openai.resources.chat.Completions.create")
def test_code_generation(mock, completion):
mock.return_value = completion
def test_code_generation(mock):
mock.return_value = comp_chunks("print('Hello World')")

args = {"prompt": "hello world python", "--code": True}
result = runner.invoke(app, make_args(**args))
result = runner.invoke(app, cmd_args(**args))

mock.assert_called_once_with(**comp_args(role, args["prompt"]))
assert result.exit_code == 0
assert "print('Hello World')" in result.stdout


@parametrize("completion", ["# Hello\nprint('Hello')"], indirect=True)
@patch("openai.resources.chat.Completions.create")
def test_code_generation_stdin(mock, completion):
mock.return_value = completion
def test_code_generation_stdin(completion):
completion.return_value = comp_chunks("# Hello\nprint('Hello')")

args = {"prompt": "make comments for code", "--code": True}
stdin = "print('Hello')"
result = runner.invoke(app, make_args(**args), input=stdin)
result = runner.invoke(app, cmd_args(**args), input=stdin)

expected_prompt = f"{stdin}\n\n{args['prompt']}"
mock.assert_called_once_with(**comp_args(role, expected_prompt))
completion.assert_called_once_with(**comp_args(role, expected_prompt))
assert result.exit_code == 0
assert "# Hello" in result.stdout
assert "print('Hello')" in result.stdout


@patch("openai.resources.chat.Completions.create")
def test_code_chat(mock):
mock.side_effect = [
def test_code_chat(completion):
completion.side_effect = [
comp_chunks("print('hello')"),
comp_chunks("print('hello')\nprint('world')"),
]
@@ -49,13 +47,13 @@ def test_code_chat(mock):
chat_path.unlink(missing_ok=True)

args = {"prompt": "print hello", "--code": True, "--chat": chat_name}
result = runner.invoke(app, make_args(**args))
result = runner.invoke(app, cmd_args(**args))
assert result.exit_code == 0
assert "print('hello')" in result.stdout
assert chat_path.exists()

args["prompt"] = "also print world"
result = runner.invoke(app, make_args(**args))
result = runner.invoke(app, cmd_args(**args))
assert result.exit_code == 0
assert "print('hello')" in result.stdout
assert "print('world')" in result.stdout
@@ -68,20 +66,20 @@ def test_code_chat(mock):
{"role": "assistant", "content": "print('hello')\nprint('world')"},
]
expected_args = comp_args(role, "", messages=expected_messages)
mock.assert_called_with(**expected_args)
assert mock.call_count == 2
completion.assert_called_with(**expected_args)
assert completion.call_count == 2

args["--shell"] = True
result = runner.invoke(app, make_args(**args))
result = runner.invoke(app, cmd_args(**args))
assert result.exit_code == 2
assert "Error" in result.stdout
chat_path.unlink()
# TODO: Code chat can be recalled without --code option.


@patch("openai.resources.chat.Completions.create")
def test_code_repl(mock_completion):
mock_completion.side_effect = [
def test_code_repl(completion):
completion.side_effect = [
comp_chunks("print('hello')"),
comp_chunks("print('hello')\nprint('world')"),
]
@@ -90,8 +88,8 @@ def test_code_repl(mock_completion):
chat_path.unlink(missing_ok=True)

args = {"--repl": chat_name, "--code": True}
inputs = ["print hello", "also print world", "exit()"]
result = runner.invoke(app, make_args(**args), input="\n".join(inputs))
inputs = ["__sgpt__eof__", "print hello", "also print world", "exit()"]
result = runner.invoke(app, cmd_args(**args), input="\n".join(inputs))

expected_messages = [
{"role": "system", "content": role.role},
@@ -101,8 +99,8 @@ def test_code_repl(mock_completion):
{"role": "assistant", "content": "print('hello')\nprint('world')"},
]
expected_args = comp_args(role, "", messages=expected_messages)
mock_completion.assert_called_with(**expected_args)
assert mock_completion.call_count == 2
completion.assert_called_with(**expected_args)
assert completion.call_count == 2

assert result.exit_code == 0
assert ">>> print hello" in result.stdout
@@ -112,20 +110,20 @@


@patch("openai.resources.chat.Completions.create")
def test_code_and_shell(mock):
def test_code_and_shell(completion):
args = {"--code": True, "--shell": True}
result = runner.invoke(app, make_args(**args))
result = runner.invoke(app, cmd_args(**args))

mock.assert_not_called()
completion.assert_not_called()
assert result.exit_code == 2
assert "Error" in result.stdout


@patch("openai.resources.chat.Completions.create")
def test_code_and_describe_shell(mock):
def test_code_and_describe_shell(completion):
args = {"--code": True, "--describe-shell": True}
result = runner.invoke(app, make_args(**args))
result = runner.invoke(app, cmd_args(**args))

mock.assert_not_called()
completion.assert_not_called()
assert result.exit_code == 2
assert "Error" in result.stdout