Commit
- added an experimental pytest plugin that isolates test collection, preventing in-memory state pollution from one test from affecting other tests
1 parent b981171 · commit e3f79ec
Showing 2 changed files with 189 additions and 0 deletions.
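
The plugin added below isolates each test file by forking: the parent process skips collecting the file itself and only records the forked child's outcome, while the child does the actual collection and run. As a rough, self-contained sketch of that fork-and-wait pattern (not part of the commit; `work` is a hypothetical stand-in for pytest's collect/run step):

import os

def run_isolated(work):
    """Run work() in a forked child and return an exit code in the parent.

    Minimal sketch of the pattern the plugin uses, assuming a Unix platform.
    """
    pid = os.fork()
    if pid == 0:
        # child: do the work, then exit without returning to the caller
        code = 0
        try:
            work()
        except Exception:
            code = 1
        os._exit(code)
    # parent: wait for the child and decode its wait status
    _, status = os.waitpid(pid, 0)
    if os.WIFSIGNALED(status):
        return os.WTERMSIG(status) + 128  # convention: 128 + signal number
    return os.WEXITSTATUS(status)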
@@ -0,0 +1,75 @@
import os
import pytest
import sys
from pathlib import Path


class IsolatePlugin:
    """Pytest plugin to isolate test collection, so that if a test's collection pollutes the in-memory
    state, it doesn't affect the execution of other tests."""

    def __init__(self):
        self._is_child = False
        self._test_failed = False

    def pytest_ignore_collect(self, path, config):
        if self._is_child:
            return True

        if (pid := os.fork()):
            # parent: wait for the child that handles this file and map its
            # wait status to an exit code (signal N -> 128+N, else the exit status)
            pid, status = os.waitpid(pid, 0)
            if status:
                if os.WIFSIGNALED(status):
                    exitstatus = os.WTERMSIG(status) + 128
                else:
                    exitstatus = os.WEXITSTATUS(status)
            else:
                exitstatus = 0

            if exitstatus not in (pytest.ExitCode.OK, pytest.ExitCode.NO_TESTS_COLLECTED):
                self._test_failed = True

            # the parent never collects the file itself
            return True
        else:
            # child: collect (and run) the file in this freshly forked process
            self._is_child = True
            return False

    def pytest_collectreport(self, report):
        if self._is_child and report.failed and report.nodeid.endswith('.py'):
            self._test_failed = True

    def pytest_runtest_logreport(self, report):
        if self._is_child and report.failed:
            self._test_failed = True

    def pytest_unconfigure(self, config):
        if self._is_child:
            os._exit(self.get_exit_code())

    def get_exit_code(self):
        # FIXME this error handling is very simplistic, extend to other cases
        return pytest.ExitCode.TESTS_FAILED if self._test_failed else pytest.ExitCode.OK


def preload_pytest_plugins():
    """Preloads pytest plugins, in an attempt to speed things up."""
    import pkg_resources
    import importlib
    import warnings

    for ep in pkg_resources.iter_entry_points(group='pytest11'):
        try:
            importlib.import_module(ep.module_name)
        except ImportError as e:
            warnings.warn(str(e))


if __name__ == "__main__":
    preload_pytest_plugins()

    plugin = IsolatePlugin()
    exitcode = pytest.main(sys.argv[1:] + ['--forked'], plugins=[plugin])
    if exitcode in (pytest.ExitCode.OK, pytest.ExitCode.NO_TESTS_COLLECTED):
        exitcode = plugin.get_exit_code()

    sys.exit(exitcode)
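
For context, the tests added below drive this module through slipcover's module forwarding rather than importing it; a minimal invocation mirroring them would look roughly like this (the tests/ path and out.json name are illustrative, not from the commit):

import subprocess, sys

# run pytest under the isolating runner and slipcover, writing JSON coverage to out.json
subprocess.run([sys.executable, '-m', 'slipcover', '--json', '--out', 'out.json',
                '-m', 'slipcover.isolate', 'tests/'], check=False)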
@@ -0,0 +1,114 @@
import pytest
import sys
import subprocess
from pathlib import Path
import json


@pytest.mark.skipif(sys.platform == 'win32', reason='Unix-only')
def test_isolate_all_ok(tmp_path):
    out = tmp_path / "out.json"
    test_file = str(Path('tests') / 'pyt.py')

    subprocess.run([sys.executable, '-m', 'slipcover', '--json', '--out', str(out),
                    '-m', 'slipcover.isolate', test_file])

    with out.open() as f:
        cov = json.load(f)

    assert test_file in cov['files']
    assert {test_file} == set(cov['files'].keys())
    cov = cov['files'][test_file]
    assert [1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 13, 14] == cov['executed_lines']
    assert [] == cov['missing_lines']


@pytest.mark.skipif(sys.platform == 'win32', reason='Unix-only')
def test_isolate_nontest_issue(tmp_path):
    out = tmp_path / "out.json"
    test_file = str(Path('tests') / 'pyt.py')

    p = subprocess.run([sys.executable, '-m', 'slipcover', '--json', '--out', str(out),
                        '-m', 'slipcover.isolate', '--my-invalid-flag', test_file],
                       check=False)
    assert p.returncode == pytest.ExitCode.USAGE_ERROR


def seq2p(tests_dir, seq):
    return tests_dir / f"test_{seq}.py"


N_TESTS = 10
def make_polluted_suite(tests_dir: Path, pollute_fails_collect: bool):
    """In a suite with 10 tests, test 6 fails; test 3 doesn't fail, but causes 6 to fail."""

    for seq in range(N_TESTS):
        seq2p(tests_dir, seq).write_text('def test_foo(): pass')

    polluter = seq2p(tests_dir, 3)
    polluter.write_text("import sys\n" + "sys.foobar = True\n" + "def test_foo(): pass")

    failing = seq2p(tests_dir, 6)
    if pollute_fails_collect:
        failing.write_text("import sys\n" + "assert not getattr(sys, 'foobar', False)\n" + "def test_foo(): pass")
    else:
        failing.write_text("import sys\n" + "def test_foo(): assert not getattr(sys, 'foobar', False)")

    return failing, polluter


def make_failing_suite(tests_dir: Path):
    """In a suite with 10 tests, test 6 fails."""

    for seq in range(N_TESTS):
        seq2p(tests_dir, seq).write_text('def test_foo(): pass')

    failing = seq2p(tests_dir, 6)
    failing.write_text("def test_bar(): assert False")


@pytest.mark.parametrize("pollute_fails_collect", [True, False])
def test_check_suite_fails(tmp_path, monkeypatch, pollute_fails_collect):
    out = tmp_path / "out.json"

    monkeypatch.chdir(tmp_path)
    tests_dir = Path('tests')
    tests_dir.mkdir()
    make_polluted_suite(tests_dir, pollute_fails_collect)

    p = subprocess.run([sys.executable, '-m', 'slipcover', '--json', '--out', str(out),
                        '-m', 'pytest', tests_dir], check=False)
    assert p.returncode == (pytest.ExitCode.INTERRUPTED if pollute_fails_collect else pytest.ExitCode.TESTS_FAILED)


@pytest.mark.skipif(sys.platform == 'win32', reason='Unix-only')
@pytest.mark.parametrize("pollute_fails_collect", [True, False])
def test_isolate_polluted(tmp_path, monkeypatch, pollute_fails_collect):
    out = tmp_path / "out.json"

    monkeypatch.chdir(tmp_path)
    tests_dir = Path('tests')
    tests_dir.mkdir()
    make_polluted_suite(tests_dir, pollute_fails_collect)

    p = subprocess.run([sys.executable, '-m', 'slipcover', '--json', '--out', str(out),
                        '-m', 'slipcover.isolate', tests_dir], check=False)
    assert p.returncode == pytest.ExitCode.OK


@pytest.mark.skipif(sys.platform == 'win32', reason='Unix-only')
@pytest.mark.parametrize("pollute_fails_collect", [True, False])
def test_isolate_failing(tmp_path, monkeypatch, pollute_fails_collect):
    out = tmp_path / "out.json"

    monkeypatch.chdir(tmp_path)
    tests_dir = Path('tests')
    tests_dir.mkdir()
    make_polluted_suite(tests_dir, pollute_fails_collect)

    failing = seq2p(tests_dir, 2)
    failing.write_text("def test_bar(): assert False")

    p = subprocess.run([sys.executable, '-m', 'slipcover', '--json', '--out', str(out),
                        '-m', 'slipcover.isolate', tests_dir], check=False)
    assert p.returncode == pytest.ExitCode.TESTS_FAILED