From a43e360874bc09f0e0f3b96c633f41d9613c3e6f Mon Sep 17 00:00:00 2001
From: Corey McCandless
Date: Mon, 8 Feb 2021 13:52:35 -0500
Subject: [PATCH] merge test_exercises.py and test_runner_exercises.py

---
 bin/data.py                                 |  31 ++++
 bin/test_exercises.py                       | 186 ++++++++++++++++----
 bin/test_runner_exercises.py                |  77 --------
 docker-compose.yml                          |   2 +-
 exercises/practice/paasio/.meta/config.json |   4 +-
 5 files changed, 190 insertions(+), 110 deletions(-)
 delete mode 100755 bin/test_runner_exercises.py

diff --git a/bin/data.py b/bin/data.py
index 9de402aedd..44e34d33b3 100644
--- a/bin/data.py
+++ b/bin/data.py
@@ -35,6 +35,34 @@ class ExerciseStatus(str, Enum):
     Deprecated = 'deprecated'
 
 
+@dataclass
+class ExerciseFiles:
+    solution: List[str]
+    test: List[str]
+    exemplar: List[str]
+
+
+@dataclass
+class ExerciseConfig:
+    files: ExerciseFiles
+    authors: List[str] = None
+    forked_from: str = None
+    contributors: List[str] = None
+    language_versions: List[str] = None
+
+    def __post_init__(self):
+        if isinstance(self.files, dict):
+            self.files = ExerciseFiles(**self.files)
+        for attr in ['authors', 'contributors', 'language_versions']:
+            if getattr(self, attr) is None:
+                setattr(self, attr, [])
+
+    @classmethod
+    def load(cls, config_file: Path) -> 'ExerciseConfig':
+        with config_file.open() as f:
+            return cls(**json.load(f))
+
+
 @dataclass
 class ExerciseInfo:
     path: Path
@@ -93,6 +121,9 @@ def template_path(self):
     def config_file(self):
         return self.meta_dir / 'config.json'
 
+    def load_config(self) -> ExerciseConfig:
+        return ExerciseConfig.load(self.config_file)
+
 
 @dataclass
 class Exercises:
diff --git a/bin/test_exercises.py b/bin/test_exercises.py
index 3d98294e96..afa14db038 100755
--- a/bin/test_exercises.py
+++ b/bin/test_exercises.py
@@ -1,49 +1,175 @@
 #!/usr/bin/env python3
-
+"""Meant to be run from inside python-test-runner container,
+where this track repo is mounted at /python
+"""
+import argparse
+from functools import wraps
+from itertools import zip_longest
+import json
+from pathlib import Path
 import shutil
 import subprocess
 import sys
 import tempfile
-from pathlib import Path
-
-from data import Config, ExerciseInfo
+from data import Config, ExerciseConfig, ExerciseInfo
 
 # Allow high-performance tests to be skipped
 ALLOW_SKIP = ['alphametics', 'largest-series-product']
 
+TEST_RUNNER_DIR = Path('/opt/test-runner')
+
+RUNNERS = {}
+
+
+def runner(name):
+    def _decorator(runner_func):
+        RUNNERS[name] = runner_func
+        @wraps(runner_func)
+        def _wrapper(exercise: ExerciseInfo, workdir: Path, quiet: bool = False):
+            return runner_func(exercise, workdir, quiet=quiet)
+        return _wrapper
+    return _decorator
+
 
-def check_assignment(exercise: ExerciseInfo, quiet=False) -> int:
-    # Returns the exit code of the tests
-    workdir = Path(tempfile.mkdtemp(exercise.slug))
-    solution_file = exercise.solution_stub.name
-    try:
-        test_file_out = workdir / exercise.test_file.name
-        if exercise.slug in ALLOW_SKIP:
-            shutil.copyfile(exercise.test_file, test_file_out)
+def copy_file(src: Path, dst: Path, strip_skips=False):
+    if strip_skips:
+        with src.open('r') as src_file:
+            lines = [line for line in src_file.readlines()
+                     if not line.strip().startswith('@unittest.skip')]
+        with dst.open('w') as dst_file:
+            dst_file.writelines(lines)
+    else:
+        shutil.copy2(src, dst)
+
+def copy_solution_files(exercise: ExerciseInfo, workdir: Path, exercise_config: ExerciseConfig = None):
+    if exercise_config is not None:
+        solution_files = exercise_config.files.solution
+        exemplar_files = exercise_config.files.exemplar
+    else:
+        solution_files = []
+        exemplar_files = []
+    if not solution_files:
+        solution_files.append(exercise.solution_stub.name)
+    solution_files = [exercise.path / s for s in solution_files]
+    if not exemplar_files:
+        exemplar_files.append(exercise.exemplar_file.relative_to(exercise.path))
+    exemplar_files = [exercise.path / e for e in exemplar_files]
+    for solution_file, exemplar_file in zip_longest(solution_files, exemplar_files):
+        if solution_file is None:
+            copy_file(exemplar_file, workdir / exemplar_file.name)
+        elif exemplar_file is None:
+            copy_file(solution_file, workdir / solution_file.name)
         else:
-            with exercise.test_file.open('r') as src_file:
-                lines = [line for line in src_file.readlines()
-                         if not line.strip().startswith('@unittest.skip')]
-            with test_file_out.open('w') as dst_file:
-                dst_file.writelines(lines)
-        shutil.copyfile(exercise.exemplar_file, workdir / solution_file)
-        kwargs = {}
-        if quiet:
-            kwargs['stdout'] = subprocess.DEVNULL
-            kwargs['stderr'] = subprocess.DEVNULL
-        return subprocess.run([sys.executable, '-m', 'pytest', test_file_out], **kwargs).returncode
-    finally:
-        shutil.rmtree(workdir)
+            dst = workdir / solution_file.relative_to(exercise.path)
+            copy_file(exemplar_file, dst)
+
+
+def copy_test_files(exercise: ExerciseInfo, workdir: Path, exercise_config: ExerciseConfig = None):
+    if exercise_config is not None:
+        test_files = exercise_config.files.test
+    else:
+        test_files = []
+    if not test_files:
+        test_files.append(exercise.test_file.name)
+    for test_file_name in test_files:
+        test_file = exercise.path / test_file_name
+        test_file_out = workdir / test_file_name
+        copy_file(test_file, test_file_out, strip_skips=(exercise.slug not in ALLOW_SKIP))
+
+
+def copy_exercise_files(exercise: ExerciseInfo, workdir: Path):
+    exercise_config = None
+    if exercise.config_file.is_file():
+        workdir_meta = workdir / '.meta'
+        workdir_meta.mkdir(exist_ok=True)
+        copy_file(exercise.config_file, workdir_meta / exercise.config_file.name)
+        exercise_config = exercise.load_config()
+    copy_solution_files(exercise, workdir, exercise_config)
+    copy_test_files(exercise, workdir, exercise_config)
+
+
+@runner('pytest')
+def run_with_pytest(_exercise, workdir, quiet: bool = False) -> int:
+    kwargs = {'cwd': str(workdir)}
+    if quiet:
+        kwargs['stdout'] = subprocess.DEVNULL
+        kwargs['stderr'] = subprocess.DEVNULL
+    return subprocess.run([sys.executable, '-m', 'pytest'], **kwargs).returncode
+
+
+@runner('test-runner')
+def run_with_test_runner(exercise, workdir, quiet: bool = False) -> int:
+    kwargs = {}
+    if quiet:
+        kwargs['stdout'] = subprocess.DEVNULL
+        kwargs['stderr'] = subprocess.DEVNULL
+    if TEST_RUNNER_DIR.is_dir():
+        kwargs['cwd'] = str(TEST_RUNNER_DIR)
+        args = ['./bin/run.sh', exercise.slug, workdir, workdir]
+    else:
+        args = [
+            'docker-compose',
+            'run',
+            '-w', str(TEST_RUNNER_DIR),
+            '--entrypoint', './bin/run.sh',
+            '-v', f'{workdir}:/{exercise.slug}',
+            'test-runner',
+            exercise.slug,
+            f'/{exercise.slug}',
+            f'/{exercise.slug}',
+        ]
+    subprocess.run(args, **kwargs)
+    results_file = workdir / 'results.json'
+    if results_file.is_file():
+        with results_file.open() as f:
+            results = json.load(f)
+        if results['status'] == 'pass':
+            return 0
+    return 1
+
+
+def check_assignment(exercise: ExerciseInfo, runner: str = 'pytest', quiet: bool = False) -> int:
+    ret = 1
+    with tempfile.TemporaryDirectory(exercise.slug) as workdir:
+        workdir = Path(workdir)
+        copy_exercise_files(exercise, workdir)
+        ret = RUNNERS[runner](exercise, workdir, quiet=quiet)
+    return ret
+
+
+def get_cli() -> argparse.ArgumentParser:
+    parser = argparse.ArgumentParser()
+    runners = list(RUNNERS.keys())
+    if not runners:
+        print('No runners registered!')
+        raise SystemExit(1)
+    parser.add_argument('-q', '--quiet', action='store_true')
+    parser.add_argument('-r', '--runner', choices=runners, default=runners[0])
+    parser.add_argument('exercises', nargs='*')
+    return parser
 
 
 def main():
+    opts = get_cli().parse_args()
     config = Config.load()
     exercises = config.exercises.all()
-    if len(sys.argv) >= 2:
+    if opts.exercises:
         # test specific exercises
         exercises = [
-            e for e in exercises if e.slug in sys.argv[1:]
+            e for e in exercises if e.slug in opts.exercises
         ]
+        not_found = [
+            slug for slug in opts.exercises
+            if not any(e.slug == slug for e in exercises)
+        ]
+        if not_found:
+            for slug in not_found:
+                # every slug here matched no exercise in the track config
+                print(f"unknown exercise '{slug}'")
+            raise SystemExit(1)
+
+    print(f'TestEnvironment: {sys.executable.capitalize()}')
+    print(f'Runner: {opts.runner}\n\n')
 
     failures = []
     for exercise in exercises:
@@ -52,12 +178,10 @@ def main():
             print('FAIL: File with test cases not found')
             failures.append('{} (FileNotFound)'.format(exercise.slug))
         else:
-            if check_assignment(exercise):
+            if check_assignment(exercise, runner=opts.runner, quiet=opts.quiet):
                 failures.append('{} (TestFailed)'.format(exercise.slug))
         print('')
 
-    print('TestEnvironment:', sys.executable.capitalize(), '\n\n')
-
     if failures:
         print('FAILURES: ', ', '.join(failures))
         raise SystemExit(1)
@@ -65,5 +189,5 @@ def main():
     print('SUCCESS!')
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/bin/test_runner_exercises.py b/bin/test_runner_exercises.py
deleted file mode 100755
index 252552a305..0000000000
--- a/bin/test_runner_exercises.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python3
-"""Meant to be run from inside python-test-runner container,
-where this track repo is mounted at /python
-"""
-import json
-from pathlib import Path
-import shutil
-import subprocess
-import sys
-import tempfile
-from data import Config, ExerciseInfo
-
-# Allow high-performance tests to be skipped
-ALLOW_SKIP = ['alphametics', 'largest-series-product']
-
-
-def check_assignment(exercise: ExerciseInfo) -> int:
-    # Returns the exit code of the tests
-    workdir = Path(tempfile.mkdtemp(exercise.slug))
-    solution_file = exercise.solution_stub.name
-    try:
-        test_file_out = workdir / exercise.test_file.name
-        if exercise.slug in ALLOW_SKIP:
-            shutil.copy2(exercise.test_file, test_file_out)
-        else:
-            with exercise.test_file.open('r') as src_file:
-                lines = [line for line in src_file.readlines()
-                         if not line.strip().startswith('@unittest.skip')]
-            with test_file_out.open('w') as dst_file:
-                dst_file.writelines(lines)
-        shutil.copyfile(exercise.exemplar_file, workdir / solution_file)
-        if exercise.config_file.is_file():
-            tmp_meta = workdir / '.meta'
-            tmp_meta.mkdir(exist_ok=True)
-            shutil.copy2(exercise.config_file, tmp_meta / exercise.config_file.name)
-        args = ['./bin/run.sh', exercise.slug, workdir, workdir]
-        subprocess.run(args, cwd='/opt/test-runner')
-        results_file = workdir / 'results.json'
-        if results_file.is_file():
-            with results_file.open() as f:
-                results = json.load(f)
-            if results['status'] == 'pass':
-                return 0
-        return 1
-    finally:
-        shutil.rmtree(workdir)
-
-
-def main():
-    config = Config.load()
-    exercises = config.exercises.all()
-    if len(sys.argv) >= 2:
-        # test specific exercises
-        exercises = [
-            e for e in exercises if e.slug in sys.argv[1:]
-        ]
-
-    failures = []
-    for exercise in exercises:
-        print('# ', exercise.slug)
-        if not exercise.test_file:
-            print('FAIL: File with test cases not found')
-            failures.append('{} (FileNotFound)'.format(exercise.slug))
-        else:
-            if check_assignment(exercise):
-                failures.append('{} (TestFailed)'.format(exercise.slug))
-        print('')
-
-    if failures:
-        print('FAILURES: ', ', '.join(failures))
-        raise SystemExit(1)
-    else:
-        print('SUCCESS!')
-
-
-if __name__ == "__main__":
-    main()
diff --git a/docker-compose.yml b/docker-compose.yml
index 4d0872997c..f21405f7e4 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -4,6 +4,6 @@ services:
   test-runner:
     image: exercism/python-test-runner
     working_dir: /python
-    entrypoint: ./bin/test_runner_exercises.py
+    entrypoint: ./bin/test_exercises.py --runner test-runner
     volumes:
       - .:/python
diff --git a/exercises/practice/paasio/.meta/config.json b/exercises/practice/paasio/.meta/config.json
index c4820a8be8..615597a9a0 100644
--- a/exercises/practice/paasio/.meta/config.json
+++ b/exercises/practice/paasio/.meta/config.json
@@ -1,5 +1,7 @@
 {
   "files": {
-    "test": ["paasio_test.py", "test_utils.py"]
+    "solution": ["paasio.py"],
+    "test": ["paasio_test.py", "test_utils.py"],
+    "exemplar": [".meta/example.py"]
   }
 }
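
For reference, the new ExerciseConfig loader digests the paasio config above
as follows. This is a minimal sketch, not part of the patch: it assumes a
checkout of this track repo, run from the repo root with bin/ on sys.path so
that data.py is importable.

    from pathlib import Path

    from data import ExerciseConfig  # bin/data.py

    config = ExerciseConfig.load(
        Path('exercises/practice/paasio/.meta/config.json'))
    assert config.files.solution == ['paasio.py']
    assert config.files.test == ['paasio_test.py', 'test_utils.py']
    assert config.files.exemplar == ['.meta/example.py']
    # Keys the JSON omits are normalized in __post_init__: list-valued
    # metadata fields become [], while forked_from simply stays None.
    assert config.authors == []
    assert config.forked_from is None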
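
One design note on the runner registry in test_exercises.py: RUNNERS maps each
name to the undecorated function, and get_cli() derives both the argparse
choices and the default from registration order, which is what keeps 'pytest'
the default runner. A stripped-down, self-contained sketch of the pattern,
with a stand-in runner body rather than the real pytest invocation:

    from functools import wraps

    RUNNERS = {}

    def runner(name):
        def _decorator(runner_func):
            RUNNERS[name] = runner_func  # registry keeps the raw function
            @wraps(runner_func)
            def _wrapper(exercise, workdir, quiet=False):
                return runner_func(exercise, workdir, quiet=quiet)
            return _wrapper
        return _decorator

    @runner('pytest')
    def run_with_pytest(exercise, workdir, quiet=False):
        return 0  # stand-in: the real runner shells out to pytest

    runners = list(RUNNERS)           # feeds choices= in get_cli()
    assert runners[0] == 'pytest'     # first registration wins the default
    assert RUNNERS['pytest'](None, None) == 0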