diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..0a53e44 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,89 @@ +# Git +.git +.gitignore +.gitattributes + + +# CI +.codeclimate.yml +.travis.yml +.taskcluster.yml + +# Docker +docker-compose.yml +Dockerfile +.docker +.dockerignore + +# Byte-compiled / optimized / DLL files +**/__pycache__/ +**/*.py[cod] + +# C extensions +*.so + +# Distribution / packaging +.Python +env/ +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +*.egg-info/ +.installed.cfg +*.egg + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.cache +nosetests.xml +coverage.xml + +# Translations +*.mo +*.pot + +# Django stuff: +*.log + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Virtual environment +.env +.venv/ +venv/ + +# PyCharm +.idea + +# Python mode for VIM +.ropeproject +**/.ropeproject + +# Vim swap files +**/*.swp + +# VS Code +.vscode/ diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index 80389d9..b946ea7 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -32,6 +32,7 @@ jobs: name: pypi url: https://pypi.org/p/manytask-checker permissions: + deployments: write id-token: write # IMPORTANT: this permission is mandatory for trusted publishing steps: - uses: actions/checkout@v4 @@ -49,27 +50,35 @@ jobs: - name: Publish to PyPI uses: pypa/gh-action-pypi-publish@release/v1 -# release-github-pages: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@v4 -# - name: Setup Python 3.9 -# uses: actions/setup-python@v3 -# with: -# python-version: 3.9 -# - uses: actions/cache@v3 -# with: -# path: ${{ env.pythonLocation }} -# key: ${{ runner.os }}-python-3.9-${{ env.pythonLocation }}-${{ hashFiles('pyproject.toml', 'setup.cfg') }}-docs -# restore-keys: | -# ${{ runner.os }}-python-3.9- -# ${{ runner.os }}-python- -# ${{ runner.os }}- -# - name: Install dependencies -# run: python -m pip install -e .[docs] -# - name: Build and publish docs -# run: | -# git fetch --all -# # lazydocs -# python -m mkdocs build --config-file docs/mkdocs.yml -# python -m mkdocs gh-deploy --config-file docs/mkdocs.yml --force + release-github-pages: + permissions: + contents: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.12 + cache: 'pip' # caching pip dependencies + - name: Install dependencies + run: | + python -m pip install -e .[docs] + - name: Docs deploy + run: | + echo -n "${{github.ref_name}}" > VERSION + make docs-deploy + + release-docker: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.9', '3.10', '3.11', '3.12'] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: Build docker image + run: | + docker build --build-arg PYTHON_VERSION=${{ matrix.python-version }} -t manytask-checker:${{ github.sha }} . 
+ - name: Test run --help in docker image + run: | + docker run --rm manytask-checker:${{ github.sha }} --help diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7b476c7..817d6c7 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -1,17 +1,23 @@ name: Lint and Test on: -# push: - pull_request: + push: branches: [ main ] + pull_request: + branches: [ main, new-1.x.x ] + + +concurrency: + group: ${{ github.workflow }}-${{ github.ref_name || github.ref }} + cancel-in-progress: true jobs: - lint-python: + package-lint: runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] fail-fast: false steps: - uses: actions/checkout@v4 @@ -22,20 +28,15 @@ jobs: cache: 'pip' # caching pip dependencies - name: Install dependencies run: | - python -m pip install wheel python -m pip install -e .[test] - - name: Run isort - run: python -m isort checker --check - - name: Run ruff - run: python -m ruff checker - - name: Run mypy - run: python -m mypy checker + - name: Run linters + run: make lint - test-python: + package-test: runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] fail-fast: false steps: - uses: actions/checkout@v4 @@ -46,12 +47,9 @@ jobs: cache: 'pip' # caching pip dependencies - name: Install dependencies run: | - python -m pip install wheel python -m pip install -e .[test] - sudo apt-get update - sudo apt-get install -y iputils-ping cmake ninja-build clang-tidy clang-format - - name: Run tests - run: python -m pytest --python --cpp --cov-report=xml tests/ + - name: Run all tests + run: make test - name: Upload coverage uses: codecov/codecov-action@v3 with: @@ -61,11 +59,11 @@ jobs: fail_ci_if_error: true # optional (default = false) verbose: true # optional (default = false) - build-python-package: + package-build: runs-on: ubuntu-latest strategy: matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] fail-fast: false steps: - uses: actions/checkout@v4 @@ -79,4 +77,85 @@ jobs: python -m pip install wheel - name: Build wheel run: | + # TODO: replace with make build python -m pip wheel . --no-deps --wheel-dir dist + + docker-build: + runs-on: ubuntu-latest + strategy: + matrix: + python-version: ['3.9', '3.10', '3.11', '3.12'] + fail-fast: false + steps: + - uses: actions/checkout@v4 + - name: Build docker image + run: | + docker build --build-arg PYTHON_VERSION=${{ matrix.python-version }} -t manytask-checker:${{ github.sha }} . 
+ - name: Test run --help in docker image + run: | + docker run --rm manytask-checker:${{ github.sha }} --help + + docs-test: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.12 + cache: 'pip' # caching pip dependencies + - name: Install dependencies + run: | + python -m pip install -e .[docs] + - name: Build docs + run: | + make docs-build + + # publish dev docs on push to main + docs-deploy-dev: + permissions: + contents: write + deployments: write + runs-on: ubuntu-latest + if: github.ref == 'refs/heads/main' && github.event_name == 'push' + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.12 + cache: 'pip' # caching pip dependencies + - name: Install dependencies + run: | + python -m pip install -e .[docs] + - name: Docs deploy + run: | + make docs-deploy-dev + + docs-preview: + permissions: + deployments: write + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + with: + python-version: 3.12 + cache: 'pip' # caching pip dependencies + - name: Install dependencies + run: | + python -m pip install -e .[docs] + - name: Docs preview + run: | + make docs-build + - name: Publish to Cloudflare Pages + id: deploy + uses: cloudflare/pages-action@v1 + with: + apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} + accountId: ${{ secrets.CLOUDFLARE_ACCOUNT_ID }} + projectName: manytask-checker + directory: ./site + # Optional: Enable this if you want to have GitHub Deployments triggered + gitHubToken: ${{ secrets.GITHUB_TOKEN }} + # Optional: Switch what branch you are publishing to. + # By default this will be the branch which triggered this workflow + branch: ${{ ( github.event.workflow_run.head_repository.full_name == github.repository && github.event.workflow_run.head_branch == 'main' && 'main' ) || ( github.event.workflow_run.head_sha ) }} diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..0d74ef7 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,27 @@ +ARG PYTHON_VERSION=3.12 + +# Stage 1: Build stage +FROM python:${PYTHON_VERSION}-alpine as builder + +WORKDIR /usr/src/app + +COPY pyproject.toml VERSION Makefile setup.py README.md ./ +COPY checker ./checker + +RUN python -m venv /opt/checker-venv +RUN /opt/checker-venv/bin/python -m pip install --no-cache-dir --require-virtualenv . 
+RUN find /opt/checker-venv -type d -name '__pycache__' -exec rm -r {} + && \ + find /opt/checker-venv -type d -name 'tests' -exec rm -rf {} + && \ + find /opt/checker-venv -name '*.pyc' -delete && \ + find /opt/checker-venv -name '*.pyo' -delete + + +# Stage 2: Runtime stage +FROM python:${PYTHON_VERSION}-alpine + +WORKDIR /usr/src/app + +COPY --from=builder /opt/checker-venv /opt/checker-venv + +ENTRYPOINT [ "/opt/checker-venv/bin/python", "-m", "checker" ] +CMD [ "--help" ] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..884d77e --- /dev/null +++ b/Makefile @@ -0,0 +1,81 @@ +#.RECIPEPREFIX = >> +# Default task to run when no task is specified +all: help + +# Help task to display callable targets +help: + @echo "Makefile commands:" + @echo "test-unit - Run unit tests with pytest" + @echo "test-integration - Run integration tests with pytest" + @echo "test-docstests - Run doctests with pytest" + @echo "test - Run all tests with pytest" + @echo "lint - Lint and typecheck the code" + @echo "format - Format the code with black" + @echo "docs-build - Build the documentation" + @echo "docs-serve - Serve the documentation in development mode" + @echo "help - Display this help" + +# Run unit tests only +.PHONY: test-unit +test-unit: + @echo "[make] Running unit tests..." + pytest --skip-integration --skip-doctest + +# Run integration tests only +.PHONY: test-integration +test-integration: + @echo "[make] Running integration tests..." + pytest --skip-unit --skip-doctest + +# Run doctests only +.PHONY: test-docstests +test-docstests: + @echo "[make] Running doctests..." + pytest --skip-unit --skip-integration + +# Run all tests +.PHONY: test +test: + @echo "[make] Running unit and integration tests..." + pytest + +# Lint and typecheck the code +.PHONY: lint +lint: + @echo "[make] Linting and typechecking the code..." + ruff check checker tests + mypy checker + black --check checker tests + isort --check-only checker tests + +# Format the code with black and isort +.PHONY: format +format: + @echo "[make] Formatting the code..." + black checker tests + isort checker tests + +# Deploy the documentation +.PHONY: docs-deploy +docs-deploy: + @echo "[make] Deploying the documentation..." + python -m mike deploy -b gh-pages `cat VERSION` --push --message "docs(auto): deploy docs for `cat VERSION`" + python -m mike set-default `cat VERSION` + +# Deploy dev version of the documentation +.PHONY: docs-deploy-dev +docs-deploy-dev: + @echo "[make] Deploying the documentation (dev)..." + python -m mike deploy -b gh-pages dev --push --message "docs(auto): deploy docs for dev" + +# Build the documentation +.PHONY: docs-build +docs-build: + @echo "[make] Building the documentation..." + python -m mkdocs build + +# Serve the documentation in development mode +.PHONY: docs-serve +docs-serve: + @echo "[make] Serve the documentation..." 
+ python -m mkdocs serve diff --git a/README.md b/README.md index 61847c3..9248ae6 100644 --- a/README.md +++ b/README.md @@ -1,166 +1,101 @@ # Manytask Checker -[![Test](https://github.com/yandexdataschool/checker/actions/workflows/test.yml/badge.svg)](https://github.com/yandexdataschool/checker/actions/workflows/test.yml) -[![Publish](https://github.com/yandexdataschool/checker/actions/workflows/publish.yml/badge.svg)](https://github.com/yandexdataschool/checker/actions/workflows/publish.yml) -[![codecov](https://codecov.io/gh/yandexdataschool/checker/branch/main/graph/badge.svg?token=3F9J850FX2)](https://codecov.io/gh/yandexdataschool/checker) -[![github](https://img.shields.io/github/v/release/yandexdataschool/checker?logo=github&display_name=tag&sort=semver)](https://github.com/yandexdataschool/checker/releases) -[![docker](https://img.shields.io/pypi/v/manytask-checker.svg)](https://pypi.org/project/manytask-checker/) +[![Test-workflow](https://github.com/manytask/checker/actions/workflows/test.yml/badge.svg)](https://github.com/manytask/checker/actions/workflows/test.yml) +[![Publish-workflow](https://github.com/manytask/checker/actions/workflows/publish.yml/badge.svg)](https://github.com/manytask/checker/actions/workflows/publish.yml) +[![codecov](https://codecov.io/gh/manytask/checker/branch/main/graph/badge.svg?token=3F9J850FX2)](https://codecov.io/gh/manytask/checker) +[![github-version](https://img.shields.io/github/v/release/manytask/checker?logo=github&display_name=tag&sort=semver)](https://github.com/manytask/checker/releases) +[![pypi](https://img.shields.io/pypi/v/manytask-checker.svg)](https://pypi.org/project/manytask-checker/) -Script to test students' solutions with [manytask](https://github.com/yandexdataschool/manytask) integration +Checker is a Python CLI script to test students' solutions with built-in [manytask](https://github.com/yandexdataschool/manytask) integration. -Key features: +* **production-like** - setup for students with gitlab-ci, language-specific testing, etc. +* **customizable** - testing pipeline configuration via yaml files +* **extensible** - built-in plugins and ability to write custom plugins + * [manytask](https://github.com/manytask/manytask) integration with plugin + * gitlab merge-requests checks with plugin + * etc +* **secure** - sandbox execution of students' code +* **fast** - lightweight script with testing parallelization -* git changes detection -* extension for different languages -* sandbox execution -* [manytask](https://github.com/yandexdataschool/manytask) integration - - -Please refer to the [manytask](https://github.com/yandexdataschool/manytask) documentation first to understand the drill +`Requires Python 3.9+` --- ## How it works -The `checker` lib is a relatively small cli script aiming to run tests in gitlab runner and push results to `manytask`. - - -The full `checker` and `manytask` setup roughly looks as follows - -* self-hosted `gitlab` instance - storing repos with assignments and students' repo - * private repo - a repository with tasks, public and private tests, gold solutions, ect.
- * public repo - a repository available to students with tasks and solution templates - * students' group - the group where `manytask` will create repositories for students - each students' repo - fork from public repo -* `gitlab runners` - place where students' solutions likely to be tested -* `checker` script - some script to test students' solutions and push scores/grades to the `manytask` -* `manytask` instance - web application managing students' grades (in google sheet) and deadlines (web page) - - -The flow for checking students' solution looks like: - -1. Student push his solution to a gitlab repo -2. gitlab-ci runs separate docker in gitlab-runner -3. gitlab-ci runs this script with some parameters -4. the script detect the latest changes (via git) and select tasks to check -5. the tasks forwarded to `tester` and it returns obtained scores -6. the script push student scores to the manytask - -(additionally script can check ground-truth solutions, export new tasks etc) - - -## Usage - -### Pre requirements - -1. [manytask](https://github.com/yandexdataschool/manytask) web app - Currently, this lib is integrated with manytask **only**, - so you need it to be set up first, see installation instructions in manytask repo. -2. gitlab with access to greate groups, users and add runners - This pre-requirement for manytask; See manytask installation instructions for more info -3. Created and tested [tester](./checker/testers) for your course/language - -### Preparations - -Obtain service keys for this script to operate -1. manytask tester token you set up when run it -2. gitlab service user to operate with your repositories - (better to create a new one) - -Create gitlab repositories layout +The `checker` lib is a part of the `manytask` ecosystem and extends its functionality. +Please refer to the [manytask](https://github.com/manytask/manytask) documentation first to understand the drill. -1. Create private repository with tasks, public and private tests and ground-truth solution; - Choose one of the suitable layouts (see [driver.py](./checker/course/driver.py)) - Grant access to your service account -2. Create public empty repository - Grant access to your service account -3. Create private (!) group for students repositories - (You have already done it if you set up manytask) - Grant access to your service account +> tl;dr: Manytask is a web application to manage students' repos, grades and deadlines. +> It stores grades in a google sheet and deadlines on a web page. +> It also automatically creates gitlab repositories for students as forks from the Public Repo with tasks and solution templates. -Edit config files in repository +So you have `Public Repo` and `Students' Repositories` and `Web App` to collect grades and deadlines. -1. `.course.yml` - main endpoints config - (see [.course.yml example](./examples/.course.yml)) -2. `.deadlines.yml` - task deadlines - (see [.deadlines.yml example](./examples/.deadlines.yml)) -3. `.gitlab-ci.yml` - set up gitlab ci pipeline to test students tasks -4. `.releaser-ci.yml` - set up gitlab ci pipeline to test new added tasks and build dockers - -Setup dockers with env ready for testing, it's convenient to have 2 dockers: - -1. `base.docker` - base docker to build and test students solutions, install lib here -2. `testenv.docker` - docker on top of base docker, to save tasks and tests - - -## Structure - -### Course - -* **CourseConfig** - Manage course configuration. Wrapper around `.course.yml` file.
- - -* **CourseSchedule** - Manage course deadlines. Wrapper around `.deadlines.yml` file. +--- +For the checker, this setup is extended with a `Private Repo` with tasks, tests and solutions, and `gitlab-ci` to run tests in. -* **CourseDriver** - Manage mapping of the Course to the Filesystem. (e.g. map Task to folders with tests and source files) - Available layouts are (see [driver.py](./checker/course/driver.py)): - * `flat` - all tasks in root folder of the repo - * `groups` - each group has its own folder +The `checker` in a nutshell is a CLI script providing the following functionality: +* **grade** - to run in a student's repository to test the solution against private and public tests and push scores. +* **validate** - to run in a private (tutors') repository to validate tasks and deadlines integrity (will run automatically during check). +* **check** - to run in a private (tutors') repository to test gold solutions against private and public tests. +* **export** - to run in a private (tutors') repository to export tasks, templates and tests to the public repository. -### Testing +``` mermaid +flowchart LR + private(Private Repo) -->|checker check| private + private -->|checker export| public + student([Student's Repo]) -->|checker grade| manytask + subgraph gitlab + public(Public Repo) -.->|fork| student + public -->|updates| student + end +``` -* **Executor** is object to run commands with some isolation level. - Available modes are: +The flow for tutors looks like: - * `sandbox` - separate process (clean env variables, nouser/nogroup, disabled network) - * `docker` - TODO - +1. Have a manytask ready with an empty public repo +2. Create a private repo with tasks, tests and solutions +3. Configure checker with yaml files +4. Make a docker image with your environment and checker installed +5. Write a ci file for students' repos to run `checker grade` on each push/mr +6. Setup private repo ci to run `checker check` on each push/mr +7. Setup private repo ci to run `checker export` on each push/mr or release, or regularly or manually +8. Profit! -* **Tester** is object which can test single task: copy files, build, test it, cleanup. - Tester is extendable for each course/language. Now available: +The flow for students looks like: - * `python` +1. Register in manytask and get access to the public repo fork +2. Clone this repo and start working on tasks +3. Update from the public repo regularly to get new tasks and tests +4. Push your solution to gitlab where `checker grade` will run and push scores +5. Profit! -## Developing +This is a short description; please refer to the [checker docs](./docs) for more details. -### Installation -Create venv -```shell -python -m venv .venv -source .venv/bin/activate -``` +## Installation -Install lib in dev mode +The `checker` is available on pypi, so you can install it with pip ```shell -(.venv)$ pip install -U --editable .[test] # .\[test\] in zsh +pip install manytask-checker ``` -### Running tests and linters - +Or use a pre-built docker image (you can base your image on it) ```shell -pytest . --cpp --python +FROM manytask/checker:0.0.1-python3.12 ``` +Please check [docker hub](https://hub.docker.com/r/manytask/checker) for available tags. -```shell -ruff checker -mypy checker -isort --check . -``` -### Adding a new language tester +## Contributing -In order to add a new language to the test system you need to make a pull request. +Really appreciate any contributions! +For guidance on setting up a development environment see the [development guide](./docs/6_development).
+For styleguide see organization [contribution guide](https://github.com/manytask/.github/CONTRIBUTING.md). -1. Add a new tester in [checker/testers](./checker/testers) - (see [python.py](./checker/testers/python.py) as example) -2. Update [tester.py](./checker/testers/tester.py) `create` method to run your tester -3. Write tests for a new tester in [./tests/testers](./tests/testers) diff --git a/VERSION b/VERSION index f76f913..835b796 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.9.2 \ No newline at end of file +1.0.0-dev \ No newline at end of file diff --git a/checker/__init__.py b/checker/__init__.py index e69de29..b9f8ef4 100644 --- a/checker/__init__.py +++ b/checker/__init__.py @@ -0,0 +1,7 @@ +from . import configs # noqa: F401 +from . import exceptions # noqa: F401 +from . import exporter # noqa: F401 +from . import pipeline # noqa: F401 +from . import plugins # noqa: F401 +from . import tester # noqa: F401 +from . import utils # noqa: F401 diff --git a/checker/__main__.py b/checker/__main__.py index b990515..a6b120c 100644 --- a/checker/__main__.py +++ b/checker/__main__.py @@ -1,362 +1,370 @@ -"""Main executable file. Refer to cli module""" from __future__ import annotations import json import os -import shutil -import sys -import tempfile from pathlib import Path -from typing import Any import click -from .actions.check import pre_release_check_tasks - -# from .actions.contributing import create_public_mr # type: ignore -from .actions.export import export_public_files -from .actions.grade import grade_on_ci -from .actions.grade_mr import grade_student_mrs, grade_students_mrs_to_master -from .course import CourseConfig, CourseSchedule, Task -from .course.driver import CourseDriver -from .testers import Tester -from .utils.glab import GitlabConnection -from .utils.print import print_info - - -ClickTypeReadableFile = click.Path(exists=True, file_okay=True, readable=True, path_type=Path) -ClickTypeReadableDirectory = click.Path(exists=True, file_okay=False, readable=True, path_type=Path) -ClickTypeWritableDirectory = click.Path(file_okay=False, writable=True, path_type=Path) - - -@click.group() -@click.option('-c', '--config', envvar='CHECKER_CONFIG', type=ClickTypeReadableFile, default=None, - help='Course config path') -@click.version_option(package_name='manytask-checker') +from checker.course import Course, FileSystemTask +from checker.exporter import Exporter +from checker.tester import Tester +from checker.utils import print_info + +from .configs import CheckerConfig, DeadlinesConfig, TaskConfig +from .exceptions import BadConfig, TestingError + + +ClickReadableFile = click.Path(exists=True, file_okay=True, readable=True, path_type=Path) +ClickReadableDirectory = click.Path(exists=True, file_okay=False, readable=True, path_type=Path) +ClickWritableDirectory = click.Path(file_okay=False, writable=True, path_type=Path) + + +@click.group(context_settings={"show_default": True}) +@click.option( + "--checker-config", + type=ClickReadableFile, + default=".checker.yml", + help="Path to the checker config file.", +) +@click.option( + "--deadlines-config", + type=ClickReadableFile, + default=".deadlines.yml", + help="Path to the deadlines config file.", +) +@click.version_option(package_name="manytask-checker") @click.pass_context -def main( - ctx: click.Context, - config: Path | None, +def cli( + ctx: click.Context, + checker_config: Path, + deadlines_config: Path, ) -> None: - """Students' solutions *checker*""" - # Read course config and pass it to any command - # If not 
provided - read .course.yml from the root - config = config or Path() / '.course.yml' - if not config.exists(): - config = Path() / 'tests' / '.course.yml' - if not config.exists(): - config = Path() / 'tools' / '.course.yml' - - if not config.exists(): - raise FileNotFoundError('Unable to find `.course.yml` config') - - execution_folder = Path() - + """Manytask checker - automated tests for students' assignments.""" + ctx.ensure_object(dict) ctx.obj = { - 'course_config': CourseConfig.from_yaml(config), - 'execution_folder': execution_folder, + "course_config_path": checker_config, + "deadlines_config_path": deadlines_config, } -@main.command() -@click.argument('root', required=False, type=ClickTypeReadableDirectory) -@click.option('--task', type=str, multiple=True, help='Task name to check') -@click.option('--group', type=str, multiple=True, help='Group name to check') -@click.option('--no-clean', is_flag=True, help='Clean or not check tmp folders') -@click.option('--dry-run', is_flag=True, help='Do not execute anything, only print') -@click.option('--parallelize', is_flag=True, help='Execute parallel checking of tasks') -@click.option('--num-processes', type=int, default=None, help='Num of processes parallel checking (default: unlimited)') -@click.option('--contributing', is_flag=True, help='Run task check for students` contribution (decrease verbosity)') +@cli.command() +@click.argument("root", type=ClickReadableDirectory, default=".") +@click.option("-v/-s", "--verbose/--silent", is_flag=True, default=True, help="Verbose output") +@click.pass_context +def validate( + ctx: click.Context, + root: Path, + verbose: bool, +) -> None: + """Validate the configuration files, plugins and tasks. + + 1. Validate the configuration files content. + 2. Validate mentioned plugins. + 3. Check all tasks are valid and consistent with the deadlines. 
+ """ + + print_info("Validating configuration files...") + try: + checker_config = CheckerConfig.from_yaml(ctx.obj["course_config_path"]) + deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) + except BadConfig as e: + print_info("Configuration Failed", color="red") + print_info(e) + exit(1) + print_info("Ok", color="green") + + print_info("Validating Course Structure (and tasks configs)...") + try: + course = Course(deadlines_config, root) + course.validate() + except BadConfig as e: + print_info("Course Validation Failed", color="red") + print_info(e) + exit(1) + print_info("Ok", color="green") + + print_info("Validating Exporter...") + try: + exporter = Exporter( + course, + checker_config.structure, + checker_config.export, + root, + verbose=True, + dry_run=True, + ) + exporter.validate() + except BadConfig as e: + print_info("Exporter Validation Failed", color="red") + print_info(e) + exit(1) + print_info("Ok", color="green") + + print_info("Validating tester...") + try: + tester = Tester(course, checker_config, verbose=verbose) + tester.validate() + except BadConfig as e: + print_info("Tester Validation Failed", color="red") + print_info(e) + exit(1) + print_info("Ok", color="green") + + +@cli.command() +@click.argument("root", type=ClickReadableDirectory, default=".") +@click.argument("reference_root", type=ClickReadableDirectory, default=".") +@click.option( + "-t", + "--task", + type=str, + multiple=True, + default=None, + help="Task name to check (multiple possible)", +) +@click.option( + "-g", + "--group", + type=str, + multiple=True, + default=None, + help="Group name to check (multiple possible)", +) +@click.option( + "-p", + "--parallelize", + is_flag=True, + default=True, + help="Execute parallel checking of tasks", +) +@click.option( + "-n", + "--num-processes", + type=int, + default=os.cpu_count(), + help="Num of processes parallel checking", +) +@click.option("--no-clean", is_flag=True, help="Clean or not check tmp folders") +@click.option( + "-v/-s", + "--verbose/--silent", + is_flag=True, + default=True, + help="Verbose tests output", +) +@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions") @click.pass_context def check( - ctx: click.Context, - root: Path | None = None, - task: list[str] | None = None, - group: list[str] | None = None, - no_clean: bool = False, - dry_run: bool = False, - parallelize: bool = False, - num_processes: int | None = None, - contributing: bool = False, + ctx: click.Context, + root: Path, + reference_root: Path, + task: list[str] | None, + group: list[str] | None, + parallelize: bool, + num_processes: int, + no_clean: bool, + verbose: bool, + dry_run: bool, ) -> None: - """Run task pre-release checking""" - context: dict[str, Any] = ctx.obj - course_config: CourseConfig = context['course_config'] - execution_folder: Path = context['execution_folder'] - - root = root or execution_folder - private_course_driver = CourseDriver( - root_dir=root, - repo_type='private', - layout=course_config.layout, - ) - course_schedule = CourseSchedule( - deadlines_config=private_course_driver.get_deadlines_file_path(), - ) - tester = Tester.create( - root=root, - course_config=course_config, + """Check private repository: run tests, lint etc. First forces validation. + + 1. Run `validate` command. + 2. Export tasks to temporary directory for testing. + 3. Run pipelines: global, tasks and (dry-run) report. + 4. Cleanup temporary directory. 
+ """ + # validate first + ctx.invoke(validate, root=root, verbose=verbose) # TODO: check verbose level + + # load configs + checker_config = CheckerConfig.from_yaml(ctx.obj["course_config_path"]) + deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) + + # read filesystem, check existing tasks + course = Course(deadlines_config, root) + + # create exporter and export files for testing + exporter = Exporter( + course, + checker_config.structure, + checker_config.export, + root, + verbose=True, cleanup=not no_clean, dry_run=dry_run, ) - - tasks: list[Task] | None = None + exporter.export_for_testing(exporter.temporary_dir) + + # validate tasks and groups if passed + filesystem_tasks: dict[str, FileSystemTask] = dict() + if task: + for filesystem_task in course.get_tasks(enabled=True): + if filesystem_task.name in task: + filesystem_tasks[filesystem_task.name] = filesystem_task if group: - tasks = [] - for group_name in group: - if group_name in course_schedule.groups: - tasks.extend(course_schedule.groups[group_name].tasks) - else: - print_info(f'Provided wrong group name: {group_name}', color='red') - sys.exit(1) - elif task: - tasks = [] - for task_name in task: - if task_name in course_schedule.tasks: - tasks.append(course_schedule.tasks[task_name]) - else: - print_info(f'Provided wrong task name: {task_name}', color='red') - sys.exit(1) - - pre_release_check_tasks( - course_schedule, - private_course_driver, - tester, - tasks=tasks, - parallelize=parallelize, - num_processes=num_processes, - contributing=contributing, - ) - - -@main.command() -@click.argument('reference_root', required=False, type=ClickTypeReadableDirectory) -@click.option('--test-full-groups', is_flag=True, help='Test all tasks in changed groups') + for filesystem_group in course.get_groups(enabled=True): + if filesystem_group.name in group: + for filesystem_task in filesystem_group.tasks: + filesystem_tasks[filesystem_task.name] = filesystem_task + if filesystem_tasks: + print_info(f"Checking tasks: {', '.join(filesystem_tasks.keys())}") + + # create tester to... 
to test =) + tester = Tester(course, checker_config, verbose=verbose, dry_run=dry_run) + + # run tests + # TODO: progressbar on parallelize + try: + tester.run( + exporter.temporary_dir, + tasks=list(filesystem_tasks.values()) if filesystem_tasks else None, + report=False, + ) + except TestingError as e: + print_info("TESTING FAILED", color="red") + print_info(e) + exit(1) + except Exception as e: + print_info("UNEXPECTED ERROR", color="red") + print_info(e) + raise e + exit(1) + print_info("TESTING PASSED", color="green") + + +@cli.command() +@click.argument("root", type=ClickReadableDirectory, default=".") +@click.argument("reference_root", type=ClickReadableDirectory, default=".") +@click.option("--submit-score", is_flag=True, help="Submit score to the Manytask server") +@click.option("--timestamp", type=str, default=None, help="Timestamp to use for the submission") +@click.option("--username", type=str, default=None, help="Username to use for the submission") +@click.option("--no-clean", is_flag=True, help="Clean or not check tmp folders") +@click.option( + "-v/-s", + "--verbose/--silent", + is_flag=True, + default=False, + help="Verbose tests output", +) +@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions") @click.pass_context def grade( - ctx: click.Context, - reference_root: Path | None = None, - test_full_groups: bool = False, + ctx: click.Context, + root: Path, + reference_root: Path, + submit_score: bool, + timestamp: str | None, + username: str | None, + no_clean: bool, + verbose: bool, + dry_run: bool, ) -> None: - """Run student's tasks (current ci user)""" - context: dict[str, Any] = ctx.obj - course_config: CourseConfig = context['course_config'] - execution_folder: Path = context['execution_folder'] - - reference_root = reference_root or execution_folder - public_course_driver = CourseDriver( - root_dir=Path(os.environ['CI_PROJECT_DIR']), - repo_type='public', - layout=course_config.layout, - ) - private_course_driver = CourseDriver( - root_dir=reference_root, - repo_type='private', - layout=course_config.layout, - ) - course_schedule = CourseSchedule( - deadlines_config=private_course_driver.get_deadlines_file_path(), - ) - tester = Tester.create( - root=execution_folder, - course_config=course_config, - ) - - grade_on_ci( - course_config, - course_schedule, - public_course_driver, - private_course_driver, - tester, - test_full_groups=test_full_groups, - ) - # TODO: think inspect - - -@main.command() -@click.argument('reference_root', required=False, type=ClickTypeReadableDirectory) -@click.option('--dry-run', is_flag=True, help='Do not execute anything, only print') -@click.pass_context -def grade_mrs( - ctx: click.Context, - reference_root: Path | None = None, - dry_run: bool = False, -) -> None: - """Run student's MRs grading (current git user)""" - context: dict[str, Any] = ctx.obj - course_config: CourseConfig = context['course_config'] - execution_folder: Path = context['execution_folder'] - - reference_root = reference_root or execution_folder - public_course_driver = CourseDriver( - root_dir=Path(os.environ['CI_PROJECT_DIR']), - repo_type='public', - layout=course_config.layout, - ) - private_course_driver = CourseDriver( - root_dir=reference_root, - repo_type='private', - layout=course_config.layout, - ) - course_schedule = CourseSchedule( - deadlines_config=private_course_driver.get_deadlines_file_path(), - ) - - username = os.environ['CI_PROJECT_NAME'] - - gitlab_connection = GitlabConnection( - 
gitlab_host_url=course_config.gitlab_url, - job_token=os.environ.get('CI_JOB_TOKEN'), - ) - - grade_student_mrs( - course_config, - course_schedule, - public_course_driver, - gitlab_connection, - username, + """Process the configuration file and grade the tasks. + + 1. Detect changes to test. + 2. Export tasks to temporary directory for testing. + 3. Run pipelines: global, tasks and report. + 4. Cleanup temporary directory. + """ + # load configs + checker_config = CheckerConfig.from_yaml(ctx.obj["course_config_path"]) + deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) + + # read filesystem, check existing tasks + course = Course(deadlines_config, root, reference_root) + + # create exporter and export files for testing + exporter = Exporter( + course, + checker_config.structure, + checker_config.export, + root, + verbose=False, + cleanup=not no_clean, dry_run=dry_run, ) - # TODO: think inspect - - -@main.command() -@click.argument('root', required=False, type=ClickTypeReadableDirectory) -@click.option('--dry-run', is_flag=True, help='Do not execute anything, only print') + exporter.export_for_testing(exporter.temporary_dir) + + # detect changes to test + filesystem_tasks: list[FileSystemTask] = list() + # TODO: detect changes + filesystem_tasks = [task for task in course.get_tasks(enabled=True) if task.name == "hello_world"] + + # create tester to... to test =) + tester = Tester(course, checker_config, verbose=verbose, dry_run=dry_run) + + # run tests + # TODO: progressbar on parallelize + try: + tester.run( + exporter.temporary_dir, + filesystem_tasks, + report=True, + ) + except TestingError as e: + print_info("TESTING FAILED", color="red") + print_info(e) + exit(1) + except Exception as e: + print_info("UNEXPECTED ERROR", color="red") + print_info(e) + exit(1) + print_info("TESTING PASSED", color="green") + + +@cli.command() +@click.argument("reference_root", type=ClickReadableDirectory, default=".") +@click.argument("export_root", type=ClickWritableDirectory, default="./export") +@click.option("--commit", is_flag=True, help="Commit and push changes to the repository") +@click.option("--dry-run", is_flag=True, help="Do not execute anything, only log actions") @click.pass_context -def grade_students_mrs( - ctx: click.Context, - root: Path | None = None, - dry_run: bool = False, +def export( + ctx: click.Context, + reference_root: Path, + export_root: Path, + commit: bool, + dry_run: bool, ) -> None: - """Run students' MRs grading (all users)""" - context: dict[str, Any] = ctx.obj - course_config: CourseConfig = context['course_config'] - execution_folder: Path = context['execution_folder'] - - root = root or execution_folder - private_course_driver = CourseDriver( - root_dir=root, - repo_type='private', - layout=course_config.layout, - ) - course_schedule = CourseSchedule( - deadlines_config=private_course_driver.get_deadlines_file_path(), - ) - - gitlab_connection = GitlabConnection( - gitlab_host_url=course_config.gitlab_url, - private_token=course_config.gitlab_service_token, - ) - - grade_students_mrs_to_master( - course_config, - course_schedule, - private_course_driver, - gitlab_connection, + """Export tasks from reference to public repository.""" + # load configs + checker_config = CheckerConfig.from_yaml(ctx.obj["course_config_path"]) + deadlines_config = DeadlinesConfig.from_yaml(ctx.obj["deadlines_config_path"]) + + # read filesystem, check existing tasks + course = Course(deadlines_config, reference_root) + + # create exporter and export files for 
public + exporter = Exporter( + course, + checker_config.structure, + checker_config.export, + reference_root, + verbose=True, dry_run=dry_run, ) + exporter.export_for_testing(exporter.temporary_dir) -@main.command() -@click.argument('root', required=False, type=ClickTypeReadableDirectory) -@click.option('--export-dir', type=ClickTypeWritableDirectory, help='TEMP dir to export into') -@click.option('--dry-run', is_flag=True, help='Do not execute anything, only print') -@click.option('--no-cleanup', is_flag=True, help='Do not cleanup export dir') +@cli.command(hidden=True) +@click.argument("output_folder", type=ClickReadableDirectory, default=".") @click.pass_context -def export_public( - ctx: click.Context, - root: Path | None = None, - export_dir: Path | None = None, - dry_run: bool = False, - no_cleanup: bool = False, +def schema( + ctx: click.Context, + output_folder: Path, ) -> None: - """Export enabled tasks to public repo""" - context: dict[str, Any] = ctx.obj - course_config: CourseConfig = context['course_config'] - execution_folder: Path = context['execution_folder'] - - root = root or execution_folder - - export_dir = export_dir or Path(tempfile.mkdtemp()) - if not export_dir.exists(): - export_dir.mkdir(exist_ok=True, parents=True) - - public_course_driver = CourseDriver( - root_dir=export_dir, - repo_type='public', - layout=course_config.layout, - ) - private_course_driver = CourseDriver( - root_dir=root, - repo_type='private', - layout=course_config.layout, - ) - course_schedule = CourseSchedule( - deadlines_config=private_course_driver.get_deadlines_file_path(), - ) - - export_public_files( - course_config, - course_schedule, - public_course_driver, - private_course_driver, - export_dir, - dry_run=dry_run, - ) - - if no_cleanup: - print_info(f'No cleanup flag. Exported files stored in {export_dir}.') - else: - print_info(f'Cleanup flag. Export dir {export_dir} removed.') - shutil.rmtree(export_dir) - - -# @main.command() -# @click.option('--dry-run', is_flag=True, help='Do not execute anything, only print') -# @click.pass_context -def create_contributing_mr( - ctx: click.Context, - dry_run: bool = False, -) -> None: - """Move public project to private as MR""" - context: dict[str, Any] = ctx.obj - course_config: CourseConfig = context['course_config'] - # execution_folder: Path = context['execution_folder'] - - trigger_payload = os.environ.get('TRIGGER_PAYLOAD', 'None') - print_info('trigger_payload', trigger_payload) - - # trigger_payload_dict = json.loads(trigger_payload) - with open(trigger_payload, 'r') as json_file: - trigger_payload_dict = json.load(json_file) - - event_type = trigger_payload_dict['event_type'] - - if event_type != 'merge_request': - print_info(f'event_type = {event_type}. Skip it.', color='orange') - return - - object_attributes = trigger_payload_dict['object_attributes'] - merge_commit_sha = object_attributes['merge_commit_sha'] - - if merge_commit_sha is None: - print_info('merge_commit_sha = None. Skip it.', color='orange') - return - - mr_state = object_attributes['state'] - target_branch = object_attributes['target_branch'] - - if mr_state != 'merged': - print_info(f'mr_state = {mr_state}. Skip it.', color='orange') - return - - if target_branch != course_config.default_branch: - print_info(f'target_branch = {target_branch}. 
Skip it.', color='orange') - return + """Generate json schema for the checker configs.""" + checker_schema = CheckerConfig.get_json_schema() + deadlines_schema = DeadlinesConfig.get_json_schema() + task_schema = TaskConfig.get_json_schema() - # create_public_mr(course_config, object_attributes, dry_run=dry_run) + with open(output_folder / "schema-checker.json", "w") as f: + json.dump(checker_schema, f, indent=2) + with open(output_folder / "schema-deadlines.json", "w") as f: + json.dump(deadlines_schema, f, indent=2) + with open(output_folder / "schema-task.json", "w") as f: + json.dump(task_schema, f, indent=2) -if __name__ == '__main__': # pragma: nocover - main() +if __name__ == "__main__": + cli() diff --git a/checker/actions/__init__.py b/checker/actions/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/checker/actions/check.py b/checker/actions/check.py deleted file mode 100644 index ce45617..0000000 --- a/checker/actions/check.py +++ /dev/null @@ -1,147 +0,0 @@ -from __future__ import annotations - -import io -import multiprocessing -import sys -from concurrent.futures import ProcessPoolExecutor, as_completed -from contextlib import redirect_stderr, redirect_stdout - -from ..course import CourseDriver, CourseSchedule, Task -from ..exceptions import RunFailedError -from ..testers import Tester -from ..utils.print import print_info, print_task_info - - -def _check_single_task( - task: Task, - tester: Tester, - private_course_driver: CourseDriver, - verbose: bool = False, - catch_output: bool = False, -) -> str | None: - reference_source_dir = private_course_driver.get_task_solution_dir(task) - reference_config_dir = private_course_driver.get_task_config_dir(task) - reference_public_tests_dir = private_course_driver.get_task_public_test_dir(task) - reference_private_tests_dir = private_course_driver.get_task_private_test_dir(task) - reference_tests_root_dir = private_course_driver.root_dir - assert reference_source_dir, 'reference_source_dir have to exists' - assert reference_config_dir, 'reference_config_dir have to exists' - assert reference_public_tests_dir or reference_private_tests_dir, \ - 'reference_public_tests_dir or reference_private_tests_dir have to exists' - - if catch_output: - f = io.StringIO() - with redirect_stderr(f), redirect_stdout(f): - print_task_info(task.full_name) - try: - tester.test_task( - reference_source_dir, - reference_config_dir, - reference_public_tests_dir, - reference_private_tests_dir, - reference_tests_root_dir, - verbose=verbose, - normalize_output=True, - ) - except RunFailedError as e: - out = f.getvalue() - raise RunFailedError(e.msg, out + (e.output or '')) from e - else: - out = f.getvalue() - return out - else: - print_task_info(task.full_name) - tester.test_task( - reference_source_dir, - reference_config_dir, - reference_public_tests_dir, - reference_private_tests_dir, - reference_tests_root_dir, - verbose=verbose, - normalize_output=True, - ) - return None - - -def _check_tasks( - tasks: list[Task], - tester: Tester, - private_course_driver: CourseDriver, - parallelize: bool = False, - num_processes: int | None = None, - verbose: bool = True, -) -> bool: - # Check itself - if parallelize: - _num_processes = num_processes or multiprocessing.cpu_count() - print_info(f'Parallelize task checks with <{_num_processes}> processes...', color='blue') - - success = True - # with ThreadPoolExecutor(max_workers=num_cores) as e: - with ProcessPoolExecutor(max_workers=_num_processes) as e: - check_futures = { - 
e.submit(_check_single_task, task, tester, private_course_driver, verbose=verbose, catch_output=True) - for task in tasks - } - - for future in as_completed(check_futures): - try: - captured_out = future.result() - except RunFailedError as e: - print_info(e.output) - success &= False - except Exception as e: - print_info('Unknown exception:', e, color='red') - raise e - else: - print_info(captured_out) - return success - else: - for task in tasks: - try: - _check_single_task(task, tester, private_course_driver, verbose=verbose, catch_output=False) - except RunFailedError: - return False - except Exception as e: - print_info('Unknown exception:', e, color='red') - raise e - - return True - - -def pre_release_check_tasks( - course_schedule: CourseSchedule, - private_course_driver: CourseDriver, - tester: Tester, - tasks: list[Task] | None = None, - *, - parallelize: bool = False, - num_processes: int | None = None, - contributing: bool = False, -) -> None: - # select tasks or use `tasks` param - if tasks: - print_info('Testing specifying tasks...', color='yellow') - print_info([i.full_name for i in tasks]) - else: - if contributing: - tasks = course_schedule.get_tasks(started=True) - print_info('Testing started groups...', color='yellow') - print_info([i.name for i in course_schedule.get_groups(started=True)]) - else: - tasks = course_schedule.get_tasks(enabled=True) - print_info('Testing enabled groups...', color='yellow') - print_info([i.name for i in course_schedule.get_groups(enabled=True)]) - - # tests itself - success = _check_tasks( - tasks, - tester, - private_course_driver, - parallelize=parallelize, - num_processes=num_processes, - verbose=not contributing, - ) - - if not success: - sys.exit(1) diff --git a/checker/actions/contributing.py b/checker/actions/contributing.py deleted file mode 100644 index 7e4a216..0000000 --- a/checker/actions/contributing.py +++ /dev/null @@ -1,376 +0,0 @@ -# type: ignore -from __future__ import annotations - -import os -import re -import subprocess -import time -from pathlib import Path -from tempfile import TemporaryDirectory - -import gitlab.v4.objects - -from ..course import CourseConfig -from ..utils.glab import GITLAB, GITLAB_HOST_URL, MASTER_BRANCH, get_private_project, get_public_project -from ..utils.print import print_info - - -MR_COPY_TOKEN = os.environ.get('MR_COPY_TOKEN') - - -def _student_mr_title_generator(merge_request: gitlab.v4.objects.MergeRequest) -> str: - iid = merge_request.iid - title = merge_request.title - username = merge_request.author['username'] - - return f'[STUDENT {username} MR {iid}] {title}' - - -def _get_student_mr_title_prefix(full_title: str) -> str: - _title_search = re.match(r'^[.*]', full_title) - assert _title_search - prefix = _title_search.group(0) - return prefix - - -def _student_mr_branch_name_generator(merge_request: gitlab.v4.objects.MergeRequest) -> str: - iid = merge_request.iid - username = merge_request.author['username'] - return f'students/{username}/mr-{iid}' - - -def _student_mr_desc_generator(merge_request: gitlab.v4.objects.MergeRequest) -> str: - source_branch = merge_request.source_branch - target_branch = merge_request.target_branch - iid = merge_request.iid - title = merge_request.title - url = merge_request.web_url - description = merge_request.description - username = merge_request.author['username'] - name = merge_request.author['name'] - user_url = merge_request.author['web_url'] - return ' \n'.join([ - f'Contribution by student [{username}]({user_url}): {name}', - '-'*16, - 
f'Original [MR{iid}]({url}) from {source_branch} to {target_branch} in public project', - f'Title: {title}', - f'Branch: {source_branch}', - '', - f'{description}', - ]) - - -def _get_student_mr_source_url(merge_request: gitlab.v4.objects.MergeRequest) -> tuple[str, str]: - source_project_id = merge_request.source_project_id - full_project = GITLAB.projects.get(source_project_id) - - source_branch = merge_request.source_branch - - return ''.join([ - 'https://', - f'kblack:{MR_COPY_TOKEN}@', - GITLAB_HOST_URL.removeprefix('https://'), - '/', - full_project.path_with_namespace, - ]), source_branch - - -def copy_merge_requests(course_config: CourseConfig, dry_run: bool = False) -> None: - """Copy changes from all open MR""" - raise NotImplementedError - - private_project = get_private_project(course_config.private_group, course_config.private_repo) - full_private_project = GITLAB.projects.get(private_project.id) - public_project = get_public_project(course_config.private_group, course_config.public_repo) - full_public_project = GITLAB.projects.get(public_project.id) - - # Run rebase - print_info('Go and rebase public project', color='pink') - public_mrs = full_public_project.mergerequests.list(state='opened') - for mr in public_mrs: - print_info(f'Rebase public MR {mr.iid} "{mr.title}":', color='pink') - # mr.notes.create({'body': 'Run auto rebase...'}) - print_info(mr, color='grey') - rebase_in_progress = mr.rebase()['rebase_in_progress'] - print_info("rebase_in_progress", rebase_in_progress) - - print_info('Waiting until rebase done...', color='pink') - time.sleep(60) # TODO: add check - - stdout = subprocess.run( - 'git fetch private', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - private_mrs = full_private_project.mergerequests.list(state='opened') - private_students_mr = { - prefix_match.group(0): mr for mr in private_mrs if (prefix_match := re.match(r'^\[.*\]', mr.title)) - } - print_info('private_students_mr', len(private_students_mr), private_students_mr.keys()) - - public_mrs = full_public_project.mergerequests.list(state='opened') - - print_info('Throughout public MR', color='pink') - for mr in public_mrs: - full_mr = GITLAB.mergerequests.get(mr.id) - print_info('full_mr', full_mr) - print_info(f'Look up public MR {mr.iid} "{mr.title}":', color='pink') - print_info(mr, color='grey') - - gen_mr_full_title = _student_mr_title_generator(mr) - gen_mr_prefix = _get_student_mr_title_prefix(gen_mr_full_title) - - if gen_mr_prefix in private_students_mr: - print_info(f'MR "{gen_mr_prefix}" exists. Updating it...') - # Already exists. Get branch name - private_mr = private_students_mr[gen_mr_prefix] - private_branch_name = private_mr.source_branch - - # An checkout existed branch - print_info(f'checkout {private_branch_name}...') - stdout = subprocess.run( - f'git checkout --force --track private/{private_branch_name}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - # Update desc and so on - print_info('updating mr...') - private_mr.title = _student_mr_title_generator(mr) - private_mr.description = _student_mr_desc_generator(mr) - private_mr.labels = ['contributing'] - private_mr.save() - print_info('private_mr', private_mr, color='grey') - else: - # Not exist. So create - print_info(f'MR "{gen_mr_prefix}" not exists. 
Creating it...') - private_branch_name = _student_mr_branch_name_generator(mr) - - # Create and push branch - print_info(f'create and checkout {private_branch_name}...') - stdout = subprocess.run( - f'git checkout --force -b {private_branch_name} private/{MASTER_BRANCH} ' - f'&& git push --set-upstream private {private_branch_name}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - # Create a MR - print_info('creating mr...') - private_mr = full_private_project.mergerequests.create({ - 'source_branch': private_branch_name, - 'target_branch': MASTER_BRANCH, - 'title': _student_mr_title_generator(mr), - 'description': _student_mr_desc_generator(mr), - 'labels': ['contributing'], - 'remove_source_branch': True, - 'squash': True, - 'allow_maintainer_to_push': True, - }) - print_info('private_mr', private_mr, color='grey') - - # Del processed private MR (leave only outdated) - if gen_mr_prefix in private_students_mr: - del private_students_mr[gen_mr_prefix] - - print_info(f'git status in {private_branch_name}:') - stdout = subprocess.run( - 'git status', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info('Current status:', color='grey') - print_info(stdout, color='grey') - - tmp_dir = TemporaryDirectory(dir=Path('.')) - tmp_dir_path = tmp_dir.name - # print_info(f'clone changes in {private_branch_name} from public mr:') - # tmp_dir_path = mkdtemp(dir=Path('.')) - - source_url, source_branch = _get_student_mr_source_url(mr) - # print(source_url, source_branch, tmp_dir_path) - - # Clone single branch in tmp folder - # TODO: not copy, but merge - print_info('Clone and update files...') - stdout = subprocess.run( - f'git clone --depth=1 --branch {source_branch} {source_url} {tmp_dir_path} && rm -rf {tmp_dir_path}/.git &&' - f'cp -a {tmp_dir_path}/* ./ && rm -rf {tmp_dir_path}', - # f'mv -f {tmp_dir_path}/* . 
&& rm -rf {tmp_dir_path}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - stdout = subprocess.run( - 'git status', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info('Current status:', color='grey') - print_info(stdout, color='grey') - - # Git add only modified and deleted - print_info('Git add modified and commit and push it...') - stdout = subprocess.run( - 'git add -u', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - stdout = subprocess.run( - 'git status && git branch', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info('git status && git branch', color='grey') - print_info(stdout, color='grey') - - stdout = subprocess.run( - f'git commit -m "Export mr files" --allow-empty && git push --set-upstream private {private_branch_name}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - print_info('Deleting outdated private MR', color='pink') - for mr in private_students_mr: - print_info(f'Deleting outdated MR {mr.iid} {mr.title}...') - # mr.state_event = 'close' - # mr.save() - mr.delete() - - -def create_public_mr( - course_config: CourseConfig, - object_attributes: dict[str, str] | None = None, - *, - dry_run: bool = False, -) -> None: - """Copy changes from public repo""" - raise NotImplementedError - - object_attributes = object_attributes or {} - merge_commit_sha = object_attributes['merge_commit_sha'] - title = object_attributes['title'] - url = object_attributes['url'] - iid = object_attributes['iid'] - description = object_attributes['description'] - author_id = object_attributes['author_id'] - updated_at = object_attributes['updated_at'] - - private_project = get_private_project(course_config.private_group, course_config.private_repo) - full_private_project = GITLAB.projects.get(private_project.id) - # public_project = get_public_project() - # full_public_project = GITLAB.projects.get(public_project.id) - author = GITLAB.users.get(author_id) - - # Get public project sha and generate branch name - if merge_commit_sha: - public_sha = merge_commit_sha - else: - public_sha = subprocess.run( - f"git log public/{MASTER_BRANCH} --pretty=format:'%H' -n 1", - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(f'public/{MASTER_BRANCH}: {title} (sha {public_sha})') - new_branch_name = f'public/mr-{iid}' - new_title = f'[PUBLIC] {title}' - new_description = ' \n'.join([ - f'Merged public project [MR{iid}]({url})', - f'author: {author.username} - {author.name}', - f'updated_at: {updated_at}', - f'sha: {public_sha}', - '', - description, - ]) - - # Get all mrs from private repo - private_mrs = full_private_project.mergerequests.list(state='opened') - private_mrs = {mr.title: mr for mr in private_mrs if '[PUBLIC]' in mr.title} - - if new_title in private_mrs: - # Already exists - mr = private_mrs[new_title] - print_info(f'MR with sha {public_sha} already exists!') - print_info(mr, color='grey') - return - else: - # Not exist - - # Create branch - print_info('Create branch from remote', color='grey') - stdout = subprocess.run( - f'git checkout --force -b {new_branch_name} private/{MASTER_BRANCH}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - print_info(f'Merge from public (by 
sha {public_sha[:7]})', color='grey') - stdout = subprocess.run( - # f'git merge -s ours --allow-unrelated-histories --no-commit public/{MASTER_BRANCH}', - f'git merge --strategy-option=theirs --allow-unrelated-histories --no-commit {public_sha}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - # Check diff not empty - stdout = subprocess.run( - 'git status', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - if 'Changes to be committed:' not in stdout: - print_info( - f'Can not create MR. No changes to commit between {MASTER_BRANCH} and public/{MASTER_BRANCH}', - color='orange' - ) - return - - # Git add only modified and deleted - print_info('Git add modified and commit and push it...') - stdout = subprocess.run( - f'git commit -m "{new_title}" && git push private {new_branch_name}', - encoding='utf-8', - shell=True, check=True, - stdout=subprocess.PIPE, - ).stdout - print_info(stdout, color='grey') - - # Create mr - mr = full_private_project.mergerequests.create({ - 'source_branch': new_branch_name, - 'target_branch': MASTER_BRANCH, - 'title': new_title, - 'description': new_description, - 'labels': ['public'], - 'remove_source_branch': True, - 'squash': True, - 'allow_maintainer_to_push': True, - }) - - print_info('Ok. New MR created', color='green') - print_info(mr.web_url) diff --git a/checker/actions/export.py b/checker/actions/export.py deleted file mode 100644 index 0eb45cb..0000000 --- a/checker/actions/export.py +++ /dev/null @@ -1,200 +0,0 @@ -from __future__ import annotations - -import shutil -from pathlib import Path - -from ..course import CourseConfig, CourseDriver -from ..course.schedule import CourseSchedule -from ..utils.files import filename_match_patterns -from ..utils.git import commit_push_all_repo, setup_repo_in_dir -from ..utils.print import print_info - - -EXPORT_IGNORE_COMMON_FILE_PATTERNS = [ - '.git', '*.docker', '.releaser-ci.yml', '.deadlines.yml', '.course.yml', '.DS_Store', '.venv', - '.*_cache', '.github', '*.drawio', -] - - -def _get_enabled_files_and_dirs_private_to_public( - course_config: CourseConfig, - course_schedule: CourseSchedule, - public_course_driver: CourseDriver, - private_course_driver: CourseDriver, -) -> dict[Path, Path]: - # Common staff; files only, all from private repo except ignored - common_files: dict[Path, Path] = { - i: public_course_driver.root_dir / i.name - for i in private_course_driver.root_dir.glob('*') - if i.is_file() and not filename_match_patterns(i, EXPORT_IGNORE_COMMON_FILE_PATTERNS) - } - - # Course docs - course_docs: dict[Path, Path] = dict() - if (private_course_driver.root_dir / 'docs').exists(): - course_docs.update({ - private_course_driver.root_dir / 'docs': public_course_driver.root_dir / 'docs', - }) - if (private_course_driver.root_dir / 'images').exists(): - course_docs.update({ - private_course_driver.root_dir / 'images': public_course_driver.root_dir / 'images', - }) - - # Course tools - course_tools: dict[Path, Path] = dict() - if (private_course_driver.root_dir / 'tools').exists(): - course_tools = { - i: public_course_driver.root_dir / 'tools' / i.name - for i in (private_course_driver.root_dir / 'tools').glob('*') - if i.is_dir() or (i.is_file() and not filename_match_patterns(i, EXPORT_IGNORE_COMMON_FILE_PATTERNS)) - } - - # Started tasks: copy template to public repo - started_tasks_templates_dirs: dict[Path, Path] = { - private_template_dir: - 
public_course_driver.get_task_solution_dir(task, check_exists=False) # type: ignore - for task in course_schedule.get_tasks(enabled=True, started=True) - if (private_template_dir := private_course_driver.get_task_template_dir(task, check_exists=True)) - } - started_tasks_public_tests_dirs: dict[Path, Path] = { - private_public_tests_dir: - public_course_driver.get_task_public_test_dir(task, check_exists=False) # type: ignore - for task in course_schedule.get_tasks(enabled=True, started=True) - if (private_public_tests_dir := private_course_driver.get_task_public_test_dir(task, check_exists=True)) - } - started_tasks_common_files: dict[Path, Path] = { - i: public_course_driver.get_task_dir(task, check_exists=False) / i.name # type: ignore - for task in course_schedule.get_tasks(enabled=True, started=True) - if (private_task_dir := private_course_driver.get_task_dir(task)) - for i in private_task_dir.glob('*.*') - } - - # Lectures for enabled groups (if any) - started_lectures_dirs: dict[Path, Path] = { - private_lecture_dir: public_course_driver.get_group_lecture_dir(group, check_exists=False) # type: ignore - for group in course_schedule.get_groups(enabled=True, started=True) - if (private_lecture_dir := private_course_driver.get_group_lecture_dir(group, check_exists=True)) - } - - # Reviews for ended groups (if any) - ended_reviews_dirs: dict[Path, Path] = { - private_review_dir: - public_course_driver.get_group_submissions_review_dir(group, check_exists=False) # type: ignore - for group in course_schedule.get_groups(enabled=True, ended=True) - if (private_review_dir := private_course_driver.get_group_submissions_review_dir(group, check_exists=True)) - } - - return { - **common_files, - **course_docs, - **course_tools, - **started_tasks_templates_dirs, - **started_tasks_public_tests_dirs, - **started_tasks_common_files, - **started_lectures_dirs, - **ended_reviews_dirs, - } - - -def _dirs_to_files(files_and_dirs: set[Path]) -> set[Path]: - # Recursive add all files if we have dirs - all_files_dirs = set() - for i in files_and_dirs: - if i.is_file(): - all_files_dirs.add(i) - else: - all_files_dirs.update(i.glob('**/*')) - - return all_files_dirs # - set(PUBLIC_DIR.glob('.git/**/*')) - - -def _get_disabled_files( - enabled_files: set[Path], - course_driver: CourseDriver, -) -> set[Path]: - all_files = [ - i for i in course_driver.root_dir.glob('**/*') if i.is_file() - ] - - return set(all_files) - enabled_files - set(course_driver.root_dir.glob('.git/**/*')) - {course_driver.root_dir} - - -def export_public_files( - course_config: CourseConfig, - course_schedule: CourseSchedule, - public_course_driver: CourseDriver, - private_course_driver: CourseDriver, - export_dir: Path, - *, - dry_run: bool = False, -) -> None: - export_dir.mkdir(exist_ok=True, parents=True) - - if dry_run: - print_info('Dry run. 
No repo setup, only copy in export_dir dir.', color='orange') - - files_and_dirs_to_add_map: dict[Path, Path] = _get_enabled_files_and_dirs_private_to_public( - course_config, - course_schedule, - public_course_driver, - private_course_driver, - ) - - if not dry_run: - if not course_config.gitlab_service_token: - raise Exception('Unable to find service_token') # TODO: set exception correct type - - print_info('Setting up public repo...', color='orange') - print_info(f' Copy {course_config.gitlab_url}/{course_config.public_repo} repo in {export_dir}') - print_info( - f' username {course_config.gitlab_service_username} \n' - f' name {course_config.gitlab_service_name} \n' - f' branch {course_config.default_branch} \n', - color='grey' - ) - setup_repo_in_dir( - export_dir, - f'{course_config.gitlab_url}/{course_config.public_repo}', - service_username=course_config.gitlab_service_username, - service_token=course_config.gitlab_service_token, - git_user_email=course_config.gitlab_service_email, - git_user_name=course_config.gitlab_service_name, - branch=course_config.default_branch, - ) - - # remove all files from export_dir (to delete deleted files) - print_info('Delete all files from old export_dir (keep .git)...', color='orange') - deleted_files: set[str] = set() - for path in export_dir.glob('*'): - if path.name == '.git': - continue - - if path.is_file() or path.is_symlink(): - path.unlink() - elif path.is_dir(): - shutil.rmtree(path) - else: - print(f'wtf. {path}') - - deleted_files.add(str(path.as_posix())) - - # copy updated files - print_info('Copy updated files...', color='orange') - for filename_private, filename_public in sorted(files_and_dirs_to_add_map.items()): - relative_private_filename = str(filename_private.relative_to(private_course_driver.root_dir)) - relative_public_filename = str(filename_public.relative_to(public_course_driver.root_dir)) - print_info(f' {relative_private_filename}', color='grey') - print_info(f' \t-> {relative_public_filename}', color='grey') - - if filename_private.is_dir(): - shutil.copytree(filename_private, export_dir / relative_public_filename, dirs_exist_ok=True) - else: - (export_dir / relative_public_filename).parent.mkdir(exist_ok=True, parents=True) - shutil.copy(filename_private, export_dir / relative_public_filename) - - if not dry_run: - # files for git add - commit_push_all_repo( - export_dir, - branch=course_config.default_branch, - ) diff --git a/checker/actions/grade.py b/checker/actions/grade.py deleted file mode 100644 index 5b6f877..0000000 --- a/checker/actions/grade.py +++ /dev/null @@ -1,461 +0,0 @@ -from __future__ import annotations - -import os -import subprocess -import sys -import tempfile -from datetime import datetime -from pathlib import Path - -from ..course import CourseConfig, CourseDriver, CourseSchedule, Group, Task -from ..exceptions import RunFailedError -from ..testers import Tester -from ..utils import get_folders_diff_except_public, get_tracked_files_list -from ..utils.manytask import PushFailedError, push_report -from ..utils.print import print_info, print_task_info - - -class GitException(Exception): - pass - - -def _get_git_changes( - solution_root: str, - public_repo_url: str, - author_name: str | None = None, - current_commit_sha: str | None = None, - prev_commit_sha: str | None = None, - git_changes_type: str = 'log_between_no_upstream', -) -> list[str]: - """ - :param solution_root: Full path to solutions folder - :param public_repo_url: Full url to public repo - :param git_changes_type: one of - 
'diff_last', 'diff_between', 'log_between_no_merges', 'log_between_by_author', 'log_between_no_upstream' - """ - if author_name is None and git_changes_type == 'log_between_by_author': - git_changes_type = 'log_between_no_merges' - - if prev_commit_sha and set(prev_commit_sha) == {'0'}: # first commit or merge request - prev_commit_sha = None - - if 'between' in git_changes_type and (current_commit_sha is None or prev_commit_sha is None): - print_info('CI_COMMIT_SHA or CI_COMMIT_BEFORE_SHA is wrong pipeline_diff can not be used. ' - 'Using std `git show`') - print_info(f'CI_COMMIT_SHA: {current_commit_sha}, CI_COMMIT_BEFORE_SHA: {prev_commit_sha}!') - git_changes_type = 'diff_last' - - changes = [] - if git_changes_type.startswith('diff'): - if git_changes_type == 'diff_between': - print_info(f'Looking diff between {prev_commit_sha} and {current_commit_sha}...') - prev_commit_sha = '' if prev_commit_sha is None else prev_commit_sha - git_status = subprocess.run( - f'cd {solution_root} && git diff {prev_commit_sha} {current_commit_sha} --stat --oneline', - encoding='utf-8', - stdout=subprocess.PIPE, - shell=True - ).stdout - print_info(git_status) - changes = git_status.split('\n')[:-2] - elif git_changes_type == 'diff_last': - print_info('Looking last commit diff...') - git_status = subprocess.run( - f'cd {solution_root} && git show --stat --oneline', - encoding='utf-8', - stdout=subprocess.PIPE, - shell=True - ).stdout - print_info(git_status) - changes = git_status.split('\n')[1:-2] - else: - raise GitException(f'Unknown git_changes_type={git_changes_type}') - - changes = [f.rsplit('|', maxsplit=1)[0].strip() for f in changes] - - elif git_changes_type.startswith('log'): - if git_changes_type == 'log_between_no_merges': - print_info(f'Looking log between {prev_commit_sha} and {current_commit_sha} without merges...') - prev_commit_sha = '' if prev_commit_sha is None else prev_commit_sha - git_status = subprocess.run( - f'cd {solution_root} && ' - f'git log --pretty="%H" --no-merges {prev_commit_sha or ""}..{current_commit_sha} | ' - f'while read commit_hash; do git show --oneline --name-only $commit_hash' - f'| tail -n+2; done | sort | uniq', - encoding='utf-8', - stdout=subprocess.PIPE, - shell=True - ).stdout - print_info(git_status) - changes = git_status.split('\n') - elif git_changes_type == 'log_between_by_author': - assert isinstance(author_name, str) - print_info( - f'Looking log between {prev_commit_sha} and {current_commit_sha} ' - f'by author="{author_name.split(" ")[0]}"...', - ) - prev_commit_sha = '' if prev_commit_sha is None else prev_commit_sha - git_status = subprocess.run( - f'cd {solution_root} && ' - f'git log --pretty="%H" --author="{author_name.split(" ")[0]}"' - f'{prev_commit_sha or ""}..{current_commit_sha} | ' - f'while read commit_hash; do git show --oneline --name-only $commit_hash ' - f'| tail -n+2; done | sort | uniq', - encoding='utf-8', - stdout=subprocess.PIPE, - shell=True, - ).stdout - print_info(git_status) - changes = git_status.split('\n') - elif git_changes_type == 'log_between_no_upstream': - print_info(f'Looking log_between_no_upstream between {prev_commit_sha} and {current_commit_sha} ' - f'which not in `{public_repo_url}`...') - - result = subprocess.run( - f'cd {solution_root} && ' - f'git fetch --unshallow &&' - f'(git remote rm upstream | true) &&' - f'git remote add upstream {public_repo_url}.git &&' - f'git fetch upstream', - encoding='utf-8', - capture_output=True, - shell=True - ) - print_info(result.stderr, color='grey') - 
print_info(result.stdout, color='grey') - - print_info('---') - - git_status = subprocess.run( - f'cd {solution_root} && ' - f'git log --pretty="%H" {prev_commit_sha or ""}..{current_commit_sha} ' - f'--no-merges --not --remotes=upstream | ' - f'while read commit_hash; do git show --oneline --name-only $commit_hash ' - f'| tail -n+2; done | sort | uniq', - encoding='utf-8', - stdout=subprocess.PIPE, - shell=True - ).stdout - print_info('Detected changes in the following files:') - print_info(git_status, color='grey') - changes = git_status.split('\n') - else: - raise GitException(f'Unknown git_changes_type={git_changes_type}') - - changes = [f for f in changes if len(f) > 0] - - return changes - - -def grade_single_task( - task: Task, - tester: Tester, - course_config: CourseConfig, - public_course_driver: CourseDriver, - private_course_driver: CourseDriver, - user_id: int, - send_time: datetime, - inspect: bool = False -) -> bool: - print_task_info(task.full_name) - source_dir = public_course_driver.get_task_solution_dir(task) - reference_config_dir = private_course_driver.get_task_config_dir(task) - reference_public_tests_dir = private_course_driver.get_task_public_test_dir(task) - reference_private_tests_dir = private_course_driver.get_task_private_test_dir(task) - reference_tests_root_dir = private_course_driver.root_dir - assert source_dir, 'source_dir have to exists' - assert reference_config_dir, 'reference_config_dir have to exists' - assert reference_public_tests_dir or reference_private_tests_dir, \ - 'reference_public_tests_dir or reference_private_tests_dir have to exists' - - try: - score_percentage = tester.test_task( - source_dir, - reference_config_dir, - reference_public_tests_dir, - reference_private_tests_dir, - reference_tests_root_dir, - verbose=inspect, - normalize_output=inspect, - ) - score = round(score_percentage * task.max_score) - if score_percentage == 1.: - print_info(f'\nSolution score is: {score}', color='green') - else: - print_info(f'\nSolution score percentage is: {score_percentage}', color='green') - print_info(f'\nSolution score is: [{task.max_score}*{score_percentage}]={score}', color='green') - if task.review: - print_info('\nThis task is "review-able", so, open MR and wait till review.', color='blue') - elif not inspect: - use_demand_multiplier = not task.marked - try: - if not course_config.manytask_token: - raise PushFailedError('Unable to find manytask token') - files = { - path.name: (str(path.relative_to(source_dir)), open(path, 'rb')) - for path in source_dir.glob('**/*') - if path.is_file() - } - username, set_score, result_commit_time, result_submit_time, demand_multiplier = push_report( - course_config.manytask_url, - course_config.manytask_token, - task.name, - user_id, - score, - files=files, - send_time=send_time, - use_demand_multiplier=use_demand_multiplier, - ) - print_info( - f'Final score for @{username} (according to deadlines and demand): {set_score}', - color='blue' - ) - if demand_multiplier and demand_multiplier != 1: - print_info( - f'Due to low demand, the task score is multiplied at {demand_multiplier:.4f}', - color='grey' - ) - if result_commit_time: - print_info(f'Commit at {result_commit_time} (are validated to Submit Time)', color='grey') - if result_submit_time: - print_info(f'Submit at {result_submit_time} (deadline is calculated relative to it)', color='grey') - except PushFailedError: - raise - return True - except RunFailedError: - # print_info(e) - return False - - -def grade_tasks( - tasks: list[Task], - tester: 
Tester, - course_config: CourseConfig, - public_course_driver: CourseDriver, - private_course_driver: CourseDriver, - user_id: int, - send_time: datetime, - inspect: bool = False -) -> bool: - success = True - for task in tasks: - success &= grade_single_task( - task, - tester, - course_config, - public_course_driver, - private_course_driver, - user_id, - send_time, - inspect=inspect - ) - return success - - -def _get_changes_using_real_folders( - course_config: CourseConfig, - current_folder: str, - old_hash: str, - current_repo_gitlab_path: str, - gitlab_token: str, -) -> list[str]: - gitlab_url_with_token = course_config.gitlab_url.replace('://', f'://gitlab-ci-token:{gitlab_token}@') - - with tempfile.TemporaryDirectory() as public_dir: - with tempfile.TemporaryDirectory() as old_dir: - # download public repo, minimal - print_info(f'Cloning {course_config.public_repo} of {course_config.default_branch}...', color='white') - # print_info('git clone:', color='grey') - subprocess.run( - f'git clone --depth=1 --branch={course_config.default_branch} ' - f'{course_config.gitlab_url}/{course_config.public_repo}.git {public_dir}', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - ) - # print_info(r.stdout, color='grey') - # print_info(f'ls -lah {public_dir}', color='grey') - subprocess.run( - f'ls -lah {public_dir}', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - ) - # print_info(r.stdout, color='grey') - - # download old repo by hash, minimal - print_info(f'Cloning {current_repo_gitlab_path} to get {old_hash}...', color='white') - # print_info('git clone:', color='grey') - subprocess.run( - f'git clone --depth=1 --branch={course_config.default_branch} ' - f'{gitlab_url_with_token}/{current_repo_gitlab_path}.git {old_dir}', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - ) - # print_info(r.stdout, color='grey') - # print_info(f'git fetch origin {old_hash} && git checkout FETCH_HEAD:', color='grey') - subprocess.run( - f'git fetch origin {old_hash} && git checkout FETCH_HEAD', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=old_dir, - ) - # print_info(r.stdout, color='grey') - # print_info(f'ls -lah {old_dir}', color='grey') - subprocess.run( - f'ls -lah {old_dir}', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - ) - # print_info(r.stdout, color='grey') - - # get diff - print_info('Detected changes (filtering by public repo and git tracked files)', color='white') - print_info('and filtering by git tracked files', color='white') - changes = get_folders_diff_except_public( - Path(public_dir), - Path(old_dir), - Path(current_folder), - exclude_patterns=['.git'], - ) - # filter by tracked by git - git_tracked_files = get_tracked_files_list(Path(current_folder)) - changes = [f for f in changes if f in git_tracked_files] - - print_info('\nchanged_files:', color='white') - for change in changes: - print_info(f' ->> {change}', color='white') - - return changes - - -def grade_on_ci( - course_config: CourseConfig, - course_schedule: CourseSchedule, - public_course_driver: CourseDriver, - private_course_driver: CourseDriver, - tester: Tester, - *, - test_full_groups: bool = False, -) -> None: - solution_root = os.environ['CI_PROJECT_DIR'] - - current_time = datetime.now() - commit_time = datetime.fromisoformat(os.environ['CI_COMMIT_TIMESTAMP']) - # TODO: check datetime format - 
pipeline_created_time: datetime | None = ( - datetime.strptime(os.environ['CI_PIPELINE_CREATED_AT'], '%Y-%m-%dT%H:%M:%SZ') - if 'CI_PIPELINE_CREATED_AT' in os.environ else None - ) - job_start_time: datetime | None = ( - datetime.strptime(os.environ['CI_JOB_STARTED_AT'], '%Y-%m-%dT%H:%M:%SZ') - if 'CI_JOB_STARTED_AT' in os.environ else None - ) - send_time = pipeline_created_time or current_time - - print_info(f'current_time {current_time}', color='grey') - print_info(f'-> commit_time {commit_time}', color='grey') - print_info(f'-> pipeline_created_time {pipeline_created_time}', color='grey') - print_info(f'-> job_start_time {job_start_time}', color='grey') - print_info(f'= using send_time {send_time}', color='grey') - - author_name = os.environ.get('CI_COMMIT_AUTHOR', None) - current_commit_sha = os.environ.get('CI_COMMIT_SHA', None) - prev_commit_sha = os.environ.get('CI_COMMIT_BEFORE_SHA', None) - print_info(f'CI_COMMIT_AUTHOR {author_name}', color='grey') - print_info(f'CI_COMMIT_SHA {current_commit_sha}', color='grey') - print_info(f'CI_COMMIT_BEFORE_SHA {prev_commit_sha}', color='grey') - - gitlab_job_token = os.environ.get('CI_JOB_TOKEN') or '' - - print_info('Loading changes...', color='orange') - # Get changes using real files difference - try: - current_repo_gitlab_path = os.environ['CI_PROJECT_PATH'] - changes = _get_changes_using_real_folders( - course_config, - current_folder=solution_root, - old_hash=prev_commit_sha or course_config.default_branch, - current_repo_gitlab_path=current_repo_gitlab_path, - gitlab_token=gitlab_job_token, - ) - except Exception as e: - print_info('Ooops... Loading changes failed', color='red') - print_info(e) - - print_info('Trying with git diff instead\n') - # Get changed files via git - try: - changes = _get_git_changes( - solution_root, - course_config.gitlab_url + '/' + course_config.public_repo, - author_name=author_name, - current_commit_sha=current_commit_sha, - prev_commit_sha=prev_commit_sha, - ) - except GitException as e: - print_info('Ooops... 
Loading changes failed', color='red') - print_info(e) - sys.exit(1) - - # Process Changed files to Changed tasks - tasks: list[Task] = [] - groups: list[Group] = [] - for changed_file in changes: - changed_task_dir = public_course_driver.get_task_dir_name(changed_file) - if changed_task_dir is None or changed_task_dir not in course_schedule.tasks: - continue - - # if changed_group_dir == '...': # if task name is too long it's hidden - # changed_group_dir = course_schedule.tasks[changed_task_dir].group.name - - task = course_schedule.tasks[changed_task_dir] - # group = course.groups[changed_group_dir] - group = task.group - - # filter tasks and groups - if group.is_started: - if task not in tasks: - tasks.append(task) - if group not in groups: - groups.append(group) - - # adding all tasks from group to testing - if test_full_groups: - print_info('Testing all tasks in changed groups...', color='orange') - print_info(f'Changed groups: {[i.name for i in groups]}\n') - - tasks = [] - for group in groups: - tasks.extend(group.tasks) - else: - print_info('Testing only changed tasks...', color='orange') - print_info(f'Changed tasks: {[i.full_name for i in tasks]}\n') - - # Grade itself - user_id = int(os.environ['GITLAB_USER_ID']) - if tasks: - success = grade_tasks( - tasks, - tester, - course_config, - public_course_driver, - private_course_driver, - user_id=user_id, - send_time=send_time, - ) - else: - print_info('No changed tasks found :(', color='blue') - print_info('Hint: commit some changes in tasks you are interested in') - success = False - - if not success: - sys.exit(1) diff --git a/checker/actions/grade_mr.py b/checker/actions/grade_mr.py deleted file mode 100644 index a8de39b..0000000 --- a/checker/actions/grade_mr.py +++ /dev/null @@ -1,464 +0,0 @@ -from __future__ import annotations - -import re -from datetime import datetime - -import gitlab.v4.objects - -from ..course import CourseConfig, CourseDriver -from ..course.schedule import CourseSchedule -from ..utils.glab import GitlabConnection -from ..utils.manytask import PushFailedError, push_report -from ..utils.print import print_header_info, print_info - - -BANNED_FILE_EXTENSIONS = {'csv', 'json', 'txt', 'db'} -ALLOWED_FILES = ['requirements.txt', 'runtime.txt'] -REVIEWED_TAG = 'reviewed' -CHECKLIST_TAG = 'checklist' -BASIC_CHECKLIST_BANNED_TAGS = {CHECKLIST_TAG, REVIEWED_TAG} - - -def grade_students_mrs_to_master( - course_config: CourseConfig, - course_schedule: CourseSchedule, - private_course_driver: CourseDriver, - gitlab_connection: GitlabConnection, - *, - dry_run: bool = False, -) -> None: - students_projects = gitlab_connection.get_students_projects(course_config.students_group) - # for project in students_projects: - # full_project: gitlab.v4.objects.Project = GITLAB.projects.get(project.id) - # username = full_project.name - usernames = [project.name for project in students_projects] - - _grade_mrs( - course_config, - course_schedule, - private_course_driver, - gitlab_connection, - usernames, - dry_run=dry_run, - ) - - -def grade_student_mrs( - course_config: CourseConfig, - course_schedule: CourseSchedule, - course_driver: CourseDriver, - gitlab_connection: GitlabConnection, - username: str, - *, - dry_run: bool = False, -) -> None: - _grade_mrs( - course_config, - course_schedule, - course_driver, - gitlab_connection, - [username], - dry_run=dry_run, - ) - - -def _grade_mrs( - course_config: CourseConfig, - course_schedule: CourseSchedule, - course_driver: CourseDriver, - gitlab_connection: GitlabConnection, - 
usernames: list[str], - *, - dry_run: bool = False, -) -> None: - """ - Grade all users from list; to be used with individual and massive MRs check - """ - - # print users to check - print_info('Users:', usernames, color='orange') - - # get open mrs to filter all users - students_group = gitlab_connection.get_group(course_config.students_group) - students_mrs: list[gitlab.v4.objects.GroupMergeRequest] = (students_group.mergerequests - .list(get_all=True)) # type: ignore - students_mrs_project_names: set[str] = set() - for mr in students_mrs: - students_mrs_project_names.update(mr.web_url.split('/')) - usernames = [i for i in usernames if i in students_mrs_project_names] - print_info('Users with MRs:', usernames, color='orange') - - if len(usernames) == 0: - print_info('Could not find MRs', color='orange') - return - - # get tasks we need to check - tag_to_folder = _get_tag_to_folder_dict(course_schedule, course_driver) - print_info('Tags and folders to check:', tag_to_folder, color='orange') - - # get tutors - tutors = gitlab_connection.get_all_tutors(course_config.course_group) - print_info('Tutors:', [f'<{t.username} {t.name}>' for t in tutors], color='orange') - id_to_tutor = {t.id: t for t in tutors} - - # get current user - for username in usernames: - try: - user = gitlab_connection.get_user_by_username(username) - except Exception: - print_info(f'Can not find user with username={username}>', color='orange') - continue - - user_id = user.id - print_header_info(f'Current user: <{user.username} {user.name}>') - - # get current user's project - project = gitlab_connection.get_project_from_group(course_config.students_group, user.username) - full_project = gitlab_connection.gitlab.projects.get(project.id) - print_info(f'project {project.path_with_namespace}: {project.web_url}') - - opened_master_mrs = full_project.mergerequests.list( - get_all=True, state='opened', target_branch=course_config.default_branch, - ) - merged_master_mrs = full_project.mergerequests.list( - get_all=True, state='merged', target_branch=course_config.default_branch, - ) - closed_master_mrs = full_project.mergerequests.list( - get_all=True, state='closed', target_branch=course_config.default_branch, - ) - - if not opened_master_mrs and not merged_master_mrs and not closed_master_mrs: - print_info('no open mrs; skip it') - continue - - print_info( - f'opened_master_mrs {len(opened_master_mrs)} \t ' - f'merged_master_mrs {len(merged_master_mrs)} \t ' - f'closed_master_mrs {len(closed_master_mrs)}', - color='grey', - ) - - # Check basic checklist - print_info('Lookup checklist') - for mr in opened_master_mrs: # type: ignore - print_info(f'Checking MR#{mr.iid} <{mr.title}> ({mr.state})...', color='white') - print_info(mr.web_url, color='white') - if mr.title.lower().startswith('wip:') or mr.title.lower().startswith('draft:'): - print_info('Draft MR - skip it.') - continue - _single_mr_check_basic_checklist( - mr, tag_to_folder, dry_run=dry_run, - ) - - # Check score - print_info('Lookup score to set') - for mr in [*opened_master_mrs, *merged_master_mrs, *closed_master_mrs]: - print_info(f'Checking MR#{mr.iid} <{mr.title}> ({mr.state})...', color='white') - print_info(mr.web_url, color='white') - if mr.title.lower().startswith('wip:') or mr.title.lower().startswith('draft:'): - print_info('Draft MR - skip it.') - continue - _singe_mr_grade_score_new( - course_config, course_schedule, mr, tag_to_folder, id_to_tutor, user_id, dry_run=dry_run, - ) - - -def _get_tag_to_folder_dict(course_schedule: CourseSchedule, 
course_driver: CourseDriver) -> dict[str, str]: - tag_to_folder: dict[str, str] = {} - for task in course_schedule.get_tasks(enabled=True): - if task.review: - source_dir = course_driver.get_task_dir(task) - print_info(f'task "{task.name}" review=true with source_dir={source_dir}', color='grey') - if source_dir is None: - print_info(' source_dir is None, skip it', color='grey') - continue - tag_to_folder[task.name] = str(source_dir.relative_to(course_driver.root_dir)) - - return tag_to_folder - - -def _singe_mr_grade_score_new( - course_config: CourseConfig, - course_schedule: CourseSchedule, - mr: gitlab.v4.objects.GroupMergeRequest, - tag_to_folder: dict[str, str], - tutors_dict: dict[int, gitlab.v4.objects.GroupMember], - user_id: int, - *, - dry_run: bool = False, -) -> None: - """ - Get single MR, find or create score discussion and set a score from it - Looking for comment by tutor under '#### MR score discussion:' - """ - print_info('labels', mr.labels, color='grey') - print_info('source_branch', mr.source_branch, color='grey') - - # Search for tags - tag = None - for search_tag in tag_to_folder: - if search_tag in mr.labels: - tag = search_tag - break - - if not tag: - print_info(f'Can not find any of {tag_to_folder.keys()} in MR tags ({mr.labels}). Skip it') - return - - # get actual task - task_name = tag_to_folder[tag].split('/')[-1] - print_info('task_name', task_name, color='grey') - max_task_score = course_schedule.tasks[task_name].max_score - print_info('max_task_score', max_task_score, color='grey') - - # Try to find score discussion - mr_score_discussion = None - for discussion in mr.discussions.list(get_all=True): - first_note_id = discussion.attributes['notes'][0]['id'] - first_note = discussion.notes.get(first_note_id) - - if '#### MR score discussion:' in first_note.body: - mr_score_discussion = discussion - break - if not mr_score_discussion: - mr_score_discussion = mr.discussions.create({ - 'body': '\n '.join([ - '#### MR score discussion:', - '', - 'After review an examiner will put your score in a response to this discussion', - f'than he/she will set `{REVIEWED_TAG}` label.', - '', - 'If the score is not registered in the table, you need to restart the `grade-mr` job', - '(last score accounted)' - ]) - }) - try: - mr_score_discussion.save() - except Exception: - print_info('ERROR with saving mr_score_discussion', color='orange') - mr.save() - - if REVIEWED_TAG not in mr.labels: - print_info(f'No `{REVIEWED_TAG}` tag. Skip it') - return - - # get scores - notes = [ - mr_score_discussion.notes.get(note['id']) - for note in mr_score_discussion.attributes['notes'] - ] - notes = notes[1:] - - if not notes: - print_info('No replays on discussion note. Skip it.', color='grey') - return - - if 'Score' in notes[-1].body and 'set' in notes[-1].body: - print_info('Score already set. Skip it.', color='grey') - return - - score_notes: list[tuple[int, gitlab.v4.objects.ProjectMergeRequestDiscussionNote]] = [] - for note in notes: - if note.author['id'] not in tutors_dict: - continue - - try: - note_score = int(note.body) - except Exception: - continue - - if note.updated_at != note.created_at: - print_info('Note was edited. Please, create a new one! Skip it.', color='grey') - note.body = note.body + '\n ' + '*(Note was edited. Please, create a new one! Skip it.)*' - note.save() - continue - score_notes.append((note_score, note)) - - if not score_notes: - print_info('No score replays on discussion note. 
Skip it.', color='grey') - return - - # set score from last score - last_score, last_note = score_notes[-1] - - try: - if not course_config.manytask_token: - raise PushFailedError('Unable to find manytask token') - - username, score, _, _, _ = push_report( - course_config.manytask_url, - course_config.manytask_token, - task_name, - user_id, - last_score, - check_deadline=False, - use_demand_multiplier=False, - ) - print_info( - f'Set score for @{username}: {score}', - color='blue', - ) - # print_info(f'Submit at {commit_time} (deadline is calculated relative to)', color='grey') - except PushFailedError: - raise - - mr_score_discussion.notes.create({'body': f'Score {last_score} set'}) - try: - mr_score_discussion.save() - except Exception: - print_info('ERROR with saving mr_score_discussion', color='orange') - print_info(f'Score {last_score} set', color='grey') - - -def _single_mr_check_basic_checklist( - mr: gitlab.v4.objects.GroupMergeRequest, - tag_to_folder: dict[str, str], - *, - dry_run: bool = False, -) -> None: - print_info('pipelines', [i.status for i in mr.pipelines.list(get_all=True)], color='grey') - print_info('labels', mr.labels, color='grey') - print_info('source_branch', mr.source_branch, color='grey') - - # Search for tags - tag = None - for search_tag in tag_to_folder: - if search_tag in mr.labels: - tag = search_tag - break - - if not tag: - print_info(f'Can not find any of {tag_to_folder.keys()} in MR tags ({mr.labels}). Skip it') - return - - for banned_tag in BASIC_CHECKLIST_BANNED_TAGS: - if banned_tag in mr.labels: - print_info(f'Have `{banned_tag}` tag. Skip it') - return - - # Check status - changes = mr.changes() - is_conflict = mr.has_conflicts or mr.merge_status == 'cannot_be_merged' - have_no_conflicts = not is_conflict - - # Check pipelines - if not changes or not changes['head_pipeline']: - head_pipeline_status = 'failed' - else: - head_pipeline_status = changes['head_pipeline']['status'] - pipeline_passed = head_pipeline_status == 'success' - - # changes files - file_changed: set[str] = set() - for change in changes['changes']: - file_changed.update({change['old_path'], change['new_path']}) - print_info('file_changed', file_changed, color='grey') - - # Check single folder or not - folder_prefix = tag_to_folder[tag] - print_info('folder_prefix', folder_prefix, color='grey') - is_single_folder = True - wrong_folder = None - for file in file_changed: - if not file.startswith(folder_prefix): - # print_info(f'file {file} not startswith {folder_prefix}', color='grey') - is_single_folder = False - wrong_folder = file - break - - # Check extensions - have_no_additional_files = True - wrong_file = None - for file in file_changed: - if file.split('.')[-1] in BANNED_FILE_EXTENSIONS: - print_info(f' check {file}', color='grey') - if any(file.endswith(allowed_file) for allowed_file in ALLOWED_FILES): - continue - have_no_additional_files = False - wrong_file = file - break - - # Get Mr checks discussions - mr_checklist_discussion = None - mr_checklist_note = None - for discussion in mr.discussions.list(get_all=True): - first_note_id = discussion.attributes['notes'][0]['id'] - note = discussion.notes.get(first_note_id) - - if '#### MR checklist (basic checks):' in note.body or '[MR check in progress...]' in note.body: - mr_checklist_discussion = discussion - mr_checklist_note = note - break - if not mr_checklist_discussion: - mr_checklist_discussion = mr.discussions.create({'body': '[MR check in progress...]'}) - first_note_id = 
mr_checklist_discussion.attributes['notes'][0]['id'] - mr_checklist_note = mr_checklist_discussion.notes.get(first_note_id) - - assert mr_checklist_discussion - assert mr_checklist_note - - # Generate note - checks_ok = is_single_folder and have_no_additional_files and pipeline_passed and have_no_conflicts - try: - _first_try_correct_search = re.search(r'first try correct: (False|True)', mr_checklist_note.body) - assert _first_try_correct_search - _first_try_correct_str = _first_try_correct_search.group(0) - _is_first_try_correct_search = re.search(r'(False|True)', _first_try_correct_str) - assert _is_first_try_correct_search - is_first_try_correct = _is_first_try_correct_search.group(0) == 'True' - except (ValueError, AttributeError, AssertionError): - is_first_try_correct = checks_ok - try: - _updates_num_search = re.search(r'checks num: (\d+)', mr_checklist_note.body) - assert _updates_num_search - _updates_num_str = _updates_num_search.group(0) - current_updates_num_search = re.search(r'(\d+)', _updates_num_str) - assert current_updates_num_search - current_updates_num = int(current_updates_num_search.group(0)) - except (ValueError, AttributeError, AssertionError): - current_updates_num = 0 - now_str = str(datetime.now()) - checklist_note_msg = [ - '#### MR checklist (basic checks):', - '', - 'Hi! I\'m course bot; I\'m here to check your merge request.', - 'Below you will find a checklist to verify your MR.', - '', - f'_first try correct: {is_first_try_correct}_', - f'_checks num: {current_updates_num + 1}_', - f'_last check time: {now_str}_', - '', - f'- [x] `{tag}` tag exists', - f'- [{"x" if is_single_folder else " "}] all changes in single folder ' - f'{f"(found {wrong_folder})" if not is_single_folder else ""}', - f'- [{"x" if have_no_additional_files else " "}] no additional files ' - f'{f"(found {wrong_file})" if not have_no_additional_files else ""}', - f'- [{"x" if pipeline_passed else " "}] pipeline passed (current status: {head_pipeline_status})', - f'- [{"x" if have_no_conflicts else " "}] have no merge conflicts', - '', - ] - - # TODO: fix it - if tag == 'cinemabot': - have_bot_tag = '@' in mr.description - checks_ok = checks_ok and have_bot_tag - checklist_note_msg.insert(-1, f'- [{"x" if have_bot_tag else " "}] placed @bot_tag in description') - - # Update MR check discussion - if checks_ok: - checklist_note_msg.append('💪 **Ok.** Basic checks have been passed!') - checklist_note_msg.append('Please, wait for a examiner to check it manually.') - mr_checklist_discussion.resolved = True - mr.labels = list({*mr.labels, 'checklist'}) - else: - checklist_note_msg.append('🔥 **Oops!** There are some errors;') - checklist_note_msg.append('Please, correct it.') - mr_checklist_discussion.resolved = False - mr.labels = list({*mr.labels, 'fix it'}) - # print_info(' \n '.join(checklist_note_msg)) - mr_checklist_note.body = ' \n'.join(checklist_note_msg) - mr_checklist_note.save() - mr_checklist_discussion.save() - mr.save() - - print_info('Checklist updated') diff --git a/checker/configs/__init__.py b/checker/configs/__init__.py new file mode 100644 index 0000000..4e2d54d --- /dev/null +++ b/checker/configs/__init__.py @@ -0,0 +1,4 @@ +from .checker import CheckerTestingConfig # noqa: F401 +from .checker import CheckerConfig, CheckerExportConfig, CheckerStructureConfig, PipelineStageConfig # noqa: F401 +from .deadlines import DeadlinesConfig, DeadlinesGroupConfig, DeadlinesSettingsConfig, DeadlinesTaskConfig # noqa: F401 +from .task import TaskConfig # noqa: F401 diff --git 
a/checker/configs/checker.py b/checker/configs/checker.py new file mode 100644 index 0000000..ce3d8c5 --- /dev/null +++ b/checker/configs/checker.py @@ -0,0 +1,111 @@ +from __future__ import annotations + +from enum import Enum +from typing import Optional, Union + +from pydantic import AnyUrl, Field, RootModel, ValidationError, field_validator + +from .utils import CustomBaseModel, YamlLoaderMixin + + +# Note: old Union style in definition for backward compatibility +TParamType = Union[bool, int, float, str, list[Union[int, float, str, None]], None] +TTemplate = Union[str, list[Union[TParamType, str]], dict[str, Union[TParamType, str]]] + + +class CheckerStructureConfig(CustomBaseModel): + # Note: use Optional/Union[...] instead of ... | None as pydantic does not support | in older python versions + ignore_patterns: Optional[list[str]] = None + private_patterns: Optional[list[str]] = None + public_patterns: Optional[list[str]] = None + # TODO: add check "**" is not allowed + + +class CheckerParametersConfig(RootModel[dict[str, TParamType]]): + root: dict[str, TParamType] + + def __getitem__(self, item: str) -> TParamType: + return self.root[item] + + def __contains__(self, item: str) -> bool: + return item in self.root + + @property + def __dict__(self) -> dict[str, TParamType]: + return self.root + + @__dict__.setter + def __dict__(self, value: dict[str, TParamType]) -> None: + self.root = value + + +class CheckerExportConfig(CustomBaseModel): + class TemplateType(Enum): + SEARCH = "search" + CREATE = "create" + + destination: AnyUrl + default_branch: str = "main" + commit_message: str = "chore(auto): export new tasks" + templates: TemplateType = TemplateType.SEARCH + + +class PipelineStageConfig(CustomBaseModel): + class FailType(Enum): + FAST = "fast" + AFTER_ALL = "after_all" + NEVER = "never" + + name: str + run: str + + # Note: use Optional/Union[...] instead of ... | None as pydantic does not support | in older python versions + args: dict[str, Union[TParamType, TTemplate]] = Field(default_factory=dict) + + run_if: Union[bool, TTemplate, None] = None + fail: FailType = FailType.FAST + + # save pipline stage result to context under this key + register_output: Optional[str] = None + + +class CheckerTestingConfig(CustomBaseModel): + class ChangesDetectionType(Enum): + BRANCH_NAME = "branch_name" + COMMIT_MESSAGE = "commit_message" + LAST_COMMIT_CHANGES = "last_commit_changes" + FILES_CHANGED = "files_changed" + + changes_detection: ChangesDetectionType = ChangesDetectionType.LAST_COMMIT_CHANGES + + search_plugins: list[str] = Field(default_factory=list) + + global_pipeline: list[PipelineStageConfig] = Field(default_factory=list) + tasks_pipeline: list[PipelineStageConfig] = Field(default_factory=list) + report_pipeline: list[PipelineStageConfig] = Field(default_factory=list) + + +class CheckerConfig(CustomBaseModel, YamlLoaderMixin["CheckerConfig"]): + """ + Checker configuration. 
+ :ivar version: config version + :ivar default_parameters: default parameters for task pipeline + :ivar structure: describe the structure of the repo - private/public and allowed for change files + :ivar export: describe export (publishing to public repo) + :ivar manytask: describe connection to manytask + :ivar testing: describe testing/checking - pipeline, isolation etc + """ + + version: int + + default_parameters: CheckerParametersConfig = Field(default_factory=dict) + + structure: CheckerStructureConfig + export: CheckerExportConfig + testing: CheckerTestingConfig + + @field_validator("version") + @classmethod + def check_version(cls, v: int) -> None: + if v != 1: + raise ValidationError(f"Only version 1 is supported for {cls.__name__}") diff --git a/checker/configs/deadlines.py b/checker/configs/deadlines.py new file mode 100644 index 0000000..4cda0fa --- /dev/null +++ b/checker/configs/deadlines.py @@ -0,0 +1,181 @@ +from __future__ import annotations + +import sys +from datetime import datetime, timedelta +from enum import Enum +from typing import Optional, Union + + +if sys.version_info < (3, 8): + from pytz import ZoneInfoNotFoundError as ZoneInfoNotFoundError, timezone as ZoneInfo +else: + from zoneinfo import ZoneInfo, ZoneInfoNotFoundError + +from pydantic import AnyUrl, Field, field_validator, model_validator + +from .utils import CustomBaseModel, YamlLoaderMixin + + +class DeadlinesType(Enum): + HARD = "hard" + INTERPOLATE = "interpolate" + + +class DeadlinesSettingsConfig(CustomBaseModel): + timezone: str + + # Note: use Optional/Union[...] instead of ... | None as pydantic does not support | in older python versions + deadlines: DeadlinesType = DeadlinesType.HARD + max_submissions: Optional[int] = None + submission_penalty: float = 0 + + task_url: Optional[AnyUrl] = None # $GROUP_NAME $TASK_NAME vars are available + + @field_validator("task_url") + @classmethod + def check_task_url(cls, data: AnyUrl | None) -> AnyUrl | None: + if data is not None and data.scheme not in ("http", "https"): + raise ValueError("task_url should be http or https") + return data + + @field_validator("max_submissions") + @classmethod + def check_max_submissions(cls, data: int | None) -> int | None: + if data is not None and data <= 0: + raise ValueError("max_submissions should be positive") + return data + + @field_validator("timezone") + @classmethod + def check_valid_timezone(cls, timezone: str) -> str: + try: + ZoneInfo(timezone) + except ZoneInfoNotFoundError as e: + raise ValueError(str(e)) + return timezone + + +class DeadlinesTaskConfig(CustomBaseModel): + task: str + + enabled: bool = True + + score: int + bonus: int = 0 + special: int = 0 + + # Note: use Optional/Union[...] instead of ... | None as pydantic does not support | in older python versions + url: Optional[AnyUrl] = None + + @property + def name(self) -> str: + return self.task + + +class DeadlinesGroupConfig(CustomBaseModel): + group: str + + enabled: bool = True + + # Note: use Optional/Union[...] instead of ... 
| None as pydantic does not support | in older python versions + start: datetime + steps: dict[float, Union[datetime, timedelta]] = Field(default_factory=dict) + end: Union[datetime, timedelta, None] = None + + tasks: list[DeadlinesTaskConfig] = Field(default_factory=list) + + @property + def name(self) -> str: + return self.group + + @model_validator(mode="after") + def check_dates(self) -> "DeadlinesGroupConfig": + # check end + if isinstance(self.end, timedelta) and self.end < timedelta(): + raise ValueError(f"end timedelta <{self.end}> should be positive") + if isinstance(self.end, datetime) and self.end < self.start: + raise ValueError(f"end datetime <{self.end}> should be after the start <{self.start}>") + + # check steps + last_step_date_or_delta: datetime | timedelta = self.start + for _, date_or_delta in self.steps.items(): + step_date = self.start + date_or_delta if isinstance(date_or_delta, timedelta) else date_or_delta + last_step_date = ( + self.start + last_step_date_or_delta + if isinstance(last_step_date_or_delta, timedelta) + else last_step_date_or_delta + ) + + if isinstance(date_or_delta, timedelta) and date_or_delta < timedelta(): + raise ValueError(f"step timedelta <{date_or_delta}> should be positive") + if isinstance(date_or_delta, datetime) and date_or_delta <= self.start: + raise ValueError(f"step datetime <{date_or_delta}> should be after the start {self.start}") + + if step_date <= last_step_date: + raise ValueError( + f"step datetime/timedelta <{date_or_delta}> " + f"should be after the last step <{last_step_date_or_delta}>" + ) + last_step_date_or_delta = date_or_delta + + return self + + +class DeadlinesConfig(CustomBaseModel, YamlLoaderMixin["DeadlinesConfig"]): + """Deadlines configuration.""" + + version: int + + settings: DeadlinesSettingsConfig + schedule: list[DeadlinesGroupConfig] + + def get_groups( + self, + enabled: bool | None = None, + ) -> list[DeadlinesGroupConfig]: + groups = [group for group in self.schedule] + + if enabled is not None: + groups = [group for group in groups if group.enabled == enabled] + + # TODO: check time + + return groups + + def get_tasks( + self, + enabled: bool | None = None, + ) -> list[DeadlinesTaskConfig]: + tasks = [task for group in self.get_groups(enabled=enabled) for task in group.tasks] + + if enabled is not None: + tasks = [task for task in tasks if task.enabled == enabled] + + # TODO: check time + + return tasks + + @field_validator("version") + @classmethod + def check_version(cls, data: int) -> int: + if data != 1: + raise ValueError(f"Only version 1 is supported for {cls.__name__}") + return data + + @field_validator("schedule") + @classmethod + def check_group_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[DeadlinesGroupConfig]: + groups = [group.name for group in data] + duplicates = [name for name in groups if groups.count(name) > 1] + if duplicates: + raise ValueError(f"Group names should be unique, duplicates: {duplicates}") + return data + + @field_validator("schedule") + @classmethod + def check_task_names_unique(cls, data: list[DeadlinesGroupConfig]) -> list[DeadlinesGroupConfig]: + tasks_names = [task.name for group in data for task in group.tasks] + duplicates = [name for name in tasks_names if tasks_names.count(name) > 1] + if duplicates: + raise ValueError(f"Task names should be unique, duplicates: {duplicates}") + return data diff --git a/checker/configs/task.py b/checker/configs/task.py new file mode 100644 index 0000000..85eff5b --- /dev/null +++ b/checker/configs/task.py @@ -0,0 
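Taken together, checker.py and deadlines.py define the new course configuration surface as plain pydantic models, so the accepted shape can be shown with a short Python sketch. This is a minimal, hypothetical example: the destination URL, group and task names, dates, timezone, the "pytest" plugin name, and the "timeout" parameter are invented for illustration; it only assumes the models coerce nested dicts the way standard pydantic v2 models do.

from datetime import datetime, timedelta

from checker.configs import CheckerConfig, DeadlinesConfig

# Hypothetical checker config: repo structure, export target, and one task-pipeline stage.
checker_config = CheckerConfig(
    version=1,
    default_parameters={"timeout": 60},
    structure={"public_patterns": ["README.md"], "private_patterns": [".*"]},
    export={"destination": "https://gitlab.example.com/courses/public-repo"},
    testing={"tasks_pipeline": [{"name": "run tests", "run": "pytest", "args": {"timeout": 60}}]},
)

# Hypothetical deadlines config: one group with a single task, 50% credit a week after the start.
deadlines = DeadlinesConfig(
    version=1,
    settings={"timezone": "Europe/Moscow"},
    schedule=[
        {
            "group": "intro",
            "start": datetime(2024, 9, 2, 18, 0),
            "steps": {0.5: timedelta(days=7)},
            "tasks": [{"task": "hello-world", "score": 10}],
        }
    ],
)

print([task.name for task in deadlines.get_tasks(enabled=True)])  # ['hello-world']

Both models forbid unknown keys (extra="forbid" via CustomBaseModel), so a misspelled field fails at validation time instead of surfacing later in a pipeline run.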
+1,18 @@ +from __future__ import annotations + +from typing import Optional + +from .checker import CheckerParametersConfig, CheckerStructureConfig, PipelineStageConfig +from .utils import CustomBaseModel, YamlLoaderMixin + + +class TaskConfig(CustomBaseModel, YamlLoaderMixin["TaskConfig"]): + """Task configuration file.""" + + version: int # if config exists, version is always present + + # Note: use Optional[...] instead of ... | None as pydantic does not support | in older python versions + structure: Optional[CheckerStructureConfig] = None + parameters: Optional[CheckerParametersConfig] = None + task_pipeline: Optional[list[PipelineStageConfig]] = None + report_pipeline: Optional[list[PipelineStageConfig]] = None diff --git a/checker/configs/utils.py b/checker/configs/utils.py new file mode 100644 index 0000000..d8d313b --- /dev/null +++ b/checker/configs/utils.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from pathlib import Path +from typing import Any, Generic, TypeVar + +import pydantic +import yaml + +from ..exceptions import BadConfig + + +class CustomBaseModel(pydantic.BaseModel): + model_config = pydantic.ConfigDict(extra="forbid", validate_default=True) + + +T = TypeVar("T", bound=pydantic.BaseModel) + + +class YamlLoaderMixin(Generic[T]): + @classmethod + def from_yaml(cls: type[T], path: Path) -> T: # type: ignore[misc] + try: + with path.open() as f: + return cls(**yaml.safe_load(f)) + except FileNotFoundError: + raise BadConfig(f"File {path} not found") + except TypeError as e: + raise BadConfig(f"Config YAML error:\n{e}") + except yaml.YAMLError as e: + raise BadConfig(f"Config YAML error:\n{e}") + except pydantic.ValidationError as e: + raise BadConfig(f"Config Validation error:\n{e}") + + def to_yaml(self: T, path: Path) -> None: # type: ignore[misc] + with path.open("w") as f: + yaml.dump(self.model_dump(), f) + + @classmethod + def get_json_schema(cls: type[T]) -> dict[str, Any]: # type: ignore[misc] + return cls.model_json_schema() diff --git a/checker/course.py b/checker/course.py new file mode 100644 index 0000000..d12ab33 --- /dev/null +++ b/checker/course.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +from collections.abc import Generator +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +from .configs import DeadlinesConfig, TaskConfig +from .exceptions import BadConfig + + +@dataclass +class FileSystemTask: + name: str + relative_path: str + config: TaskConfig | None = None + + +@dataclass +class FileSystemGroup: + name: str + relative_path: str + tasks: list[FileSystemTask] + + +class Course: + """ + Class operates deadlines (filter, search etc), timezones and mapping tasks and groups to file system. + Only operates with tasks and groups existing in file system. 
+ """ + + TASK_CONFIG_NAME = ".task.yml" + + def __init__( + self, + deadlines: DeadlinesConfig, + repository_root: Path, + reference_root: Path | None = None, + ): + self.deadlines = deadlines + + self.repository_root = repository_root + self.reference_root = reference_root or repository_root + + self.potential_groups = {group.name: group for group in self._search_potential_groups(self.repository_root)} + self.potential_tasks = {task.name: task for group in self.potential_groups.values() for task in group.tasks} + + def validate(self) -> None: + # check all groups and tasks mentioned in deadlines exists + deadlines_groups = self.deadlines.get_groups(enabled=True) + for deadline_group in deadlines_groups: + if deadline_group.name not in self.potential_groups: + raise BadConfig(f"Group {deadline_group.name} not found in repository") + + deadlines_tasks = self.deadlines.get_tasks(enabled=True) + for deadlines_task in deadlines_tasks: + if deadlines_task.name not in self.potential_tasks: + raise BadConfig(f"Task {deadlines_task.name} of not found in repository") + + def get_groups( + self, + enabled: bool | None = None, + ) -> list[FileSystemGroup]: + return [ + self.potential_groups[deadline_group.name] + for deadline_group in self.deadlines.get_groups(enabled=enabled) + if deadline_group.name in self.potential_groups + ] + + def get_tasks( + self, + enabled: bool | None = None, + ) -> list[FileSystemTask]: + return [ + self.potential_tasks[deadline_task.name] + for deadline_task in self.deadlines.get_tasks(enabled=enabled) + if deadline_task.name in self.potential_tasks + ] + + @staticmethod + def _search_potential_groups(root: Path) -> list[FileSystemGroup]: + # search in the format $GROUP_NAME/$TASK_NAME starting root + potential_groups = [] + + for group_path in root.iterdir(): + if not group_path.is_dir(): + continue + + potential_tasks = [] + + for task_path in group_path.iterdir(): + if not task_path.is_dir(): + continue + + task_config_path = task_path / Course.TASK_CONFIG_NAME + task_config: TaskConfig | None = None + if task_config_path.exists(): + try: + task_config = TaskConfig.from_yaml(task_config_path) + except BadConfig as e: + raise BadConfig(f"Task config {task_config_path} is invalid:\n{e}") + + potential_tasks.append( + FileSystemTask( + name=task_path.name, + relative_path=str(task_path.relative_to(root)), + config=task_config, + ) + ) + + potential_groups.append( + FileSystemGroup( + name=group_path.name, + relative_path=str(group_path.relative_to(root)), + tasks=potential_tasks, + ) + ) + return potential_groups + + @staticmethod + def _search_for_tasks_by_configs( + root: Path, + ) -> Generator[FileSystemTask, Any, None]: + for task_config_path in root.glob(f"**/{Course.TASK_CONFIG_NAME}"): + task_config = TaskConfig.from_yaml(task_config_path) + yield FileSystemTask( + name=task_config_path.parent.name, + relative_path=str(task_config_path.parent.relative_to(root)), + config=task_config, + ) diff --git a/checker/course/__init__.py b/checker/course/__init__.py deleted file mode 100644 index 4d31b48..0000000 --- a/checker/course/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .config import CourseConfig # noqa: F401 -from .driver import CourseDriver # noqa: F401 -from .schedule import CourseSchedule, Group, Task # noqa: F401 diff --git a/checker/course/config.py b/checker/course/config.py deleted file mode 100644 index ef192f6..0000000 --- a/checker/course/config.py +++ /dev/null @@ -1,94 +0,0 @@ -""" -All course configurations -Include tests, layout, manytask url, 
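The new Course class then maps an already-validated DeadlinesConfig onto $GROUP_NAME/$TASK_NAME folders and picks up optional .task.yml overrides while scanning. Below is a minimal wiring sketch, assuming a real checkout at a hypothetical path and a deadlines file named .deadlines.yml at its root; the file name and path are assumptions, while everything else uses names defined above (DeadlinesConfig.from_yaml, Course, BadConfig, FileSystemTask.name/relative_path/config).

from pathlib import Path

from checker.configs import DeadlinesConfig
from checker.course import Course
from checker.exceptions import BadConfig

repo_root = Path("/path/to/private-course-repo")  # hypothetical checkout that exists on disk

try:
    # YamlLoaderMixin.from_yaml wraps YAML and validation errors into BadConfig
    deadlines = DeadlinesConfig.from_yaml(repo_root / ".deadlines.yml")
    course = Course(deadlines=deadlines, repository_root=repo_root)
    course.validate()  # every enabled group/task from the deadlines must exist in the file system
except BadConfig as exc:
    raise SystemExit(f"Invalid course configuration: {exc}")

for task in course.get_tasks(enabled=True):
    print(task.name, task.relative_path)
    if task.config is not None and task.config.parameters is not None:
        print("  parameter overrides:", task.config.parameters.root)  # parsed from the task's .task.yml

Because _search_potential_groups parses each .task.yml eagerly, a broken per-task config surfaces as BadConfig when the Course is constructed rather than in the middle of testing.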
gitlab urls, etc. settings -""" -from __future__ import annotations - -import os -from dataclasses import InitVar, dataclass -from pathlib import Path - -import yaml - -from ..exceptions import BadConfig -from ..utils.print import print_info - - -@dataclass -class CourseConfig: - # main course settings - name: str - - # course - deadlines: str - - # checker - system: str - templates: str # create or explicit - - # manytask - manytask_url: str - - # gitlab - course_group: str - public_repo: str - students_group: str - lectures_repo: str | None = None - default_branch: str = 'main' - gitlab_url: str = 'https://gitlab.manytask.org' - gitlab_service_username: str = 'manytask' - gitlab_service_email: str = 'no-reply@gitlab.manytask.org' - gitlab_service_name: str = 'Manytask Bot' - - # course default - second_deadline_max: float = 0.5 - low_demand_bonus_bound: float = 1.0 - max_low_demand_bonus: float = 1.0 - - # checker default - layout: str = 'groups' - executor: str = 'sandbox' - tester_path: str | None = None - - # info - links: dict[str, str] | None = None - - # credentials - manytask_token: str | None = None - gitlab_service_token: str | None = None - gitlab_api_token: str | None = None - - manytask_token_id: InitVar[str] = 'TESTER_TOKEN' - gitlab_service_token_id: InitVar[str] = 'GITLAB_SERVICE_TOKEN' - gitlab_api_token_id: InitVar[str] = 'GITLAB_API_TOKEN' - - def __post_init__( - self, - manytask_token_id: str, - gitlab_service_token_id: str, - gitlab_api_token_id: str, - ) -> None: - self.manytask_token = os.environ.get(manytask_token_id) - if not self.manytask_token: - print_info(f'Unable to find env <{manytask_token_id}>', color='orange') - - self.gitlab_service_token = os.environ.get(gitlab_service_token_id) - if not self.gitlab_service_token: - print_info(f'Unable to find env <{gitlab_service_token_id}>', color='orange') - - self.gitlab_api_token = os.environ.get(gitlab_api_token_id) - if not self.gitlab_api_token: - print_info(f'Unable to find env <{gitlab_api_token_id}>', color='orange') - - @classmethod - def from_yaml(cls, course_config: Path) -> 'CourseConfig': - try: - with open(course_config) as config_file: - config_dict = yaml.safe_load(config_file) - except (yaml.YAMLError, FileNotFoundError) as e: - raise BadConfig(f'Unable to load deadlines config file <{course_config}>') from e - - try: - return cls(**config_dict) - except (KeyError, TypeError, ValueError) as e: - raise BadConfig('Invalid course config') from e diff --git a/checker/course/driver.py b/checker/course/driver.py deleted file mode 100644 index abe972c..0000000 --- a/checker/course/driver.py +++ /dev/null @@ -1,408 +0,0 @@ -""" -Classes to map course schedule to real filesystem -""" -from __future__ import annotations - -import os -from pathlib import Path -from warnings import warn - -from ..exceptions import BadConfig -from ..utils import print_info -from .schedule import Group, Task - - -class CourseDriver: - """The interlayer between course and file system - Course can have different layouts; - You can select 2 layouts: for private and for public repo - for script to know how to read private script and how to write to the public repo - Now implemented: @see self.PUBLIC_LAYOUTS, self.PRIVATE_LAYOUTS - - * flat [deprecated] (public & private) - - .gitignore - - .gitlab-ci.yml - - .releaser-ci.yml - - README.md - - task_1/ - - ... - - tests/ - - .course.yml - - .deadlines.yml - - task_1/ - - ... 
- - * groups (public & private) - - .course.yml - - .deadlines.yml - - .gitignore - - .gitlab-ci.yml - - .releaser-ci.yml - - README.md - - group_1/ - - task_1/ - - ... - - ... - - lectures/ - - group_1/ - - ... - - solutions/ - - group_1/ - - ... - - tests/ - - group_1/ - - task_1/ - - ... - - * lectures (private) - - .course.yml - - .deadlines.yml - - .gitignore - - .gitlab-ci.yml - - .releaser-ci.yml - - README.md - - group_1/ - lecture/ [optional] - review/ [optional] - tasks/ - - task_1/ - - private/ [optional] - - test_private.py - - public/ [optional] - - test_public.py - - template/ [optional] - - solution.py - - solution/ - - solution.py - - README.md - - .tester.json [optional] - - ... - - ... - * lectures (public) - - ... - - group_1/ - lecture/ [optional] - review/ [optional] - tasks/ - - task_1/ - - test_public.py - - solution.py - - README.md - - .tester.json [optional] - - ... - - ... - - For templates: - * search - will search template in public folder - * create - will search gold solution in private folder and create template from it - * create_or_search - will search template in public folder or will create template from gold solution - """ - - LAYOUTS = ['flat', 'groups', 'lectures'] - TEMPLATES = ['create', 'search', 'create_or_search'] - REPO_TYPES = ['public', 'private'] - - def __init__( - self, - root_dir: Path, - repo_type: str = 'public', - layout: str = 'groups', - template: str = 'search', - ): - """ - @param root_dir: Root folder of the repo to be a driver on - @param repo_type: Type of repository public (students repos / public) or private (main private repo) - @param layout: @see available LAYOUTS in class docstring - @param template: @see available TEMPLATES in class var and utils -> clear_gold_solution function - """ - - assert root_dir.exists(), f'Root dir <{root_dir}> not exists' - self.root_dir = root_dir - - assert repo_type in CourseDriver.REPO_TYPES, f'Repo type <{repo_type}> not in private, public' - self.repo_type = repo_type - - assert layout in CourseDriver.LAYOUTS, f'Course layout <{layout}> are not implemented' - if layout == 'flat': - warn(f'<{layout}> layout is deprecated', DeprecationWarning) - self.layout = layout - - assert template in CourseDriver.TEMPLATES, f'Template <{layout}> are not implemented' - self.template = template - - def get_deadlines_file_path( - self, - raise_if_not_exists: bool = True, - ) -> Path: - if self.repo_type == 'public': - raise BadConfig('Unable to find `deadlines` file in public repo') - - deadlines_file_path: Path - if self.layout == 'lectures': - deadlines_file_path = self.root_dir / '.deadlines.yml' - elif self.layout == 'groups': - deadlines_file_path = self.root_dir / '.deadlines.yml' - elif self.layout == 'flat': - deadlines_file_path = self.root_dir / 'tests' / '.deadlines.yml' - else: - assert False, 'Not Reachable' # pragma: no cover - - if raise_if_not_exists and (not deadlines_file_path or not deadlines_file_path.exists()): - raise BadConfig(f'Deadlines file <{deadlines_file_path}> not exists') - - return deadlines_file_path - - def get_group_lecture_dir( - self, - group: Group, - check_exists: bool = True, - ) -> Path | None: - lecture_dir: Path | None = None - - if self.layout == 'lectures': - lecture_dir = self.root_dir / group.name / 'lecture' - elif self.layout == 'groups': - lecture_dir = self.root_dir / 'lectures' / group.name - elif self.layout == 'flat': - lecture_dir = None - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and lecture_dir and not 
lecture_dir.exists(): - print_info(f'Lecture dir <{lecture_dir}> not exists, set to None.') - lecture_dir = None - - return lecture_dir - - def get_group_submissions_review_dir( - self, - group: Group, - check_exists: bool = True, - ) -> Path | None: - review_dir: Path | None = None - - if self.layout == 'lectures': - # both public and private - review_dir = self.root_dir / group.name / 'review' - elif self.layout == 'groups': - # both public and private - review_dir = self.root_dir / 'solutions' / group.name - elif self.layout == 'flat': - review_dir = None - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and review_dir and not review_dir.exists(): - print_info(f'Review dir <{review_dir}> not exists, set to None.') - review_dir = None - - return review_dir - - def get_group_dir( - self, - group: Group, - check_exists: bool = True, - ) -> Path | None: - group_root_dir: Path | None = None - - if self.layout == 'lectures': - group_root_dir = self.root_dir / group.name - elif self.layout == 'groups': - group_root_dir = self.root_dir / group.name - elif self.layout == 'flat': - group_root_dir = None - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and group_root_dir and not group_root_dir.exists(): - print_info(f'Group dir <{group_root_dir}> not exists, set to None.') - group_root_dir = None - - return group_root_dir - - def get_task_dir( - self, - task: Task, - check_exists: bool = True, - ) -> Path | None: - task_root_dir: Path | None = None - - if self.layout == 'lectures': - task_root_dir = self.root_dir / task.group.name / 'tasks' / task.name - elif self.layout == 'groups': - task_root_dir = self.root_dir / task.group.name / task.name - elif self.layout == 'flat': - task_root_dir = self.root_dir / task.name - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and task_root_dir and not task_root_dir.exists(): - print_info(f'Task dir <{task_root_dir}> not exists, set to None.') - task_root_dir = None - - return task_root_dir - - def get_task_solution_dir( - self, - task: Task, - check_exists: bool = True, - ) -> Path | None: - task_solution_dir: Path | None = None - - if self.layout == 'lectures': - if self.repo_type == 'private': - task_solution_dir = self.root_dir / task.group.name / 'tasks' / task.name / 'solution' - else: - task_solution_dir = self.root_dir / task.group.name / 'tasks' / task.name - elif self.layout == 'groups': - if self.repo_type == 'private': - task_solution_dir = self.root_dir / 'tests' / task.group.name / task.name - else: - task_solution_dir = self.root_dir / task.group.name / task.name - elif self.layout == 'flat': - if self.repo_type == 'private': - task_solution_dir = self.root_dir / 'tests' / task.name - else: - task_solution_dir = self.root_dir / task.name - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and task_solution_dir and not task_solution_dir.exists(): - print_info(f'Task solution dir <{task_solution_dir}> not exists, set to None.') - task_solution_dir = None - - return task_solution_dir - - def get_task_template_dir( - self, - task: Task, - check_exists: bool = True, - ) -> Path | None: - task_template_dir: Path | None = None - - if self.layout == 'lectures': - if self.repo_type == 'private': - task_template_dir = self.root_dir / task.group.name / 'tasks' / task.name / 'template' - else: - task_template_dir = self.root_dir / task.group.name / 'tasks' / task.name - elif self.layout == 'groups': - # both public and private - 
task_template_dir = self.root_dir / task.group.name / task.name - elif self.layout == 'flat': - # both public and private - task_template_dir = self.root_dir / task.name - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and task_template_dir and not task_template_dir.exists(): - print_info(f'Task template dir <{task_template_dir}> not exists, set to None.') - task_template_dir = None - - return task_template_dir - - def get_task_public_test_dir( - self, - task: Task, - check_exists: bool = True, - ) -> Path | None: - public_tests_dir: Path | None = None - - if self.layout == 'lectures': - if self.repo_type == 'private': - public_tests_dir = self.root_dir / task.group.name / 'tasks' / task.name / 'public' - else: - public_tests_dir = self.root_dir / task.group.name / 'tasks' / task.name - elif self.layout == 'groups': - # both public and private - public_tests_dir = self.root_dir / task.group.name / task.name - elif self.layout == 'flat': - # both public and private - public_tests_dir = self.root_dir / task.name - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and public_tests_dir and not public_tests_dir.exists(): - print_info(f'Task public tests dir <{public_tests_dir}> not exists, set to None.') - public_tests_dir = None - - return public_tests_dir - - def get_task_private_test_dir( - self, - task: Task, - check_exists: bool = True, - ) -> Path | None: - private_tests_dir: Path | None = None - - if self.layout == 'lectures': - if self.repo_type == 'private': - private_tests_dir = self.root_dir / task.group.name / 'tasks' / task.name / 'private' - else: - private_tests_dir = None - elif self.layout == 'groups': - if self.repo_type == 'private': - private_tests_dir = self.root_dir / 'tests' / task.group.name / task.name - else: - private_tests_dir = None - elif self.layout == 'flat': - if self.repo_type == 'private': - private_tests_dir = self.root_dir / 'tests' / task.name - else: - private_tests_dir = None - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and private_tests_dir and not private_tests_dir.exists(): - print_info(f'Task private tests dir <{private_tests_dir}> not exists, set to None.') - private_tests_dir = None - - return private_tests_dir - - def get_task_config_dir( - self, - task: Task, - check_exists: bool = True, - ) -> Path | None: - config_dir: Path | None = None - - if self.layout == 'lectures': - if self.repo_type == 'private': - config_dir = self.root_dir / task.group.name / 'tasks' / task.name - else: - config_dir = None - elif self.layout == 'groups': - if self.repo_type == 'private': - config_dir = self.root_dir / 'tests' / task.group.name / task.name - else: - config_dir = None - elif self.layout == 'flat': - if self.repo_type == 'private': - config_dir = self.root_dir / 'tests' / task.name - else: - config_dir = None - else: - assert False, 'Not Reachable' # pragma: no cover - - if check_exists and config_dir and not config_dir.exists(): - print_info(f'Task config dir <{config_dir}> not exists, set to None.') - config_dir = None - - return config_dir - - def get_task_dir_name( - self, - path: str, - ) -> str | None: - path_split = path.split(os.path.sep, maxsplit=3) - if len(path_split) < 2: # Changed file not in subdir - return None - if self.layout == 'lectures': - if len(path_split) < 3: - return None - return path_split[2] - elif self.layout == 'groups': - return path_split[1] - elif self.layout == 'flat': - return path_split[0] - else: - assert False, 'Not Reachable' # 
pragma: no cover diff --git a/checker/course/schedule.py b/checker/course/schedule.py deleted file mode 100644 index c3d9cd3..0000000 --- a/checker/course/schedule.py +++ /dev/null @@ -1,243 +0,0 @@ -""" -Classes for Course, Groups and Tasks -They interact ONLY with deadlines, do not test anything, know nothing about physical folders -""" -from __future__ import annotations - -from collections import OrderedDict -from dataclasses import dataclass, field -from datetime import datetime, timedelta -from pathlib import Path - -import yaml - -from ..exceptions import BadConfig, BadGroupConfig, BadTaskConfig - - -RESERVED_TASK_NAMES = [ - 'task', 'tasks', 'solution', 'solutions', 'test', 'tests', 'lecture', 'lectures', 'template', - 'templates', 'private', 'privates', 'public', 'publics', 'review', 'reviews', -] - - -@dataclass -class Task: - group: 'Group' - name: str - full_name: str = field(init=False) - - max_score: int - enabled: bool = True - scoring_func: str = 'max' - review: bool = False - marked: bool = False - - def __post_init__(self) -> None: - self.full_name = self.group.name + '/' + self.name - - assert self.name not in RESERVED_TASK_NAMES, f'Can not use {self.name} as task name as it is reserved' - - @property - def is_enabled(self) -> bool: - return self.enabled and self.group.is_enabled - - @property - def is_started(self) -> bool: - return self.is_enabled and self.group.is_started - - @property - def is_ended(self) -> bool: - return self.is_enabled and self.group.is_ended - - def get_task_deadline_percentage( - self, - submit_time: datetime | None = None, - extra_time: timedelta | None = None, - ) -> float: - return self.group.get_deadline_percentage(submit_time, extra_time) - - def get_is_overdue_first( - self, - submit_time: datetime | None = None, - extra_time: timedelta | None = None, - ) -> bool: - return self.group.get_is_overdue_first(submit_time=submit_time, extra_time=extra_time) - - def get_is_overdue_second( - self, - submit_time: datetime | None = None, - extra_time: timedelta | None = None, - ) -> bool: - return self.group.get_is_overdue_second(submit_time=submit_time, extra_time=extra_time) - - -@dataclass -class Group: - name: str - - start: datetime - deadline: datetime - second_deadline: datetime - - enabled: bool = True - marked: bool = False - - tasks: list[Task] = field(default_factory=list) - - @property - def max_score(self) -> int: - return sum([task.max_score for task in self.tasks]) - - @property - def is_enabled(self) -> bool: - return self.enabled - - @property - def is_started(self) -> bool: - return self.is_enabled and self.start < datetime.now() # TODO: check timezone - - @property - def is_ended(self) -> bool: - return self.is_enabled and self.second_deadline < datetime.now() # TODO: check timezone - - def get_deadline_percentage( - self, - submit_time: datetime | None = None, - extra_time: timedelta | None = None, - ) -> float: - extra_time = extra_time or timedelta() - submit_time = submit_time or datetime.now() # TODO: check timezone - if self.second_deadline == self.deadline: - return 1. if submit_time < self.second_deadline + extra_time else 0. 
- - deadlines_timedelta = self.second_deadline - self.deadline - overdue_timedelta = self.second_deadline + extra_time - submit_time - percentage = overdue_timedelta / deadlines_timedelta - return max(0., min(percentage, 1.)) - - def get_is_overdue_first( - self, - submit_time: datetime | None = None, - extra_time: timedelta | None = None, - ) -> bool: - return self.get_deadline_percentage(submit_time, extra_time) < 1. - - def get_is_overdue_second( - self, - submit_time: datetime | None = None, - extra_time: timedelta | None = None, - ) -> bool: - return self.get_deadline_percentage(submit_time, extra_time) == 0. - - -class CourseSchedule: - def __init__( - self, - deadlines_config: Path, - ): - try: - with open(deadlines_config) as config_file: - deadlines = yaml.safe_load(config_file) - except (yaml.YAMLError, FileNotFoundError) as e: - raise BadConfig(f'Unable to load deadlines config file <{deadlines_config}>') from e - - if not deadlines: - raise BadConfig(f'Empty config file <{deadlines_config}>') - - self.groups: OrderedDict[str, Group] = OrderedDict() - self.tasks: OrderedDict[str, Task] = OrderedDict() - - for group_config in deadlines: - group_name = None - try: - group_name = str(group_config.get('group')) - group_enabled = bool(group_config.get('enabled', True)) - - group_start = datetime.strptime(group_config.get('start'), '%d-%m-%Y %H:%M') - group_deadline = datetime.strptime(group_config.get('deadline'), '%d-%m-%Y %H:%M') - if second_deadline := group_config.get('second_deadline', None): - group_second_deadline = datetime.strptime(second_deadline, '%d-%m-%Y %H:%M') - else: - group_second_deadline = group_deadline - - group_marked = bool(group_config.get('marked', False)) - except (KeyError, TypeError, ValueError, AttributeError) as e: - raise BadGroupConfig(f'Group {group_name} has bad config') from e - - group = Group( - name=group_name, - enabled=group_enabled, - start=group_start, - deadline=group_deadline, - second_deadline=group_second_deadline, - marked=group_marked, - ) - - for task_config in group_config.get('tasks', []): - task_name = None - try: - task_name = task_config['task'] - task_score = int(task_config['score']) - task_enabled = task_config.get('enabled', True) - task_scoring_func = task_config.get('scoring_func', 'max') - task_is_review = task_config.get('review', False) - task_marked = task_config.get('marked', False) or group_marked - except (KeyError, TypeError, ValueError, AttributeError) as e: - raise BadTaskConfig(f'Task {task_name} has bad config') from e - - task = Task( - group=group, - name=task_name, - max_score=task_score, - enabled=task_enabled, - scoring_func=task_scoring_func, - review=task_is_review, - marked=task_marked, - ) - - if task_name in self.tasks: - raise BadTaskConfig(f'Unique violation error: task {task_name} already exists') - - self.tasks[task_name] = task - group.tasks.append(task) - - if group_name in self.groups: - raise BadGroupConfig(f'Unique violation error: group {group_name} already exists') - - self.groups[group_name] = group - - def get_tasks( - self, - *, - enabled: bool | None = None, - started: bool | None = None, - ended: bool | None = None, - ) -> list[Task]: - tasks: list[Task] = [task for task_name, task in self.tasks.items()] - - if enabled is not None: - tasks = [task for task in tasks if (task.is_enabled and task.group.is_enabled) == enabled] - if started is not None: - tasks = [task for task in tasks if task.group.is_started == started] - if ended is not None: - tasks = [task for task in tasks if 
task.group.is_ended == ended] - - return tasks - - def get_groups( - self, - *, - enabled: bool | None = None, - started: bool | None = None, - ended: bool | None = None, - ) -> list[Group]: - groups: list[Group] = [group for group_name, group in self.groups.items()] - - if enabled is not None: - groups = [group for group in groups if group.is_enabled == enabled] - if started is not None: - groups = [group for group in groups if group.is_started == started] - if ended is not None: - groups = [group for group in groups if group.is_ended == ended] - - return groups diff --git a/checker/exceptions.py b/checker/exceptions.py index 3ac7668..2775701 100644 --- a/checker/exceptions.py +++ b/checker/exceptions.py @@ -3,84 +3,47 @@ from dataclasses import dataclass -# Base exception for all package class CheckerException(Exception): + """Base exception for Checker package""" + __test__ = False # to disable pytest detecting it as Test class pass -# All deadlines and conf exceptions -class BadConfig(CheckerException): - pass - +class CheckerValidationError(CheckerException): + """Base validation error of configs, project structure, etc.""" -class BadTaskConfig(BadConfig): pass -class BadGroupConfig(BadConfig): - pass - +class BadConfig(CheckerValidationError): + """All configs exceptions: deadlines, checker and tasks configs""" -# Tests exceptions -class TesterException(CheckerException): pass -class TesterNotImplemented(TesterException): - pass - +class BadStructure(CheckerValidationError): + """Course structure exception: some files are missing, etc.""" -class TaskTesterException(TesterException): pass -class TaskTesterTestConfigException(TaskTesterException): - pass - +class ExportError(CheckerException): + """Export stage exception""" -# Tests exceptions (with output) -@dataclass -class RunFailedError(TesterException): - msg: str = '' - output: str | None = None - - def __repr__(self) -> str: - return f'{self.__class__.__name__}: {self.msg}' - - -class ExecutionFailedError(RunFailedError): pass -class TimeoutExpiredError(ExecutionFailedError): - pass - +class TestingError(CheckerException): + """All testers exceptions can occur during testing stage""" -class BuildFailedError(RunFailedError): pass -class RegexpCheckFailedError(RunFailedError): - pass - - -class StylecheckFailedError(RunFailedError): - pass - - -class TestsFailedError(RunFailedError): - pass - - -# Manytask exceptions -class ManytaskRequestFailedError(CheckerException): - pass - - -class PushFailedError(ManytaskRequestFailedError): - pass - +@dataclass +class PluginExecutionFailed(TestingError): + """Exception raised when plugin execution failed""" -class GetFailedError(ManytaskRequestFailedError): - pass + message: str = "" + output: str | None = None + percentage: float = 0.0 diff --git a/checker/executors/__init__.py b/checker/executors/__init__.py deleted file mode 100644 index d7a696c..0000000 --- a/checker/executors/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sandbox import Sandbox # noqa: F401 diff --git a/checker/executors/sandbox.py b/checker/executors/sandbox.py deleted file mode 100644 index 30802ee..0000000 --- a/checker/executors/sandbox.py +++ /dev/null @@ -1,185 +0,0 @@ -from __future__ import annotations - -import grp -import io -import os -import pwd -import subprocess -import sys -import time -from collections.abc import Callable -from contextlib import redirect_stderr, redirect_stdout -from typing import Any - - -try: - import unshare -except ImportError: - unshare = None - -from ..exceptions import 
ExecutionFailedError, TimeoutExpiredError -from ..utils.print import print_info - - -class Sandbox: - ENV_WHITELIST = ['PATH'] - - def __init__( - self, - *, - dry_run: bool = False, - ) -> None: - self.dry_run = dry_run - - def _execute_external( - self, - command: str | list[str], - *, - capture_output: bool = False, - verbose: bool = False, - **kwargs: Any, - ) -> str | None: - if verbose or self.dry_run: - if isinstance(command, str): - cmdline = command - else: - cmdline = ' '.join(command) - if 'preexec_fn' in kwargs: - cmdline = 'sandbox ' + cmdline - if 'cwd' in kwargs: - cmdline = f'cd {kwargs["cwd"]} && {cmdline}' - print_info('$', cmdline, color='grey') - print_info(' execution kwargs: ', kwargs, color='grey') - - if self.dry_run: - return None - - kwargs['check'] = kwargs.get('check', True) # set check if missing - try: - if capture_output: - start_time = time.monotonic() - completed_process = subprocess.run( - command, - close_fds=False, - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, # https://docs.python.org/3/library/subprocess.html -> capture_output - **kwargs - ) - elapsed_time_seconds = time.monotonic() - start_time - timeout_msg = '' - if verbose and 'timeout' in kwargs: - timeout_msg = f'\nElapsed time is {elapsed_time_seconds:.2f} ' \ - f'with a limit of {kwargs["timeout"]:.0f} seconds\n' - if completed_process.stdout: - return completed_process.stdout + timeout_msg - return None - else: - start_time = time.monotonic() - subprocess.run( - command, - close_fds=False, - **kwargs - ) - elapsed_time_seconds = time.monotonic() - start_time - if verbose and 'timeout' in kwargs: - print_info(f'Elapsed time is {elapsed_time_seconds:.2f} ' - f'with a limit of {kwargs["timeout"]:.0f} seconds') - return None - except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: - timeout_msg = '' - if isinstance(e, subprocess.TimeoutExpired): - timeout_msg = f'Your solution exceeded time limit: {kwargs["timeout"]} seconds' - if not capture_output: - print_info(timeout_msg, color='red') - - output = e.output or '' - output = output if isinstance(output, str) else output.decode('utf-8') - output = output + timeout_msg if capture_output else None - if isinstance(e, subprocess.TimeoutExpired): - raise TimeoutExpiredError(output=output) from e - else: - raise ExecutionFailedError(output=output) from e - - def _execute_callable( - self, - command: Callable[..., Any], - *, - capture_output: bool = False, - verbose: bool = False, - **kwargs: Any, - ) -> str | None: - if verbose or self.dry_run: - args = ', '.join(f'{k}={repr(v)}' for k, v in sorted(kwargs.items())) - print_info(f'> {command.__name__}({args})', color='grey') - - if self.dry_run: - return None - - if capture_output: - f = io.StringIO() - with redirect_stdout(f), redirect_stderr(sys.stdout): - command(**kwargs) - return f.getvalue() - else: - command(**kwargs) - return None - - def __call__( - self, - command: str | list[str] | Callable[..., Any], - *, - timeout: float | None = None, - sandbox: bool = False, - env_sandbox: bool = False, - capture_output: bool = False, - verbose: bool = False, - **kwargs: Any, - ) -> str | None: - if isinstance(command, list) or isinstance(command, str): - - def set_up_env_sandbox() -> None: # pragma: nocover - env = os.environ.copy() - os.environ.clear() - for variable in self.ENV_WHITELIST: - os.environ[variable] = env[variable] - - def set_up_sandbox() -> None: # pragma: nocover - set_up_env_sandbox() - - # if unshare: - # try: - # 
unshare.unshare(unshare.CLONE_NEWNET) - # subprocess.run(['ip', 'link', 'set', 'lo', 'up'], check=True) - # except Exception as e: - # print_info('WARNING: unable to create new net namespace, running with current one') - # if verbose: - # print_info(e.__class__.__name__, e) - # else: - # print_info('WARNING: unshare is not installed, running without ip namespace') - - try: - uid = pwd.getpwnam('nobody').pw_uid - gid = grp.getgrnam('nogroup').gr_gid - os.setgroups([]) - if sys.platform.startswith('linux'): - os.setresgid(gid, gid, gid) - os.setresuid(uid, uid, uid) - except Exception as e: - print_info('WARNING: UID and GID change failed, running with current user') - if verbose: - print_info(e.__class__.__name__, e) - - set_up_env_sandbox() - - if env_sandbox: - kwargs['preexec_fn'] = set_up_env_sandbox - if sandbox: - kwargs['preexec_fn'] = set_up_sandbox - if timeout is not None: - kwargs['timeout'] = timeout - return self._execute_external(command, capture_output=capture_output, verbose=verbose, **kwargs) - elif callable(command): - if env_sandbox or sandbox or timeout: - print_info('WARNING: env_sandbox, sandbox and timeout unavailable for callable execution, skip it') - return self._execute_callable(command, capture_output=capture_output, verbose=verbose, **kwargs) diff --git a/checker/exporter.py b/checker/exporter.py new file mode 100644 index 0000000..f44d83e --- /dev/null +++ b/checker/exporter.py @@ -0,0 +1,335 @@ +from __future__ import annotations + +import shutil +import tempfile +from pathlib import Path +from typing import Iterable + +from checker.configs import CheckerExportConfig, CheckerStructureConfig +from checker.course import Course + + +class Exporter: + def __init__( + self, + course: Course, + structure_config: CheckerStructureConfig, + export_config: CheckerExportConfig, + repository_root: Path, + reference_root: Path | None = None, + *, + cleanup: bool = True, + verbose: bool = False, + dry_run: bool = False, + ) -> None: + self.course = course + + self.structure_config = structure_config + self.export_config = export_config + + self.repository_root = repository_root + self.reference_root = reference_root or repository_root + + self._temporary_dir_manager = tempfile.TemporaryDirectory() + self.temporary_dir = Path(self._temporary_dir_manager.name) + + self.cleanup = cleanup + self.verbose = verbose + self.dry_run = dry_run + + def validate(self) -> None: + # TODO: implement validation + pass + + def export_public( + self, + target: Path, + push: bool = False, + commit_message: str = "chore(auto): Update public files [skip-ci]", + ) -> None: + target.mkdir(parents=True, exist_ok=True) + + tasks = self.course.get_tasks(enabled=True) + + global_ignore_patterns = self.structure_config.ignore_patterns or [] + global_public_patterns = self.structure_config.public_patterns or [] + global_private_patterns = self.structure_config.private_patterns or [] + + # TODO: implement template searcher + + print("REFERENCE") + print(f"Copy files from {self.reference_root} to {target}") + self._copy_files_accounting_sub_rules( + self.reference_root, + target, + search_pattern="*", + copy_patterns=[ + "*", + *global_public_patterns, + ], + ignore_patterns=[ + *global_private_patterns, + *global_ignore_patterns, + ], + sub_rules={ + self.reference_root + / task.relative_path: ( + [ + "*", + *( + task_ignore + if (task_ignore := task.config.structure.public_patterns) is not None + else global_public_patterns + ), + ], + [ + *( + task_ignore + if (task_ignore := 
task.config.structure.private_patterns) is not None + else global_private_patterns + ), + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) is not None + else global_ignore_patterns + ), + ], + ) + for task in tasks + if task.config is not None and task.config.structure is not None + }, + ) + + def export_for_testing( + self, + target: Path, + ) -> None: + target.mkdir(parents=True, exist_ok=True) + + tasks = self.course.get_tasks(enabled=True) + + global_ignore_patterns = self.structure_config.ignore_patterns or [] + global_public_patterns = self.structure_config.public_patterns or [] + global_private_patterns = self.structure_config.private_patterns or [] + + print("REPO") + print(f"Copy files from {self.repository_root} to {target}") + self._copy_files_accounting_sub_rules( + self.repository_root, + target, + search_pattern="*", + copy_patterns=["*"], + ignore_patterns=[ + *global_ignore_patterns, + *global_public_patterns, + *global_private_patterns, + ], + sub_rules={ + self.repository_root + / task.relative_path: ( + ["*"], + [ + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) is not None + else global_ignore_patterns + ), + *( + task_public + if (task_public := task.config.structure.public_patterns) is not None + else global_public_patterns + ), + *( + task_private + if (task_private := task.config.structure.private_patterns) is not None + else global_private_patterns + ), + ], + ) + for task in tasks + if task.config is not None and task.config.structure is not None + }, + ) + + print("REFERENCE") + print(f"Copy files from {self.reference_root} to {target}") + self._copy_files_accounting_sub_rules( + self.reference_root, + target, + search_pattern="*", + copy_patterns=[ + *global_public_patterns, + *global_private_patterns, + ], + ignore_patterns=[ + *global_ignore_patterns, + ], + sub_rules={ + self.reference_root + / task.relative_path: ( + [ + *( + task_public + if (task_public := task.config.structure.public_patterns) is not None + else global_public_patterns + ), + *( + task_private + if (task_private := task.config.structure.private_patterns) is not None + else global_private_patterns + ), + ], + [ + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) is not None + else global_ignore_patterns + ), + ], + ) + for task in tasks + if task.config is not None and task.config.structure is not None + }, + ) + + def export_for_contribution( + self, + target: Path, + ) -> None: + target.mkdir(parents=True, exist_ok=True) + + tasks = self.course.get_tasks(enabled=True) + + global_ignore_patterns = self.structure_config.ignore_patterns or [] + global_public_patterns = self.structure_config.public_patterns or [] + global_private_patterns = self.structure_config.private_patterns or [] # noqa: F841 + + print("REPO") + print(f"Copy files from {self.repository_root} to {target}") + self._copy_files_accounting_sub_rules( + self.repository_root, + target, + search_pattern="*", + copy_patterns=[ + *global_public_patterns, + ], + ignore_patterns=[ + *global_ignore_patterns, + ], + sub_rules={ + self.repository_root + / task.relative_path: ( + [ + *( + task_public + if (task_public := task.config.structure.public_patterns) is not None + else global_public_patterns + ), + ], + [ + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) is not None + else global_ignore_patterns + ), + ], + ) + for task in tasks + if task.config is not None and task.config.structure is not None + }, + ) + + 
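+ # Second pass: copy everything that is neither public nor ignored from the
+ # reference repository, i.e. the private files the first pass skipped.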
print("REFERENCE") + print(f"Copy files from {self.reference_root} to {target}") + self._copy_files_accounting_sub_rules( + self.reference_root, + target, + search_pattern="*", + copy_patterns=["*"], + ignore_patterns=[ + *global_public_patterns, + *global_ignore_patterns, + ], + sub_rules={ + self.reference_root + / task.relative_path: ( + ["*"], + [ + *( + task_ignore + if (task_ignore := task.config.structure.public_patterns) is not None + else global_public_patterns + ), + *( + task_ignore + if (task_ignore := task.config.structure.ignore_patterns) is not None + else global_ignore_patterns + ), + ], + ) + for task in tasks + if task.config is not None and task.config.structure is not None + }, + ) + + def _copy_files_accounting_sub_rules( + self, + root: Path, + destination: Path, + search_pattern: str, + copy_patterns: Iterable[str], + ignore_patterns: Iterable[str], + sub_rules: dict[Path, tuple[Iterable[str], Iterable[str]]], + ) -> None: + """ + Copy files as usual, if face some folder from `sub_rules`, apply patterns from `sub_rules[folder]`. + :param root: Copy files from this directory + :param destination: Copy files to this directory + :param search_pattern: Glob pattern to search files (then apply to ignore or copy) + :param copy_patterns: List of glob patterns to copy, None to have *. Apply recursively + :param ignore_patterns: List of glob patterns to ignore, None to have []. Apply recursively + :param sub_rules: dict of folder -> [patterns, ignore_patterns] to apply to this folder (and recursively) + """ + copy_patterns = copy_patterns or ["*"] + ignore_patterns = ignore_patterns or [] + + for path in root.glob(search_pattern): + # check if the file name matches the patterns + if any(path.match(ignore_pattern) for ignore_pattern in ignore_patterns): + print(f" - Skip {path} because of ignore patterns") + continue + + relative_filename = str(path.relative_to(root)) + if path.is_dir(): + if path in sub_rules: + print(f" - Check Dir {path} to {destination / relative_filename} with sub rules (rec)") + self._copy_files_accounting_sub_rules( + path, + destination / relative_filename, + search_pattern="*", + copy_patterns=sub_rules[path][0], + ignore_patterns=sub_rules[path][1], + sub_rules=sub_rules, + ) + else: + print(f" - Check Dir {path} to {destination / relative_filename} (rec)") + self._copy_files_accounting_sub_rules( + path, + destination / relative_filename, + search_pattern="*", + copy_patterns=copy_patterns, + ignore_patterns=ignore_patterns, + sub_rules=sub_rules, + ) + else: + if any(path.match(copy_pattern) for copy_pattern in copy_patterns): + print(f" - Copy File {path} to {destination / relative_filename}") + destination.mkdir(parents=True, exist_ok=True) + shutil.copyfile( + path, + destination / relative_filename, + ) + + def __del__(self) -> None: + if self.__dict__.get("cleanup") and self._temporary_dir_manager: + self._temporary_dir_manager.cleanup() diff --git a/checker/pipeline.py b/checker/pipeline.py new file mode 100644 index 0000000..bc67917 --- /dev/null +++ b/checker/pipeline.py @@ -0,0 +1,280 @@ +from __future__ import annotations + +import time +from dataclasses import dataclass +from typing import Any + +import jinja2.nativetypes + +from .configs import PipelineStageConfig +from .exceptions import BadConfig, PluginExecutionFailed +from .plugins import PluginABC +from .utils import print_info + + +@dataclass +class PipelineStageResult: + """Result of a single pipeline stage. 
+ :param name: name of the stage + :param failed: if True, stage failed + :param skipped: if True, stage was skipped + :param percentage: optional percentage of points earned + :param elapsed_time: optional elapsed time in seconds + :param output: output of the stage + """ + + name: str + failed: bool + skipped: bool + percentage: float | None = None + elapsed_time: float | None = None + output: str = "" + + def __str__(self) -> str: # pragma: no cover + return ( + f"PipelineStageResult: failed={int(self.failed)}, " + f"skipped={int(self.skipped)}, percentage={self.percentage or 1.0:.2f}, name='{self.name}'" + ) + + +@dataclass +class PipelineResult: + failed: bool + stage_results: list[PipelineStageResult] + + def __bool__(self) -> bool: + return not self.failed + + def __str__(self) -> str: # pragma: no cover + return f"PipelineResult: failed={int(self.failed)}\n" + "\n".join( + [f" {stage_result}" for stage_result in self.stage_results] + ) + + +class ParametersResolver: + def __init__(self) -> None: + self.template_env = jinja2.nativetypes.NativeEnvironment( + loader=jinja2.BaseLoader(), + variable_start_string="${{", + variable_end_string="}}", + ) + + def resolve(self, template: str | list[str] | Any, context: dict[str, Any]) -> Any: + """ + Resolve the template with context. + * If template is a string, resolve it with jinja2 + * If template is a list, resolve each element of the list + * If template is a dict, resolve each value of the dict + * Otherwise, return the template as is + :param template: template string to resolve, following jinja2 syntax. + :param context: context to use for resolving. + :return: resolved template of Any type. + :raises BadConfig: if template is invalid. + """ + if isinstance(template, str): + try: + template_obj = self.template_env.from_string(template.strip()) + return template_obj.render(**context) + except jinja2.TemplateError as e: + raise BadConfig(f"Invalid template {template}") from e + elif isinstance(template, list): + return [self.resolve(item, context) for item in template] + elif isinstance(template, dict): + return {key: self.resolve(value, context) for key, value in template.items()} + else: + return template + + +class PipelineRunner: + """Class encapsulating the pipeline execution logic.""" + + def __init__( + self, + pipeline: list[PipelineStageConfig], + plugins: dict[str, type[PluginABC]], + *, + verbose: bool = False, + ): + """ + Init pipeline runner with predefined stages/plugins to use, parameters (placeholders) resolved later. + :param pipeline: list of pipeline stages + :param plugins: dict of plugins available to use + :param verbose: if True, print more debug info for teachers + :raises BadConfig: if plugin does not exist or does not support isolation (no placeholders are checked) + """ + self.pipeline = pipeline + self.plugins = plugins + + self.verbose = verbose + + self.parameters_resolver = ParametersResolver() + + self.validate({}, validate_placeholders=False) + + def validate( + self, + context: dict[str, Any], + validate_placeholders: bool = True, + ) -> None: + """ + Validate the pipeline configuration. 
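+ Checks that every referenced plugin exists and, if requested, resolves the
+ stage arguments and run_if placeholders against the given context and asks
+ each plugin to validate the resolved arguments.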
+ :param context: context to use for resolving placeholders + :param validate_placeholders: if True, validate placeholders in pipeline stages + """ + + for pipeline_stage in self.pipeline: + # validate plugin exists + if pipeline_stage.run not in self.plugins: + raise BadConfig(f"Unknown plugin {pipeline_stage.run} in pipeline stage {pipeline_stage.name}") + plugin_class = self.plugins[pipeline_stage.run] + + # validate args of the plugin (first resolve placeholders) + if validate_placeholders: + resolved_args = self.parameters_resolver.resolve(pipeline_stage.args, context) + plugin_class.validate(resolved_args) + + # validate run_if condition + if validate_placeholders and pipeline_stage.run_if: + resolved_run_if = self.parameters_resolver.resolve(pipeline_stage.run_if, context) + if not isinstance(resolved_run_if, bool): + raise BadConfig( + f"Invalid run_if condition {pipeline_stage.run_if} in pipeline stage {pipeline_stage.name}" + ) + + # add output to context if set register parameter + if pipeline_stage.register_output: + context.setdefault("outputs", {})[pipeline_stage.register_output] = PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=True, + percentage=1.0, + ) + + def run( + self, + context: dict[str, Any], + *, + dry_run: bool = False, + ) -> PipelineResult: + pipeline_stage_results = [] + pipeline_passed = True + skip_the_rest = False + for pipeline_stage in self.pipeline: + # resolve placeholders in arguments + resolved_args = self.parameters_resolver.resolve(pipeline_stage.args, context=context) + resolved_run_if = ( + self.parameters_resolver.resolve(pipeline_stage.run_if, context=context) + if pipeline_stage.run_if is not None + else None + ) + + print_info(f'--> Running "{pipeline_stage.name}" stage:', color="orange") + if self.verbose: + print_info(f" run_if: {pipeline_stage.run_if}", color="grey") + print_info(f" resolved_run_if: {resolved_run_if}", color="grey") + print_info(f" fail: {pipeline_stage.fail}", color="grey") + print_info(f" run: {pipeline_stage.run}", color="grey") + print_info(f" args: {pipeline_stage.args}", color="grey") + print_info(f" resolved_args: {resolved_args}", color="grey") + + # skip the rest of stages if failed before + if skip_the_rest: + print_info("skipped! (got error above)", color="blue") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=True, + ) + ) + continue + + # resolve run condition if any; skip if run_if=False + if pipeline_stage.run_if is not None: + if not resolved_run_if: + print_info(f"skipped! 
(run_if={resolved_run_if})", color="blue") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=True, + ) + ) + continue + + # select the plugin to run + plugin_class = self.plugins[pipeline_stage.run] + plugin = plugin_class() + + # skip if dry run + if dry_run: + print_info("[output here]") + print_info("dry run!", color="blue") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=False, + percentage=1.0, + ) + ) + + # register output if required + if pipeline_stage.register_output: + context.setdefault("outputs", {})[pipeline_stage.register_output] = pipeline_stage_results[-1] + + continue + + # run the plugin with executor + _start_time = time.perf_counter() + try: + result = plugin.run(resolved_args, verbose=self.verbose) + _end_time = time.perf_counter() + print_info(result.output or "[empty output]") + print_info(f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey") + print_info("ok!", color="green") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=False, + skipped=False, + output=result.output, + percentage=result.percentage, + elapsed_time=_end_time - _start_time, + ) + ) + except PluginExecutionFailed as e: + _end_time = time.perf_counter() + print_info(e.output or "[empty output]") + print_info(f"> elapsed time: {_end_time-_start_time:.2f}s", color="grey") + pipeline_stage_results.append( + PipelineStageResult( + name=pipeline_stage.name, + failed=True, + skipped=False, + output=e.output or "", + percentage=e.percentage, + elapsed_time=_end_time - _start_time, + ) + ) + if pipeline_stage.fail == PipelineStageConfig.FailType.FAST: + print_info("error! (now as fail=fast)", color="red") + skip_the_rest = True + pipeline_passed = False + elif pipeline_stage.fail == PipelineStageConfig.FailType.AFTER_ALL: + print_info("error! (later as fail=after_all)", color="red") + pipeline_passed = False + elif pipeline_stage.fail == PipelineStageConfig.FailType.NEVER: + print_info("error! (ignored as fail=never)", color="red") + pass + else: + assert False, f"Unknown fail type {pipeline_stage.fail}" # pragma: no cover + + # register output if required + if pipeline_stage.register_output: + context.setdefault("outputs", {})[pipeline_stage.register_output] = pipeline_stage_results[-1] + + return PipelineResult( + failed=not pipeline_passed, + stage_results=pipeline_stage_results, + ) diff --git a/checker/plugins/__init__.py b/checker/plugins/__init__.py new file mode 100644 index 0000000..ee53417 --- /dev/null +++ b/checker/plugins/__init__.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import importlib +import importlib.util +import pkgutil +import sys +from collections.abc import Sequence +from pathlib import Path + +from .base import PluginABC, PluginOutput # noqa: F401 + + +__all__ = [ + "PluginABC", + "PluginOutput", + "load_plugins", +] + + +def get_all_subclasses(cls: type[PluginABC]) -> set[type[PluginABC]]: + return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in get_all_subclasses(c)]) + + +def load_plugins( + search_directories: Sequence[str | Path] | None = None, + *, + verbose: bool = False, +) -> dict[str, type[PluginABC]]: + """ + Load plugins from the plugins directory. 
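+ Built-in plugins shipped with the checker are loaded first; every PluginABC
+ subclass discovered in the given directories is registered under its name
+ attribute. Illustrative usage (the extra plugins directory is only an example):
+
+     plugins = load_plugins(["./course_plugins"], verbose=True)
+     run_script_plugin = plugins["run_script"]
+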
+ :param search_directories: list of directories to search for plugins + :param verbose: verbose output + """ + search_directories = search_directories or [] + search_directories = [ + Path(__file__).parent, + *search_directories, + ] # add local plugins first + + # force load plugins + print("Loading plugins...") + for module_info in pkgutil.iter_modules([str(path) for path in search_directories]): + if module_info.name == "__init__": + continue + if verbose: + print(f"- {module_info.name} from {module_info.module_finder.path}") # type: ignore[union-attr] + + spec = module_info.module_finder.find_spec(fullname=module_info.name) # type: ignore[call-arg] + if spec is None: + raise ImportError(f"Could not find {module_info.name}") + module = importlib.util.module_from_spec(spec) + module.__package__ = __package__ # TODO: check for external plugins + + sys.modules[module_info.name] = module + assert spec.loader is not None + spec.loader.exec_module(module) + + # collect plugins as abstract class subclasses + plugins = {} + for subclass in get_all_subclasses(PluginABC): # type: ignore[type-abstract] + plugins[subclass.name] = subclass + if verbose: + print(f"Loaded: {', '.join(plugins.keys())}") + return plugins diff --git a/checker/plugins/aggregate.py b/checker/plugins/aggregate.py new file mode 100644 index 0000000..c780ad3 --- /dev/null +++ b/checker/plugins/aggregate.py @@ -0,0 +1,61 @@ +from __future__ import annotations + +from typing import Literal, Union + +from ..exceptions import PluginExecutionFailed +from .base import PluginABC, PluginOutput + + +class AggregatePlugin(PluginABC): + """Given scores and optional weights and strategy, aggregate them, return the score.""" + + name = "aggregate" + + class Args(PluginABC.Args): + scores: list[float] + weights: Union[list[float], None] = None # as pydantic does not support | in older python versions + strategy: Literal["mean", "sum", "min", "max", "product"] = "mean" + # TODO: validate for weights: len weights should be equal to len scores + # TODO: validate not empty scores + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: # type: ignore[override] + weights = args.weights or ([1.0] * len(args.scores)) + + if len(args.scores) != len(weights): + raise PluginExecutionFailed( + f"Length of scores ({len(args.scores)}) and weights ({len(weights)}) does not match", + output=f"Length of scores ({len(args.scores)}) and weights ({len(weights)}) does not match", + ) + + if len(args.scores) == 0 or len(weights) == 0: + raise PluginExecutionFailed( + f"Length of scores ({len(args.scores)}) or weights ({len(weights)}) is zero", + output=f"Length of scores ({len(args.scores)}) or weights ({len(weights)}) is zero", + ) + + weighted_scores = [score * weight for score, weight in zip(args.scores, weights)] + + if args.strategy == "mean": + score = sum(weighted_scores) / len(weighted_scores) + elif args.strategy == "sum": + score = sum(weighted_scores) + elif args.strategy == "min": + score = min(weighted_scores) + elif args.strategy == "max": + score = max(weighted_scores) + elif args.strategy == "product": + from functools import reduce + + score = reduce(lambda x, y: x * y, weighted_scores) + else: # pragma: no cover + assert False, "Not reachable" + + return PluginOutput( + output=( + f"Get scores: {args.scores}\n" + f"Get weights: {args.weights}\n" + f"Aggregate weighted scores {weighted_scores} with strategy {args.strategy}\n" + f"Score: {score:.2f}" + ), + percentage=score, + ) diff --git a/checker/plugins/base.py 
b/checker/plugins/base.py new file mode 100644 index 0000000..5b313b0 --- /dev/null +++ b/checker/plugins/base.py @@ -0,0 +1,74 @@ +from __future__ import annotations + +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Any + +from pydantic import BaseModel, ValidationError + +from checker.exceptions import BadConfig, BadStructure + + +@dataclass +class PluginOutput: + """Plugin output dataclass. + :ivar output: str plugin output + :ivar percentage: float plugin percentage + """ + + output: str + percentage: float = 1.0 + + +class PluginABC(ABC): + """Abstract base class for plugins. + :ivar name: str plugin name, searchable by this name + """ + + name: str + + class Args(BaseModel): + """Base class for plugin arguments. + You have to subclass this class in your plugin. + """ + + pass + + def run(self, args: dict[str, Any], *, verbose: bool = False) -> PluginOutput: + """Run the plugin. + :param args: dict plugin arguments to pass to subclass Args + :param verbose: if True should print teachers debug info, if False student mode + :raises BadConfig: if plugin arguments are invalid + :raises ExecutionFailedError: if plugin failed + :return: PluginOutput with stdout/stderr and percentage + """ + args_obj = self.Args(**args) + + return self._run(args_obj, verbose=verbose) + + @classmethod + def validate(cls, args: dict[str, Any]) -> None: + """Validate the plugin arguments. + :param args: dict plugin arguments to pass to subclass Args + :raises BadConfig: if plugin arguments are invalid + :raises BadStructure: if _run method is not implemented + """ + try: + cls.Args(**args) + except ValidationError as e: + raise BadConfig(f"Plugin {cls.name} arguments validation error:\n{e}") + + if not hasattr(cls, "_run"): + raise BadStructure(f"Plugin {cls.name} does not implement _run method") + + @abstractmethod + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: + """Actual run the plugin. + You have to implement this method in your plugin. + In case of failure, raise ExecutionFailedError with an error message and output. 
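+ A minimal illustrative subclass (plugin name and argument are examples only):
+
+     class EchoPlugin(PluginABC):
+         name = "echo"
+
+         class Args(PluginABC.Args):
+             message: str
+
+         def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput:
+             return PluginOutput(output=args.message, percentage=1.0)
+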
+ :param args: plugin arguments, see Args subclass + :param verbose: if True should print teachers debug info, if False student mode + :return: PluginOutput with stdout/stderr and percentage + :raises ExecutionFailedError: if plugin failed + """ + pass diff --git a/checker/plugins/gitlab.py b/checker/plugins/gitlab.py new file mode 100644 index 0000000..db4dde4 --- /dev/null +++ b/checker/plugins/gitlab.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from pydantic import AnyUrl + +from .base import PluginABC, PluginOutput + + +class CheckGitlabMergeRequestPlugin(PluginABC): + """Plugin for checking gitlab merge request.""" + + name = "check_gitlab_merge_request" + + class Args(PluginABC.Args): + token: str + task_dir: str + repo_url: AnyUrl + requre_approval: bool = False + search_for_score: bool = False + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: # type: ignore[override] + # TODO: implement + assert NotImplementedError() + + return PluginOutput( + output="", + ) + + +class CollectScoreGitlabMergeRequestPlugin(PluginABC): + """Plugin for collecting score from gitlab merge request.""" + + name = "collect_score_gitlab_merge_request" + + class Args(PluginABC.Args): + token: str + task_dir: str + repo_url: AnyUrl + requre_approval: bool = False + search_for_score: bool = False + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: # type: ignore[override] + # TODO: implement + assert NotImplementedError() + + # TODO: implement arithmetical operations in score comment + # e.g 0.3 + 2*0.7 - 0.2 + # TODO: auto detect percentage or 0-1 score + # e.g. valid: + # 0.3 + 2*0.7 - 0.2 = 0.8 + # 30% + 70% - 20% = 80% (return as 0.8) + # 30 + 70 - 20 = 80 (return as 0.8) + + return PluginOutput( + output="", + percentage=1.0, + ) diff --git a/checker/plugins/manytask.py b/checker/plugins/manytask.py new file mode 100644 index 0000000..53f3bc6 --- /dev/null +++ b/checker/plugins/manytask.py @@ -0,0 +1,99 @@ +from __future__ import annotations + +import json +from datetime import datetime +from pathlib import Path +from typing import IO, Any, Optional + +import requests +import requests.adapters +import urllib3 +from pydantic import AnyUrl + +from checker.exceptions import PluginExecutionFailed + +from .base import PluginABC, PluginOutput + + +class ManytaskPlugin(PluginABC): + """Given score report it to the manytask. + Datetime format in args should be: '%Y-%m-%dT%H:%M:%S.%f%z'""" + + DEFAULT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f%z" + + name = "report_score_manytask" + + class Args(PluginABC.Args): + origin: Optional[str] = None # as pydantic does not support | in older python versions + patterns: list[str] = ["*"] + username: str + task_name: str + score: float # TODO: validate score is in [0, 1] (bonus score is higher than 1) + report_url: AnyUrl + report_token: str + check_deadline: bool + send_time: datetime = datetime.now().astimezone() + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: # type: ignore[override] + output: list[str] = [] + + if not args.send_time.tzinfo: + output.append("Warning: No timezone provided for send_time, possible time miscalculations") + send_time_formatted = args.send_time.strftime(self.DEFAULT_TIME_FORMAT) + + # Do not expose token in logs. 
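+ # Build the form payload for the manytask report endpoint; the token is sent
+ # only as a form field here and is never added to the captured output.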
+ data = { + "token": args.report_token, + "task": args.task_name, + "username": args.username, + "score": args.score, + "check_deadline": args.check_deadline, + "submit_time": send_time_formatted, + } + + files = None + if args.origin is not None: + files = self._collect_files_to_send(args.origin, args.patterns) + + if verbose: + output.append(str(files)) + + response = self._post_with_retries(args.report_url, data, files) + + try: + result = response.json() + output.append( + f"Report for task '{args.task_name}' for user '{args.username}', " + f"requested score: {args.score}, result score: {result['score']}" + ) + return PluginOutput(output="\n".join(output)) + except (json.JSONDecodeError, KeyError): + raise PluginExecutionFailed("Unable to decode response") + + @staticmethod + def _post_with_retries( + report_url: AnyUrl, + data: dict[str, Any], + files: dict[str, tuple[str, IO[bytes]]] | None, + ) -> requests.Response: + retry_strategy = urllib3.Retry(total=3, backoff_factor=1, status_forcelist=[408, 500, 502, 503, 504]) + adapter = requests.adapters.HTTPAdapter(max_retries=retry_strategy) + session = requests.Session() + session.mount("https://", adapter) + session.mount("http://", adapter) + response = session.post(url=f"{report_url}api/report", data=data, files=files) + + if response.status_code >= 400: + raise PluginExecutionFailed(f"{response.status_code}: {response.text}") + + return response + + @staticmethod + def _collect_files_to_send(origin: str, patterns: list[str]) -> dict[str, tuple[str, IO[bytes]]]: + source_dir = Path(origin) + return { + path.name: (str(path.relative_to(source_dir)), open(path, "rb")) + for pattern in patterns + for path in source_dir.glob(pattern) + if path.is_file() + } diff --git a/checker/plugins/regex.py b/checker/plugins/regex.py new file mode 100644 index 0000000..0d48ba9 --- /dev/null +++ b/checker/plugins/regex.py @@ -0,0 +1,44 @@ +from __future__ import annotations + +from ..exceptions import PluginExecutionFailed +from .base import PluginABC, PluginOutput + + +class CheckRegexpsPlugin(PluginABC): + """Plugin for checking forbidden regexps in a files.""" + + name = "check_regexps" + + class Args(PluginABC.Args): + origin: str + patterns: list[str] + regexps: list[str] + # TODO: Add validation for patterns and regexps + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: # type: ignore[override] + # TODO: add verbose output with files list + import re + from pathlib import Path + + # TODO: move to Args validation + if not Path(args.origin).exists(): + raise PluginExecutionFailed( + f"Origin '{args.origin}' does not exist", + output=f"Origin {args.origin} does not exist", + ) + + for pattern in args.patterns: + for file in Path(args.origin).glob(pattern): + if file.is_file(): + with file.open() as f: + file_content = f.read() + + for regexp in args.regexps: + if re.search(regexp, file_content, re.MULTILINE): + raise PluginExecutionFailed( + f"File '{file.name}' matches regexp '{regexp}'", + output=f"File '{file}' matches regexp '{regexp}'", + ) + return PluginOutput( + output="No forbidden regexps found", + ) diff --git a/checker/plugins/scripts.py b/checker/plugins/scripts.py new file mode 100644 index 0000000..1ec4814 --- /dev/null +++ b/checker/plugins/scripts.py @@ -0,0 +1,62 @@ +from __future__ import annotations + +from typing import Union + +from pydantic import Field + +from ..exceptions import PluginExecutionFailed +from .base import PluginABC, PluginOutput + + +class RunScriptPlugin(PluginABC): + """Plugin for 
running scripts.""" + + name = "run_script" + + class Args(PluginABC.Args): + origin: str + script: Union[str, list[str]] # as pydantic does not support | in older python versions + timeout: Union[float, None] = None # as pydantic does not support | in older python versions + isolate: bool = False + env_whitelist: list[str] = Field(default_factory=lambda: ["PATH"]) + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: # type: ignore[override] + import subprocess + + def set_up_env_sandbox() -> None: # pragma: nocover + import os + + env = os.environ.copy() + os.environ.clear() + for variable in args.env_whitelist: + os.environ[variable] = env[variable] + + try: + result = subprocess.run( + args.script, + shell=True, + cwd=args.origin, + timeout=args.timeout, # kill process after timeout, raise TimeoutExpired + check=True, # raise CalledProcessError if return code is non-zero + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, # merge stderr & stdout to single output + preexec_fn=set_up_env_sandbox, + ) + except (subprocess.CalledProcessError, subprocess.TimeoutExpired) as e: + output = e.output or "" + output = output if isinstance(output, str) else output.decode("utf-8") + + if isinstance(e, subprocess.TimeoutExpired): + raise PluginExecutionFailed( + f"Script timed out after {e.timeout}s ({args.timeout}s limit)", + output=output, + ) from e + else: + raise PluginExecutionFailed( + f"Script failed with exit code {e.returncode}", + output=output, + ) from e + + return PluginOutput( + output=result.stdout.decode("utf-8"), + ) diff --git a/checker/tester.py b/checker/tester.py new file mode 100644 index 0000000..5c03245 --- /dev/null +++ b/checker/tester.py @@ -0,0 +1,216 @@ +from __future__ import annotations + +import os +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +from .configs.checker import CheckerConfig, CheckerParametersConfig +from .course import Course, FileSystemTask +from .exceptions import TestingError +from .pipeline import PipelineResult, PipelineRunner, PipelineStageResult +from .plugins import load_plugins +from .utils import print_header_info, print_info, print_separator + + +@dataclass +class GlobalPipelineVariables: + """Base variables passed in pipeline stages.""" + + ref_dir: str + repo_dir: str + temp_dir: str + task_names: list[str] + task_sub_paths: list[str] + + +@dataclass +class TaskPipelineVariables: + """Variables passed in pipeline stages for each task.""" + + task_name: str + task_sub_path: str + + +class Tester: + """ + Class to encapsulate all testing logic. + 1. Accept directory with files ready for testing + 2. Execute global pipeline once + 3. For each task: + 3.1. Execute task pipeline + 3.2. Execute report pipeline (optional) + """ + + __test__ = False # do not collect this class for pytest + + def __init__( + self, + course: Course, + checker_config: CheckerConfig, + *, + verbose: bool = False, + dry_run: bool = False, + ): + """ + Init tester in specific public and private dirs. 
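+ Here the course repository root serves as the public directory and the
+ reference root as the private one.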
+
+        :param course: Course object for interaction with the physical course
+        :param checker_config: Full checker config with testing, structure and params sections
+        :param verbose: Whether to print private outputs and debug info
+        :param dry_run: Do not execute anything, just print what would be executed
+        :raises exception.ValidationError: if config is invalid or repo structure is wrong
+        """
+        self.course = course
+
+        self.testing_config = checker_config.testing
+        self.structure_config = checker_config.structure
+        self.default_params = checker_config.default_parameters
+
+        self.plugins = load_plugins(self.testing_config.search_plugins, verbose=verbose)
+
+        self.global_pipeline = PipelineRunner(self.testing_config.global_pipeline, self.plugins, verbose=verbose)
+        self.task_pipeline = PipelineRunner(self.testing_config.tasks_pipeline, self.plugins, verbose=verbose)
+        self.report_pipeline = PipelineRunner(self.testing_config.report_pipeline, self.plugins, verbose=verbose)
+
+        self.repository_dir = self.course.repository_root
+        self.reference_dir = self.course.reference_root
+
+        self.verbose = verbose
+        self.dry_run = dry_run
+
+    def _get_global_pipeline_parameters(
+        self,
+        origin: Path,
+        tasks: list[FileSystemTask],
+    ) -> GlobalPipelineVariables:
+        return GlobalPipelineVariables(
+            ref_dir=self.reference_dir.absolute().as_posix(),
+            repo_dir=self.repository_dir.absolute().as_posix(),
+            temp_dir=origin.absolute().as_posix(),
+            task_names=[task.name for task in tasks],
+            task_sub_paths=[task.relative_path for task in tasks],
+        )
+
+    def _get_task_pipeline_parameters(
+        self,
+        task: FileSystemTask,
+    ) -> TaskPipelineVariables:
+        return TaskPipelineVariables(
+            task_name=task.name,
+            task_sub_path=task.relative_path,
+        )
+
+    def _get_context(
+        self,
+        global_variables: GlobalPipelineVariables,
+        task_variables: TaskPipelineVariables | None,
+        outputs: dict[str, PipelineStageResult],
+        default_parameters: CheckerParametersConfig,
+        task_parameters: CheckerParametersConfig | None,
+    ) -> dict[str, Any]:
+        return {
+            "global": global_variables,
+            "task": task_variables,
+            "outputs": outputs,
+            "parameters": default_parameters.__dict__ | (task_parameters.__dict__ if task_parameters else {}),
+            "env": os.environ.__dict__,
+        }
+
+    def validate(self) -> None:
+        # get all tasks
+        tasks = self.course.get_tasks(enabled=True)
+
+        # create outputs to pass to pipeline
+        outputs: dict[str, PipelineStageResult] = {}
+
+        # validate global pipeline (only default params and variables available)
+        print("- global pipeline...")
+        global_variables = self._get_global_pipeline_parameters(Path(), tasks)
+        context = self._get_context(global_variables, None, outputs, self.default_params, None)
+        self.global_pipeline.validate(context, validate_placeholders=True)
+        print(" ok")
+
+        for task in tasks:
+            # validate task with global + task-specific params
+            print(f"- task {task.name} pipeline...")
+
+            # create task context
+            task_variables = self._get_task_pipeline_parameters(task)
+            context = self._get_context(
+                global_variables,
+                task_variables,
+                outputs,
+                self.default_params,
+                task.config.parameters if task.config else None,
+            )
+
+            # check task parameters are valid for the task and report pipelines
+            # TODO: read pipeline from task config if any
+            self.task_pipeline.validate(context, validate_placeholders=True)
+            self.report_pipeline.validate(context, validate_placeholders=True)
+
+            print(" ok")
+
+    def run(
+        self,
+        origin: Path,
+        tasks: list[FileSystemTask] | None = None,
+        report: bool = True,
+    ) -> None:
+        # get all tasks
+        tasks = tasks or 
self.course.get_tasks(enabled=True) + + # create outputs to pass to pipeline + outputs: dict[str, PipelineStageResult] = {} + + # run global pipeline + print_header_info("Run global pipeline:", color="pink") + global_variables = self._get_global_pipeline_parameters(origin, tasks) + context = self._get_context(global_variables, None, outputs, self.default_params, None) + global_pipeline_result: PipelineResult = self.global_pipeline.run(context, dry_run=self.dry_run) + print_separator("-") + print_info(str(global_pipeline_result), color="pink") + + if not global_pipeline_result: + raise TestingError("Global pipeline failed") + + failed_tasks = [] + for task in tasks: + # run task pipeline + print_header_info(f"Run <{task.name}> task pipeline:", color="pink") + + # create task context + task_variables = self._get_task_pipeline_parameters(task) + context = self._get_context( + global_variables, + task_variables, + outputs, + self.default_params, + task.config.parameters if task.config else None, + ) + + # TODO: read pipeline from task config if any + task_pipeline_result: PipelineResult = self.task_pipeline.run(context, dry_run=self.dry_run) + print_separator("-") + + print_info(str(task_pipeline_result), color="pink") + print_separator("-") + + # Report score if task pipeline succeeded + if task_pipeline_result: + print_info(f"Reporting <{task.name}> task tests:", color="pink") + if report: + task_report_result: PipelineResult = self.report_pipeline.run(context, dry_run=self.dry_run) + if task_report_result: + print_info("->Reporting succeeded") + else: + print_info("->Reporting failed") + else: + print_info("->Reporting disabled") + print_separator("-") + else: + failed_tasks.append(task.name) + + if failed_tasks: + raise TestingError(f"Task pipelines failed: {failed_tasks}") diff --git a/checker/testers/__init__.py b/checker/testers/__init__.py deleted file mode 100644 index 4a392de..0000000 --- a/checker/testers/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .python import PythonTester # noqa: F401 -from .tester import Tester # noqa: F401 diff --git a/checker/testers/cpp.py b/checker/testers/cpp.py deleted file mode 100644 index 3a165f2..0000000 --- a/checker/testers/cpp.py +++ /dev/null @@ -1,187 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, field -from pathlib import Path - -from ..exceptions import ( - BuildFailedError, - ExecutionFailedError, - StylecheckFailedError, - TestsFailedError, - TimeoutExpiredError, -) -from ..utils.files import check_files_contains_regexp, copy_files -from ..utils.print import print_info -from .tester import Tester - - -class CppTester(Tester): - - @dataclass - class TaskTestConfig(Tester.TaskTestConfig): - allow_change: list[str] = field(default_factory=list) - forbidden_regexp: list[str] = field(default_factory=list) - copy_to_build: list[str] = field(default_factory=list) - - linter: bool = True - build_type: str = 'Asan' - is_crash_me: bool = False - - tests: list[str] = field(default_factory=list) - input_file: dict[str, str] = field(default_factory=dict) - args: dict[str, list[str]] = field(default_factory=dict) - timeout: float = 60. 
- capture_output: bool = True - - def __post_init__( - self, - ) -> None: - assert self.tests - assert self.allow_change - - def _gen_build( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - source_dir: Path, - public_tests_dir: Path | None, - private_tests_dir: Path | None, - tests_root_dir: Path, - sandbox: bool = True, - verbose: bool = False, - normalize_output: bool = False, - ) -> None: - check_files_contains_regexp( - source_dir, - regexps=test_config.forbidden_regexp, - patterns=test_config.allow_change, - raise_on_found=True, - ) - task_dir = public_tests_dir - self._executor( - copy_files, - source=source_dir, - target=task_dir, - patterns=test_config.allow_change, - verbose=verbose, - ) - self._executor( - copy_files, - source=task_dir, - target=build_dir, - patterns=test_config.copy_to_build, - verbose=verbose, - ) - - try: - print_info('Running cmake...', color='orange') - self._executor( - ['cmake', '-G', 'Ninja', str(tests_root_dir), - '-DGRADER=YES', '-DENABLE_PRIVATE_TESTS=YES', - f'-DCMAKE_BUILD_TYPE={test_config.build_type}'], - cwd=build_dir, - verbose=verbose, - ) - except ExecutionFailedError: - print_info('ERROR', color='red') - raise BuildFailedError('cmake execution failed') - - for test_binary in test_config.tests: - try: - print_info(f'Building {test_binary}...', color='orange') - self._executor( - ['ninja', '-v', test_binary], - cwd=build_dir, - verbose=verbose, - ) - except ExecutionFailedError: - print_info('ERROR', color='red') - raise BuildFailedError(f'Can\'t build {test_binary}') - - if not test_config.linter: - return - - try: - print_info('Running clang format...', color='orange') - format_path = tests_root_dir / 'run-clang-format.py' - self._executor( - [str(format_path), '-r', str(task_dir)], - cwd=build_dir, - verbose=verbose, - ) - print_info('[No issues]') - print_info('OK', color='green') - except ExecutionFailedError: - print_info('ERROR', color='red') - raise StylecheckFailedError('Style error (clang format)') - - try: - print_info('Running clang tidy...', color='orange') - files = [str(file) for file in task_dir.rglob('*.cpp')] # type: ignore - self._executor( - ['clang-tidy', '-p', '.', *files], - cwd=build_dir, - verbose=verbose, - ) - print_info('[No issues]') - print_info('OK', color='green') - except ExecutionFailedError: - print_info('ERROR', color='red') - raise StylecheckFailedError('Style error (clang tidy)') - - def _clean_build( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - verbose: bool = False, - ) -> None: - self._executor( - ['rm', '-rf', str(build_dir)], - check=False, - verbose=verbose, - ) - - def _run_tests( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - sandbox: bool = False, - verbose: bool = False, - normalize_output: bool = False, - ) -> float: - for test_binary in test_config.tests: - stdin = None - try: - print_info(f'Running {test_binary}...', color='orange') - args = test_config.args.get(test_binary, []) - if test_binary in test_config.input_file: - stdin = open(build_dir / test_config.input_file[test_binary], 'r') - self._executor( - [str(build_dir / test_binary), *args], - sandbox=True, - cwd=build_dir, - verbose=verbose, - capture_output=test_config.capture_output, - timeout=test_config.timeout, - stdin=stdin - ) - if test_config.is_crash_me: - print_info('ERROR', color='red') - raise TestsFailedError('Program has not crashed') - print_info('OK', color='green') - except TimeoutExpiredError: - 
print_info('ERROR', color='red') - message = f'Your solution exceeded time limit: {test_config.timeout} seconds' - raise TestsFailedError(message) - except ExecutionFailedError: - if not test_config.is_crash_me: - print_info('ERROR', color='red') - raise TestsFailedError("Test failed (wrong answer or sanitizer error)") - finally: - if stdin is not None: - stdin.close() - if test_config.is_crash_me: - print_info('Program has crashed', color='green') - else: - print_info('All tests passed', color='green') - return 1. diff --git a/checker/testers/make.py b/checker/testers/make.py deleted file mode 100644 index 41861ce..0000000 --- a/checker/testers/make.py +++ /dev/null @@ -1,101 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass, field -from pathlib import Path - -from ..exceptions import ExecutionFailedError, TestsFailedError -from ..utils.files import copy_files -from ..utils.print import print_info -from .tester import Tester - - -class MakeTester(Tester): - @dataclass - class TaskTestConfig(Tester.TaskTestConfig): - test_timeout: int = 60 # seconds - - public_test_files: list[str] = field(default_factory=list) - private_test_files: list[str] = field(default_factory=list) - - def _gen_build( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - source_dir: Path, - public_tests_dir: Path | None, - private_tests_dir: Path | None, - tests_root_dir: Path, - sandbox: bool = True, - verbose: bool = False, - normalize_output: bool = False, - ) -> None: - self._executor( - copy_files, - source=source_dir, - target=build_dir, - ignore_patterns=[], - verbose=verbose, - ) - - if public_tests_dir is not None: - self._executor( - copy_files, - source=public_tests_dir, - target=build_dir, - patterns=test_config.public_test_files, - verbose=verbose, - ) - - if private_tests_dir is not None: - self._executor( - copy_files, - source=private_tests_dir, - target=build_dir, - patterns=test_config.private_test_files, - verbose=verbose, - ) - - def _clean_build( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - verbose: bool = False, - ) -> None: - self._executor( - ['rm', '-rf', str(build_dir)], - check=False, - verbose=verbose, - ) - - def _run_tests( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - sandbox: bool = False, - verbose: bool = False, - normalize_output: bool = False, - ) -> float: - tests_cmd = ['make', '-B'] - - tests_err = None - try: - print_info('Running tests...', color='orange') - output = self._executor( - tests_cmd, - sandbox=sandbox, - cwd=str(build_dir), - timeout=test_config.test_timeout, - verbose=verbose, - capture_output=True, - ) - print_info(output, end='') - print_info('OK', color='green') - except ExecutionFailedError as e: - tests_err = e - print_info(e.output, end='') - print_info('ERROR', color='red') - - if tests_err is not None: - raise TestsFailedError('Tests error', output=tests_err.output) from tests_err - - return 1. 
diff --git a/checker/testers/python.py b/checker/testers/python.py deleted file mode 100644 index 0b533e9..0000000 --- a/checker/testers/python.py +++ /dev/null @@ -1,389 +0,0 @@ -from __future__ import annotations - -import re -from dataclasses import InitVar, dataclass, field -from pathlib import Path - -from ..exceptions import BuildFailedError, ExecutionFailedError, RunFailedError, StylecheckFailedError, TestsFailedError -from ..utils.files import check_folder_contains_regexp, copy_files -from ..utils.print import print_info -from .tester import Tester - - -IGNORE_FILE_PATTERNS = ['*.md', 'build', '__pycache__', '.pytest_cache', '.mypy_cache', '.tester.json'] -COVER_IGNORE_FILES = ['setup.py'] - - -class PythonTester(Tester): - - SOURCE_FILES_EXTENSIONS: list[str] = ['.py'] - - @dataclass - class TaskTestConfig(Tester.TaskTestConfig): - partially_scored: bool = False - verbose_tests_output: bool = False - module_test: bool = False - build_wheel: bool = False - run_mypy: bool = True - - forbidden_regexp: list[re.Pattern[str]] = field(default_factory=list) - - public_test_files: list[str] = field(default_factory=list) - private_test_files: list[str] = field(default_factory=list) - - test_timeout: int = 60 # seconds - coverage: bool | int = False - - # Created on init - test_files: list[str] = field(init=False, default_factory=list) - # Init only - explicit_public_tests: InitVar[list[str]] = None - explicit_private_tests: InitVar[list[str]] = None - - def __post_init__( - self, - explicit_public_tests: list[str] | None, - explicit_private_tests: list[str] | None, - ) -> None: - self.forbidden_regexp += [r'exit\(0\)'] # type: ignore - for regexp in self.forbidden_regexp: - re.compile(regexp) - - self.public_test_files = ['test_public.py'] + (explicit_public_tests or []) - self.private_test_files = ['test_private.py'] + (explicit_private_tests or []) - self.test_files = self.public_test_files + self.private_test_files - - def _gen_build( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - source_dir: Path, - public_tests_dir: Path | None, - private_tests_dir: Path | None, - tests_root_dir: Path, - sandbox: bool = True, - verbose: bool = False, - normalize_output: bool = False, - ) -> None: - # Copy submitted code (ignore tests) - self._executor( - copy_files, - source=source_dir, - target=build_dir, - ignore_patterns=test_config.test_files + IGNORE_FILE_PATTERNS, - verbose=verbose, - ) - - # Check submitted code using forbidden regexp - self._executor( - check_folder_contains_regexp, - folder=build_dir, - extensions=self.SOURCE_FILES_EXTENSIONS, - regexps=test_config.forbidden_regexp, - raise_on_found=True, - verbose=verbose, - ) - - # Install submitted code as module if needed - if test_config.module_test: - # assert setup files exists - setup_files = {i.name for i in build_dir.glob(r'setup.*')} | \ - {i.name for i in build_dir.glob(r'pyproject.*')} - if 'setup.py' not in setup_files and 'setup.cfg' not in setup_files and 'pyproject.toml' not in setup_files: - raise BuildFailedError( - 'This task is in editable `module` mode. You have to provide pyproject.toml/setup.cfg/setup.py file' - ) - if 'setup.py' not in setup_files: - raise BuildFailedError('This task is in editable `module` mode. 
You have to provide setup.py file') - - if test_config.build_wheel: - task_build_dir_dist = build_dir / 'dist' - output = self._executor( - ['pip3', 'wheel', '--wheel-dir', str(task_build_dir_dist), str(build_dir)], - verbose=verbose, - env_sandbox=sandbox, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output or '', end='') - - output = self._executor( - ['pip3', 'install', '--prefer-binary', '--force-reinstall', '--find-links', - str(task_build_dir_dist), str(build_dir)], - verbose=verbose, - env_sandbox=sandbox, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output or '', end='') - - if (build_dir / 'build').exists(): - output = self._executor( - ['rm', '-rf', str(build_dir / 'build')], - verbose=verbose, - env_sandbox=sandbox, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output or '', end='') - else: - output = self._executor( - ['pip3', 'install', '-e', str(build_dir), '--force'], - verbose=verbose, - env_sandbox=sandbox, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output or '', end='') - - # Copy public test files - if public_tests_dir is not None: - self._executor( - copy_files, - source=public_tests_dir, - target=build_dir, - patterns=test_config.public_test_files, - verbose=verbose, - ) - - # Copy private test files - if private_tests_dir is not None: - self._executor( - copy_files, - source=private_tests_dir, - target=build_dir, - patterns=test_config.private_test_files, - verbose=verbose, - ) - - def _clean_build( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - verbose: bool = False, - ) -> None: - self._executor( - ['rm', '-rf', str(build_dir)], - check=False, - verbose=verbose, - ) - - @staticmethod - def _parse_summary_score( - output: str, - ) -> float: - score = 0.0 - for line in output.splitlines(): - if 'Summary score percentage is: ' in line: - score += float(line.strip().split('Summary score percentage is: ')[1]) - break - return score - - def _run_tests( # type: ignore[override] - self, - test_config: TaskTestConfig, - build_dir: Path, - sandbox: bool = False, - verbose: bool = False, - normalize_output: bool = False, - ) -> float: - # TODO: replace with preserved setup.cfg - codestyle_cmd = [ - 'flake8', - '--exclude', ','.join(test_config.private_test_files), - '--max-line-length', '120', - str(build_dir) - ] - # codestyle_cmd = [ - # 'ruff', - # '--exclude', ','.join(test_config.private_test_files), - # '--line-length', '120', - # '--no-fix', - # str(build_dir) - # ] - mypy_cmd = [ - 'mypy', - '--no-incremental', - '--cache-dir', '/dev/null', - '--ignore-missing-imports', - '--disallow-untyped-defs', - '--disallow-incomplete-defs', - '--disallow-subclassing-any', - '--disallow-any-generics', - '--no-implicit-optional', - '--warn-redundant-casts', - '--warn-unused-ignores', - '--warn-unreachable', - '--allow-untyped-decorators', - str(build_dir) - ] - tests_collection_cmd = [ - 'pytest', - '-p', 'no:cacheprovider', - '-p', 'no:requests_mock', - '-p', 'no:cov', - '-p', 'no:mock', - '-p', 'no:socket', - '-qq', - '--collect-only', - str(build_dir) - ] - tests_cmd = [ - 'pytest', - '-p', 'no:cacheprovider', - '-p', 'no:requests_mock', - '-p', 'no:timeout', - '-p', 'no:socket', - # '--timeout=60', - str(build_dir) - ] - if not verbose: - tests_cmd += ['--no-header'] - if not verbose and not test_config.verbose_tests_output: - tests_cmd += ['--tb=no'] - # if test_config.partially_scored: - # tests_cmd += ['-s'] - if 
test_config.coverage: - tests_cmd += ['--cov-report', 'term-missing'] - - # exclude test files - dirs_to_cover = { - i.relative_to(build_dir) for i in build_dir.iterdir() - if i.suffix in ['', '.py'] and i.name not in test_config.test_files and i.name not in COVER_IGNORE_FILES - } - if dirs_to_cover: - for _dir in dirs_to_cover: - tests_cmd += ['--cov', str(_dir).replace(r'.', r'\.')] - else: - tests_cmd += ['--cov', str(build_dir)] - - # tests_cmd += ['--cov-config', '.coveragerc'] - if test_config.coverage is not True: - tests_cmd += ['--cov-fail-under', str(test_config.coverage)] - else: - tests_cmd += ['-p', 'no:cov'] - - # Check style - styles_err = None - try: - print_info('Running codestyle checks...', color='orange') - output = self._executor( - codestyle_cmd, - sandbox=sandbox, - cwd=str(build_dir), - verbose=verbose, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output or '', end='') - print_info('[No issues]') - print_info('OK', color='green') - except ExecutionFailedError as e: - # Always reraise for style checks - styles_err = e - - if normalize_output: - print_info(e.output, end='') - e.output = '' - output = '' - print_info('ERROR', color='red') - - # Check typing - typing_err = None - try: - if test_config.run_mypy: - print_info('Running mypy checks...', color='orange') - output = self._executor( - mypy_cmd, - sandbox=sandbox, - cwd=str(build_dir.parent), # mypy didn't work from cwd - verbose=verbose, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output, end='') - print_info('OK', color='green') - else: - print_info('Type check is skipped for this task!', color='orange') - except ExecutionFailedError as e: - # Always reraise for typing checks - typing_err = e - - if normalize_output: - print_info(e.output, end='') - e.output = '' - output = '' - print_info('ERROR', color='red') - - # Check import and tests collecting - import_err = None - try: - print_info('Collecting tests...', color='orange') - output = self._executor( - tests_collection_cmd, - sandbox=sandbox, - cwd=str(build_dir), - verbose=verbose, - capture_output=normalize_output, - ) - if normalize_output: - print_info(output, end='') - output = '' - print_info('OK', color='green') - except ExecutionFailedError as e: - # Always reraise for import checks - import_err = e - - if normalize_output: - print_info(e.output, end='') - e.output = '' - output = '' - print_info('ERROR', color='red') - - # Check tests - tests_err = None - tests_output = '' - try: - print_info('Running tests...', color='orange') - output = self._executor( - tests_cmd, - sandbox=sandbox, - cwd=str(build_dir), - timeout=test_config.test_timeout, - verbose=verbose, - capture_output=test_config.partially_scored or normalize_output, - ) - if normalize_output or test_config.partially_scored: - print_info(output, end='') - print_info('OK', color='green') - except ExecutionFailedError as e: - tests_err = e - tests_output = e.output or '' - - if normalize_output or test_config.partially_scored: - print_info(e.output, end='') - e.output = '' - output = '' - - if test_config.partially_scored: - print_info('ERROR? 
(Some tests failed, but this is partially_scored task)', color='orange') - else: - print_info('ERROR', color='red') - - if import_err is not None: - raise RunFailedError('Import error', output=import_err.output) from import_err - - if tests_err is not None and not test_config.partially_scored: # Reraise only if all tests should pass - raise TestsFailedError('Public or private tests error', output=tests_err.output) from tests_err - - if styles_err is not None: - raise StylecheckFailedError('Style error', output=styles_err.output) from styles_err - - if typing_err is not None: - raise StylecheckFailedError('Typing error', output=typing_err.output) from typing_err - - if test_config.partially_scored: - tests_output = tests_output or '' # for mypy only - return self._parse_summary_score(tests_output) - else: - return 1. diff --git a/checker/testers/tester.py b/checker/testers/tester.py deleted file mode 100644 index 73f4a7b..0000000 --- a/checker/testers/tester.py +++ /dev/null @@ -1,251 +0,0 @@ -from __future__ import annotations - -import json -import tempfile -from abc import abstractmethod -from dataclasses import dataclass -from pathlib import Path -from typing import Any, Dict - -from ..course import CourseConfig -from ..exceptions import RunFailedError, TaskTesterTestConfigException, TesterNotImplemented -from ..executors.sandbox import Sandbox -from ..utils.print import print_info - - -def _create_external_tester(tester_path: Path, dry_run: bool, cleanup: bool) -> Tester: - globls: Dict[str, Any] = {} - with open(tester_path) as f: - tester_code = compile(f.read(), tester_path.absolute(), 'exec') - exec(tester_code, globls) - tester_cls = globls.get('CustomTester') - if tester_cls is None: - raise TesterNotImplemented(f'class CustomTester not found in file {tester_path}') - if not issubclass(tester_cls, Tester): - raise TesterNotImplemented(f'class CustomTester in {tester_path} is not inherited from testers.Tester') - return tester_cls(dry_run=dry_run, cleanup=cleanup) - - -class Tester: - """Entrypoint to testing system - Tester holds the course object and manage testing of single tasks, - as well as manage Tasks-Folders mapping - (Task names for tests should be provided) - """ - - SOURCE_FILES_EXTENSIONS: list[str] = [] - __test__ = False # to disable pytest detecting it as Test class - - @dataclass - class TaskTestConfig: - """Task Tests Config - Configure how task will copy files, check, execute and so on - """ - - @classmethod - def from_json( - cls, - test_config: Path, - ) -> 'Tester.TaskTestConfig': - """ - Create TaskTestConfig from json config - @param test_config: Path to the config - @return: TaskTestConfig object - """ - try: - task_config_path = test_config - if task_config_path.exists(): - with open(task_config_path) as f: - raw_config = json.load(f) - if not isinstance(raw_config, dict): - raise TaskTesterTestConfigException(f'Got <{type(raw_config).__name__}> instead of ') - else: - raw_config = {} - except (json.JSONDecodeError, TypeError) as e: - raise TaskTesterTestConfigException(f'Got invalid Test Config <{test_config}>') from e - - # Go throughout config fields and pop it from json if any - config_kwargs: dict[str, Any] = {} - for config_field in cls.__annotations__: - if (field_value := raw_config.pop(config_field, None)) is not None: - config_kwargs[config_field] = field_value - - if raw_config: - bad_keys = ','.join(raw_config.keys()) - raise TaskTesterTestConfigException(f'Test Config {test_config} has unknown key(s) <{bad_keys}>') - - return 
cls(**config_kwargs) - - def __init__( - self, - cleanup: bool = True, - dry_run: bool = False, - ): - self.cleanup = cleanup - self.dry_run = dry_run - self._executor = Sandbox(dry_run=dry_run) - - @classmethod - def create( - cls, - root: Path, - course_config: CourseConfig, - cleanup: bool = True, - dry_run: bool = False, - ) -> 'Tester': - """ - Main creation entrypoint to Tester - Create one of existed Testers (python, cpp, etc.) - @param system: Type of the testing system - @param cleanup: Perform cleanup after testing - @param dry_run: Setup dry run mode (really executes nothing) - @return: Configured Tester object (python, cpp, etc.) - """ - system = course_config.system - if system == 'python': - from . import python - return python.PythonTester(cleanup=cleanup, dry_run=dry_run) - elif system == 'make': - from . import make - return make.MakeTester(cleanup=cleanup, dry_run=dry_run) - elif system == 'cpp': - from . import cpp - return cpp.CppTester(cleanup=cleanup, dry_run=dry_run) - elif system == 'external': - path = course_config.tester_path - if path is None: - raise TesterNotImplemented('tester_path is not specified in course config') - return _create_external_tester(root / path, cleanup=cleanup, dry_run=dry_run) - else: - raise TesterNotImplemented(f'Tester for <{system}> are not supported right now') - - @abstractmethod - def _gen_build( - self, - test_config: TaskTestConfig, - build_dir: Path, - source_dir: Path, - public_tests_dir: Path | None, - private_tests_dir: Path | None, - tests_root_dir: Path, - sandbox: bool = True, - verbose: bool = False, - normalize_output: bool = False, - ) -> None: # pragma: nocover - """ - Copy all files for testing and build the program (if necessary) - @param test_config: Test config to pass into each stage - @param build_dir: Directory to copy files into and build there - @param source_dir: Solution source code directory - @param public_tests_dir: Directory to copy public tests from - @param private_tests_dir: Directory to copy private tests from - @param sandbox: Wrap all student's code to sandbox; @see Executor.sandbox - @param verbose: Verbose output (can exhibit private tests information) - @param normalize_output: Normalize all stages output to stderr - @return: None - """ - pass - - @abstractmethod - def _clean_build( - self, - test_config: TaskTestConfig, - build_dir: Path, - verbose: bool = False, - ) -> None: # pragma: nocover - """ - Clean build directory after testing - @param test_config: Test config to pass into each stage - @param build_dir: Build directory to clean up - @param verbose: Verbose output (can exhibit private tests information) - @return: None - """ - pass - - @abstractmethod - def _run_tests( - self, - test_config: TaskTestConfig, - build_dir: Path, - sandbox: bool = False, - verbose: bool = False, - normalize_output: bool = False, - ) -> float: # pragma: nocover - """ - Run tests for already built task and return solution score - @param test_config: Test config to pass into each stage - @param build_dir: Directory with task ready for testing - @param sandbox: Wrap all student's code to sandbox; @see Executor.sandbox - @param verbose: Verbose output (can exhibit private tests information) - @param normalize_output: Normalize all stages output to stderr - @return: Percentage of the final score - """ - pass - - def test_task( - self, - source_dir: Path, - config_dir: Path, - public_tests_dir: Path | None, - private_tests_dir: Path | None, - tests_root_dir: Path, - verbose: bool = False, - normalize_output: bool = 
False, - ) -> float: - """ Inner function to test the task (Folders already specified) - Perform the following actions: - * _gen_build: copy source and test files, check forbidden regxp, build and install if necessary - * _run_tests: run testing and linting - * _clean_build: cleanup if necessary - @param source_dir: Solution dir (student's solution or authors' solution) - @param config_dir: Directory with task config - @param public_tests_dir: Directory to copy public tests from - @param private_tests_dir: Directory to copy private tests from - @param verbose: Verbose output (can exhibit private tests information) - @param normalize_output: Normalize all stages output to stderr - @raise RunFailedError: on any build/test error - @return: Percentage of the final score - """ - # Read test config - test_config = self.TaskTestConfig.from_json(config_dir / '.tester.json') - - # Create build dir as tmp dir - build_dir = Path(tempfile.mkdtemp()) - build_dir.chmod(0o777) # Set mode for build directory (for code generation and so on) - - try: - self._gen_build( - test_config, - build_dir, - source_dir, - public_tests_dir, - private_tests_dir, - tests_root_dir, - sandbox=True, - verbose=verbose, - normalize_output=normalize_output, - ) - - # Do not disable sandbox (otherwise it will not clear environ, - # so environ-related issues may be missed, such as empty locale) - score_percentage = self._run_tests( - test_config, - build_dir, - sandbox=True, - verbose=verbose, - normalize_output=normalize_output - ) - except RunFailedError as e: - print_info('\nOoops... Something went wrong: ' + e.msg + (e.output or ''), color='red') - raise e - finally: - if self.cleanup: - self._clean_build( - test_config, - build_dir, - verbose=verbose - ) - else: - print_info(f'Keeping build directory: {build_dir}') - - return score_percentage diff --git a/checker/utils.py b/checker/utils.py new file mode 100644 index 0000000..4d7312a --- /dev/null +++ b/checker/utils.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +import sys +from typing import Any + + +def print_info( + *args: Any, + file: Any = None, + color: str | None = None, + **kwargs: Any, +) -> None: + colors = { + "white": "\033[97m", + "cyan": "\033[96m", + "pink": "\033[95m", + "blue": "\033[94m", + "orange": "\033[93m", + "green": "\033[92m", + "red": "\033[91m", + "grey": "\033[90m", + "endc": "\033[0m", + } + + file = file or sys.stderr + + data = " ".join(map(str, args)) + if color in colors: + print(colors[color] + data + colors["endc"], file=file, **kwargs) + else: + print(data, file=file, **kwargs) + file.flush() + + +def print_separator( + symbol: str, + file: Any = None, + color: str = "pink", + string_length: int = 80, +) -> None: + print_info(symbol * string_length, color=color) + + +def print_header_info( + header_string: str, + file: Any = None, + color: str = "pink", + string_length: int = 80, + **kwargs: Any, +) -> None: + info_extended_string = " " + header_string + " " + print_info("", file=file) + print_separator(symbol="+", string_length=string_length, color=color, file=file) + print_info(f"{info_extended_string:+^{string_length}}", color=color, file=file) + print_separator(symbol="+", string_length=string_length, color=color, file=file) diff --git a/checker/utils/__init__.py b/checker/utils/__init__.py deleted file mode 100644 index 6f53df1..0000000 --- a/checker/utils/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -from .files import * # noqa: F403 -from .git import * # noqa: F403 -from .glab import * # noqa: F403 -from .manytask 
import * # noqa: F403 -from .print import * # noqa: F403 -from .template import * # noqa: F403 diff --git a/checker/utils/files.py b/checker/utils/files.py deleted file mode 100644 index 4f4ee96..0000000 --- a/checker/utils/files.py +++ /dev/null @@ -1,246 +0,0 @@ -from __future__ import annotations - -import re -import shutil -import subprocess -from pathlib import Path - -from .print import print_info - - -def filename_match_patterns( - file: Path, - patterns: list[str], -) -> bool: - """ - Check if filename match any of patterns given - @param file: Path object to check - @param patterns: list of regexp pattern (?) - @return: True if any of patterns applicable - """ - for pattern in patterns: - if file.match(pattern): - return True - return False - - -def copy_files( - source: Path | None, - target: Path, - patterns: list[str] | None = None, - ignore_patterns: list[str] | None = None, -) -> None: - """ - Copy files between 2 directories - @param source: Directory or file to copy from (none to skip) - @param target: Directory or file to copy to - @param patterns: Patterns to copy - @param ignore_patterns: Patterns to ignore during copy - @return: None - """ - ignore_patterns = ignore_patterns or [] - target.mkdir(parents=True, exist_ok=True) - - if source is None: - print_info(f'Warning: Skip copying files from <{source}> to <{target}>') - return - - ignore_files: list[Path] = sum([ - list(source.glob(ignore_pattern)) - for ignore_pattern in ignore_patterns - ], []) - for pattern in (patterns or ['*']): - for file in source.glob(pattern): - if file in ignore_files: - continue - relative_filename = str(file.relative_to(source)) - source_path = source / relative_filename - target_path = target / relative_filename - if file.is_dir(): - copy_files( - source_path, target_path, - patterns=['*'], - ignore_patterns=ignore_patterns, - ) - continue - - shutil.copyfile(str(source_path), str(target_path)) - - -def check_file_contains_regexp( - filename: Path, - regexps: list[str], -) -> bool: - """ - Check regexps appears in the file - @param filename: Filename to check - @param regexps: list of forbidden regexp - @raise AssertionError: if file does not exist - @return: True if any of regexp found - """ - assert filename.exists() and filename.is_file() - - with open(filename, 'r', encoding='utf-8') as f: - file_content = f.read() - - for regexp in regexps: - if re.search(regexp, file_content, re.MULTILINE): - return True - - return False - - -def check_folder_contains_regexp( - folder: Path, - extensions: list[str], - regexps: list[str], - raise_on_found: bool = False, -) -> bool: - """ - Check regexps appears in any file in the folder - @param folder: Folder to check - @param extensions: Source files extensions - @param regexps: list of forbidden regexp - @param raise_on_found: Raise Error on Found Exception - @raise AssertionError: if folder or any file does not exist - @return: True if any of regexp found - """ - assert folder.exists() and folder.is_dir() - - for source_path in folder.glob('**/*.*'): - if any(str(source_path).endswith(ext) for ext in extensions): - if check_file_contains_regexp(source_path, regexps): - if raise_on_found: - raise AssertionError(f'File <{source_path}> contains one of <{regexps}>') - return True - return False - - -def check_files_contains_regexp( - folder: Path, - regexps: list[str], - patterns: list[str] | None = None, - raise_on_found: bool = False, -) -> bool: - """ - Check regexps appears in files that matching patterns - @param folder: Folder to check - @param 
regexps: list of forbidden regexp - @param patterns: list of patterns for file matching - @param raise_on_found: Raise Error on Found Exception - @raise AssertionError: if folder or any file does not exist - @return: True if any of regexp found - """ - assert folder.exists() and folder.is_dir() - if patterns is None: - patterns = ['**/*.*'] - for pattern in patterns: - for source_path in folder.glob(pattern): - if check_file_contains_regexp(source_path, regexps): - if raise_on_found: - raise AssertionError(f'File <{source_path}> contains one of <{regexps}>') - return True - return False - - -def get_folders_diff( - old_folder: Path, - new_folder: Path, - skip_binary: bool = True, - exclude_patterns: list[str] | None = None, -) -> list[str]: - """ - Return diff files between 2 folders - @param old_folder: Old folder - @param new_folder: New folder with some changes files, based on old folder - @param skip_binary: Skip binary files - @param exclude_patterns: Exclude files that match pattern - @return: list of changed files as strings - """ - # diff docs https://www.gnu.org/software/diffutils/manual/html_node/diff-Options.html - # -N/--new-file - If one file is missing, treat it as present but empty - # -w/--ignore-all-space - ignore all spaces and tabs e.g. if ( a == b) is equal to if(a==b) - # -r/--recursive - recursively compare any subdirectories found - # -q/--brief - report only when files differ - # --strip-trailing-cr - strip trailing carriage return on input - # -x/--exclude [pattern] - exclude files that match pattern - - # TODO: check format options to work, or --left-column options - exclude_args = [f'--exclude={pattern}' for pattern in exclude_patterns] if exclude_patterns else [] - # exclude_args = [] - result = subprocess.run( - [ - 'diff', - '--brief', - '--recursive', - '--new-file', - '--strip-trailing-cr', - *exclude_args, - old_folder.absolute(), - new_folder.absolute() - ], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - ) - output = result.stdout.decode() - - # TODO: make it work with whitespace in filenames - - changed = [] - for line in output.split('\n'): - if line.startswith('Only in'): - assert False, 'Will be treated as change due to --new-file option' - elif line.startswith('Files'): - _, file1, _, file2, _ = line.split() - changed.append(Path(file2).relative_to(new_folder)) - elif line.startswith('Binary files'): - if skip_binary: - continue - _, _, file1, _, file2, _ = line.split() - changed.append(Path(file2).relative_to(new_folder)) - - return [str(i) for i in changed] - - -def get_folders_diff_except_public( - public_folder: Path, - old_folder: Path, - new_folder: Path, - skip_binary: bool = True, - exclude_patterns: list[str] | None = None, -) -> list[str]: - """ - Return diff files between 2 folders except files that are equal to public folder files - @param public_folder: Public folder - @param old_folder: Old folder - @param new_folder: New folder with some changes files, based on old folder - @param skip_binary: Skip binary files - @param exclude_patterns: Exclude files that match pattern - @return: list of changed files as strings - """ - - changed_files_old_new = get_folders_diff( - old_folder, - new_folder, - skip_binary=skip_binary, - exclude_patterns=exclude_patterns, - ) - changed_files_public_new = get_folders_diff( - public_folder, - new_folder, - skip_binary=skip_binary, - exclude_patterns=exclude_patterns, - ) - - # TODO: Remove logging - print_info('\nchanged_files_old_new:', color='grey') - for i in changed_files_old_new: - 
print_info(f' {i}', color='grey') - print_info('\nchanged_files_public_new:', color='grey') - for i in changed_files_public_new: - print_info(f' {i}', color='grey') - - return [ - str(i) - for i in set(changed_files_old_new) & set(changed_files_public_new) - ] diff --git a/checker/utils/git.py b/checker/utils/git.py deleted file mode 100644 index abca271..0000000 --- a/checker/utils/git.py +++ /dev/null @@ -1,142 +0,0 @@ -from __future__ import annotations - -import shutil -import subprocess -from pathlib import Path - -from .print import print_info - - -DEFAULT_BRANCH = 'main' - - -def get_tracked_files_list( - repo_dir: Path, -) -> list[str]: - r = subprocess.run( - 'git ls-files', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - check=True, - cwd=repo_dir, - ) - return r.stdout.splitlines() - - -def setup_repo_in_dir( - repo_dir: Path, - remote_repo_url: str, - service_username: str, - service_token: str, - git_user_email: str = 'no-reply@gitlab.manytask.org', - git_user_name: str = 'Manytask Bot', - branch: str = DEFAULT_BRANCH, -) -> None: - remote_repo_url = remote_repo_url.replace('https://', '').replace('http://', '').replace('.git', '') - remote_repo_url = f'https://{service_username}:{service_token}@{remote_repo_url}.git' - print_info(f'remote_repo_url {remote_repo_url}', color='grey') - - shutil.rmtree(repo_dir) - repo_dir.mkdir() - - print_info('* git clone...') - r = subprocess.run( - f'git clone --depth=1 --branch={branch} {remote_repo_url} ./', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - if r.returncode != 0: - print_info('empty repo, init it first') - r = subprocess.run( - f'git init --initial-branch {branch} && git remote add origin {remote_repo_url} && git fetch', - encoding='utf-8', - stdout=subprocess.PIPE, - shell=True, - check=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - - print_info('* git remote...') - r = subprocess.run( - 'git remote -v', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - - print_info('* git config set...') - r = subprocess.run( - f'git config --local user.email "{git_user_email}" && ' - f'git config --local user.name "{git_user_name}"', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - - -def commit_push_all_repo( - repo_dir: Path, - branch: str = DEFAULT_BRANCH, - message: str = 'Export public files', -) -> None: - print_info('* git status...') - r = subprocess.run( - 'git status', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - check=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - - print_info('* adding files...') - r = subprocess.run( - 'git add .', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - - print_info('* committing...') - r = subprocess.run( - f'git commit --all -m "{message}"', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=repo_dir, - ) - print_info(r.stdout, color='grey') - - print_info('* git pushing...') - r = subprocess.run( - f'git push -o ci.skip origin {branch}', - encoding='utf-8', - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - shell=True, - cwd=repo_dir, - ) - 
print_info(r.stdout, color='grey') - if r.returncode != 0: - raise Exception('Can not export files in public repo') # TODO: make right type - - print_info('Done.') diff --git a/checker/utils/glab.py b/checker/utils/glab.py deleted file mode 100644 index 8f32cb9..0000000 --- a/checker/utils/glab.py +++ /dev/null @@ -1,191 +0,0 @@ -from __future__ import annotations - -import typing -from pathlib import Path - -import gitlab -import gitlab.v4.objects - -from .print import print_info - - -class GitlabConnection: - def __init__( - self, - gitlab_host_url: str, - api_token: str | None = None, - private_token: str | None = None, - job_token: str | None = None, - ): - if api_token: - self.gitlab = gitlab.Gitlab(gitlab_host_url, private_token=api_token) - elif private_token: - self.gitlab = gitlab.Gitlab(gitlab_host_url, private_token=private_token) - elif job_token: - self.gitlab = gitlab.Gitlab(gitlab_host_url, job_token=job_token) - else: - print_info( - 'None of `api_token`/`private_token` or `job_token` provided; use without credentials', - color='orange', - ) - self.gitlab = gitlab.Gitlab(gitlab_host_url) - - def get_project_from_group( - self, - group_name: str, - project_name: str, - ) -> gitlab.v4.objects.GroupProject: - print_info('Get private Project', color='grey') - - _groups = self.gitlab.groups.list(get_all=True, search=group_name) - - print_info(_groups, color='grey') - assert len(_groups) >= 1, f'Could not find group_name={group_name}' - group = _groups[0] # type: ignore - - project = {i.name: i for i in group.projects.list(all=True)}[project_name] - - project = typing.cast(gitlab.v4.objects.GroupProject, project) - print_info(f'Got private project: <{project.name}>', color='grey') - - return project - - def get_public_project( - self, - private_group_name: str, - public_repo_name: str, - ) -> gitlab.v4.objects.GroupProject: - return self.get_project_from_group(private_group_name, public_repo_name) - - def get_projects_in_group( - self, - group_name: str, - ) -> list[gitlab.v4.objects.GroupProject]: - print_info(f'Get projects in group_name={group_name}', color='grey') - - _groups = self.gitlab.groups.list(get_all=True, search=group_name) - - assert len(_groups) >= 1, f'Could not find group_name={group_name}' - group = _groups[0] # type: ignore - - print_info(f'Got group: <{group.name}>', color='grey') - - projects = group.projects.list(all=True) - - projects = typing.cast(list[gitlab.v4.objects.GroupProject], projects) - print_info(f'Got {len(projects)} projects', color='grey') - - return projects - - def get_group_members( - self, - group_name: str, - ) -> list[gitlab.v4.objects.GroupMember]: - print_info(f'Get members in group_name={group_name}', color='grey') - - _groups = self.gitlab.groups.list(get_all=True, search=group_name) - - print_info(_groups, color='grey') - assert len(_groups) >= 1, f'Could not find group_name={group_name}' - group = _groups[0] # type: ignore - - print_info(f'Got group: <{group.name}>', color='grey') - - members = group.members_all.list(all=True, get_all=True) - - members = typing.cast(list[gitlab.v4.objects.GroupMember], members) - print_info(f'Got {len(members)} members', color='grey') - - return members - - def get_project_members( - self, - project_name: str, - ) -> list[gitlab.v4.objects.ProjectMember]: - print_info(f'Get members in project_name={project_name}', color='grey') - - _projects = self.gitlab.projects.list(get_all=True, search=project_name) - - assert len(_projects) >= 1, f'Could not find project_name={project_name}' - project = 
_projects[0] # type: ignore - - print_info(f'Got project: <{project.name}>', color='grey') - - members = project.members_all.list(all=True, get_all=True) - - members = typing.cast(list[gitlab.v4.objects.ProjectMember], members) - print_info(f'Got {len(members)} members', color='grey') - - return members - - def get_user_by_username( - self, - username: str, - ) -> gitlab.v4.objects.User: - print_info(f'Get user with username={username}', color='grey') - - _users = self.gitlab.users.list(get_all=True, search=username) - assert len(_users) > 0, f'Could not find username={username}' - - if len(_users) > 1: - print_info( - f'Got multiple users: <{[(user.username, user.name) for user in _users]}>', - color='grey' - ) - _username_to_user = {user.username: user for user in _users} - assert username in _username_to_user, f'Could not find username={username}' - user = _username_to_user[username] - else: - user = _users[0] # type: ignore - - user = typing.cast(gitlab.v4.objects.User, user) - print_info(f'Got user: <{user.name}>', color='grey') - - return user - - def get_all_tutors( - self, - private_group_name: str, - ) -> list[gitlab.v4.objects.GroupMember]: - return self.get_group_members(private_group_name) - - def get_students_projects( - self, - students_group_name: str, - ) -> list[gitlab.v4.objects.GroupProject]: - return self.get_projects_in_group(students_group_name) - - def get_student_file_link( - self, - gitlab_url: str, - default_branch: str, - students_group_name: str, - username: str, - path: str | Path, - ) -> str: - return f'{gitlab_url}/{students_group_name}/{username}/-/blob/{default_branch}/{path}' - - def get_current_user( - self, - ) -> gitlab.v4.objects.CurrentUser: - self.gitlab.auth() - current_user = self.gitlab.user - - current_user = typing.cast(gitlab.v4.objects.CurrentUser, current_user) - - return current_user - - def get_group( - self, - name: str, - ) -> gitlab.v4.objects.Group: - print_info(f'Get group name={name}', color='grey') - - _groups = self.gitlab.groups.list(get_all=True, search=name) - - assert len(_groups) >= 1, f'Could not find group name={name}' - group = _groups[0] # type: ignore - - group = typing.cast(gitlab.v4.objects.Group, group) - - return group diff --git a/checker/utils/manytask.py b/checker/utils/manytask.py deleted file mode 100644 index 5a002dc..0000000 --- a/checker/utils/manytask.py +++ /dev/null @@ -1,97 +0,0 @@ -"""Helpers to interact with manytask (push scores tasks)""" -from __future__ import annotations - -import io -import json -import time -from datetime import datetime - -import requests - -from ..exceptions import GetFailedError, PushFailedError - - -def push_report( - report_base_url: str, - tester_token: str, - task_name: str, - user_id: int, - score: float, - files: dict[str, tuple[str, io.BufferedReader]] | None = None, - send_time: datetime | None = None, - check_deadline: bool = True, - use_demand_multiplier: bool = True, -) -> tuple[str, int, str | None, str | None, float | None]: - # Do not expose token in logs. 
- data = { - 'token': tester_token, - 'task': task_name, - 'user_id': user_id, - 'score': score, - 'check_deadline': check_deadline, - 'use_demand_multiplier': use_demand_multiplier, - } - if send_time: - data['commit_time'] = send_time - - response = None - for _ in range(3): - response = requests.post(url=f'{report_base_url}/api/report', data=data, files=files) - - if response.status_code < 500: - break - time.sleep(1.0) - assert response is not None - - if response.status_code >= 500: - response.raise_for_status() - assert False, 'Not Reachable' # pragma: no cover - elif response.status_code >= 400: - # Client error often means early submission - raise PushFailedError(f'{response.status_code}: {response.text}') - else: - try: - result = response.json() - result_commit_time = result.get('commit_time', None) - result_submit_time = result.get('submit_time', None) - demand_multiplier = float(result.get('demand_multiplier', 1)) - return result['username'], result['score'], result_commit_time, result_submit_time, demand_multiplier - except (json.JSONDecodeError, KeyError) as e: - raise PushFailedError('Unable to decode response') from e - - -def get_score( - report_base_url: str, - tester_token: str, - task_name: str, - user_id: int, -) -> float | None: - # Do not expose token in logs. - data = { - 'token': tester_token, - 'task': task_name, - 'user_id': user_id, - } - # response = None - for _ in range(3): - response = requests.get(url=f'{report_base_url}/api/score', data=data) - - if response.status_code < 500: - break - time.sleep(1.0) - - if response.status_code >= 500: - response.raise_for_status() - assert False, 'Not Reachable' # pragma: no cover - # Client error often means early submission - elif response.status_code >= 400: - raise GetFailedError(f'{response.status_code}: {response.text}') - else: - try: - result = response.json() - return result['score'] - except (json.JSONDecodeError, KeyError): - # raise GetFailedError() - pass - - return None diff --git a/checker/utils/print.py b/checker/utils/print.py deleted file mode 100644 index 9296a16..0000000 --- a/checker/utils/print.py +++ /dev/null @@ -1,51 +0,0 @@ -from __future__ import annotations - -import sys -from typing import Any - - -def print_info( - *args: Any, - file: Any = None, - color: str | None = None, - **kwargs: Any, -) -> None: - colors = { - 'white': '\033[97m', - 'cyan': '\033[96m', - 'pink': '\033[95m', - 'blue': '\033[94m', - 'orange': '\033[93m', - 'green': '\033[92m', - 'red': '\033[91m', - 'grey': '\033[90m', - 'endc': '\033[0m', - } - - file = file or sys.stderr - - data = ' '.join(map(str, args)) - if color in colors: - print(colors[color] + data + colors['endc'], file=file, **kwargs) - else: - print(data, file=file, **kwargs) - file.flush() - - -def print_header_info( - info_string: str, -) -> None: - info_extended_string = ' ' + info_string + ' ' - info_extended_string = '+' * (50 - len(info_extended_string) // 2) + \ - info_extended_string + \ - '+' * (50 - len(info_extended_string) // 2) - print_info('') - print_info('+' * len(info_extended_string), color='pink') - print_info(info_extended_string, color='pink') - print_info('+' * len(info_extended_string), color='pink') - - -def print_task_info( - task_name: str, -) -> None: - print_header_info(f'Testing tasks: <{task_name}>') diff --git a/checker/utils/template.py b/checker/utils/template.py deleted file mode 100644 index acc1929..0000000 --- a/checker/utils/template.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import annotations - -import re 
-from pathlib import Path - - -PRECOMPILED_REGEXPS = {} - - -def cut_marked_code_from_string( - content: str, - clear_mark: str | tuple[str, str], - clear_mark_replace: str, - raise_not_found: bool = False, -) -> str: - """ - Cut the marked source code from the gold solution inplace - - @param content: code to inplace replace code - @param clear_mark: string or (string, string) to cut from gold solution - @param clear_mark_replace: string to replace cut code - @param raise_not_found: raise assertion error if no paired found - @return: clear content or original content if no found - @raises: assertion error if no paired CLEAR_MARK found - - Cut all content between pairs or CLEAR_MARK strings. - given clear_mark="YOUR CODE" or clear_mark=("YOUR CODE", "YOUR CODE") - ```python - a = 1 - # YOUR CODE - b = 1 - # YOUR CODE - ``` - You'll get the following result - ```python - a = 1 - # YOUR CODE - ``` - """ - global PRECOMPILED_REGEXPS - clear_mark_start, clear_mark_end = clear_mark if isinstance(clear_mark, tuple) else (clear_mark, clear_mark) - - cut_regexp = rf'{clear_mark_start}(.|\s)*?{clear_mark_end}' - if cut_regexp not in PRECOMPILED_REGEXPS: - PRECOMPILED_REGEXPS[cut_regexp] = re.compile(cut_regexp) - - template_content, replace_count = re.subn(PRECOMPILED_REGEXPS[cut_regexp], clear_mark_replace, content) - - if raise_not_found and replace_count == 0: - raise AssertionError(f'Can not find "{clear_mark_start}" to "{clear_mark_end}" pair') - - return template_content - - -def create_template_from_gold_solution( - source_filename: Path | str, - target_filename: Path | str | None = None, - clear_mark: str | tuple[str, str] = 'TODO: CODE HERE', - clear_mark_replace: str = 'TODO: CODE HERE', - raise_not_found: bool = False, -) -> bool: - """ - Cut the marked source code from the gold solution inplace - - @param source_filename: filename to replace code of - @param target_filename: filename to write replace code, if None - inplace - @param clear_mark: string or (string, string) to cut from gold solution - @param clear_mark_replace: string to replace cut code - @param raise_not_found: raise assertion error if no paired found - @return: true if successfully replaces and false if not found - @raises: assertion error if no paired clear_mark found - @see: cut_marked_code_from_string - """ - source_filename = Path(source_filename) - assert source_filename.exists() and source_filename.is_file(), f'{source_filename.as_posix()} does not exist' - - target_filename = target_filename or source_filename - - with open(source_filename, 'r') as f: - content = f.read() - - template_content = cut_marked_code_from_string( - content, - clear_mark, - clear_mark_replace, - raise_not_found=raise_not_found, - ) - - with open(target_filename, 'w') as f: - f.write(template_content) - - return content != template_content diff --git a/conftest.py b/conftest.py deleted file mode 100644 index 10903bd..0000000 --- a/conftest.py +++ /dev/null @@ -1,6 +0,0 @@ -import pytest - - -def pytest_addoption(parser: pytest.Parser) -> None: - parser.addoption('--python', action='store_true', dest="python", default=False, help="enable python tests") - parser.addoption('--cpp', action='store_true', dest="cpp", default=False, help="enable cpp tests") diff --git a/docs/0_concepts.md b/docs/0_concepts.md new file mode 100644 index 0000000..3a16bd5 --- /dev/null +++ b/docs/0_concepts.md @@ -0,0 +1,172 @@ +# Concepts + +This page describes the main concepts used in the `manytask` project - public/private repository, students' repositories, 
etc.
+
+
+## Manytask
+
+This project extends the [manytask](https://github.com/manytask/manytask) project, so please refer to its documentation first.
+
+The key `manytask` concepts are:
+
+* **Gitlab** - a gitlab.com or self-hosted gitlab instance where students' repositories will be created.
+
+
+* **Manytask/Web App** - a web application to manage students' repos, grades and deadlines.
+  It stores grades in a google sheet and displays deadlines on a web page.
+  It also automatically creates gitlab repositories for students as forks from the Public Repo with tasks and solution templates.
+  When a self-hosted gitlab instance is used, it creates gitlab users automatically.
+
+
+* **Public Repo** - a public (only gitlab at the moment) repository with tasks and solution templates.
+
+
+* **Students' Group** - a group where `manytask` will create repositories for students.
+
+
+* **Students' Repositories** - repositories for students, created as forks from the Public Repo.
+
+
+!!! note
+    Manytask does not restrict the repository structure in any way, except:
+    1. Students must have access to the public repository.
+    2. The students' group should be private, so that students cannot see each other's solutions.
+    3. `.gitlab-ci-students.yml` should be present in the public repository root to set it up as the `external ci file` for all students' repositories.
+
+
+## Checker
+
+Checker is much more strict in terms of repository structure and overall workflow.
+However, you can use all or only some functions of the checker, which will influence the strictness. In these docs we assume that you use the full recommended workflow.
+
+First of all, it introduces the following main concepts:
+
+* **Checker** - a CLI script providing the following functionality:
+
+    * **grade** - run in a student's repository to test the solution against private and public tests and push scores.
+    * **validate** - run in the private (tutors') repository to validate tasks and deadlines integrity (runs automatically during check).
+    * **check** - run in the private (tutors') repository to test gold solutions against private and public tests.
+    * **export** - run in the private (tutors') repository to export tasks, templates and tests to the public repository.
+
+
+* **Docker Env**/**Testenv** - a docker image with your course environment and checker installed,
+  e.g. a cpp compiler, go compiler, python interpreter, additional libraries, etc.
+  It should also contain a copy of the private repository with private tests.
+  It is used to run `checker grade` in students' repositories and `checker check` in the private repository to have a consistent environment and dependencies.
+  You may use the provided `checker` docker image as a base or create your own from scratch.
+
+
+* **Private Repo** - a private (tutors') repository with tasks, tests, templates and solutions (and anything else you may need).
+  This repository has to be private as it contains solutions and optional private tests.
+  We highly recommend testing the gold solution against public and private tests.
+
+
+* **Private CI** - a gitlab ci or github workflow (or whatever you use) set to run
+    * `checker check` on each push/mr in the private repository to test the gold solution against private and public tests;
+    * `checker export` on each push/mr/release/regularly to export tasks, templates and tests to the public repository.
+  It should be set up to use the docker image with your environment and checker installed.
+
+
+* **Students' CI** - a gitlab ci file (only gitlab at the moment) set to run
+    * `checker grade` on each push/mr in students' repositories to test the solution against private and public tests and push scores;
+    * `checker contribute` on each mr in the public repository to check students' contributions to public tests and test them against the gold solution.
+  It should be set up to use the docker image with your environment and checker installed.
+
+
+* **Runner** - a gitlab runner (only gitlab at the moment) to run students' tests in.
+  As you will have a lot of students' solutions, it is better to have a self-hosted gitlab runner.
+  It should be connected to the students' group or the self-hosted gitlab instance for students' pipelines to run in.
+
+
+Also, checker introduces the following inner concepts:
+
+* **Layout** - the structure of the private repository (and, respectively, the public repository).
+  It is described on the [Getting Started docs](./1_getting_started.md) page.
+
+
+* **Config-s** - yaml files with configuration for checker - `.checker.yml` and `.deadlines.yml`.
+  They are described on the [Configuration docs](./2_configuration.md) page.
+
+
+* **Pipeline** - a yaml-described pipeline in the `.checker.yml` file to run during the `checker check` and `checker export` commands.
+  It is described on the [Configuration docs](./2_configuration.md) page.
+
+
+* **Plugin** - a single stage of the pipeline; it has arguments and returns an execution result. In a nutshell, it is a Python class with a `run` method and an `Args` pydantic class.
+  Checker has some built-in plugins, but you can write your own.
+  It is described on the [Configuration docs](./2_configuration.md) page and the [Plugins docs](./3_plugins.md) page.
+
+
+* **Group** - a group of tasks with the same deadline; it can be thought of as a lecture.
+
+
+* **Task** - a task ready to be tested within your environment.
+
+
+* **Public Tests/Files** - files to be copied to the public repository from the private repository, used in testing.
+
+
+* **Private Tests/Files** - files NOT to be copied to the public repository from the private repository, but still used in testing.
+
+
+* **Gold Solution** - a tutors-written task solution to be tested against public and private tests.
+  It is used to check tests integrity. Never exposed to students.
+
+
+* **Template** - solution template files, copied to students' repositories instead of the gold solution.
+
+
+* **Verbose True/False** - a flag to set the level of verbosity of the checker - private/public.
+
+    1. When `verbose` is `True` - checker will print all logs, results and debug info.
+    2. When `verbose` is `False` - checker will print only public-friendly output - less info, hidden private tests results, etc.
+
+    It is set automatically to True for `checker check` and to False for `checker grade`/`checker check --contribute` commands.
+    Plugins have to implement the `verbose` flag.
+
+
+## Manytask vs Checker
+
+Manytask and checker are responsible for very different things.
+
+**Manytask** - a web application to host, responsible for the following things:
+
+1. Get and show deadlines on the web page
+2. Get and store students' grades
+3. Get and store students' submissions for anti-cheat purposes
+4. Create Students' Repositories as forks from the Public Repo or as empty repositories
+5. (self-hosted gitlab only) Create gitlab users for students
+
+It is language-agnostic and does not care about your course environment and tests. It just creates repositories and stores grades.
+Here is the scheme of the manytask workflow:
+``` mermaid
+flowchart LR
+    subgraph gitlab
+        public(Public Repo) -.->|fork| student
+        public -->|updates| student
+        student([Student's Repo])
+    end
+    student -->|push scores| manytask
+    manytask[manytask] -.->|creates| student
+```
+
+
+**Checker** - a CLI script to run in CI, responsible for the following things:
+
+1. Test students' solutions against public and private tests
+2. Test the gold solution against public and private tests
+3. Export tasks, templates and tests from the private to the public repository
+4. Validate tasks and deadlines integrity
+
+It is language-agnostic, but requires a docker image with your course environment and pipelines configured in yaml files that describe how to run tests.
+Here is the scheme of the checker workflow:
+``` mermaid
+flowchart LR
+    private(Private Repo) -->|checker check| private
+    private -->|checker export| public
+    student([Student's Repo]) -->|checker grade| manytask
+    subgraph gitlab
+        public(Public Repo) -.->|fork| student
+        public -->|updates| student
+    end
+    manytask -.->|creates| student
+```
diff --git a/docs/1_getting_started.md b/docs/1_getting_started.md
new file mode 100644
index 0000000..a68e340
--- /dev/null
+++ b/docs/1_getting_started.md
@@ -0,0 +1,225 @@
+# Getting Started
+
+This page will help you get started using `checker` with your course.
+
+This guide assumes that you have already learned the [concepts](./0_concepts.md).
+
+
+## Starting point
+
+What you already have:
+
+1. You are going to create a course with manytask and checker.
+2. You have installed (or are going to install) [manytask](https://github.com/manytask/manytask).
+   (So you have an empty public repo, a private group for students and a manytask instance running)
+
+What you need to do:
+
+1. Create a private repo with a specific structure - [Layout](#layout)
+2. Create a testing environment - [Testing environment](#testing-environment)
+3. Configure your course - [Configuration](#configuration)
+4. Learn how to test locally - [Local testing](#local-testing)
+5. Learn how to set up Infrastructure - [Infrastructure](#infrastructure)
+6. Learn how to set up CI in private and public repos - [CI setup](#ci-set-up)
+
+A good starting point is to check out the [course-template](https://github.com/manytask/course-template). This is an example private repo for a python course with tests and tasks. You can fork it and use it as a base for your course.
+
+
+## Layout
+
+The private repository layout is crucial for the checker to function correctly.
+Each task should be organized into a dedicated folder within a group directory.
+Also, there are 2 config files, `.checker.yml` and `.deadlines.yml`, that are required for the checker to work.
+Here's a detailed breakdown:
+
+```yaml
+group_1/
+  task_1/
+    .template/  # optional, can be extracted from solution files
+    [some files]
+    [gold solution files]
+    [some private tests]
+    [some public tests]
+    .task.yml   # task config overriding the default parameters
+  task_2/
+    ...
+group_2/
+  task_3/
+    ...
+  task_4/
+    ...
+.checker.yml    # checker config with default parameters and pipelines
+.deadlines.yml  # deadlines config with task scores to send to manytask
+```
+
+!!! warning
+    Group and file names have to be unique.
+
+!!! note
+    By default, ".*" files are considered private and are not copied to the public repo, but you can change this in the config.
+
+Additionally, you can have any extra files, like a `group_1/.lecture` folder or a `tools/my_course_tools` folder.
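+
+Each task folder can also carry its own `.task.yml` (shown in the layout above) to override the checker defaults for that task only. A minimal sketch - see the [Configuration](./2_configuration.md) page for the full format, the values here are purely illustrative:
+
+```yaml
+# group_1/task_1/.task.yml
+version: 1
+
+parameters:  # optional, overrides default_parameters from .checker.yml for this task
+  timeout: 30  # in seconds
+
+structure:   # optional, overrides public/private patterns for this task
+  public_patterns: ["custom_public_file.txt"]
+```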
+Also, you probably want to have a `.docker` file with your test environment and a `.gitlab-ci-students.yml` file to run tests in CI.
+
+After [Configuration](#configuration), you can validate your layout with the `checker validate` command.
+
+
+## Testing environment
+
+To run tests you need a docker testing environment that includes your course's specific environment and the pre-installed checker. Here’s how you can prepare and use it.
+
+You have 2 options:
+
+1. Build your docker image from scratch and install checker in it. This way you have full control and can minimize the size of the image as much as you like.
+    The `checker` is available on pypi, so you can install it with pip
+    ```shell
+    pip install manytask-checker
+    ```
+
+2. Use the pre-built checker base docker image as a base. This way you can save some time and effort and just add your course-specific environment to the image.
+
+    [//]: # (TODO: add pre-build docker image ref )
+    ```dockerfile
+    FROM TBA:1.2.3
+    ```
+
+
+## Configuration
+
+The configuration of the checker and Manytask requires setting up 2 main files, `.checker.yml` and `.deadlines.yml`, plus custom `.task.yml` files for individual tasks when needed.
+Here is a short overview of the configuration files:
+
+* **Checker Configuration** (`.checker.yml`):
+    This file specifies the default parameters for the checker script and defines the pipelines for task checking and exporting.
+
+
+* **Deadlines Configuration** (`.deadlines.yml`):
+    This file outlines the deadlines for each group, the max score for each task, etc.
+    The checker uses it a) to validate deadlines integrity and b) to send scores to manytask.
+
+
+* **Task Configuration** (`.task.yml`):
+    An optional file located in the task directory that allows for task-specific settings. It can override the default parameters, private/public files and pipelines set in `.checker.yml` for individual tasks.
+
+
+For the full guide on configuration, see the [Configuration docs](./2_configuration.md) page.
+
+
+## Local testing
+
+For local testing of the private repo you have 2 options:
+
+1. Install checker on your machine
+    ```shell
+    # create virtualenv
+    python -m venv .venv
+    source .venv/bin/activate
+    # install checker
+    (.venv) pip install manytask-checker
+    ```
+    And use it as a cli application from inside your private repo
+    ```shell
+    (.venv) checker check --task hello-world
+    (.venv) checker check --group lecture-1
+    (.venv) checker check
+    ```
+
+
+2. Use the test environment docker image you made before in interactive mode
+
+    [//]: # (TODO: add pre-build docker image ref )
+    ```shell
+    # run docker in interactive mode mounting your repo as /course
+    docker run -it --rm -v $(pwd):/course -w /course manytask/checker:latest bash
+    ```
+    And use it as a cli application from inside your private repo
+    ```shell
+    # inside docker
+    > checker check --task hello-world
+    > checker check --group lecture-1
+    > checker check
+    ```
+
+!!! note
+    \#1 is faster and easier to debug and is ok for local testing; \#2 ensures that your tests will run in the same environment as in CI.
+
+
+## Infrastructure
+
+Setting up the infrastructure for Manytask and checker involves configuring the runtime environment.
+
+Manytask requires the following:
+
+1. (optional) **Self-hosted GitLab** instance - stores the public repo and students' repos.
+    Manytask and checker can work with gitlab.com, but you can use a self-hosted gitlab instance for better control, privacy and performance.
+    Please refer to the [gitlab docs](https://about.gitlab.com/install/) for installation instructions.
+
+
+2. **Manytask instance** - a web application managing students' grades (in a google sheet) and deadlines (web page).
+    Please refer to the [manytask docs](https://github.com/manytask/manytask).
+
+So the checker extends it with the following:
+
+1. **Gitlab Runner** - the place where students' solutions will be tested.
+    You definitely need it, as the students will consume your free CI minutes extremely fast.
+    Please refer to the [gitlab runners docs](https://docs.gitlab.com/runner/) for installation instructions.
+    Add this runner as a group runner to your course group or as a shared runner to your gitlab instance.
+
+
+2. (optional) **GitHub Runner** - if you are using GitHub for your private repo, you may need to set up a GitHub runner.
+    Please refer to the [github runners docs](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners) for installation instructions.
+    However, at the moment GitHub provides 2000 CI minutes per org, so that may be enough to start with.
+
+
+3. (optional) **Private Docker Registry** - to store the testing environment docker image (it contains private tests).
+    You can use anything you like, but we recommend using the gitlab registry as it is already integrated with gitlab.
+
+
+4. **Gitlab token** - with public repo access, to export files to the public repo.
+    You need to add it as a secret to your private repo and use it in CI. Also, if you want to use it in pipelines in students' repos, you need to add it as a secret to your course group.
+    If you have a self-hosted gitlab instance or a premium account, you can create a service account for the course group using this [guide](https://docs.gitlab.com/ce/user/profile/service_accounts.html).
+    Otherwise, you have to create a separate account, grant it access to the course group and use its [personal access token](https://docs.gitlab.com/ce/user/profile/personal_access_tokens.html).
+
+!!! note
+    For an automated setup, refer to the [manytask/infrastructure](https://github.com/manytask/infrastructure) repository with ansible playbooks.
+    These playbooks provide a stable and tested setup for the self-hosted gitlab instance, the manytask instance and gitlab runners (configuration included).
+
+
+## CI set up
+
+Configuring Continuous Integration (CI) is essential for automating the testing and deployment processes. Here's how to set it up for both private and public repositories.
+
+1. **Private Repo**
+    You can refer to the [course-template](https://github.com/manytask/course-template) for an example of a private repo with CI set up.
+
+    * Private repo on GitHub (recommended way)
+      If your private repo is on GitHub, you can use GitHub Actions and the [Reusable Workflows](https://github.com/manytask/workflows) provided by us to set up CI in a few clicks.
+
+    * Private repo on GitLab
+      If your private repo is on GitLab, you can use GitLab CI; no pre-configured workflows are available at the moment.
+
+    You need to set up the following CI jobs:
+
+    1. on each push/mr/release - build the testing environment docker image and keep it as an artifact to run tests in.
+    2. on each push/mr - run `checker check` inside the docker image to test the gold solution against private and public tests.
+    3. on each push/release - run `checker export` inside the docker image to export to the public repository (requires the gitlab token).
+    4. on each push/release - call the manytask api to update deadlines (requires the manytask push token).
+    5. on each push/release - build and publish the testing environment docker image to the private docker registry (requires the gitlab token).
+
+    !!! note
+        Don't forget to add MANYTASK_TOKEN and GITLAB_TOKEN as protected secrets to your private repo.
+
+
+2. **Public Repo**
+    Checker pushes to this repo automatically and no pipelines run here, so there is nothing to configure directly.
+    However, the public repo should have a `.gitlab-ci-students.yml` file in the root to set it up as the `external ci file` for all students' repositories.
+    This file should contain 2 jobs, both running inside the test environment docker image:
+
+    1. on each push/mr - run `checker grade` to test the solution against private and public tests and push scores to manytask (requires the manytask push token).
+    2. on each mr in the public repo - run `checker check --contribute` to test contributed public tests against the gold solution.
+
+
+3. **Students' Group**
+    Students' repos will use group or shared runners from this group, so make sure they are enabled.
+
+    !!! note
+        Don't forget to add MANYTASK_TOKEN and GITLAB_TOKEN (optional) as protected secrets to your group.
diff --git a/docs/2_configuration.md b/docs/2_configuration.md
new file mode 100644
index 0000000..8d82aca
--- /dev/null
+++ b/docs/2_configuration.md
@@ -0,0 +1,175 @@
+# Configuration
+
+This page describes how to configure the checker with the `.checker.yml`, `.deadlines.yml` and `.task.yml` files.
+
+You can refer to the [course-template](https://github.com/manytask/course-template) repository for examples of configuration files.
+
+
+## `.checker.yml`
+
+This file describes how the checker will operate - how to export files, how to run pipelines and so on.
+
+[//]: # (TODO: Add json schema)
+TBA
+
+No json schema is available yet, but you can refer to `checker.configs.checker.CheckerConfig` in the [checker](https://github.com/manytask/checker) repository
+or to the [course-template](https://github.com/manytask/course-template) repository.
+
+
+!!! warning
+    The structure section requires glob patterns to be valid and will apply the same patterns recursively to all subdirectories.
+    The moment it encounters a `.task.yml` file, it will stop and use the parameters from that file recursively.
+    No `**` patterns are allowed.
+
+Please refer to the [plugins](./3_plugins.md) and [pipelines](./4_pipelines.md) sections for more information on how to configure pipelines.
+ +### Example + +The simple `.checker.yml` file is: + +[//]: # (TODO: include file directly from course-template) +[//]: # (TODO: add pydantic validation for include files) +```yaml +# .checker.yml +version: 1 + +# can be overwritten in .task.yml for individual tasks +structure: + # ignore patterns: exclude from export, overwrite during testing + ignore_patterns: [".git", ".idea", ".vscode", "__pycache__", ".venv", ".*_cache", "*.pyc"] + # public patterns: include in export, overwrite during testing + public_patterns: ["*", ".gitlab-ci-students.yml", ".gitignore"] + # private patterns: exclude from export, overwrite during testing + private_patterns: [".*"] + +# default values for all tasks, can be overwritten in .task.yml params: +default_parameters: + run_testing: true + timeout: 10 # in seconds + +# settings for export command, uses .deadlines.yml and `params` and each task params (in .task.yml) +export: + destination: https://gitlab.manytask.org/test/public-test-repo + +# settings for Tester, uses .checker.yml and `params` and each task params (in .task.yml) +testing: + search_plugins: ["tools/plugins"] + + # run once per repo + global_pipeline: + - ... + # run once per task + tasks_pipeline: + - ... + - ... + # will run once per task only if task_pipeline NOT failed + report_pipeline: + - ... +``` + + +## `.deadlines.yml` + +This file describes deadlines for tasks. It is used by `checker export` command to export only tasks that are started. +Additionally, it is used by `checker validate` to ensure integrity of the deadlines and local files. + +[//]: # (TODO: Add json schema) + +No json schema available yet, but you can refer to the checker.configs.deadlines.DeadlinesConfig in [checker](https://github.com/manytask/checker) repository. +Or [course-template](https://github.com/manytask/course-template) repository. + +### Example + +[//]: # (TODO: include file directly from course-template) +[//]: # (TODO: add pydantic validation for include files) +The simple `.deadlines.yml` file is: +```yaml +# .deadlines.yml +version: 1 + +settings: + timezone: Europe/Moscow + + deadlines: hard # hard/interpolate + max_submissions: 10 # optional + submission_penalty: 0.1 # optional + + task_url: https://example.com/$GROUP_NAME/$TASK_NAME # optional + +schedule: + - group: 1.FirstGroup + enabled: true + start: 2020-01-01 18:00:00 + steps: + 0.5: 7d + end: 13d 03:00:00 + tasks: + - task: hello_world + score: 10 + bonus: 0 + special: 1 + - task: sum_a_b + score: 5 + bonus: 5 + special: 0 + - task: disabled_task + enabled: false + score: 5 + + - group: 2.SecondGroup + start: 2020-02-01 18:00:00 + steps: + 0.9: 2020-02-08 18:00:00 + 0.1: 14d + tasks: + - task: factorial + score: 20 + + - group: 3.ThirdGroup + start: 2020-03-01 18:00:00 + tasks: + - task: palindrome + score: 0 + special: 2 + url: https://example.com + + - group: 4.FourthGroup + enabled: false + start: 2020-04-01 18:00:00 + tasks: [] +``` + + +## `.task.yml` + +This optional file describes task parameters. In used by checker when running `tasks_pipeline` to override default parameters, pipeline or layout (public or private files). + +[//]: # (TODO: Add json schema) + +No json schema available yet, but you can refer to the checker.configs.task.TaskConfig in [checker](https://github.com/manytask/checker) repository. +Or [course-template](https://github.com/manytask/course-template) repository. 
+
+### Example
+
+[//]: # (TODO: include file directly from course-template)
+[//]: # (TODO: add pydantic validation for include files)
+
+```yaml
+# .task.yml
+version: 1
+
+structure: # optional
+  ignore_patterns: ["*.pyc"]
+  public_patterns: ["custom_public_file.txt"]
+  private_patterns: [".*", "custom_private_tests.py"]
+
+parameters: # optional
+  run_testing: true
+  timeout: 10 # in seconds
+
+task_pipeline: # optional
+  ...
+
+report_pipeline: # optional
+  ...
+```
\ No newline at end of file
diff --git a/docs/3_plugins.md b/docs/3_plugins.md
new file mode 100644
index 0000000..00bcdd1
--- /dev/null
+++ b/docs/3_plugins.md
@@ -0,0 +1,86 @@
+# Plugins
+
+This page describes how to use and write plugins for checker pipelines.
+
+You can refer to the [course-template](https://github.com/manytask/course-template) repository for examples of plugin usage and custom plugin development.
+
+
+## What is a Plugin
+
+A plugin is a single stage of the pipeline; it has arguments and returns an execution result.
+In a nutshell, it is a Python class overriding the abstract class `checker.plugins.PluginABC`:
+
+> ::: checker.plugins.base.PluginABC
+
+Note that each plugin should override the `checker.plugins.PluginABC.Args` class to provide arguments validation. Otherwise, empty arguments will be passed to the `run` method.
+
+> ::: checker.plugins.base.PluginABC.Args
+
+
+Each plugin outputs a `checker.plugins.PluginOutput` object when executed successfully.
+
+> ::: checker.plugins.base.PluginOutput
+
+In case of error, `checker.exceptions.PluginExecutionFailed` has to be raised.
+> ::: checker.exceptions.PluginExecutionFailed
+
+!!! note
+    The base Plugin class will handle all ValidationErrors of Args and raise an error by itself.
+    So try to move all arguments validation into the `Args` class in the `pydantic` way.
+
+
+## How to use plugins
+
+Plugins are used in the pipelines described in the `.checker.yml` file. When running a pipeline, the checker will validate the plugin arguments and run it.
+
+The following plugins are available out of the box:
+
+TBA
+
+[//]: # (::: checker.plugins)
+
+[//]: # ( handler: python)
+
+[//]: # (TODO: list here all plugins available out of the box)
+
+
+## How to write a custom plugin
+
+To write a custom plugin you need to create a class inheriting from `checker.plugins.PluginABC`, override the `_run` method and the `Args` inner class, and set the `name` class attribute.
+
+[//]: # (TODO: test example)
+
+```python
+from random import randint
+from checker.plugins import PluginABC, PluginOutput
+from checker.exceptions import PluginExecutionFailed
+from pydantic import AnyUrl
+
+class PrintUrlPlugin(PluginABC):
+    """Plugin to print url"""
+
+    name = "print_url"
+
+    class Args(PluginABC.Args):
+        url: AnyUrl
+
+    def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput:
+        if randint(0, 1):
+            if verbose:
+                raise PluginExecutionFailed("Verbose error, we got randint=1")
+            else:
+                raise PluginExecutionFailed("Random error")
+
+        return PluginOutput(
+            output=f"Url is {args.url}",
+            percentage=1.0,  # optional, default 1.0 on success
+        )
+```
+
+!!! important
+    The Plugin must implement the `verbose` functionality!
+    If `verbose` is `True`, the plugin should provide all info and possible debug info.
+    If `verbose` is `False`, the plugin should provide only public-friendly info, e.g. excluding private test output.
+
+!!! note
+    It is a nice practice to write small tests for your custom plugins to be sure that they work as expected.
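+
+For example, a minimal `pytest` sketch for the `PrintUrlPlugin` above could look like this. It assumes the plugin is saved as `tools/plugins/print_url.py` (a hypothetical path), that plugin classes can be constructed without arguments, and it calls the internal `_run` directly with a constructed `Args` for simplicity:
+
+```python
+import pydantic
+import pytest
+
+from checker.exceptions import PluginExecutionFailed
+
+from tools.plugins.print_url import PrintUrlPlugin  # hypothetical import path
+
+
+def test_args_are_validated() -> None:
+    # an invalid url must be rejected by the pydantic Args model
+    with pytest.raises(pydantic.ValidationError):
+        PrintUrlPlugin.Args(url="not-a-url")
+
+
+def test_run_success(monkeypatch: pytest.MonkeyPatch) -> None:
+    # force the success branch of the (randomly failing) example plugin
+    monkeypatch.setattr("tools.plugins.print_url.randint", lambda a, b: 0)
+    output = PrintUrlPlugin()._run(PrintUrlPlugin.Args(url="https://example.com/"), verbose=True)
+    assert "example.com" in output.output
+
+
+def test_run_failure(monkeypatch: pytest.MonkeyPatch) -> None:
+    # force the failure branch - the plugin must raise PluginExecutionFailed
+    monkeypatch.setattr("tools.plugins.print_url.randint", lambda a, b: 1)
+    with pytest.raises(PluginExecutionFailed):
+        PrintUrlPlugin()._run(PrintUrlPlugin.Args(url="https://example.com/"), verbose=False)
+```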
diff --git a/docs/4_pipelines.md b/docs/4_pipelines.md
new file mode 100644
index 0000000..6e2bcdd
--- /dev/null
+++ b/docs/4_pipelines.md
@@ -0,0 +1,122 @@
+# Pipelines
+
+This is the most important part of the checker. Pipelines are used to actually check and grade the solution.
+Each pipeline is a sequence of plugins. Each plugin (pipeline stage) has arguments and a run_if condition and returns an execution result.
+
+Please refer to the [plugins configuration](./3_plugins.md) for more details on pre-defined and custom plugins.
+
+
+## 3 pipeline types
+
+There are 3 types of pipelines you need to define in the `.checker.yml` file:
+
+* `global_pipeline` - a pipeline executed once for the whole checker repository.
+  You can place here any general compilation, installation, etc.
+* `task_pipeline` - a pipeline executed for each task.
+  You can place here any task-specific compilation, installation, etc.
+  For example, you may run `pytest` by default, but for some tasks you want to have the MR checked first.
+  (can be re-defined in the `.task.yml` file)
+* `report_pipeline` - a pipeline executed for each task after all tests are passed (not failed).
+  You can place here any task-specific score reporting, etc.
+  For example, you can report the score to the Manytask platform, but for some tasks you want to have the MR checked first.
+  (can be re-defined in the `.task.yml` file)
+
+```yaml
+# .checker.yml
+...
+testing:
+  # once
+  global_pipeline:
+    - name: "Install requirements"
+      run: "run_script"
+      args:
+        ...
+  # for each task
+  task_pipeline:
+    - name: "Run pytest"
+      run: "pytest"
+      args:
+        ...
+  # for each task after task_pipeline is passed
+  report_pipeline:
+    - name: "Report Score Manytask"
+      run: "report_score_manytask"
+      args:
+        ...
+...
+```
+
+## Single pipeline stage
+
+Each pipeline stage is a plugin called with arguments. Here is an example of a single pipeline stage:
+```yaml
+  - name: "Check forbidden regexps"
+    fail: fast
+    run: "check_regexps"
+    run_if: true
+    register_output: "forbidden_regexps"
+    args:
+      origin: "/tmp/origin"
+      patterns: ["**/*.py"]
+      regexps: ["exit(0)"]
+```
+
+* `name`: Human-readable name of the pipeline stage to be shown in the logs.
+* `fail`: Defines how to handle a failure of this pipeline stage.
+    * `fast` - (default) fail fast, stop the pipeline and fail the task immediately.
+    * `after_all` - fail after all pipeline stages are executed.
+    * `never` - ignore the failure of this pipeline stage.
+* `run`: The key name of the plugin to be executed. It will be searched for among pre-defined and custom plugins.
+* `run_if`: Condition to run this pipeline stage. Cast to bool; `true` by default.
+* `register_output`: Name of the output to be registered in the `outputs` variable. The `PipelineStageResult` object will be stored in the `outputs` dict under this name.
+* `args`: Arguments to be passed to the plugin.
+  Arguments are validated by the `pydantic` library as defined by each individual plugin.
+
+
+### Templating in Tester Pipelines
+
+You can use [jinja2](https://jinja.palletsprojects.com/en/3.0.x/) templating in the `.checker.yml` pipeline arguments and `run_if` conditions.
+Templates use the `${{ ... }}` syntax; the expression within the brackets will be evaluated before plugin execution.
+
For example: +```yaml + report_pipeline: + - name: "Report Score Manytask" + run: "report_score_manytask" + args: + origin: "${{ global.temp_dir }}/${{ task.task_sub_path }}" + patterns: ["**/*.py"] + username: ${{ global.username }} + task_name: ${{ task.task_name }} + score: ${{ outputs.test_output.percentage }} +``` + + +The available variables are: + +* `global` - global parameters + + > ::: checker.tester.GlobalPipelineVariables + +* `task` - task parameters + + > ::: checker.tester.TaskPipelineVariables + +* `parameters` - default parameters + + > ::: checker.configs.checker.CheckerParametersConfig + +* `env` - environment variables dict in the moment of running checker + +* `outputs` - outputs of previous pipeline step if `register_output` is set, dict of string to `checker.plugins.PluginOutput` objects + + > ::: checker.pipeline.PipelineStageResult + + +### Pipeline stage result + +Each stage can optionally register its output in `outputs` context to be used by the next stages. +e.g. register percentage of passed tests to be used in the next stage to report the score. + +Each pipeline processes internally as `PipelineStageResult` object. It contains the following fields: + +> ::: checker.pipeline.PipelineStageResult + +And can be accessed in the next pipeline stages using templating syntax `${{ outputs.. }}` diff --git a/docs/5_usage.md b/docs/5_usage.md new file mode 100644 index 0000000..84b39bc --- /dev/null +++ b/docs/5_usage.md @@ -0,0 +1,35 @@ +# Usage + +This section describes advanced usage of the checker. + + +## CLI + +The main checker functionality is available via CLI. + +::: mkdocs-click + :module: checker.__main__ + :command: cli + :prog_name: checker + :list_subcommands: True + :style: table + :depth: 2 + + +## Docker + +Also, we provide a docker image with checker installed. +We have tried to optimize it, but you may want to create your own image from scratch. + +The docker entrypoint is `checker` script, so you can use it as a CLI application. + +```shell +docker run --rm -it manytask/checker --help +``` + +or you can build it from your Dockerfile + +```dockerfile +FROM manytask/checker:0.0.1-python3.8 +# ... +``` \ No newline at end of file diff --git a/docs/6_development.md b/docs/6_development.md new file mode 100644 index 0000000..91fb3b2 --- /dev/null +++ b/docs/6_development.md @@ -0,0 +1,92 @@ +# Developing + +This section describes how to contribute and develop the project itself. +For plugins development please refer to [plugins usage and development guide](./3_plugins.md). + +First of all, please refer to organization contribution guide [CONTRIBUTING.md](https://github.com/manytask/.github/CONTRIBUTING.md). + + +## Installation + +After cloning the repo, you can install it in development mode with all dev dependencies. + +Recommended way is you use virtualenv +```shell +python -m venv .venv +source .venv/bin/activate +``` + +Install lib in dev mode +```shell +(.venv)$ pip install -U --editable .[test,docs] # .\[test\] in zsh +``` + +Also, you need to install pre-commit hooks + +[//]: # (TODO: make pre-commit hooks) +```shell +TBA +``` + +## Testing and linting + +This project uses makefile to manage testing and linting. +The formatting, linting and testing is mandatory for each PR. 
+ + +To apply formatting use +```shell +(.venv)$ make format +``` + +To run linting use +```shell +(.venv)$ make lint +``` + +To running all test or integration/unit/doctests separately use +```shell +(.venv)$ make test +(.venv)$ make test-integration +(.venv)$ make test-unit +(.venv)$ make test-doctest +``` +Note: integration tests require docker to be installed and running. TBA + +[//]: # (TODO: describe how to run manytask for testing and connect gitlab) + +## Documentation + +This project uses `mkdocs` to generate documentation. +All documentation locating in the *.md root files and in the docs folder. + +To run docs locally use +```shell +(.venv)$ make docs-serve +``` +This will start local server with hot reload. + +To build docs use +```shell +(.venv)$ make docs-build +``` +This will build docs in the `site` folder. + + +## Contributing + +Really appreciate any contributions! + +Feel free to open issues and PRs. Please check on existing issues and PRs before opening new ones. + + +## CI + +This project uses GitHub actions to run tests and build docs on each push and pull request. + +Your PR will not be merged if tests or docs build will fail. The following checks are mandatory: + +1. Testing +2. Linting/typechecks/formatting +3. Docs build and Docs Tests +4. PR title should follow [conventional commits](https://www.conventionalcommits.org/en/v1.0.0/) diff --git a/docs/7_changelog.md b/docs/7_changelog.md new file mode 100644 index 0000000..dda7a1e --- /dev/null +++ b/docs/7_changelog.md @@ -0,0 +1,4 @@ +{% + include-markdown "../CHANGELOG.md" + heading-offset=0 +%} \ No newline at end of file diff --git a/docs/production.md b/docs/ignore_me__production.md similarity index 92% rename from docs/production.md rename to docs/ignore_me__production.md index 7e93e68..b4e3713 100644 --- a/docs/production.md +++ b/docs/ignore_me__production.md @@ -1,13 +1,13 @@ # Production On this page you can find documentation on how to run `checker` itself -NB: Please first refer to the [system setup documentation](./system_setup.md) +NB: Please first refer to the [system setup documentation](./ignore_me__system_setup) ## Installation In a nutshell `checker` is just a python pkg available with pip as [manytask-checker](https://pypi.org/project/manytask-checker/). 
-So, as it was mentioned in [system setup documentation](./system_setup.md) you can install it with +So, as it was mentioned in [system setup documentation](./ignore_me__system_setup) you can install it with ```shell python -m pip install manytask-checker ``` diff --git a/docs/system_setup.md b/docs/ignore_me__system_setup.md similarity index 100% rename from docs/system_setup.md rename to docs/ignore_me__system_setup.md diff --git a/docs/images/logo-manytask.png b/docs/images/logo-manytask.png new file mode 100644 index 0000000..da7da0b Binary files /dev/null and b/docs/images/logo-manytask.png differ diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 0000000..1361910 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,4 @@ +{% + include-markdown "../README.md" + heading-offset=0 +%} \ No newline at end of file diff --git a/examples/.course.yml b/examples/.course.yml deleted file mode 100644 index 8e1d377..0000000 --- a/examples/.course.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: python - -# course -deadlines: hard -second_deadline_max: 0.5 -low_demand_bonus_bound: 0.2 # optional -max_low_demand_bonus: 1.0 # optional - - -# checker -system: python -layout: groups -executor: sandbox -templates: create - - -# gitlab -gitlab_url: https://gitlab.manytask.org -default_branch: main -course_group: python -public_repo: python/public-2022-fall -students_group: python/students-fall-2022 -#lectures_repo: py-tasks/lectures-2022-spring -gitlab_service_username: manytask # optional -gitlab_service_token_id: GITLAB_SERVICE_TOKEN # optional - - -# manytask -manytask_url: https://py.manytask.org -manytask_token_id: TESTER_TOKEN # optional -gitlab_api_token_id: GITLAB_API_TOKEN # optional - - -# info -links: # optional, any number of links - "TG Channel": https://t.me/joinchat/ - "TG Chat": https://t.me/joinchat/ - "LMS": https://lk.yandexdataschool.ru/ - "Contribute Manytask": https://github.com/yandexdataschool/manytask diff --git a/examples/.deadlines.yml b/examples/.deadlines.yml deleted file mode 100644 index be426e4..0000000 --- a/examples/.deadlines.yml +++ /dev/null @@ -1,55 +0,0 @@ -- group: 5.5.group_name - start: 10-05-2022 18:00 - deadline: 23-05-2022 23:59 - second_deadline: 23-05-2022 23:59 - enabled: true - tasks: - - task: task_1 - enabled: true - score: 100 - - task: task_2 - score: 50 - - task: task_3 - score: 50 - -- group: 4.4.other_group_name - start: 10-05-2022 18:00 - deadline: 23-05-2022 23:59 - second_deadline: 23-05-2022 23:59 - enabled: true - tasks: - - task: big_hw - review: true - score: 400 - marked: true - - task: task_4 - score: 50 - scoring_func: latest - -- group: 3.empty_group_name - start: 10-05-2022 18:00 - deadline: 23-05-2022 23:59 - second_deadline: 23-05-2022 23:59 - tasks: [] - -- group: 2.disabled_group_name - start: 10-05-2022 18:00 - deadline: 23-05-2022 23:59 - second_deadline: 23-05-2022 23:59 - enabled: false - marked: true - tasks: - - task: task_5 - score: 50 - scoring_func: latest - - -- group: 1.marked_group_name - start: 10-05-2022 18:00 - deadline: 23-05-2022 23:59 - second_deadline: 23-05-2022 23:59 - marked: true - tasks: - - task: task_6 - score: 20 - scoring_func: max diff --git a/examples/.gitlab-ci.yml b/examples/.gitlab-ci.yml deleted file mode 100644 index 02bda7e..0000000 --- a/examples/.gitlab-ci.yml +++ /dev/null @@ -1,56 +0,0 @@ -variables: - GIT_DEPTH: 10 - GIT_STRATEGY: clone - REGISTRY: cr.yandex/crp9onavos88ug32d5r2/grader/py - - -# Testing and Grading all changed tasks -grade: - image: $REGISTRY/testenv:latest - rules: - - 
if: $CI_PROJECT_NAME =~ /^public-(fall|spring)-20\d\d/ - when: never - - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /contributing/ || $CI_COMMIT_BRANCH =~ /contributing/ - when: never - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - when: on_success - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH - when: manual - - when: on_success - script: - - cd /opt/shad/ - - python3 -m checker grade - timeout: 10 minutes - -# Auto on Merge Requests: check current student mege requests -grade-mrs: - image: $REGISTRY/testenv:latest - rules: - - if: $CI_PROJECT_NAME =~ /^public-(fall|spring)-20\d\d/ - when: never - - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /contributing/ || $CI_COMMIT_BRANCH =~ /contributing/ - when: never - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - when: manual - allow_failure: true - - when: never - script: - - cd /opt/shad/ - - python3 -m checker grade-mrs - timeout: 10 minutes - -# Checking contribution to the main repo -check: - image: $REGISTRY/testenv:latest - rules: - - if: $CI_PROJECT_NAME =~ /^public-(fall|spring)-20\d\d/ - when: never - - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME =~ /contributing/ || $CI_COMMIT_BRANCH =~ /contributing/ - when: on_success - - when: never - script: - - cp -R /opt/shad/tests tests/ - - python3 -m checker check --contributing - timeout: 30 minutes diff --git a/examples/.releaser-ci.yml b/examples/.releaser-ci.yml deleted file mode 100644 index 6b63bc6..0000000 --- a/examples/.releaser-ci.yml +++ /dev/null @@ -1,162 +0,0 @@ -variables: - GIT_STRATEGY: clone - REGISTRY: cr.yandex/crp9onavos88ug32d5r2/grader/py - - -stages: - - build - - check-tools - - check-tasks - - deploy - - manual - - -.docker: - image: docker:20.10.17 - before_script: - # register `config.json` to be able to push - - mkdir -p $HOME/.docker && echo $DOCKER_AUTH_CONFIG > $HOME/.docker/config.json - - -rebuild-base-image: - extends: .docker - stage: build - rules: - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - when: on_success - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH - when: never - - when: on_success - script: - # Note: Expected docker registry `$REGISTRY` to be already authenticated - - echo "building base image = $REGISTRY/base-image:$CI_COMMIT_REF_NAME" - - docker pull $REGISTRY/base-image:latest || true - - docker pull $REGISTRY/base-image:$CI_COMMIT_REF_NAME || true - - docker build - --tag $REGISTRY/base-image:$CI_COMMIT_REF_NAME - --cache-from $REGISTRY/base-image:latest - --cache-from $REGISTRY/base-image:$CI_COMMIT_REF_NAME - --file base.docker - -- . 
- - docker push $REGISTRY/base-image:$CI_COMMIT_REF_NAME - # Push as latest image if master branch - - > - if [ "$CI_COMMIT_BRANCH" == "$CI_DEFAULT_BRANCH" ]; then - echo "pushing base image = $REGISTRY/base-image:latest" - docker tag $REGISTRY/base-image:$CI_COMMIT_REF_NAME $REGISTRY/base-image:latest - docker push $REGISTRY/base-image:latest - fi - - -check-tools-testlib: - image: $REGISTRY/base-image:$CI_COMMIT_REF_NAME - stage: check-tools - rules: - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - when: on_success - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH - when: never - - when: on_success - script: - - python3 -m pip install --editable tools/testlib - - python3 -m flake8 tools/testlib - - python3 -m mypy tools/testlib - - python3 -m pytest tools/testlib --cov=tools/testlib/testlib --cov-report=term-missing - - -check-tasks: - image: $REGISTRY/base-image:$CI_COMMIT_REF_NAME - stage: check-tasks - rules: - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - when: on_success - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH - when: never - - when: on_success - script: - - python3 -m pip install --editable tools/testlib - - python3 -m checker check --parallelize - - -deploy-public: - image: $REGISTRY/base-image:latest - stage: deploy - rules: - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - when: never - script: - # Note: GITLAB_API_TOKEN expected as a env secret (token with push access to public repo) - # Copy all files to public repo - - python3 -m checker export-public - -deploy-docker: - extends: .docker - stage: deploy - rules: - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - when: never - script: - # Note: Expected docker registry `$REGISTRY` to be already authenticated - - echo "using image = $REGISTRY/base-image:latest" - - docker pull $REGISTRY/testenv:latest || true - - docker build -f testenv.docker -t $REGISTRY/testenv:latest --cache-from $REGISTRY/testenv:latest . 
- - docker push $REGISTRY/testenv:latest - -deploy-manytask: - image: curlimages/curl:latest - stage: deploy - rules: - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: on_success - - when: never - script: - - 'curl --fail --silent -X POST -H "Authorization: Bearer $TESTER_TOKEN" - -H "Content-type: application/x-yaml" --data-binary "@.course.yml" - "https://py.manytask.org/api/update_course_config"' - - 'curl --fail --silent -X POST -H "Authorization: Bearer $TESTER_TOKEN" - -H "Content-type: application/x-yaml" --data-binary "@.deadlines.yml" - "https://py.manytask.org/api/update_deadlines"' - - 'curl --fail --silent -X POST -H "Authorization: Bearer $TESTER_TOKEN" - "https://py.manytask.org/api/update_cache"' - - -grade-students-mr: - image: $REGISTRY/base-image:$CI_COMMIT_REF_NAME - stage: manual - rules: - - if: $CI_PIPELINE_SOURCE == "schedule" - when: always - - if: $TRIGGER_PAYLOAD - when: never - - if: $CI_PIPELINE_SOURCE == 'merge_request_event' - when: manual - allow_failure: true - - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH - when: manual - allow_failure: true - - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH - when: never - - when: manual - allow_failure: true - script: - - python3 -m checker grade-students-mrs diff --git a/examples/.tester.python.json b/examples/.tester.python.json deleted file mode 100644 index 241498d..0000000 --- a/examples/.tester.python.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "partially_scored": false, - "verbose_tests_output": false, - "module_test": false, - "build_wheel": false, - "run_mypy": false, - - "forbidden_regexp": [".*"], - - "public_test_files": ["test_other_public.py"], - "private_test_files": ["test_other_private.py"], - - "test_timeout": 60, - "coverage": 90 -} \ No newline at end of file diff --git a/examples/base.docker b/examples/base.docker deleted file mode 100644 index 05754ed..0000000 --- a/examples/base.docker +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3.11.5-slim - -ENV TZ Europe/Moscow -RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone - -RUN apt-get update && \ - apt-get install -y build-essential curl wget git zip && \ - apt-get autoremove -qyy && \ - apt-get clean && rm -rf /var/lib/apt/lists/* - -COPY requirements.txt /tmp/requirements.txt -RUN python3 -m pip install --upgrade -r /tmp/requirements.txt - -RUN python3 -m pip install --upgrade manytask-checker==0.6.11 diff --git a/examples/config.gitlab.toml b/examples/config.gitlab.toml deleted file mode 100644 index 24d2bea..0000000 --- a/examples/config.gitlab.toml +++ /dev/null @@ -1,45 +0,0 @@ -concurrent = 16 -log_level = "info" - -[session_server] - session_timeout = 1800 - -[[runners]] - name = "manytask-private-heavy-0" - limit = 4 - url = "https://gitlab.com/" - token = "TOKEN" - executor = "docker" - environment = ["DOCKER_HOST=unix:///var/run/docker.sock", "DOCKER_TLS_CERTDIR=\"\""] - [runners.docker] - tls_verify = false - tls_certs_path = "" - image = "docker:stable" - privileged = true - disable_entrypoint_overwrite = false - oom_kill_disable = false - disable_cache = false - volumes = ["/cache", "/var/run/docker.sock:/var/run/docker.sock"] - pull_policy = ["always"] - shm_size = 0 - -[[runners]] - name = "manytask-public-heavy-0" - limit = 16 - url = "https://gitlab.manytask.org/" - token = "TOKEN" - executor = "docker" - pre_build_script = "cmp $CI_CONFIG_PATH /opt/shad/.grader-ci.yml # echo 'If this fails you need to pull latest changes'" - [runners.docker] - tls_verify = false - image 
= "docker:latest" - memory = "512m" - cpus = "4" - privileged = false - disable_entrypoint_overwrite = false - oom_kill_disable = false - disable_cache = false - volumes = ["/cache"] - allowed_images = ["cr.yandex/crp9onavos88ug32d5r2/grader/py/*"] - pull_policy = ["always"] - shm_size = 0 diff --git a/examples/testenv.docker b/examples/testenv.docker deleted file mode 100644 index b3dd7f9..0000000 --- a/examples/testenv.docker +++ /dev/null @@ -1,7 +0,0 @@ -FROM cr.yandex/crp9onavos88ug32d5r2/grader/py/base-image:latest - -COPY . /opt/shad -RUN python3 -m pip install /opt/shad/tools/testlib -RUN cp /opt/shad/.gitlab-ci.yml /opt/shad/.grader-ci.yml # Create reference for comparison when running tests - -RUN find /opt/shad/tests -name '*.py' | xargs chmod o-rwx diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000..ac7f0ea --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,102 @@ +site_name: Checker +site_description: Python CLI script to run build and run tests against students solutions +site_url: https://manytask.github.io/checker/ + +docs_dir: ./docs +site_dir: ./site + +theme: + name: material + + palette: + # Palette toggle for light mode + - media: "(prefers-color-scheme: dark)" + scheme: default + toggle: + icon: material/lightbulb + name: Switch to dark mode + primary: teal + accent: purple + + # Palette toggle for dark mode + - media: "(prefers-color-scheme: light)" + scheme: slate + toggle: + icon: material/lightbulb + name: Switch to light mode + primary: teal + accent: lime + + features: + - navigation.tabs + - navigation.sections + - navigation.top + - search.suggest + - search.highlight + - content.tabs.link + - content.code.copy + - content.code.annotation + + language: en + + font: + text: Roboto + code: Roboto Mono + + icon: + repo: fontawesome/brands/github + + favicon: images/logo-manytask.png + logo: images/logo-manytask.png + +validation: + omitted_files: warn + absolute_links: warn + unrecognized_links: info + +extra: + version: + provider: mike + +repo_name: manytask/checker +repo_url: https://github.com/manytask/checker + +nav: + - Overview: index.md + - Concepts: 0_concepts.md + - Setup and Usage: + - Getting Started: 1_getting_started.md + - Configuration: 2_configuration.md + - Plugins: 3_plugins.md + - Pipelines: 4_pipelines.md + - Usage: 5_usage.md + - Development: 6_development.md + - Changelog: 7_changelog.md + +markdown_extensions: + - def_list + - admonition + - pymdownx.details + - pymdownx.superfences + - pymdownx.highlight: + pygments_lang_class: true + - pymdownx.extra + - pymdownx.tabbed: + alternate_style: true + - mkdocs_click + +plugins: + - mike: + alias_type: symlink + canonical_version: latest + - search + - include-markdown + - mkdocstrings: + default_handler: python + handlers: + python: + options: + heading_level: 4 +# show_source: false +# exclude_private: true + - mermaid2 diff --git a/pyproject.toml b/pyproject.toml index 3d60969..58d45b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ authors = [ ] description = "Students' solution checker" readme = "README.md" -requires-python = ">=3.8" +requires-python = ">=3.9" classifiers = [ "Development Status :: 4 - Beta", "Typing :: Typed", @@ -18,52 +18,65 @@ classifiers = [ "Topic :: Education", "Topic :: Education :: Testing", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: 
Python :: 3.12", ] dependencies = [ + "Jinja2 >=3.0.0,<4.0.0", "PyYAML >=5.0.0,<7.0.0", "click >=8.0,<9.0", + "pydantic >=2.0.0,<3.0.0", + "rich >=13.0.0,<14.0.0", "python-gitlab >=3.0.0,<4.0.0", "requests >=2.20.0,<3.0.0", - "unshare >=0.22,<0.30; sys_platform != 'darwin'", + "pytz >=2022.0,<2023.4; python_version < '3.9'", + "urllib3 >=2.0.0,<3.0.0", ] dynamic = ["version"] [project.urls] Source = "https://github.com/yandexdataschool/checker" +Documentation = "https://manytask.github.io/checker/" [project.optional-dependencies] test = [ "pytest >=6.0.0,<8.0.0", "pytest-cov >=4.0.0,<5.0.0", "pytest-mock >=3.0.0,<4.0.0", + "requests-mock >=1.0.0,<2.0.0", "black ==23.12.1", "mypy >=1.0.0", "flake8 >=4.0.0,<7.0.0", "isort >=5.0.0,<7.0.0", - "ruff ==0.1.5", + "ruff ==0.1.7", "types-requests >=2.20.0,<3.0.0", "types-PyYAML >=6.0.0,<7.0.0", "wheel >=0.40.0", ] +docs = [ + "mike >=1.1.0,<3.0.0", + "mkdocs >=1.4.0,<2.0.0", + "mkdocs-autorefs ==0.5.0", + "mkdocs-click >=0.8.0", + "mkdocs-include-markdown-plugin >=4.0.0,<7.0.0", + "mkdocs-material >=9.0.0,<10.0.0", + "mkdocs-material-extensions ==1.3.1", + "mkdocstrings[python] ==0.24.0", + "mkdocs-mermaid2-plugin ==1.1.1", +] [tool.setuptools.dynamic] version = {file = "VERSION"} [project.scripts] -checker = "checker.__main__:main" +checker = "checker.__main__:cli" [tool.setuptools.packages.find] exclude = ["tests*"] -# --------------------------------------------------------------------------- # - - [tool.mypy] no_incremental = true ignore_missing_imports = true @@ -98,7 +111,7 @@ exclude = [ [tool.pytest.ini_options] minversion = "6.0" python_files = "test_*.py" -addopts = "--cov=checker/ --cov-report=term-missing tests/" +addopts = "--cov=checker/ --cov-report=term-missing --cov-report=xml tests/" testpaths = [ "tests", ".tmp" @@ -138,7 +151,8 @@ exclude = [ "venv", "tests", - ".tmp" + ".tmp", + "__init__.py", ] # Allow autofix for all enabled rules (when `--fix`) is provided. 
@@ -155,3 +169,7 @@ lines_after_imports = 2 combine_as_imports = true default_section = "THIRDPARTY" known_first_party = "checker,tests" + +[tool.black] +line-length = 120 +target-version = ['py312'] diff --git a/tests/configs/test_config_utils.py b/tests/configs/test_config_utils.py new file mode 100644 index 0000000..1282986 --- /dev/null +++ b/tests/configs/test_config_utils.py @@ -0,0 +1,99 @@ +import inspect +from pathlib import Path + +import pydantic +import pytest + +from checker.configs.utils import CustomBaseModel, YamlLoaderMixin +from checker.exceptions import BadConfig + + +class TestCustomBaseModel: + class SomeTestModel(CustomBaseModel): + a: int + b: str + + def test_valid_config(self) -> None: + self.SomeTestModel(a=1, b="123") + + def test_extra_argument_error(self) -> None: + with pytest.raises(pydantic.ValidationError): + self.SomeTestModel(a=1, b="123", c=1) + + def test_invalid_type_error(self) -> None: + with pytest.raises(pydantic.ValidationError): + self.SomeTestModel(a=1, b=123) + + def test_no_required_argument_error(self) -> None: + with pytest.raises(pydantic.ValidationError): + self.SomeTestModel(a=1) + + +class TestYamlLoader: + class SomeTestModel(CustomBaseModel, YamlLoaderMixin["SomeTestModel"]): + a: int + b: str + + def test_load_valid_yaml(self, tmp_path: Path) -> None: + yaml_content = inspect.cleandoc( + """ + a: 1 + b: "123" + """ + ) + yaml_path = tmp_path / "test.yaml" + yaml_path.write_text(yaml_content) + + self.SomeTestModel.from_yaml(yaml_path) + + def test_no_file_error(self, tmp_path: Path) -> None: + yaml_path = tmp_path / "test.yaml" + + with pytest.raises(BadConfig): + self.SomeTestModel.from_yaml(yaml_path) + + def test_invalid_yaml_error(self, tmp_path: Path) -> None: + yaml_content = inspect.cleandoc( + """ + a: 1 b: 123 + """ + ) + yaml_path = tmp_path / "test.yaml" + yaml_path.write_text(yaml_content) + + with pytest.raises(BadConfig): + self.SomeTestModel.from_yaml(yaml_path) + + def test_invalid_types_error(self, tmp_path: Path) -> None: + yaml_content = inspect.cleandoc( + """ + a: 1 + b: 123 + """ + ) + yaml_path = tmp_path / "test.yaml" + yaml_path.write_text(yaml_content) + + with pytest.raises(BadConfig): + self.SomeTestModel.from_yaml(yaml_path) + + def test_to_yaml_method(self, tmp_path: Path) -> None: + yaml_path = tmp_path / "test.yaml" + model = self.SomeTestModel(a=1, b="123") + model.to_yaml(yaml_path) + + assert yaml_path.exists() + assert yaml_path.read_text() == "a: 1\nb: '123'\n" + + def test_get_json_schema(self, tmp_path: Path) -> None: + schema = self.SomeTestModel.get_json_schema() + assert schema == { + "title": "SomeTestModel", + "type": "object", + "properties": { + "a": {"title": "A", "type": "integer"}, + "b": {"title": "B", "type": "string"}, + }, + "required": ["a", "b"], + "additionalProperties": False, + } diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..ac1cfda --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import pytest + + +def pytest_addoption(parser: pytest.Parser) -> None: + parser.addoption( + "--skip-integration", + action="store_true", + dest="skip_integration", + default=False, + help="skip integration tests", + ) + parser.addoption( + "--skip-unit", + action="store_true", + dest="skip_unit", + default=False, + help="skip unit tests", + ) + parser.addoption( + "--skip-doctest", + action="store_true", + dest="skip_unit", + default=False, + help="skip doctest", + ) + + +def pytest_configure(config: pytest.Config) 
-> None: + config.addinivalue_line("markers", "integration: mark test as integration test") + + # Add --doctest-modules by default if --skip-doctest is not set + if not config.getoption("--skip-doctest"): + config.addinivalue_line("addopts", "--doctest-modules") + + +def pytest_collection_modifyitems( + config: pytest.Config, + items: list[pytest.Item], +) -> None: + skip_integration = pytest.mark.skip(reason="--skip-integration option was provided") + skip_unit = pytest.mark.skip(reason="--skip-unit option was provided") + skip_doctest = pytest.mark.skip(reason="--skip-doctest option was provided") + + for item in items: + if isinstance(item, pytest.DoctestItem): + item.add_marker(skip_doctest) + elif "integration" in item.keywords: + if config.getoption("--skip-integration"): + item.add_marker(skip_integration) + else: + if config.getoption("--skip-unit"): + item.add_marker(skip_unit) diff --git a/tests/course/test_config.py b/tests/course/test_config.py deleted file mode 100644 index 7e7a20f..0000000 --- a/tests/course/test_config.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -import pytest - -from checker.course.config import CourseConfig -from checker.exceptions import BadConfig - - -DATA_FOLDER = Path(__file__).parents[1] / 'data' / 'config' - - -class TestConfig: - def test_(self) -> None: - pass - - def test_wrong_file(self, tmp_path: Path) -> None: - with pytest.raises(BadConfig): - CourseConfig.from_yaml(DATA_FOLDER / 'not-existed-file.yml') - - with pytest.raises(BadConfig): - CourseConfig.from_yaml(DATA_FOLDER / 'bad-config.yml') - - tmp_file = tmp_path / 'empty-file.yml' - tmp_file.touch() - with pytest.raises(BadConfig): - CourseConfig.from_yaml(tmp_file) diff --git a/tests/course/test_driver.py b/tests/course/test_driver.py deleted file mode 100644 index b53fe11..0000000 --- a/tests/course/test_driver.py +++ /dev/null @@ -1,180 +0,0 @@ -from __future__ import annotations - -from datetime import datetime -from pathlib import Path - -import pytest - -from checker.course import Group, Task -from checker.course.driver import CourseDriver -from checker.exceptions import BadConfig - - -DATA_FOLDER = Path(__file__).parents[1] / 'data' / 'driver' - - -@pytest.fixture(scope='function') -def test_group() -> Group: - group = Group( - name='test_group', - start=datetime(2020, 1, 1), - deadline=datetime(2020, 1, 2), - second_deadline=datetime(2020, 1, 3), - enabled=True, - marked=False, - tasks=[], - ) - group.tasks.extend([ - Task( - group=group, - name=f'test_task_{i}', - max_score=1, - enabled=True, - scoring_func='max', - review=False, - marked=False, - ) - for i in range(5) - ]) - return group - -@pytest.fixture(scope='function') -def test_task(test_group: Group) -> Task: - return test_group.tasks[0] - - -class TestDriver: - - @pytest.mark.parametrize('layout,location', [ - ('flat', 'tests'), - ('groups', '.'), - ('lectures', '.'), - ]) - def test_deadlines_config(self, layout: str, location: str) -> None: - driver = CourseDriver(Path(''), repo_type='private', layout=layout) - assert driver.get_deadlines_file_path(raise_if_not_exists=False) == Path('') / location / '.deadlines.yml' - - with pytest.raises(BadConfig): - driver.get_deadlines_file_path(raise_if_not_exists=True) - - def test_deadlines_not_in_public_repo(self) -> None: - driver = CourseDriver(Path(''), repo_type='public', layout='flat') - with pytest.raises(BadConfig): - driver.get_deadlines_file_path(raise_if_not_exists=False) - - 
@pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', None), - ('private', 'flat', None), - ('public', 'groups', 'lectures/test_group'), - ('private', 'groups', 'lectures/test_group'), - ('public', 'lectures', 'test_group/lecture'), - ('private', 'lectures', 'test_group/lecture'), - ]) - def test_get_lecture_dir(self, repo_type: str, layout: str, location: str | None, test_group: Group) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_group_lecture_dir(test_group, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_group_lecture_dir(test_group, check_exists=True) is None - - @pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', None), - ('private', 'flat', None), - ('public', 'groups', 'solutions/test_group'), - ('private', 'groups', 'solutions/test_group'), - ('public', 'lectures', 'test_group/review'), - ('private', 'lectures', 'test_group/review'), - ]) - def test_get_review_dir(self, repo_type: str, layout: str, location: str | None, test_group: Group) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_group_submissions_review_dir(test_group, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_group_submissions_review_dir(test_group, check_exists=True) is None - - @pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', None), - ('private', 'flat', None), - ('public', 'groups', 'test_group'), - ('private', 'groups', 'test_group'), - ('public', 'lectures', 'test_group'), - ('private', 'lectures', 'test_group'), - ]) - def test_get_group_dir(self, repo_type: str, layout: str, location: str | None, test_group: Group) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_group_dir(test_group, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_group_dir(test_group, check_exists=True) is None - - @pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', 'test_task_0'), - ('private', 'flat', 'test_task_0'), - ('public', 'groups', 'test_group/test_task_0'), - ('private', 'groups', 'test_group/test_task_0'), - ('public', 'lectures', 'test_group/tasks/test_task_0'), - ('private', 'lectures', 'test_group/tasks/test_task_0'), - ]) - def test_get_task_dir(self, repo_type: str, layout: str, location: str | None, test_task: Task) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_task_dir(test_task, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_task_dir(test_task, check_exists=True) is None - - @pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', 'test_task_0'), - ('private', 'flat', 'tests/test_task_0'), - ('public', 'groups', 'test_group/test_task_0'), - ('private', 'groups', 'tests/test_group/test_task_0'), - ('public', 'lectures', 'test_group/tasks/test_task_0'), - ('private', 'lectures', 'test_group/tasks/test_task_0/solution'), - ]) - def test_get_task_solution_dir(self, repo_type: str, layout: str, location: str | None, test_task: Task) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_task_solution_dir(test_task, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_task_solution_dir(test_task, check_exists=True) is None - - 
@pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', 'test_task_0'), - ('private', 'flat', 'test_task_0'), - ('public', 'groups', 'test_group/test_task_0'), - ('private', 'groups', 'test_group/test_task_0'), - ('public', 'lectures', 'test_group/tasks/test_task_0'), - ('private', 'lectures', 'test_group/tasks/test_task_0/template'), - ]) - def test_get_task_template_dir(self, repo_type: str, layout: str, location: str | None, test_task: Task) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_task_template_dir(test_task, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_task_template_dir(test_task, check_exists=True) is None - - @pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', 'test_task_0'), - ('private', 'flat', 'test_task_0'), - ('public', 'groups', 'test_group/test_task_0'), - ('private', 'groups', 'test_group/test_task_0'), - ('public', 'lectures', 'test_group/tasks/test_task_0'), - ('private', 'lectures', 'test_group/tasks/test_task_0/public'), - ]) - def test_get_task_public_test_dir(self, repo_type: str, layout: str, location: str | None, test_task: Task) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_task_public_test_dir(test_task, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_task_public_test_dir(test_task, check_exists=True) is None - - @pytest.mark.parametrize('repo_type,layout,location', [ - ('public', 'flat', None), - ('private', 'flat', 'tests/test_task_0'), - ('public', 'groups', None), - ('private', 'groups', 'tests/test_group/test_task_0'), - ('public', 'lectures', None), - ('private', 'lectures', 'test_group/tasks/test_task_0/private'), - ]) - def test_get_task_private_test_dir(self, repo_type: str, layout: str, location: str | None, test_task: Task) -> None: - driver = CourseDriver(Path(''), repo_type=repo_type, layout=layout) - assert driver.get_task_private_test_dir(test_task, check_exists=False) == ((Path('') / location) if location else None) - assert driver.get_task_private_test_dir(test_task, check_exists=True) is None - - @pytest.mark.parametrize('layout,raw,task_name', [ - ('flat', 'foo', None), - ('flat', 'task0/main.c', 'task0'), - ('flat', 'task0/dir/main.c', 'task0'), - ('groups', 'group0/task0/main.c', 'task0'), - ('groups', 'group0/task0/dir/main.c', 'task0'), - ('lectures', 'group0/tasks/task0/main.c', 'task0'), - ('lectures', 'group0/tasks/task0/dir/main.c', 'task0'), - ]) - def test_get_task_dir_name(self, layout: str, raw: str, task_name: str | None) -> None: - driver = CourseDriver(Path(''), layout=layout) - assert driver.get_task_dir_name(raw) == task_name diff --git a/tests/course/test_schedule.py b/tests/course/test_schedule.py deleted file mode 100644 index 8a2d042..0000000 --- a/tests/course/test_schedule.py +++ /dev/null @@ -1,134 +0,0 @@ -from __future__ import annotations - -from datetime import datetime, timedelta -from pathlib import Path - -import pytest - -from checker.course.schedule import CourseSchedule, Group, Task -from checker.exceptions import BadConfig - - -DATA_FOLDER = Path(__file__).parents[1] / 'data' / 'schedule' - - -class TestTask: - @pytest.fixture(scope='function') - def sample_group(self) -> Group: - return Group( - name='test_group', - start=datetime(2000, 1, 1), - deadline=datetime(2050, 1, 1), - second_deadline=datetime(2100, 1, 1), - ) - - def test_basics(self, sample_group: Group) -> 
None: - task_minimal = Task( - group=sample_group, - name='test_task', - max_score=10, - enabled=True, - ) - - assert task_minimal.is_enabled - - assert task_minimal.is_started - assert not task_minimal.is_ended - - @pytest.mark.parametrize('reserved_name', [ - 'task', 'test', 'solution', 'template', - ]) - def test_reserved_task_name(self, sample_group: Group, reserved_name: str) -> None: - with pytest.raises(AssertionError, match=f'.*{reserved_name}.*reserved.*'): - Task( - group=sample_group, - name=reserved_name, - max_score=10, - ) - - -class TestGroup: - def test_basics(self) -> None: - group_minimal = Group( - name='test_group', - start=datetime(2000, 1, 1), - deadline=datetime(2002, 1, 1), - second_deadline=datetime(2004, 1, 1), - ) - - assert group_minimal.is_enabled - # TODO: mock and test ended started ect - - group_max = Group( - name='test_group', - start=datetime(2000, 1, 1), - deadline=datetime(2002, 1, 1), - second_deadline=datetime(2004, 1, 1), - enabled=True, - marked=True, - ) - - assert group_max.is_enabled - # TODO: mock and test ended started ect - - @pytest.mark.parametrize('deadline,second_deadline,submit_time,extra_time,percentage', [ - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2024, 1, 1), None, 0), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2026, 1, 1), None, 0), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2020, 1, 1), None, 1), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2018, 1, 1), None, 1), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2022, 1, 1), None, 0.5), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2021, 1, 1), None, 0.75), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2023, 1, 1), None, 0.25), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2021, 1, 1), timedelta(days=365), 1), - (datetime(2020, 1, 1), datetime(2024, 1, 1), datetime(2024, 1, 1), timedelta(days=2*365), 0.5), - ]) - def test_get_deadline_percentage( - self, - deadline: datetime, - second_deadline: datetime, - submit_time: datetime, - extra_time: timedelta | None, - percentage: float, - ) -> None: - group = Group( - name='test_group', - start=datetime(2000, 1, 1), - deadline=deadline, - second_deadline=second_deadline, - ) - assert group.get_deadline_percentage(submit_time, extra_time) == pytest.approx(percentage, 0.01) - - def test_get_is_overdue(self) -> None: - group = Group( - name='test_group', - start=datetime(2000, 1, 1), - deadline=datetime(2002, 1, 1), - second_deadline=datetime(2004, 1, 1), - ) - - assert not group.get_is_overdue_first(datetime(2001, 1, 1)) - assert group.get_is_overdue_first(datetime(2003, 1, 1)) - assert group.get_is_overdue_first(datetime(2005, 1, 1)) - - assert not group.get_is_overdue_second(datetime(2003, 1, 1)) - assert group.get_is_overdue_second(datetime(2005, 1, 1)) - - -class TestSchedule: - def test_wrong_file(self, tmp_path: Path) -> None: - with pytest.raises(BadConfig): - CourseSchedule(DATA_FOLDER / 'not-existed-file.yml') - - with pytest.raises(BadConfig): - CourseSchedule(DATA_FOLDER / 'bad-config.yml') - - tmp_file = tmp_path / 'empty-file.yml' - tmp_file.touch() - with pytest.raises(BadConfig): - CourseSchedule(tmp_file) - - def test_get_tasks(self) -> None: - pass - - def test_get_groups(self) -> None: - pass diff --git a/tests/examples/test_course_configs.py b/tests/examples/test_course_configs.py deleted file mode 100644 index 3e44e46..0000000 --- a/tests/examples/test_course_configs.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations 
- -from pathlib import Path - -from checker.course.config import CourseConfig -from checker.course.schedule import CourseSchedule - - -EXAMPLES_FOLDER = Path(__file__).parents[2] / 'examples' - - -class TestCourse: - def test_read_course(self) -> None: - CourseConfig.from_yaml(EXAMPLES_FOLDER / '.course.yml') - - -class TestDeadlines: - def test_read_course(self) -> None: - CourseSchedule(EXAMPLES_FOLDER / '.deadlines.yml') diff --git a/tests/examples/test_testers_configs.py b/tests/examples/test_testers_configs.py deleted file mode 100644 index 3c811a4..0000000 --- a/tests/examples/test_testers_configs.py +++ /dev/null @@ -1,12 +0,0 @@ -from pathlib import Path - -from checker.testers.python import PythonTester - - -EXAMPLES_FOLDER = Path(__file__).parents[2] / 'examples' - - -class TestTestersConfigs: - - def test_python_config(self) -> None: - PythonTester.TaskTestConfig.from_json(EXAMPLES_FOLDER / '.tester.python.json') diff --git a/tests/executors/test_sandbox.py b/tests/executors/test_sandbox.py deleted file mode 100644 index 1191461..0000000 --- a/tests/executors/test_sandbox.py +++ /dev/null @@ -1,188 +0,0 @@ -from __future__ import annotations - -import os -import sys -from pathlib import Path - -import pytest - -from checker.exceptions import ExecutionFailedError - - -try: - import unshare -except ImportError: - unshare = None - -from checker.executors.sandbox import Sandbox - - -skip_without_unshare = pytest.mark.skipif( - unshare is None, reason='unshare lib is unavailable' -) - - -class TestSandbox: - def test_dry_run(self, capsys: pytest.CaptureFixture[str]) -> None: - sandbox = Sandbox(dry_run=True) - - failed_command_never_execute = 'false' - sandbox(failed_command_never_execute) - - captured = capsys.readouterr() - assert len(captured.out) + len(captured.err) > 0 - assert failed_command_never_execute in (captured.out + captured.err) - - def test_verbose(self, capsys: pytest.CaptureFixture[str]) -> None: - sandbox = Sandbox() - - simple_command = 'true' - sandbox(simple_command, verbose=True) - - captured = capsys.readouterr() - assert len(captured.out) + len(captured.err) > 0 - assert simple_command in (captured.out + captured.err) - - def test_execute_external_str(self, tmp_path: Path) -> None: - sandbox = Sandbox() - - tmp_file = tmp_path / 'test.tmp' - - create_file = f'touch {tmp_file.as_posix()}' - - sandbox(create_file, shell=True) - assert tmp_file.exists() - - def test_execute_external_list(self, tmp_path: Path) -> None: - sandbox = Sandbox() - - tmp_file = tmp_path / 'test.tmp' - - create_file = ['touch', str(tmp_file.as_posix())] - - sandbox(create_file) - assert tmp_file.exists() - - def test_execute_callable(self, tmp_path: Path) -> None: - sandbox = Sandbox() - - tmp_file = tmp_path / 'test.tmp' - - def create_file() -> None: - tmp_file.touch() - - sandbox(create_file) - assert tmp_file.exists() - - def test_sandbox_blocks_env(self) -> None: - sandbox = Sandbox() - - # test clear all not allowed variables - os.environ['NOT_EXISTED_VAR_123'] = 'true' - cmd_assert_blacklist_env_not_exists = '[ -z "${NOT_EXISTED_VAR_123}" ]' - cmd_assert_blacklist_env_exists = '[ ! 
-z "${NOT_EXISTED_VAR_123}" ]' - - sandbox(cmd_assert_blacklist_env_not_exists, env_sandbox=True, shell=True) - sandbox(cmd_assert_blacklist_env_not_exists, sandbox=True, shell=True) - sandbox(cmd_assert_blacklist_env_exists, env_sandbox=False, shell=True) - - del os.environ['NOT_EXISTED_VAR_123'] - - # test not clear allowed other variables - if 'PATH' not in os.environ: - os.environ['PATH'] = 'true' - cmd_assert_whitelist_env_not_exists = '[ -z "${PATH}" ]' - cmd_assert_whitelist_env_exists = '[ ! -z "${PATH}" ]' - - sandbox(cmd_assert_whitelist_env_exists, env_sandbox=True, shell=True) - sandbox(cmd_assert_whitelist_env_exists, sandbox=True, shell=True) - with pytest.raises(ExecutionFailedError): - sandbox(cmd_assert_whitelist_env_not_exists, env_sandbox=False, shell=True) - - if os.environ['PATH'] == 'true': - del os.environ['PATH'] - - @skip_without_unshare - def test_sandbox_blocks_web(self) -> None: - sandbox = Sandbox() - - command_ping = ['ping', '-c 2', 'google.com.'] - with pytest.raises(ExecutionFailedError): - sandbox(command_ping, sandbox=True) - - def test_timeout(self) -> None: - sandbox = Sandbox() - - timeout_command = 'sleep 0.5' - with pytest.raises(ExecutionFailedError): - sandbox(timeout_command, timeout=0.2, shell=True) - - @pytest.mark.parametrize('command,output', [ - ('>&1 echo "std"', 'std\n'), - ('>&2 echo "err"', 'err\n'), - ('>&1 echo "std" && >&2 echo "err"', 'std\nerr\n'), - ('>&1 echo "std1" && >&2 echo "err1" && >&1 echo "std2" && >&2 echo "err2"', 'std1\nerr1\nstd2\nerr2\n'), - ]) - def test_output_catching_external(self, command: str, output: str) -> None: - sandbox = Sandbox() - - assert sandbox(command, shell=True) is None - - assert sandbox(command, capture_output=True, shell=True) == output - - def test_output_catching_callable(self) -> None: - sandbox = Sandbox() - - def print_std() -> None: - print('std') - assert sandbox(print_std) is None - assert sandbox(print_std, capture_output=True) == 'std\n' - - def print_error() -> None: - print('error', file=sys.stderr) - assert sandbox(print_error, capture_output=True) == 'error\n' - - def print_std_error() -> None: - print('std') - print('error', file=sys.stderr) - assert sandbox(print_std_error, capture_output=True) == 'std\nerror\n' - - def print_std_error_complicated() -> None: - print('std1') - print('error1', file=sys.stderr) - print('std2') - print('error2', file=sys.stderr) - assert sandbox(print_std_error_complicated, capture_output=True) == 'std1\nerror1\nstd2\nerror2\n' - - @pytest.mark.parametrize('command,output', [ - ('>&1 echo "std"', 'std\n'), - ('>&2 echo "err"', 'err\n'), - ('>&1 echo "std" && >&2 echo "err"', 'std\nerr\n'), - ('>&1 echo "std1" && >&2 echo "err1" && >&1 echo "std2" && >&2 echo "err2"', 'std1\nerr1\nstd2\nerr2\n'), - ]) - def test_output_catching_while_error(self, command: str, output: str) -> None: - sandbox = Sandbox() - - command += ' && false' - - with pytest.raises(ExecutionFailedError) as exc_info: - sandbox(command, capture_output=True, shell=True) - - assert exc_info.value.output == output - - @pytest.mark.parametrize('command,output', [ - ('>&1 echo "std"', 'std\n'), - ('>&2 echo "err"', 'err\n'), - ('>&1 echo "std" && >&2 echo "err"', 'std\nerr\n'), - ('>&1 echo "std1" && >&2 echo "err1" && >&1 echo "std2" && >&2 echo "err2"', 'std1\nerr1\nstd2\nerr2\n'), - ]) - def test_output_catching_while_timeout(self, command: str, output: str) -> None: - sandbox = Sandbox() - - command += ' && sleep 0.5' - - with pytest.raises(ExecutionFailedError) as exc_info: - 
sandbox(command, capture_output=True, timeout=0.2, shell=True) - - assert output in exc_info.value.output - assert 'exceeded time limit' in exc_info.value.output diff --git a/tests/plugins/test_aggregate.py b/tests/plugins/test_aggregate.py new file mode 100644 index 0000000..7944af4 --- /dev/null +++ b/tests/plugins/test_aggregate.py @@ -0,0 +1,89 @@ +from __future__ import annotations + +from typing import Any + +import pytest +from pydantic import ValidationError + +from checker.exceptions import PluginExecutionFailed +from checker.plugins.aggregate import AggregatePlugin + + +class TestAggregatePlugin: + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ({"scores": [0.5, 1.0, 1], "weights": [1, 2, 3], "strategy": "mean"}, None), + ({"scores": [0.5, 1.0, 1], "weights": [1, 2, 3]}, None), + ({"scores": [0.5, 1.0, 1], "weights": None}, None), + ({"scores": [0.5, 1.0, 1], "strategy": "mean"}, None), + ({"scores": [0.5, 1.0, 1]}, None), + ( + { + "scores": [0.5, 1.0, 1], + "weights": [1, 2, 3], + "strategy": "invalid_strategy", + }, + ValidationError, + ), + ({}, ValidationError), + ], + ) + def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None: + if expected_exception: + with pytest.raises(expected_exception): + AggregatePlugin.Args(**parameters) + else: + AggregatePlugin.Args(**parameters) + + @pytest.mark.parametrize( + "scores, weights, strategy, expected", + [ + ([10, 20, 30], None, "mean", 20.0), + ([1, 2, 3], [0.5, 0.5, 0.5], "sum", 3.0), + ([2, 4, 6], [1, 2, 3], "min", 2.0), + ([5, 10, 15], [1, 1, 1], "max", 15.0), + ([3, 3, 3], [1, 1, 1], "product", 27.0), + ], + ) + def test_aggregate_strategies( + self, + scores: list[float], + weights: list[float] | None, + strategy: str, + expected: float, + ) -> None: + plugin = AggregatePlugin() + args = AggregatePlugin.Args(scores=scores, weights=weights, strategy=strategy) + + result = plugin._run(args) + assert expected == result.percentage + assert f"Score: {expected:.2f}" in result.output + + def test_wrong_strategy(self) -> None: + with pytest.raises(ValidationError): + AggregatePlugin.Args(scores=[1, 2, 3], strategy="invalid_strategy") + + @pytest.mark.parametrize( + "scores, weights", + [ + ([1, 2, 3], [1, 2]), + ([1], [1, 2]), + ([], []), + ], + ) + def test_length_mismatch(self, scores: list[float], weights: list[float]) -> None: + # TODO: move to args validation + plugin = AggregatePlugin() + args = AggregatePlugin.Args(scores=scores, weights=weights) + + with pytest.raises(PluginExecutionFailed) as exc_info: + plugin._run(args) + assert "Length of scores" in str(exc_info.value) + + def test_default_weights(self) -> None: + plugin = AggregatePlugin() + args = AggregatePlugin.Args(scores=[10, 20, 30], strategy="mean") + + result = plugin._run(args) + assert result.percentage == 20.0 diff --git a/tests/plugins/test_load_plugins.py b/tests/plugins/test_load_plugins.py new file mode 100644 index 0000000..53529f0 --- /dev/null +++ b/tests/plugins/test_load_plugins.py @@ -0,0 +1 @@ +# TODO: test plugin loader diff --git a/tests/plugins/test_manytask.py b/tests/plugins/test_manytask.py new file mode 100644 index 0000000..2eaf465 --- /dev/null +++ b/tests/plugins/test_manytask.py @@ -0,0 +1,264 @@ +from __future__ import annotations + +from datetime import datetime +from os.path import basename +from tempfile import NamedTemporaryFile, TemporaryDirectory +from typing import Any, Type + +import pytest +from pydantic import HttpUrl, ValidationError +from pytest_mock import 
MockFixture +from requests_mock import Mocker + +from checker.plugins.manytask import ManytaskPlugin, PluginExecutionFailed + + +class TestManytaskPlugin: + BASE_URL = HttpUrl("https://test.manytask.org") + REPORT_TOKEN = "report_token" + TEST_TASK_NAME = "some_task" + TEST_USERNAME = "username" + TEST_SCORE = 1.0 + TEST_ORIGIN = "./" + TEST_PATTERNS = ["*"] + TEST_NOW_DATETIME = datetime(2023, 12, 21, 0, 52, 36, 166028).astimezone() + TEST_NOW_DATETIME_STR = "2023-12-21T00:52:36.166028+0600" + TEST_CHECK_DEADLINE = True + + @staticmethod + def get_default_args_dict() -> dict[str, Any]: + return { + "username": TestManytaskPlugin.TEST_USERNAME, + "task_name": TestManytaskPlugin.TEST_TASK_NAME, + "score": TestManytaskPlugin.TEST_SCORE, + "report_url": TestManytaskPlugin.BASE_URL, + "report_token": TestManytaskPlugin.REPORT_TOKEN, + "check_deadline": TestManytaskPlugin.TEST_CHECK_DEADLINE, + } + + @staticmethod + def get_default_full_args_dict() -> dict[str, Any]: + args_dict = TestManytaskPlugin.get_default_args_dict() + args_dict.update( + { + "origin": TestManytaskPlugin.TEST_ORIGIN, + "patterns": TestManytaskPlugin.TEST_PATTERNS, + "send_time": TestManytaskPlugin.TEST_NOW_DATETIME_STR, + } + ) + return args_dict + + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ({}, None), + ( + { + "origin": "test/", + "patterns": ["*.py"], + }, + None, + ), + ( + { + "origin": "/test/test/test", + "patterns": ["*.py", "**.*", "test"], + }, + None, + ), + ( + { + "origin": "./", + }, + None, + ), + ( + { + "origin": "", + "patterns": [], + }, + None, + ), + ( + { + "score": 0.01, + }, + None, + ), + ( + { + "score": 1.0, + }, + None, + ), + ( + { + "score": 1.5, + }, + None, + ), + ({"send_time": TEST_NOW_DATETIME}, None), + ({"send_time": TEST_NOW_DATETIME_STR}, None), + ({"send_time": "invalidtime"}, ValidationError), + ({"report_url": "invalidurl"}, ValidationError), + ], + ) + def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Type[BaseException] | None) -> None: + args = self.get_default_args_dict() + args.update(parameters) + if expected_exception: + with pytest.raises(expected_exception): + ManytaskPlugin.Args(**args) + else: + ManytaskPlugin.Args(**args) + + def test_empty_args_raise_validation_error(self) -> None: + with pytest.raises(ValidationError): + ManytaskPlugin.Args(**{}) + + def test_date_without_timezone_throws_warning(self) -> None: + plugin = ManytaskPlugin() + args = self.get_default_args_dict() + args["send_time"] = self.TEST_NOW_DATETIME.replace(tzinfo=None) + + with Mocker() as mocker: + mocker.post(f"{self.BASE_URL}api/report", status_code=200, text='{"score": 1.0}') + + output = plugin.run(args) + + assert "Warning: No timezone" in output.output + + def test_date_with_timezone_doesnt_throw_warning(self) -> None: + plugin = ManytaskPlugin() + args = self.get_default_args_dict() + args["send_time"] = self.TEST_NOW_DATETIME.astimezone() + + with Mocker() as mocker: + mocker.post(f"{self.BASE_URL}api/report", status_code=200, text='{"score": 1.0}') + + output = plugin.run(args) + + assert "Warning: No timezone" not in output.output + + @pytest.mark.parametrize( + "extensions_to_create, patterns_to_take, taken_files_num", + [ + ([".py", ".yml", ".txt"], ["*"], 3), + ([".py", ".yml", ".txt"], ["*.py"], 1), + ([".py", ".yml", ".py", ".yml", ".txt"], ["*.py", "*.yml"], 4), + ([".py", ".yml", ".txt"], ["*.not"], 0), + ], + ) + def test_collect_files_to_send( + self, + mocker: MockFixture, + extensions_to_create: list[str], + 
patterns_to_take: list[str], + taken_files_num: int, + ) -> None: + with TemporaryDirectory() as tdir: + tempfiles = [] + expected_filenames = [] + + for extension in extensions_to_create: + ntfile = NamedTemporaryFile(dir=tdir, suffix=extension) + tempfiles.append(ntfile) + if f"*{extension}" in patterns_to_take or "*" in patterns_to_take: + expected_filenames.append(basename(tempfiles[-1].name)) + + mocker.patch("builtins.open", mocker.mock_open(read_data=b"File content")) + result = ManytaskPlugin._collect_files_to_send(tdir, patterns_to_take) + + assert result is not None, "Didn't collect files" + assert len(result) == taken_files_num, "Wrong file quantity are collected" + assert sorted(result.keys()) == sorted(expected_filenames), "Wrong files are collected" + + if taken_files_num: + open.assert_called_with(mocker.ANY, "rb") # type: ignore[attr-defined] + + @pytest.mark.parametrize( + "response_status_code, response_text, expected_exception", + [ + (200, "Success", None), + (408, "Request Timeout", PluginExecutionFailed), + (503, "Service Unavailable", PluginExecutionFailed), + ], + ) + def test_post_with_retries( + self, + response_status_code: int, + response_text: str, + expected_exception: Type[BaseException], + ) -> None: + with Mocker() as mocker: + mocker.post( + f"{self.BASE_URL}api/report", + status_code=response_status_code, + text=response_text, + ) + + if expected_exception: + with pytest.raises(expected_exception) as exc: + ManytaskPlugin._post_with_retries(self.BASE_URL, {"key": "value"}, None) + assert str(response_status_code) in str(exc.value), "Status code wasn't provided in exception message" + assert response_text in str(exc.value), "Error text wasn't provided in exception message" + else: + result = ManytaskPlugin._post_with_retries(self.BASE_URL, {"key": "value"}, None) + assert result.status_code == 200 + assert result.text == "Success" + + def test_plugin_run(self, mocker: MockFixture) -> None: + args_dict = self.get_default_full_args_dict() + result_score = 1.0 + expected_files = {"files": "good"} + expected_data = { + "token": self.REPORT_TOKEN, + "task": self.TEST_TASK_NAME, + "username": self.TEST_USERNAME, + "score": self.TEST_SCORE, + "check_deadline": self.TEST_CHECK_DEADLINE, + "submit_time": self.TEST_NOW_DATETIME_STR, + } + + mocker.patch.object(ManytaskPlugin, "_collect_files_to_send") + ManytaskPlugin._collect_files_to_send.return_value = expected_files # type: ignore[attr-defined] + mocker.patch.object(ManytaskPlugin, "_post_with_retries") + ManytaskPlugin._post_with_retries.return_value.json.return_value = { + "score": result_score + } # type: ignore[attr-defined] + result = ManytaskPlugin().run(args_dict) + + assert result.output == ( + f"Report for task '{self.TEST_TASK_NAME}' for user '{self.TEST_USERNAME}', " + f"requested score: {self.TEST_SCORE}, result score: {result_score}" + ) + + ManytaskPlugin._post_with_retries.assert_called_once_with( + self.BASE_URL, expected_data, expected_files + ) # type: ignore[attr-defined] + + def test_verbose(self, mocker: MockFixture) -> None: + args_dict = self.get_default_full_args_dict() + expected_files = {"files": "good"} + result_score = 1.0 + + mocker.patch.object(ManytaskPlugin, "_collect_files_to_send") + ManytaskPlugin._collect_files_to_send.return_value = expected_files # type: ignore[attr-defined] + mocker.patch.object(ManytaskPlugin, "_post_with_retries") + ManytaskPlugin._post_with_retries.return_value.json.return_value = { + "score": result_score + } # type: ignore[attr-defined] + result = 
ManytaskPlugin().run(args_dict, verbose=True) + + assert str(expected_files) in result.output + + def test_bad_response(self, mocker: MockFixture) -> None: + args_dict = self.get_default_args_dict() + + mocker.patch.object(ManytaskPlugin, "_post_with_retries") + ManytaskPlugin._post_with_retries.return_value.json.return_value = {} # type: ignore[attr-defined] + + with pytest.raises(PluginExecutionFailed) as exc: + ManytaskPlugin().run(args_dict) + + assert str(exc.value) == "Unable to decode response" diff --git a/tests/plugins/test_regex.py b/tests/plugins/test_regex.py new file mode 100644 index 0000000..5226df5 --- /dev/null +++ b/tests/plugins/test_regex.py @@ -0,0 +1,135 @@ +from __future__ import annotations + +from collections.abc import Callable +from inspect import cleandoc +from pathlib import Path +from typing import Any + +import pytest +from pydantic import ValidationError + +from checker.exceptions import PluginExecutionFailed +from checker.plugins.regex import CheckRegexpsPlugin + + +T_CREATE_TEST_FILES = Callable[[dict[str, str]], Path] + + +@pytest.fixture +def create_test_files(tmpdir: Path) -> T_CREATE_TEST_FILES: + def _create_test_files(files_content: dict[str, str]) -> Path: + for filename, content in files_content.items(): + file = Path(tmpdir / filename) + file.parent.mkdir(parents=True, exist_ok=True) + with open(file, "w") as f: + f.write(cleandoc(content)) + return tmpdir + + return _create_test_files + + +class TestCheckRegexpsPlugin: + # TODO: add tests with wrong patterns and regexps + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ( + {"origin": "/tmp/123", "patterns": ["*", "*.py"], "regexps": ["error"]}, + None, + ), + ({"patterns": ["*", "*.py"], "regexps": ["error"]}, ValidationError), + ({"origin": "/tmp/123", "patterns": ["*", "*.py"]}, ValidationError), + ( + {"origin": "/tmp/123", "patterns": None, "regexps": None}, + ValidationError, + ), + ], + ) + def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None: + if expected_exception: + with pytest.raises(expected_exception): + CheckRegexpsPlugin.Args(**parameters) + else: + CheckRegexpsPlugin.Args(**parameters) + + @pytest.mark.parametrize( + "patterns, expected_exception", + [ + (["*.txt"], PluginExecutionFailed), + (["test2.txt", "*cpp"], None), + (["*"], PluginExecutionFailed), + (["*.md"], PluginExecutionFailed), + (["test?.txt"], PluginExecutionFailed), + (["test2.txt", "test1.txt"], PluginExecutionFailed), + ], + ) + def test_pattern_matching( + self, + create_test_files: T_CREATE_TEST_FILES, + patterns: list[str], + expected_exception: Exception | None, + ) -> None: + files_content = { + "test1.txt": "This is a test file with forbidden content", + "test2.txt": "This file is safe", + "test3.md": "Markdown file with forbidden content", + "test4.py": "Python file with forbidden content", + "test5.cpp": "Cpp file with safe content", + } + origin = create_test_files(files_content) + regexps = ["forbidden"] + + plugin = CheckRegexpsPlugin() + args = CheckRegexpsPlugin.Args(origin=str(origin), patterns=patterns, regexps=regexps) + + if expected_exception: + with pytest.raises(expected_exception): + plugin._run(args) + else: + assert plugin._run(args).output == "No forbidden regexps found" + + @pytest.mark.parametrize( + "regexps, expected_exception", + [ + (["not_found"], None), + (["forbidden"], PluginExecutionFailed), + (["fo.*en"], PluginExecutionFailed), + (["not_found", "fo.?bi.?den"], PluginExecutionFailed), + (["fo.?bi.?den", 
"not_found"], PluginExecutionFailed), + ], + ) + def test_check_regexps( + self, + create_test_files: T_CREATE_TEST_FILES, + regexps: list[str], + expected_exception: Exception | None, + ) -> None: + files_content = { + "test1.txt": "This is a test file with forbidden content", + "test2.txt": "This file is safe", + "test3.md": "Markdown file with forbidden content", + "test4.py": "Python file with forbidden content", + "test5.cpp": "Cpp file with safe content", + } + origin = create_test_files(files_content) + patterns = ["*"] + + plugin = CheckRegexpsPlugin() + args = CheckRegexpsPlugin.Args(origin=str(origin), patterns=patterns, regexps=regexps) + + if expected_exception: + with pytest.raises(expected_exception) as exc_info: + plugin._run(args) + assert "matches regexp" in str(exc_info.value) + else: + assert plugin._run(args).output == "No forbidden regexps found" + assert plugin._run(args, verbose=True).output == "No forbidden regexps found" + assert plugin._run(args, verbose=False).output == "No forbidden regexps found" + + def test_non_existent_origin(self) -> None: + plugin = CheckRegexpsPlugin() + args = CheckRegexpsPlugin.Args(origin="/tmp/non_existent", patterns=["*.txt"], regexps=["forbidden"]) + + with pytest.raises(PluginExecutionFailed) as exc_info: + plugin._run(args) + assert "does not exist" in str(exc_info.value) diff --git a/tests/plugins/test_scripts.py b/tests/plugins/test_scripts.py new file mode 100644 index 0000000..e9ab017 --- /dev/null +++ b/tests/plugins/test_scripts.py @@ -0,0 +1,104 @@ +from __future__ import annotations + +from typing import Any +from unittest.mock import patch + +import pytest +from pydantic import ValidationError + +from checker.exceptions import PluginExecutionFailed +from checker.plugins.scripts import RunScriptPlugin + + +class TestRunScriptPlugin: + @pytest.mark.parametrize( + "parameters, expected_exception", + [ + ({"origin": "/tmp", "script": "echo Hello"}, None), + ({"origin": "/tmp", "script": 123}, ValidationError), + ({"origin": "/tmp", "script": ["echo", "Hello"]}, None), + ({"origin": "/tmp", "script": "echo Hello", "timeout": 10}, None), + # TODO: check why timeout is not validated + pytest.param( + {"origin": "/tmp", "script": "echo Hello", "timeout": "10"}, + ValidationError, + marks=pytest.mark.xfail(), + ), + ({"origin": "/tmp", "script": "echo Hello", "isolate": True}, None), + ( + { + "origin": "/tmp", + "script": "echo Hello", + "env_whitelist": ["PATH"], + }, + None, + ), + ], + ) + def test_plugin_args(self, parameters: dict[str, Any], expected_exception: Exception | None) -> None: + if expected_exception: + with pytest.raises(expected_exception): + RunScriptPlugin.Args(**parameters) + else: + RunScriptPlugin.Args(**parameters) + + @pytest.mark.parametrize( + "script, output, expected_exception", + [ + ("echo Hello", "Hello", None), + ("sleep 0.1", "", None), + ("true", "", None), + ("false", "", PluginExecutionFailed), + ("echo Hello && false", "Hello", PluginExecutionFailed), + ], + ) + def test_simple_cases(self, script: str, output: str, expected_exception: Exception | None) -> None: + plugin = RunScriptPlugin() + args = RunScriptPlugin.Args(origin="/tmp", script=script) + + if expected_exception: + with pytest.raises(expected_exception) as exc_info: + plugin._run(args) + assert output in exc_info.value.output + else: + result = plugin._run(args) + assert result.output.strip() == output + + @pytest.mark.parametrize( + "script, timeout, expected_exception", + [ + ("echo Hello", 10, None), + ("sleep 0.5", 1, None), + 
("sleep 0.5", None, None), + ("sleep 1", 0.5, PluginExecutionFailed), + ], + ) + def test_timeout(self, script: str, timeout: float, expected_exception: Exception | None) -> None: + # TODO: check if timeout float + plugin = RunScriptPlugin() + args = RunScriptPlugin.Args(origin="/tmp", script=script, timeout=timeout) + + if expected_exception: + with pytest.raises(expected_exception): + plugin._run(args) + else: + plugin._run(args) + + @pytest.mark.parametrize( + "script, env_whitelist, mocked_env", + [ + ("env", ["CUSTOM_VAR"], {"FILTERED_ONE": "1", "CUSTOM_VAR": "test_value"}), + # TODO: expand this test + ], + ) + def test_run_with_environment_variable( + self, script: str, env_whitelist: list[str], mocked_env: dict[str, str] + ) -> None: + plugin = RunScriptPlugin() + args = RunScriptPlugin.Args(origin="/tmp", script=script, env_whitelist=env_whitelist) + + with patch.dict("os.environ", mocked_env, clear=True): + result = plugin._run(args) + assert "CUSTOM_VAR" in result.output + assert mocked_env["CUSTOM_VAR"] in result.output + assert "FILTERED_ONE" not in result.output diff --git a/tests/test_course.py b/tests/test_course.py new file mode 100644 index 0000000..b20fc3d --- /dev/null +++ b/tests/test_course.py @@ -0,0 +1,160 @@ +from __future__ import annotations + +import shutil +from pathlib import Path + +import pytest + +from checker.configs.deadlines import DeadlinesConfig +from checker.course import Course, FileSystemGroup, FileSystemTask +from checker.exceptions import BadConfig + + +TEST_TIMEZONE = "Europe/Berlin" +TEST_FILE_STRUCTURE = { + "group1": { + "task1_1": {".task.yml": "version: 1", "file1_1_1": "", "file1_1_2": ""}, + "task1_2": {"file1_2_1": "", "file1_2_2": ""}, + }, + "group2": { + "task2_1": {"file2_1_1": "", "file2_1_2": ""}, + "task2_2": {".task.yml": "version: 1"}, + "task2_3": {"file2_3_1": "", "file2_3_2": "", "file2_3_3": "", "file2_3_4": ""}, + }, + "group3": {}, + "group4": { + "task4_1": {".task.yml": "version: 1"}, + }, +} +TEST_EXTRA_FILES = [ + "extra_file1", + "group1/extra_file2", + "group1/task1_1/extra_file3", +] +TEST_DEADLINES_CONFIG = DeadlinesConfig( + version=1, + settings={"timezone": TEST_TIMEZONE}, + schedule=[ + { + "group": "group1", + "start": "2020-10-10 00:00:00", + "enabled": True, + "tasks": [ + {"task": "task1_1", "score": 10}, + {"task": "task1_2", "score": 20}, + ], + }, + { + "group": "group2", + "start": "2020-10-10 00:00:00", + "enabled": False, + "tasks": [ + {"task": "task2_1", "score": 30}, + {"task": "task2_2", "score": 40}, + {"task": "task2_3", "score": 50}, + ], + }, + { + "group": "group3", + "start": "2020-10-10 00:00:00", + "enabled": True, + "tasks": [], + }, + { + "group": "group4", + "start": "2020-10-10 00:00:00", + "enabled": True, + "tasks": [{"task": "task4_1", "score": 50}], + }, + ], +) + + +@pytest.fixture() +def repository_root(tmp_path: Path) -> Path: + """Creates a test repository structure in the temporary directory.""" + for group_name, group in TEST_FILE_STRUCTURE.items(): + group_path = tmp_path / group_name + group_path.mkdir() + for task_name, task in group.items(): + task_path = group_path / task_name + task_path.mkdir() + for filename, content in task.items(): + with open(task_path / filename, "w") as f: + f.write(content) + + for extra_file in TEST_EXTRA_FILES: + with open(tmp_path / extra_file, "w") as f: + f.write("") + + return tmp_path + + +class TestCourse: + def test_init(self, repository_root: Path) -> None: + test_course = Course(deadlines=TEST_DEADLINES_CONFIG, 
repository_root=repository_root) + assert test_course.repository_root == repository_root + assert test_course.deadlines == TEST_DEADLINES_CONFIG + + def test_validate(self, repository_root: Path) -> None: + test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root) + + try: + test_course.validate() + except Exception as e: + pytest.fail(f"Validation failed: {e}") + + def test_validate_with_no_group(self, repository_root: Path) -> None: + shutil.rmtree(repository_root / "group1") + with pytest.raises(BadConfig): + Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root).validate() + + def test_validate_with_no_task(self, repository_root: Path) -> None: + shutil.rmtree(repository_root / "group1" / "task1_1") + with pytest.raises(BadConfig): + Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root).validate() + + def test_init_task_with_bad_config(self, repository_root: Path) -> None: + with open(repository_root / "group1" / "task1_1" / Course.TASK_CONFIG_NAME, "w") as f: + f.write("bad_config") + + with pytest.raises(BadConfig): + Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root) + + @pytest.mark.parametrize("enabled, expected_num_groups", [(None, 4), (True, 3), (False, 1)]) + def test_get_groups(self, enabled: bool | None, expected_num_groups, repository_root: Path) -> None: + test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root) + + groups = test_course.get_groups(enabled=enabled) + assert isinstance(groups, list) + assert all(isinstance(group, FileSystemGroup) for group in groups) + assert len(groups) == expected_num_groups + + @pytest.mark.parametrize( + "enabled, expected_num_tasks", + [(None, 6), (True, 3), pytest.param(False, 3, marks=pytest.mark.xfail())], + ) + def test_get_tasks(self, enabled: bool | None, expected_num_tasks, repository_root: Path) -> None: + test_course = Course(deadlines=TEST_DEADLINES_CONFIG, repository_root=repository_root) + + tasks = test_course.get_tasks(enabled=enabled) + assert isinstance(tasks, list) + assert all(isinstance(task, FileSystemTask) for task in tasks) + assert len(tasks) == expected_num_tasks + + def test_search_potential_groups(self, repository_root: Path) -> None: + potential_groups = Course._search_potential_groups(repository_root) + assert len(potential_groups) == len(TEST_FILE_STRUCTURE) + for group in potential_groups: + assert isinstance(group, FileSystemGroup) + assert len(group.tasks) == len(TEST_FILE_STRUCTURE[group.name]) + for task in group.tasks: + assert isinstance(task, FileSystemTask) + assert (repository_root / task.relative_path).exists() + + def test_search_for_tasks_by_configs(self, repository_root: Path) -> None: + tasks = list(Course._search_for_tasks_by_configs(repository_root)) + assert len(tasks) == 3 + for task in tasks: + assert isinstance(task, FileSystemTask) + assert (repository_root / task.relative_path).exists() diff --git a/tests/test_exporter.py b/tests/test_exporter.py new file mode 100644 index 0000000..8d209dd --- /dev/null +++ b/tests/test_exporter.py @@ -0,0 +1,243 @@ +from __future__ import annotations + +from inspect import cleandoc +from pathlib import Path + +import pytest + +from checker.configs import CheckerExportConfig, CheckerStructureConfig, DeadlinesConfig +from checker.course import Course +from checker.exceptions import BadConfig +from checker.exporter import Exporter + + +def create_test_files(tmpdir: Path, files_content: dict[str, str]) -> None: + for filename, content in 
files_content.items(): + file = Path(tmpdir / filename) + file.parent.mkdir(parents=True, exist_ok=True) + with open(file, "w") as f: + f.write(cleandoc(content)) + + +def assert_files_in_folder(folder: Path, expected_files: list[str]) -> None: + for file in expected_files: + assert (folder / file).exists() + + +# TODO: extend tests +class TestExporter: + SAMPLE_TEST_DEADLINES_CONFIG = DeadlinesConfig( + version=1, + settings={"timezone": "Europe/Berlin"}, + schedule=[ + { + "group": "group", + "enabled": True, + "start": "2021-01-01 00:00:00", + "tasks": [{"task": "task1", "score": 1}, {"task": "task2", "score": 1}], + }, + ], + ) + SAMPLE_TEST_STRUCTURE_CONFIG = CheckerStructureConfig( + ignore_patterns=[".ignore_folder"], + private_patterns=[".*", "private.*"], + public_patterns=["*", ".private_exception"], + ) + SAMPLE_TEST_FILES = { + ".ignore_folder/test.txt": "Hello1\n", + ".ignore_folder/folder/test.txt": "Hello2\n", + ".ignore_folder/folder/test.py": "print('Hello2')\n", + "folder/test.txt": "Hello2\n", + "folder/.test.py": "print('Hello2')\n", + "folder/folder/test.txt": "Hello2\n", + ".private_folder/test.txt": "Hello3\n", + ".private_folder/folder/.test.py": "print('Hello3')\n", + ".private_folder/folder/test.txt": "Hello4\n", + "other_folder/test.txt": "Hello5\n", + "test.py": "print('Hello')\n", + "test.txt": "Hello\n", + ".some_file": "Some line\n", + ".private_exception": "Some line\n", + "private.txt": "Private\n", + "private.py": "print('Private')\n", + "group/task1/.task.yml": "version: 1\nstructure:\n private_patterns: []\n", + "group/task1/test.txt": "Hello\n", + "group/task1/.test.py": "print('Hello')\n", + "group/task2/private.txt": "Private\n", + "group/task2/private.py": "print('Private')\n", + "group/task2/valid.txt": "Valid\n", + } + + def test_validate_ok_no_task_configs(self, tmpdir: Path) -> None: + structure_config = CheckerStructureConfig( + ignore_patterns=[".gitignore"], + private_patterns=[".*"], + public_patterns=["*"], + ) + create_test_files( + Path(tmpdir / "repository"), + { + "test.py": "print('Hello')\n", + "folder/test.txt": "Hello\n", + }, + ) + course = Course( + deadlines=self.SAMPLE_TEST_DEADLINES_CONFIG, + repository_root=Path(tmpdir / "repository"), + ) + exporter = Exporter( + course, + structure_config, + CheckerExportConfig(destination="https://example.com"), + Path(tmpdir / "repository"), + ) + exporter.validate() + + def test_validate_ok_task_configs(self, tmpdir: Path) -> None: + structure_config = CheckerStructureConfig( + ignore_patterns=[".gitignore"], + private_patterns=[".*"], + public_patterns=["*"], + ) + create_test_files( + Path(tmpdir / "repository"), + { + "test.py": "print('Hello')\n", + "folder/.task.yml": "version: 1\n", + "folder/test.txt": "Hello\n", + }, + ) + course = Course( + deadlines=self.SAMPLE_TEST_DEADLINES_CONFIG, + repository_root=Path(tmpdir / "repository"), + ) + exporter = Exporter( + course, + structure_config, + CheckerExportConfig(destination="https://example.com"), + Path(tmpdir / "repository"), + ) + + exporter.validate() + + def test_validate_fail_wrong_task_config(self, tmpdir: Path) -> None: + structure_config = CheckerStructureConfig( + ignore_patterns=[".gitignore"], + private_patterns=[".*"], + public_patterns=["*"], + ) + create_test_files( + Path(tmpdir / "repository"), + { + "test.py": "print('Hello')\n", + "folder/.task.yml": "wrong_field: HEHE\n", + "folder/test.txt": "Hello\n", + }, + ) + course = Course( + deadlines=self.SAMPLE_TEST_DEADLINES_CONFIG, + repository_root=Path(tmpdir / 
"repository"), + ) + with pytest.raises(BadConfig): + course.validate() + exporter = Exporter( + course, + structure_config, + CheckerExportConfig(destination="https://example.com"), + Path(tmpdir / "repository"), + ) + + exporter.validate() + + def test_export_public(self, tmpdir: Path) -> None: + create_test_files(Path(tmpdir / "repository"), self.SAMPLE_TEST_FILES) + course = Course( + deadlines=self.SAMPLE_TEST_DEADLINES_CONFIG, + repository_root=Path(tmpdir / "repository"), + ) + exporter = Exporter( + course, + self.SAMPLE_TEST_STRUCTURE_CONFIG, + CheckerExportConfig(destination="https://example.com"), + Path(tmpdir / "repository"), + ) + + exporter.export_public(Path(tmpdir / "export")) + + assert_files_in_folder( + tmpdir / "export", + [ + "folder/test.txt", + "folder/folder/test.txt", + "other_folder/test.txt", + "test.py", + "test.txt", + # ".private_exception", # TODO: fix private exception here not applied + "group/task1/.task.yml", + "group/task1/test.txt", + "group/task1/.test.py", + "group/task2/valid.txt", + ], + ) + + def test_export_for_testing(self, tmpdir: Path) -> None: + create_test_files(Path(tmpdir / "repository"), self.SAMPLE_TEST_FILES) + course = Course( + deadlines=self.SAMPLE_TEST_DEADLINES_CONFIG, + repository_root=Path(tmpdir / "repository"), + ) + exporter = Exporter( + course, + self.SAMPLE_TEST_STRUCTURE_CONFIG, + CheckerExportConfig(destination="https://example.com"), + Path(tmpdir / "repository"), + ) + + exporter.export_for_testing(Path(tmpdir / "export")) + + assert_files_in_folder( + tmpdir / "export", + [ + "folder/test.txt", + "folder/folder/test.txt", + "other_folder/test.txt", + "test.py", + "test.txt", + ".private_exception", + "group/task1/.task.yml", + "group/task1/test.txt", + "group/task1/.test.py", + "group/task2/valid.txt", + ], + ) + + def test_export_for_contribution(self, tmpdir: Path) -> None: + create_test_files(Path(tmpdir / "repository"), self.SAMPLE_TEST_FILES) + course = Course( + deadlines=self.SAMPLE_TEST_DEADLINES_CONFIG, + repository_root=Path(tmpdir / "repository"), + ) + exporter = Exporter( + course, + self.SAMPLE_TEST_STRUCTURE_CONFIG, + CheckerExportConfig(destination="https://example.com"), + Path(tmpdir / "repository"), + ) + + exporter.export_for_contribution(Path(tmpdir / "export")) + + assert_files_in_folder( + tmpdir / "export", + [ + "folder/test.txt", + "folder/folder/test.txt", + "other_folder/test.txt", + "test.py", + "test.txt", + ".private_exception", + "group/task1/.task.yml", + "group/task1/test.txt", + "group/task1/.test.py", + "group/task2/valid.txt", + ], + ) diff --git a/tests/test_pipeline.py b/tests/test_pipeline.py new file mode 100644 index 0000000..053a920 --- /dev/null +++ b/tests/test_pipeline.py @@ -0,0 +1,320 @@ +from __future__ import annotations + +from typing import Type + +import pytest + +from checker.configs import PipelineStageConfig +from checker.exceptions import BadConfig, PluginExecutionFailed +from checker.pipeline import PipelineRunner +from checker.plugins import PluginABC +from checker.plugins.base import PluginOutput + + +class _FailPlugin(PluginABC): + name = "fail" + + def _run(self, args: PluginABC.Args, *, verbose: bool = False) -> PluginOutput: + raise PluginExecutionFailed("Failed") + + +class _ScorePlugin(PluginABC): + name = "score" + + class Args(PluginABC.Args): + score: float = 0.5 + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: + if verbose: + return PluginOutput( + output=f"Score: {args.score:.2f}\nSome secret verbose line", + 
percentage=args.score, + ) + else: + return PluginOutput(output=f"Score: {args.score:.2f}", percentage=args.score) + + +class _EchoPlugin(PluginABC): + name = "echo" + + class Args(PluginABC.Args): + message: str + + def _run(self, args: Args, *, verbose: bool = False) -> PluginOutput: + return PluginOutput(output=args.message) + + +@pytest.fixture +def sample_plugins() -> dict[str, Type[PluginABC]]: + return { + "fail": _FailPlugin, + "score": _ScorePlugin, + "echo": _EchoPlugin, + } + + +@pytest.fixture +def sample_correct_pipeline() -> list[PipelineStageConfig]: + return [ + PipelineStageConfig( + name="stage1 - echo", + run="echo", + args={"message": "${{ message }}"}, + ), + PipelineStageConfig( + name="stage2 - score", + run="score", + args={"score": 0.5}, + register_output="score_stage", + ), + PipelineStageConfig( + name="stage3 - ignore fail", + run="fail", + fail="never", + ), + PipelineStageConfig( + name="stage4 - skip fail if run_if=False", + run="fail", + run_if=False, + ), + PipelineStageConfig( + name="stage5 - skip echo if run_if=False", + run="echo", + args={"message": "skipped message"}, + run_if=False, + ), + PipelineStageConfig( + name="stage6 - skip fail if registered output", + run="fail", + run_if="${{ outputs.score_stage.percentage > 0.7 }}", + ), + PipelineStageConfig( + name="stage7 - second echo", + run="echo", + args={"message": "second message"}, + ), + ] + + +class TestSampleFixtures: + def test_plugins(self, sample_plugins: dict[str, Type[PluginABC]]) -> None: + plugin = sample_plugins["echo"]() + plugin.validate({"message": "Hello"}) + result = plugin.run({"message": "Hello"}, verbose=True) + assert result.percentage == 1.0 + assert result.output == "Hello" + + plugin = sample_plugins["score"]() + plugin.validate({"score": 0.2}) + result = plugin.run({"score": 0.2}) + assert result.percentage == 0.2 + assert result.output == "Score: 0.20" + + plugin = sample_plugins["fail"]() + plugin.validate({}) + with pytest.raises(PluginExecutionFailed): + plugin.run({}) + + +class TestPipelineRunnerValidation: + def test_correct_pipeline_validation( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + ) -> None: + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=False, + ) + pipeline_runner.validate({}, validate_placeholders=False) + pipeline_runner.validate({"message": "Hello"}, validate_placeholders=True) + with pytest.raises(BadConfig): + pipeline_runner.validate({}, validate_placeholders=True) + + def test_unknown_plugin(self, sample_plugins: dict[str, Type[PluginABC]]) -> None: + with pytest.raises(BadConfig) as exc_info: + _ = PipelineRunner( + pipeline=[ + PipelineStageConfig( + name="stage1 - echo", + run="unknown", + args={"message": "Hello"}, + ), + ], + plugins=sample_plugins, + verbose=False, + ) + assert "Unknown plugin" in str(exc_info.value) + + def test_validate_placeholders(self, sample_correct_pipeline: list[PipelineStageConfig]) -> None: + with pytest.raises(BadConfig) as exc_info: + _ = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins={}, + verbose=False, + ) + assert "Unknown plugin" in str(exc_info.value) + + def test_unknown_placeholder( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + ) -> None: + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=False, + ) + with pytest.raises(BadConfig): + 
pipeline_runner.validate({}, validate_placeholders=True) + # TODO: fix it, now throwing Validation Error + # assert "Unknown placeholder" in str(exc_info.value) + + def test_invalid_run_if( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + ) -> None: + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=False, + ) + with pytest.raises(BadConfig): + pipeline_runner.validate({"score": 0.5}, validate_placeholders=True) + + def test_invalid_register_output( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + ) -> None: + sample_correct_pipeline[1].register_output = "unknown" + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=False, + ) + with pytest.raises(BadConfig) as exc_info: + pipeline_runner.validate({"message": "some valid message"}, validate_placeholders=True) + assert "Invalid template" in str(exc_info.value) + + def test_run_correct_pipeline_verbose( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + capsys: pytest.CaptureFixture[str], + ) -> None: + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=True, + ) + result = pipeline_runner.run({"message": "Hello"}) + assert not result.failed + captured = capsys.readouterr().err + assert "Hello" in captured + assert "Score: 0.50" in captured + assert "second message" in captured + # in args print, so in the verbose output + # assert "skipped message" not in captured.out and "skipped message" not in captured.err + # verbose output + assert "Some secret verbose line" in captured + # stages names are printed + for stage_name in ["stage1", "stage2", "stage3", "stage4", "stage5", "stage6"]: + assert stage_name in captured + + def test_run_correct_pipeline_not_verbose( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + capsys: pytest.CaptureFixture[str], + ) -> None: + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=False, + ) + result = pipeline_runner.run({"message": "Hello"}) + assert not result.failed + captured = capsys.readouterr().err + assert "Hello" in captured + assert "Score: 0.50" in captured + assert "second message" in captured + # in args print, so not in the non-verbose output + assert "skipped message" not in captured + # verbose output + assert "Some secret verbose line" not in captured + # stages names are printed + for stage_name in ["stage1", "stage2", "stage3", "stage4", "stage5", "stage6"]: + assert stage_name in captured + + def test_dry_run( + self, + sample_correct_pipeline: list[PipelineStageConfig], + sample_plugins: dict[str, Type[PluginABC]], + capsys: pytest.CaptureFixture[str], + ) -> None: + pipeline_runner = PipelineRunner( + pipeline=sample_correct_pipeline, + plugins=sample_plugins, + verbose=False, + ) + result = pipeline_runner.run({"message": "Hello"}, dry_run=True) + assert not result.failed + captured = capsys.readouterr().err + # no "error!" msg as all is skipped + assert "error!" not in captured + assert "[output here]" in captured + assert "dry run!" 
in captured
+        # stage names are printed
+        for stage_name in ["stage1", "stage2", "stage3", "stage4", "stage5", "stage6"]:
+            assert stage_name in captured
+
+    def test_fail_fast(
+        self,
+        sample_correct_pipeline: list[PipelineStageConfig],
+        sample_plugins: dict[str, Type[PluginABC]],
+        capsys: pytest.CaptureFixture[str],
+    ) -> None:
+        sample_correct_pipeline[2].fail = PipelineStageConfig.FailType.FAST
+        pipeline_runner = PipelineRunner(
+            pipeline=sample_correct_pipeline,
+            plugins=sample_plugins,
+            verbose=False,
+        )
+        result = pipeline_runner.run({"message": "Hello"})
+        assert result.failed
+        captured = capsys.readouterr().err
+        # "error!" msg is printed for the failed stage
+        assert "error!" in captured
+        # first echo works, second does not (fail-fast stops the pipeline)
+        assert "Hello" in captured
+        assert "second message" not in captured
+        # stage names are printed
+        for stage_name in ["stage1", "stage2", "stage3", "stage4", "stage5", "stage6"]:
+            assert stage_name in captured
+
+    def test_fail_after_all(
+        self,
+        sample_correct_pipeline: list[PipelineStageConfig],
+        sample_plugins: dict[str, Type[PluginABC]],
+        capsys: pytest.CaptureFixture[str],
+    ) -> None:
+        sample_correct_pipeline[2].fail = PipelineStageConfig.FailType.AFTER_ALL
+        pipeline_runner = PipelineRunner(
+            pipeline=sample_correct_pipeline,
+            plugins=sample_plugins,
+            verbose=False,
+        )
+        result = pipeline_runner.run({"message": "Hello"})
+        assert result.failed
+        captured = capsys.readouterr().err
+        # "error!" msg is printed for the failed stage
+        assert "error!" in captured
+        # both echoes work, the failure is only reported after all stages
+        assert "Hello" in captured
+        assert "second message" in captured
+        # stage names are printed
+        for stage_name in ["stage1", "stage2", "stage3", "stage4", "stage5", "stage6"]:
+            assert stage_name in captured
diff --git a/tests/test_resolver.py b/tests/test_resolver.py
new file mode 100644
index 0000000..29ecbf5
--- /dev/null
+++ b/tests/test_resolver.py
@@ -0,0 +1,125 @@
+from __future__ import annotations
+
+import copy
+from typing import Any
+
+import pytest
+
+from checker.exceptions import BadConfig
+from checker.pipeline import ParametersResolver
+
+
+class TestParametersResolver:
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            ("${{ a }}", {"a": 2}, 2),
+            pytest.param("${{ b }}", {"b": "2"}, "2", marks=pytest.mark.xfail()),  # TODO: check why returned as int
+            ("${{ c }}", {"c": [1, 2, "4"]}, [1, 2, "4"]),
+            (" ${{ d }}", {"d": 2}, 2),
+            ("${{ e }} ", {"e": 2}, 2),
+            ("${{ f }} some string", {"f": 2}, "2 some string"),
+            ("${{ g }} + ${{ g }}", {"g": 2}, "2 + 2"),
+            ("${{ h }}", {"h": 2.1}, 2.1),
+            ("${{ i }}", {"i": 2.0}, 2.0),
+        ],
+    )
+    def test_keep_native_type(self, template: str, context: dict[str, Any], expected: Any) -> None:
+        resolver = ParametersResolver()
+        assert resolver.resolve(template, context) == expected
+
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            ("${{ a }}", {"a": 2}, 2),
+            ("Hello, ${{ name }}!", {"name": "World"}, "Hello, World!"),
+            ("${{ a }} + ${{ b }} = ${{ a + b }}", {"a": 2, "b": 3}, "2 + 3 = 5"),
+            ("${{ a }}", {"a": 2, "b": 3}, 2),
+        ],
+    )
+    def test_string_input(self, template: str, context: dict[str, Any], expected: Any) -> None:
+        resolver = ParametersResolver()
+        assert resolver.resolve(template, context) == expected
+
+    @pytest.mark.parametrize(
+        "template, context, expected",
+        [
+            (["${{ item }}", "${{ item }}2"], {"item": "test"}, ["test", "test2"]),
+            (["${{ a }}", "${{ b }}"], {"a": 1, "b": 2}, [1, 2]),
+            (
+                ["${{ a }}", ["${{ b }}", "${{ c }}"]],
+                {"a": 1, "b": 2, "c": 3},
+
[1, [2, 3]], + ), + ], + ) + def test_list_input(self, template: list[Any], context: dict[str, Any], expected: list[Any]) -> None: + resolver = ParametersResolver() + assert resolver.resolve(template, context) == expected + + @pytest.mark.parametrize( + "template, context, expected", + [ + ( + {"key1": "${{ a }}", "key2": "${{ b }}"}, + {"a": "x", "b": "y"}, + {"key1": "x", "key2": "y"}, + ), + ( + {"name": "Hello, ${{ name }}!"}, + {"name": "Alice"}, + {"name": "Hello, Alice!"}, + ), + ( + {"key1": "${{ a }}", "key2": {"key3": "${{ b }}"}}, + {"a": 1, "b": 2}, + {"key1": 1, "key2": {"key3": 2}}, + ), + ], + ) + def test_dict_input( + self, + template: dict[str, Any], + context: dict[str, Any], + expected: dict[str, Any], + ) -> None: + resolver = ParametersResolver() + assert resolver.resolve(template, context) == expected + + @pytest.mark.parametrize( + "template, context", + [ + (1, {}), + (1, {"a": 1}), + (1.0, {"a": 1}), + ("some string", {"a": 1}), + ("a", {"a": 1}), + ("{a}", {"a": 1}), + ({}, {"a": 1}), + ([None, {1, 2, 3}, ["a", "b"]], {"a": 1}), + ], + ) + def test_non_template(self, template: Any, context: dict[str, Any]) -> None: + resolver = ParametersResolver() + template_copy = copy.deepcopy(template) + assert resolver.resolve(template, context) == template_copy + + @pytest.mark.parametrize( + "template, context", + [ + ("${{ invalid_syntax", {"invalid_syntax": 2}), + pytest.param( + "${{ valid_var.invalid_field }}", + {"valid_var": {"valid_field": 1}}, + marks=pytest.mark.xfail(), + ), + pytest.param("${{ not_existing }} ${{ a }}", {"a": 2}, marks=pytest.mark.xfail()), + pytest.param("${{ not_existing }}", {"a": 2}, marks=pytest.mark.xfail()), + pytest.param("invalid_syntax }}", {"invalid_syntax": 2}, marks=pytest.mark.xfail()), + ], + ) + def test_invalid_template(self, template: Any, context: dict[str, Any]) -> None: + resolver = ParametersResolver() + with pytest.raises(BadConfig): + a = resolver.resolve(template, context) + print(a) diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000..a454ef3 --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import pytest + +from checker.utils import print_header_info, print_info, print_separator + + +class TestPrint: + # TODO: test print colors etc + + def test_print_info(self, capsys: pytest.CaptureFixture): + print_info("123") + + captured = capsys.readouterr() + assert captured.err == "123\n" + + def test_print_separator(self, capsys: pytest.CaptureFixture): + print_separator("*", string_length=10) + + captured = capsys.readouterr() + assert "**********" in captured.err + + def test_print_header_info(self, capsys: pytest.CaptureFixture): + print_header_info("123", string_length=10) + + captured = capsys.readouterr() + assert "123" in captured.err + assert "++++++++++" in captured.err diff --git a/tests/testers/test_cpp.py b/tests/testers/test_cpp.py deleted file mode 100644 index 6b3fef0..0000000 --- a/tests/testers/test_cpp.py +++ /dev/null @@ -1,167 +0,0 @@ -from __future__ import annotations - -import json -import stat -from pathlib import Path - -import pytest - -from checker.exceptions import BuildFailedError, StylecheckFailedError, TestsFailedError -from checker.testers.cpp import CppTester -from checker.utils import copy_files - - -cpp_tests = pytest.mark.skipif("not config.getoption('cpp')") - -@pytest.fixture(scope='function') -def cpp_tester() -> CppTester: - return CppTester(cleanup=True, dry_run=False) - - -def init_task(tmp_path: Path, 
code: str, **kwargs): - course_dir = Path(__file__).parent / 'test_cpp_course' - reference_dir = tmp_path / 'reference' - root_dir = tmp_path / 'student' - copy_files(course_dir, reference_dir) - copy_files(course_dir, root_dir) - - source_dir = root_dir / 'foo' - public_tests_dir = reference_dir / 'foo' - private_tests_dir = reference_dir / 'tests' / 'foo' - with open(source_dir / 'foo.h', 'w') as f: - f.write(code) - - tester_path = private_tests_dir / '.tester.json' - with open(tester_path, 'r') as f: - config = json.load(f) - config.update(kwargs) - with open(tester_path, 'w') as f: - json.dump(config, f) - - format_path = reference_dir / 'run-clang-format.py' - format_path.chmod(format_path.stat().st_mode | stat.S_IEXEC) - return ( - source_dir, - private_tests_dir, # config_dir - public_tests_dir, - private_tests_dir, - reference_dir, # tests_root_dir - ) - - -STAGE_BUILD = 1 -STAGE_CLANG_FORMAT = 2 -STAGE_CLANG_TIDY = 3 -STAGE_TEST = 4 -STAGE_UNREACHABLE = 5 - -def check_fail_on_stage(err: str, stage: int): - messages = [ - 'Running cmake...', - 'Building test_foo...', - 'Running clang format...', - 'Running clang tidy...', - 'Running test_foo...', - ] - for i, message in enumerate(messages): - if i <= stage: - assert message in err - else: - assert message not in err - - -@cpp_tests -class TestCppTester: - def test_simple( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n return 42;\n}\n' - cpp_tester.test_task(*init_task(tmp_path, code)) - check_fail_on_stage(capsys.readouterr().err, STAGE_UNREACHABLE) - - def test_clang_format_error( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n return 42;\n}\n' - with pytest.raises(StylecheckFailedError): - cpp_tester.test_task(*init_task(tmp_path, code)) - check_fail_on_stage(capsys.readouterr().err, STAGE_CLANG_FORMAT) - - def test_clang_tidy_error( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n auto A = 42;\n return A;\n}\n' - with pytest.raises(StylecheckFailedError): - cpp_tester.test_task(*init_task(tmp_path, code)) - check_fail_on_stage(capsys.readouterr().err, STAGE_CLANG_TIDY) - - def test_build_error( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n return 42\n}\n' - with pytest.raises(BuildFailedError): - cpp_tester.test_task(*init_task(tmp_path, code)) - check_fail_on_stage(capsys.readouterr().err, STAGE_BUILD) - - def test_test_error( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n return 43;\n}\n' - with pytest.raises(TestsFailedError): - cpp_tester.test_task(*init_task(tmp_path, code)) - check_fail_on_stage(capsys.readouterr().err, STAGE_TEST) - - def test_timeout_error( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - sleep_code = '{\n std::this_thread::sleep_for(std::chrono::hours{1});\n}\n' - code = f'#include \n#include \n\nint Foo() {sleep_code}' - timeout = 1e-3 - with pytest.raises(TestsFailedError): - cpp_tester.test_task(*init_task(tmp_path, code, timeout=timeout)) - err = capsys.readouterr().err - check_fail_on_stage(err, STAGE_TEST) - assert f'exceeded time limit: {timeout}' in err - - def test_crash_me_success( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: 
pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n return 43;\n}\n' - cpp_tester.test_task(*init_task(tmp_path, code, is_crash_me=True)) - err = capsys.readouterr().err - check_fail_on_stage(err, STAGE_UNREACHABLE) - assert 'Program has crashed' in err - - def test_crash_me_fail( - self, - tmp_path: Path, - cpp_tester: CppTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - code = 'int Foo() {\n return 42;\n}\n' - with pytest.raises(TestsFailedError): - cpp_tester.test_task(*init_task(tmp_path, code, is_crash_me=True)) - err = capsys.readouterr().err - check_fail_on_stage(err, STAGE_TEST) - assert 'Program has not crashed' in err diff --git a/tests/testers/test_cpp_course/.clang-format b/tests/testers/test_cpp_course/.clang-format deleted file mode 100644 index fe699e0..0000000 --- a/tests/testers/test_cpp_course/.clang-format +++ /dev/null @@ -1,10 +0,0 @@ -BasedOnStyle: Google -IndentWidth: 4 -AccessModifierOffset: -4 -ColumnLimit: 100 -AllowShortFunctionsOnASingleLine: None -AllowShortIfStatementsOnASingleLine: false -AllowShortLoopsOnASingleLine: false -DerivePointerAlignment: true -KeepEmptyLinesAtTheStartOfBlocks: true -SortIncludes: false diff --git a/tests/testers/test_cpp_course/.clang-tidy b/tests/testers/test_cpp_course/.clang-tidy deleted file mode 100644 index 30bbc6a..0000000 --- a/tests/testers/test_cpp_course/.clang-tidy +++ /dev/null @@ -1,41 +0,0 @@ ---- -Checks: '-*,cppcoreguidelines-avoid-goto,cppcoreguidelines-pro-type-const-cast, google-readability-casting, google-runtime-int, modernize-replace-random-shuffle, modernize-use-nullptr, readability-braces-around-statements, readability-container-size-empty, readability-redundant-control-flow, readability-redundant-string-init, readability-identifier-naming, google-build-using-namespace' -HeaderFilterRegex: '\.h$' -WarningsAsErrors: '*' -CheckOptions: - - key: readability-identifier-naming.NamespaceCase - value: lower_case - - key: readability-identifier-naming.ClassCase - value: CamelCase - - key: readability-identifier-naming.TypedefCase - value: CamelCase - - key: readability-identifier-naming.TypeAliasCase - value: CamelCase - - key: readability-identifier-naming.PrivateMemberSuffix - value: '_' - - key: readability-identifier-naming.StructCase - value: CamelCase - - key: readability-identifier-naming.FunctionCase - value: CamelCase - - key: readability-identifier-naming.VariableCase - value: lower_case - - key: readability-identifier-naming.PrivateMemberCase - value: lower_case - - key: readability-identifier-naming.ParameterCase - value: lower_case - - key: readability-identifier-naming.GlobalConstantPrefix - value: k - - key: readability-identifier-naming.GlobalConstantCase - value: CamelCase - - key: readability-identifier-naming.StaticConstantPrefix - value: k - - key: readability-identifier-naming.StaticConstantCase - value: CamelCase - - key: readability-identifier-naming.ConstexprVariableCase - value: CamelCase - - key: readability-identifier-naming.ConstexprVariablePrefix - value: k - - key: google-runtime-int.TypeSuffix - value: _t - - key: readability-identifier-naming.TypeTemplateParameterCase - value: CamelCase diff --git a/tests/testers/test_cpp_course/CMakeLists.txt b/tests/testers/test_cpp_course/CMakeLists.txt deleted file mode 100644 index 6cc444f..0000000 --- a/tests/testers/test_cpp_course/CMakeLists.txt +++ /dev/null @@ -1,10 +0,0 @@ -project(test_cpp_course) - -cmake_minimum_required(VERSION 3.5) - -set(CMAKE_CXX_STANDARD 17) -set(CMAKE_RUNTIME_OUTPUT_DIRECTORY 
"${CMAKE_BINARY_DIR}") -set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}") -set(CMAKE_EXPORT_COMPILE_COMMANDS ON) - -add_executable(test_foo foo/test.cpp) diff --git a/tests/testers/test_cpp_course/foo/foo.h b/tests/testers/test_cpp_course/foo/foo.h deleted file mode 100644 index 66b944b..0000000 --- a/tests/testers/test_cpp_course/foo/foo.h +++ /dev/null @@ -1,7 +0,0 @@ -#pragma once - -#include - -int Foo() { - throw std::runtime_error("Not implemented"); -} diff --git a/tests/testers/test_cpp_course/foo/test.cpp b/tests/testers/test_cpp_course/foo/test.cpp deleted file mode 100644 index 6cdc930..0000000 --- a/tests/testers/test_cpp_course/foo/test.cpp +++ /dev/null @@ -1,6 +0,0 @@ -#include "foo.h" -#include - -int main() { - assert(Foo() == 42); -} diff --git a/tests/testers/test_cpp_course/run-clang-format.py b/tests/testers/test_cpp_course/run-clang-format.py deleted file mode 100755 index f790da2..0000000 --- a/tests/testers/test_cpp_course/run-clang-format.py +++ /dev/null @@ -1,325 +0,0 @@ -#!/usr/bin/env python3 -"""A wrapper script around clang-format, suitable for linting multiple files -and to use for continuous integration. - -This is an alternative API for the clang-format command line. -It runs over multiple files and directories in parallel. -A diff output is produced and a sensible exit code is returned. - -""" - -from __future__ import print_function, unicode_literals - -import argparse -import codecs -import difflib -import fnmatch -import io -import multiprocessing -import os -import signal -import subprocess -import sys -import traceback -from functools import partial - - -DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx' - - -class ExitStatus: - SUCCESS = 0 - DIFF = 1 - TROUBLE = 2 - - -def list_files(files, recursive=False, extensions=None, exclude=None): - if extensions is None: - extensions = [] - if exclude is None: - exclude = [] - - out = [] - for file in files: - if recursive and os.path.isdir(file): - for dirpath, dnames, fnames in os.walk(file): - fpaths = [os.path.join(dirpath, fname) for fname in fnames] - for pattern in exclude: - # os.walk() supports trimming down the dnames list - # by modifying it in-place, - # to avoid unnecessary directory listings. 
- dnames[:] = [ - x for x in dnames - if - not fnmatch.fnmatch(os.path.join(dirpath, x), pattern) - ] - fpaths = [ - x for x in fpaths if not fnmatch.fnmatch(x, pattern) - ] - for f in fpaths: - ext = os.path.splitext(f)[1][1:] - if ext in extensions: - out.append(f) - else: - out.append(file) - return out - - -def make_diff(file, original, reformatted): - return list( - difflib.unified_diff( - original, - reformatted, - fromfile='{}\t(original)'.format(file), - tofile='{}\t(reformatted)'.format(file), - n=3)) - - -class DiffError(Exception): - def __init__(self, message, errs=None): - super(DiffError, self).__init__(message) - self.errs = errs or [] - - -class UnexpectedError(Exception): - def __init__(self, message, exc=None): - super(UnexpectedError, self).__init__(message) - self.formatted_traceback = traceback.format_exc() - self.exc = exc - - -def run_clang_format_diff_wrapper(args, file): - try: - ret = run_clang_format_diff(args, file) - return ret - except DiffError: - raise - except Exception as e: - raise UnexpectedError('{}: {}: {}'.format(file, e.__class__.__name__, - e), e) - - -def run_clang_format_diff(args, file): - try: - with io.open(file, 'r', encoding='utf-8') as f: - original = f.readlines() - except IOError as exc: - raise DiffError(str(exc)) - invocation = [args.clang_format_executable, file] - - # Use of utf-8 to decode the process output. - # - # Hopefully, this is the correct thing to do. - # - # It's done due to the following assumptions (which may be incorrect): - # - clang-format will returns the bytes read from the files as-is, - # without conversion, and it is already assumed that the files use utf-8. - # - if the diagnostics were internationalized, they would use utf-8: - # > Adding Translations to Clang - # > - # > Not possible yet! - # > Diagnostic strings should be written in UTF-8, - # > the client can translate to the relevant code page if needed. - # > Each translation completely replaces the format string - # > for the diagnostic. - # > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation - # - # It's not pretty, due to Python 2 & 3 compatibility. 
- encoding_py3 = {} - if sys.version_info[0] >= 3: - encoding_py3['encoding'] = 'utf-8' - - try: - proc = subprocess.Popen( - invocation, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True, - **encoding_py3) - except OSError as exc: - raise DiffError(str(exc)) - proc_stdout = proc.stdout - proc_stderr = proc.stderr - if sys.version_info[0] < 3: - # make the pipes compatible with Python 3, - # reading lines should output unicode - encoding = 'utf-8' - proc_stdout = codecs.getreader(encoding)(proc_stdout) - proc_stderr = codecs.getreader(encoding)(proc_stderr) - # hopefully the stderr pipe won't get full and block the process - outs = list(proc_stdout.readlines()) - errs = list(proc_stderr.readlines()) - proc.wait() - if proc.returncode: - raise DiffError("clang-format exited with status {}: '{}'".format( - proc.returncode, file), errs) - return make_diff(file, original, outs), errs - - -def bold_red(s): - return '\x1b[1m\x1b[31m' + s + '\x1b[0m' - - -def colorize(diff_lines): - def bold(s): - return '\x1b[1m' + s + '\x1b[0m' - - def cyan(s): - return '\x1b[36m' + s + '\x1b[0m' - - def green(s): - return '\x1b[32m' + s + '\x1b[0m' - - def red(s): - return '\x1b[31m' + s + '\x1b[0m' - - for line in diff_lines: - if line[:4] in ['--- ', '+++ ']: - yield bold(line) - elif line.startswith('@@ '): - yield cyan(line) - elif line.startswith('+'): - yield green(line) - elif line.startswith('-'): - yield red(line) - else: - yield line - - -def print_diff(diff_lines, use_color): - if use_color: - diff_lines = colorize(diff_lines) - if sys.version_info[0] < 3: - sys.stdout.writelines((line.encode('utf-8') for line in diff_lines)) - else: - sys.stdout.writelines(diff_lines) - - -def print_trouble(prog, message, use_colors): - error_text = 'error:' - if use_colors: - error_text = bold_red(error_text) - print("{}: {} {}".format(prog, error_text, message), file=sys.stderr) - - -def main(): - parser = argparse.ArgumentParser(description=__doc__) - parser.add_argument( - '--clang-format-executable', - metavar='EXECUTABLE', - help='path to the clang-format executable', - default='clang-format') - parser.add_argument( - '--extensions', - help='comma separated list of file extensions (default: {})'.format( - DEFAULT_EXTENSIONS), - default=DEFAULT_EXTENSIONS) - parser.add_argument( - '-r', - '--recursive', - action='store_true', - help='run recursively over directories') - parser.add_argument('files', metavar='file', nargs='+') - parser.add_argument( - '-q', - '--quiet', - action='store_true') - parser.add_argument( - '-j', - metavar='N', - type=int, - default=0, - help='run N clang-format jobs in parallel' - ' (default number of cpus + 1)') - parser.add_argument( - '--color', - default='auto', - choices=['auto', 'always', 'never'], - help='show colored diff (default: auto)') - parser.add_argument( - '-e', - '--exclude', - metavar='PATTERN', - action='append', - default=[], - help='exclude paths matching the given glob-like pattern(s)' - ' from recursive search') - - args = parser.parse_args() - - # use default signal handling, like diff return SIGINT value on ^C - # https://bugs.python.org/issue14229#msg156446 - signal.signal(signal.SIGINT, signal.SIG_DFL) - try: - signal.SIGPIPE - except AttributeError: - # compatibility, SIGPIPE does not exist on Windows - pass - else: - signal.signal(signal.SIGPIPE, signal.SIG_DFL) - - colored_stdout = False - colored_stderr = False - if args.color == 'always': - colored_stdout = True - colored_stderr = True - elif args.color == 'auto': - 
colored_stdout = sys.stdout.isatty() - colored_stderr = sys.stderr.isatty() - - retcode = ExitStatus.SUCCESS - files = list_files( - args.files, - recursive=args.recursive, - exclude=args.exclude, - extensions=args.extensions.split(',')) - - if not files: - return - - njobs = args.j - if njobs == 0: - njobs = multiprocessing.cpu_count() + 1 - njobs = min(len(files), njobs) - - if njobs == 1: - # execute directly instead of in a pool, - # less overhead, simpler stacktraces - it = (run_clang_format_diff_wrapper(args, file) for file in files) - pool = None - else: - pool = multiprocessing.Pool(njobs) - it = pool.imap_unordered( - partial(run_clang_format_diff_wrapper, args), files) - while True: - try: - outs, errs = next(it) - except StopIteration: - break - except DiffError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - retcode = ExitStatus.TROUBLE - sys.stderr.writelines(e.errs) - except UnexpectedError as e: - print_trouble(parser.prog, str(e), use_colors=colored_stderr) - sys.stderr.write(e.formatted_traceback) - retcode = ExitStatus.TROUBLE - # stop at the first unexpected error, - # something could be very wrong, - # don't process all files unnecessarily - if pool: - pool.terminate() - break - else: - sys.stderr.writelines(errs) - if outs == []: - continue - if not args.quiet: - print_diff(outs, use_color=colored_stdout) - if retcode == ExitStatus.SUCCESS: - retcode = ExitStatus.DIFF - return retcode - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/tests/testers/test_cpp_course/tests/foo/.tester.json b/tests/testers/test_cpp_course/tests/foo/.tester.json deleted file mode 100644 index 1a455c8..0000000 --- a/tests/testers/test_cpp_course/tests/foo/.tester.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "allow_change": ["foo.h"], - "tests": ["test_foo"] -} diff --git a/tests/testers/test_python.py b/tests/testers/test_python.py deleted file mode 100644 index 3c0446d..0000000 --- a/tests/testers/test_python.py +++ /dev/null @@ -1,244 +0,0 @@ -from __future__ import annotations - -import inspect -from pathlib import Path - -import pytest - -from checker.exceptions import StylecheckFailedError, TestsFailedError -from checker.testers.python import PythonTester - - -py_tests = pytest.mark.skipif("not config.getoption('python')") - - -@pytest.fixture(scope='function') -def python_tester() -> PythonTester: - return PythonTester(cleanup=True, dry_run=False) - - -def create_single_file_task( - path: Path, - file_content: str, - public_tests: str = '', - private_tests: str = '', - tester_config: str = '{}', - setup_file: str = '', - *, - task_name: str = 'task.py', -) -> None: - files = {task_name: file_content} - if public_tests: - files['test_public.py'] = public_tests - if private_tests: - files['test_private.py'] = private_tests - if tester_config: - files['.tester.json'] = tester_config - if setup_file: - files['setup.py'] = setup_file - create_task(path, files) - - -def create_task(path: Path, files: dict[str, str]) -> None: - for filename, content in files.items(): - with open(path / filename, 'w') as f: - content = inspect.cleandoc(content) - if not content.endswith('\n\n'): - content += '\n' - f.write(content) - - -@py_tests -class TestPythonTester: - def test_simple_task( - self, - tmp_path: Path, - python_tester: PythonTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - CODE = """ - def foo() -> str: - return 'Hello world!' - """ - PUBLIC_TESTS = """ - from task import foo - - - def test_foo() -> None: - assert foo() == 'Hello world!' 
- """ - PRIVATE_TESTS = """ - def test_nothing() -> None: - assert True - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, PRIVATE_TESTS) - - score = python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - assert score == 1 - - captures = capsys.readouterr() - assert 'Running codestyle checks...' in captures.err - assert 'Running mypy checks...' in captures.err - assert 'Running tests' in captures.err - assert '2 passed' in captures.err - - def test_mypy_error( - self, - tmp_path: Path, - python_tester: PythonTester, - ) -> None: - CODE = """ - def foo() -> int: - return 'Hello world!' - """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert True - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS) - - with pytest.raises(StylecheckFailedError): - python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - - def test_disabled_mypy_error( - self, - tmp_path: Path, - python_tester: PythonTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - CODE = """ - def foo() -> int: - return 'Hello world!' - """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert True - """ - CONFIG = """ - {"run_mypy": false} - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, tester_config=CONFIG) - - python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - - captures = capsys.readouterr() - assert 'Running mypy checks...' not in captures.err - - def test_flake8_error( - self, - tmp_path: Path, - python_tester: PythonTester, - ) -> None: - CODE = """ - def foo() -> str: - return 'Hello world!' - """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert True - """ - CONFIG = """ - {"run_mypy": false} - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, tester_config=CONFIG) - - with pytest.raises(StylecheckFailedError): - python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - - def test_ruff_error( - self, - tmp_path: Path, - python_tester: PythonTester, - ) -> None: - CODE = """ - def foo() -> str: - return 'Hello looolooolooolooolooolooollooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooolooo world!' - """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert True - """ - CONFIG = """ - {"run_mypy": false} - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, tester_config=CONFIG) - - with pytest.raises(StylecheckFailedError): - python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - - def test_pytest_error( - self, - tmp_path: Path, - python_tester: PythonTester, - ) -> None: - CODE = """ - def foo() -> str: - return 'Hello world!' - """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert False - """ - CONFIG = """ - {"run_mypy": false} - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, tester_config=CONFIG) - - with pytest.raises(TestsFailedError) as ex: - python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - - def test_pytest_error_no_duble_error( - self, - tmp_path: Path, - python_tester: PythonTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - CODE = """ - def foo() -> str: - return 'Hello world!' 
- """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert False - """ - CONFIG = """ - {"run_mypy": false} - """ - with capsys.disabled(): - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, tester_config=CONFIG) - - with pytest.raises(TestsFailedError) as ex: - python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - captured = capsys.readouterr() - - assert captured.err.count('short test summary info ') == 1 - - def test_wheel_build( - self, - tmp_path: Path, - python_tester: PythonTester, - capsys: pytest.CaptureFixture[str], - ) -> None: - CODE = """ - def foo() -> str: - return 'Hello world!' - """ - PUBLIC_TESTS = """ - def test_nothing() -> None: - assert True - """ - SETUP = """ - from setuptools import setup - - setup(name="foo_pkg") - """ - CONFIG = """ - {"run_mypy": false, "module_test": true, "build_wheel": true} - """ - create_single_file_task(tmp_path, CODE, PUBLIC_TESTS, tester_config=CONFIG, setup_file=SETUP) - - score = python_tester.test_task(tmp_path, tmp_path, tmp_path, tmp_path, tmp_path, normalize_output=True) - assert score == 1 - - captures = capsys.readouterr() - assert 'Running mypy checks...' not in captures.err diff --git a/tests/testers/test_tester.py b/tests/testers/test_tester.py deleted file mode 100644 index 1676d6f..0000000 --- a/tests/testers/test_tester.py +++ /dev/null @@ -1,155 +0,0 @@ -from __future__ import annotations - -import inspect -from dataclasses import dataclass -from pathlib import Path -from typing import Type - -import pytest - -from checker.exceptions import TaskTesterTestConfigException, TesterNotImplemented -from checker.testers.cpp import CppTester -from checker.testers.make import MakeTester -from checker.testers.python import PythonTester -from checker.testers.tester import Tester -from checker.course import CourseConfig - - -def create_test_course_config(**kwargs) -> CourseConfig: - return CourseConfig( - name='test', - deadlines='', - templates='', - manytask_url='', - course_group='', - public_repo='', - students_group='', - **kwargs, - ) - -def write_tester_to_file(path: Path, content: str) -> Path: - filename = path / 'tester.py' - content = inspect.cleandoc(content) - with open(filename, 'w') as f: - f.write(content) - return filename - - -class TestTester: - @pytest.mark.parametrize('tester_name,tester_class', [ - ('python', PythonTester), - ('cpp', CppTester), - ('make', MakeTester), - ]) - def test_right_tester_created(self, tester_name: str, tester_class: Type[Tester]) -> None: - course_config = create_test_course_config(system=tester_name) - tester = Tester.create(root=Path(), course_config=course_config) - assert isinstance(tester, tester_class) - - def test_external_tester(self, tmp_path: Path): - TESTER = """ - from checker.testers import Tester - class CustomTester(Tester): - definitely_external_tester = 'Yes!' - """ - course_config = create_test_course_config(system='external', tester_path='tester.py') - write_tester_to_file(tmp_path, TESTER) - tester = Tester.create(root=tmp_path, course_config=course_config) - assert hasattr(tester, 'definitely_external_tester') - - NOT_A_TESTER = """ - class NotATester: - definitely_external_tester = 'Yes!' - """ - - NOT_INHERITED_TESTER = """ - class CustomTester: - definitely_external_tester = 'Yes!' 
- """ - - @pytest.mark.parametrize('tester_content', [ - NOT_A_TESTER, - NOT_INHERITED_TESTER, - ]) - def test_invalid_external_tester(self, tmp_path: Path, tester_content): - course_config = create_test_course_config(system='external', tester_path='tester.py') - write_tester_to_file(tmp_path, tester_content) - with pytest.raises(TesterNotImplemented): - Tester.create(root=tmp_path, course_config=course_config) - - def test_wrong_tester(self) -> None: - course_config = create_test_course_config(system='definitely-wrong-tester') - with pytest.raises(TesterNotImplemented): - Tester.create(root=Path(), course_config=course_config) - - -@dataclass -class SampleTaskTestConfig(Tester.TaskTestConfig): - digit: int = 0 - flag: bool = True - string: str = 'string' - - -def write_config_to_file(path: Path, content: str) -> Path: - filename = path / '.tester.json' - content = inspect.cleandoc(content) - with open(filename, 'w') as f: - f.write(content) - return filename - - -class TestTaskTestConfig: - def test_read_json_empty_file(self, tmp_path: Path) -> None: - CONFIG = """ - """ - filename = write_config_to_file(tmp_path, CONFIG) - with pytest.raises(TaskTesterTestConfigException): - SampleTaskTestConfig.from_json(filename) - - def test_read_json_wrong_layout(self, tmp_path: Path) -> None: - CONFIG = """ - "a": 1 - """ - filename = write_config_to_file(tmp_path, CONFIG) - with pytest.raises(TaskTesterTestConfigException): - SampleTaskTestConfig.from_json(filename) - - def test_read_json_wrong_format(self, tmp_path: Path) -> None: - CONFIG = """ - a: 1 - """ - filename = write_config_to_file(tmp_path, CONFIG) - with pytest.raises(TaskTesterTestConfigException): - SampleTaskTestConfig.from_json(filename) - - def test_read_json_extra_fields(self, tmp_path: Path) -> None: - CONFIG = """ - {"a": "a"} - """ - filename = write_config_to_file(tmp_path, CONFIG) - with pytest.raises(TaskTesterTestConfigException): - SampleTaskTestConfig.from_json(filename) - - def test_simple_case_empty_json(self, tmp_path: Path) -> None: - CONFIG = '{}' - filename = write_config_to_file(tmp_path, CONFIG) - - config = SampleTaskTestConfig.from_json(filename) - assert config.digit == 0 - assert config.flag - assert config.string == 'string' - - def test_simple_case_read_json(self, tmp_path: Path) -> None: - CONFIG = """ - { - "digit": 1, - "flag": false, - "string": "hard" - } - """ - filename = write_config_to_file(tmp_path, CONFIG) - - config = SampleTaskTestConfig.from_json(filename) - assert config.digit == 1 - assert not config.flag - assert config.string == 'hard' diff --git a/tests/utils/test_files.py b/tests/utils/test_files.py deleted file mode 100644 index 5487edf..0000000 --- a/tests/utils/test_files.py +++ /dev/null @@ -1,364 +0,0 @@ -from __future__ import annotations - -import os -from pathlib import Path - -import pytest - -from checker.utils.files import ( - check_file_contains_regexp, - check_files_contains_regexp, - check_folder_contains_regexp, - copy_files, - filename_match_patterns, - get_folders_diff, - get_folders_diff_except_public, -) - - -class TestFilenameMatch: - @pytest.mark.parametrize('filename,patterns,matched', [ - ('tmp.py', [r'*.py'], True), - ('tmp.py', [r'123', r'*.py'], True), - ('tmp.py', [r'123'], False), - ]) - def test_filename_match( - self, tmp_path: Path, - filename: str, patterns: list[str], matched: bool - ) -> None: - with open(tmp_path / filename, 'w') as f: - f.write('123') - assert filename_match_patterns(tmp_path / filename, patterns) == matched - - 
@pytest.mark.parametrize('filenames,patterns,ignore_patterns,result_filenames', [ - (['a.tmp'], [r'*'], None, ['a.tmp']), - (['a.tmp'], [r'b.tmp'], None, []), - (['a.tmp'], None, [r'*'], []), - (['a.tmp', 'b.tmp'], [r'b.*'], None, ['b.tmp']), - ]) - def test_copy_files_flat( - self, tmp_path: Path, - filenames: list[str], - patterns: list[str] | None, - ignore_patterns: list[str] | None, - result_filenames: list[str], - ) -> None: - src_path = tmp_path / 'src' - src_path.mkdir() - dst_path = tmp_path / 'dst' - dst_path.mkdir() - - for file in filenames: - with open(src_path / file, 'w') as f: - f.write('123') - - copy_files(src_path, dst_path, patterns, ignore_patterns) - - assert [f.name for f in dst_path.iterdir()] == result_filenames - - -class TestFileRegexpSearch: - @pytest.mark.parametrize('file_content,regexps,contains', [ - ('123\n321', [r'123'], True), - ('123\n321', [r'1*1'], True), - ('123\n321', [r'32[23]'], False), - ('123\n321', [r'abab'], False), - ]) - def test_file_regexps(self, tmp_path: Path, file_content: str, regexps: list[str], contains: bool) -> None: - tmp_file = tmp_path / 'file.tmp' - with open(tmp_file, 'w') as f: - f.write(file_content) - - assert check_file_contains_regexp(tmp_file, regexps) == contains - - def test_file_regexps_no_file(self, tmp_path: Path) -> None: - tmp_file = tmp_path / 'not-existed-file.tmp' - with pytest.raises(AssertionError): - check_file_contains_regexp(tmp_file, [r'.*']) - - def test_file_regexps_no_regexps(self, tmp_path: Path) -> None: - tmp_file = tmp_path / 'file.tmp' - with open(tmp_file, 'w') as f: - f.write('123') - assert not check_file_contains_regexp(tmp_file, []) - - def test_file_regexps_empty_file(self, tmp_path: Path) -> None: - tmp_file = tmp_path / 'empty-file.tmp' - tmp_file.touch() - - assert check_file_contains_regexp(tmp_file, [r'.*']) - assert not check_file_contains_regexp(tmp_file, []) - - @pytest.mark.parametrize('files_content,extensions,regexps,contains', [ - ({'1.txt': '123\n321', '2.txt': 'aaa\nbbb'}, ['txt'], [r'123'], True), - ({'1.txt': '123\n321', '2.tmp': 'aaa\nbbb'}, ['ini', 'tmp', 'txt'], [r'aaa'], True), - ({'1.tmp': '123\n321', '2.tmp': 'aaa\nbbb'}, ['txt'], [r'123'], False), - ({'1.txt': '123\n321', '2.txt': 'aaa\nbbb'}, ['txt'], [r'ttt', r'nnn', r'a.*a'], True), - ({'1.txt': '123\n321', '2.txt': 'aaa\nbbb'}, ['txt'], [r'ac.*b'], False), - ]) - def test_folder_regexps( - self, - tmp_path: Path, - files_content: dict[str], extensions: list[str], regexps: list[str], contains: bool, - ) -> None: - for file_name, file_content in files_content.items(): - with open(tmp_path / file_name, 'w') as f: - f.write(file_content) - - assert check_folder_contains_regexp(tmp_path, extensions, regexps) == contains - - def test_folder_regexps_no_folder(self, tmp_path: Path) -> None: - with pytest.raises(AssertionError): - check_folder_contains_regexp(tmp_path / 'folder-not-exists', ['py', 'tmp'], [r'.*']) - - def test_folder_regexps_no_regexps(self, tmp_path: Path) -> None: - with open(tmp_path / 'a.tmp', 'w') as f1, open(tmp_path / 'b.tmp', 'w') as f2: - f1.write('123') - f2.write('321') - assert not check_folder_contains_regexp(tmp_path, ['py', 'tmp'], []) - - def test_folder_regexps_empty_folder(self, tmp_path: Path) -> None: - assert not check_folder_contains_regexp(tmp_path, ['py', 'tmp'], [r'.*']) - - def test_folder_regexps_raise_on_found(self, tmp_path: Path) -> None: - with open(tmp_path / 'a.tmp', 'w') as f1, open(tmp_path / 'b.tmp', 'w') as f2: - f1.write('123') - f2.write('321') - - with 
pytest.raises(AssertionError): - check_folder_contains_regexp(tmp_path, ['tmp'], [r'.*'], raise_on_found=True) - - assert not check_folder_contains_regexp(tmp_path, ['tmp'], [r'ttt'], raise_on_found=True) - - @pytest.mark.parametrize('regexps,regexps_for_files,contains', [ - (['12'], ['aba.tmp2'], False), - (['2'], ['aba.tmp2'], True), - (['12'], ['*b*'], False), - (['12'], ['*a*'], True), - (['12'], ['a'], False), - (['12'], ['*tmp2'], False), - (['12'], ['**/*a.tmp*'], True), - (['32'], ['**/*a.tmp*'], True), - (['32'], ['**/a.tmp*'], False), - (['.*4.*'], ['a.tmp', 'aba.tmp2'], False), - (['.*3.*'], ['a.tmp', 'aba.tmp2'], True), - (['.*3.'], ['a.tmp', 'aba.tmp2'], True), - (['.*3.'], ['a.tmp'], False), - (['.*'], None, True), - (['123'], None, True), - (['.*'], [], False), - ([], None, False), - ]) - def test_check_files_contains_regexp( - self, - tmp_path: Path, - regexps: list[str], - regexps_for_files: list[str], - contains: bool - ) -> None: - with open(tmp_path / 'a.tmp', 'w') as f1, open(tmp_path / 'aba.tmp2', 'w') as f2: - f1.write('123') - f2.write('321') - assert check_files_contains_regexp(tmp_path, regexps, regexps_for_files) == contains - - -class TestFolderDiff: - @pytest.fixture(scope='function') - def public_folder(self, tmp_path: Path) -> Path: - public_folder = tmp_path / 'public' - public_folder.mkdir() - return public_folder - - @pytest.fixture(scope='function') - def old_folder(self, tmp_path: Path) -> Path: - old_folder = tmp_path / 'old' - old_folder.mkdir() - return old_folder - - @pytest.fixture(scope='function') - def new_folder(self, tmp_path: Path) -> Path: - new_folder = tmp_path / 'new' - new_folder.mkdir() - return new_folder - - @staticmethod - def fill_folder(folder: Path, files: list[str], content: str) -> None: - folder.mkdir(parents=True, exist_ok=True) - for file in files: - with open(folder / file, 'w') as f: - f.write(content) - - @staticmethod - def fill_folder_binary_files(folder: Path, files: list[str], content: bytes) -> None: - folder.mkdir(parents=True, exist_ok=True) - for file in files: - with open(folder / file, 'wb') as f: - f.write(content) - - def test_flat_folders(self, old_folder: Path, new_folder: Path) -> None: - # same files - for i in range(10): - self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - - # completely different files - different_files = ['a.py', 'b.cpp', 'c.go'] - self.fill_folder(old_folder, different_files, '1\n2\n3\n'*16) - self.fill_folder(new_folder, different_files, '4\n5\n6\n'*16) - - changed_files = get_folders_diff(old_folder, new_folder) - assert sorted(changed_files) == sorted(different_files) - - # def test_flat_folders_spaces_diff(self, old_folder: Path, new_folder: Path) -> None: - # # same files - # for i in range(10): - # self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - # self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - # - # # completely different files - # space_different_files = ['a.py', 'b.cpp', 'c.go'] - # self.fill_folder(old_folder, space_different_files, 'Here lyeth muche rychnesse in lytell space.-- John Heywood$') - # self.fill_folder(new_folder, space_different_files, ' He relyeth much erychnes seinly tells pace. 
--John Heywood ^M$') - # - # changed_files = get_folders_diff(old_folder, new_folder) - # assert len(changed_files) == 0 - - def test_flat_folders_only_same_files(self, old_folder: Path, new_folder: Path) -> None: - # same files - for i in range(10): - self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - - changed_files = get_folders_diff(old_folder, new_folder) - assert len(changed_files) == 0 - - def test_flat_folders_new_and_deleted_files(self, old_folder: Path, new_folder: Path) -> None: - # same files - for i in range(10): - self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - - # deleted files - deleted_files = ['to_be_deleted_a.py', 'to_be_deleted_b.cpp', 'to_be_deleted_c.go'] - self.fill_folder(old_folder, deleted_files, '1\n2\n3\n'*16) - # new files - new_files = ['new_file_a.py', 'new_file_.cpp', 'new_file_.go'] - self.fill_folder(new_folder, new_files, '1\n2\n3\n'*16) - - changed_files = get_folders_diff(old_folder, new_folder) - assert sorted(changed_files) == sorted(deleted_files + new_files) - - # def test_flat_folders_spaces_in_filename(self, old_folder: Path, new_folder: Path) -> None: - # # same files - # for i in range(10): - # self.fill_folder(old_folder, [f'{i} some {i}.py', f'{i} some {i}.cpp', f'{i} some {i}.go'], '1\n2\n3\n'*16) - # self.fill_folder(new_folder, [f'{i} some {i}.py', f'{i} some {i}.cpp', f'{i} some {i}.go'], '1\n2\n3\n'*16) - # - # # completely different files - # different_files = ['a some a.py', 'b some b.cpp', 'c some c.go'] - # self.fill_folder(old_folder, different_files, '1\n2\n3\n'*16) - # self.fill_folder(new_folder, different_files, '4\n5\n6\n'*16) - # - # changed_files = get_folders_diff(old_folder, new_folder) - # assert sorted(changed_files) == sorted(different_files) - - # TODO: make binary files detection to work on ubuntu - # def test_flat_folders_skip_binary_files(self, old_folder: Path, new_folder: Path) -> None: - # # same files - # for i in range(10): - # self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - # self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - # - # # completely different files - # different_files = ['a.py', 'b.cpp', 'c.go'] - # self.fill_folder_binary_files(old_folder, different_files, b'\x00'+os.urandom(64)+b'\x00') - # self.fill_folder_binary_files(new_folder, different_files, b'\x00'+os.urandom(64)+b'\x00') - # - # changed_files = get_folders_diff(old_folder, new_folder, skip_binary=False) - # assert sorted(changed_files) == sorted(different_files) - # - # changed_files = get_folders_diff(old_folder, new_folder) - # assert len(changed_files) == 0 - # changed_files = get_folders_diff(old_folder, new_folder, skip_binary=True) - # assert len(changed_files) == 0 - - def test_deep_structure(self, old_folder: Path, new_folder: Path) -> None: - # same files - for i in range(10): - self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - - # changed files in top folder - different_files = ['a.py', 'b.cpp', 'c.go'] - self.fill_folder(old_folder, different_files, '1\n2\n3\n'*16) - self.fill_folder(new_folder, different_files, '4\n3\n2\n'*16) - - # changed files in inner folders - inner_folder_different_files = ['o.py', 'p.cpp', 
'q.go'] - self.fill_folder(old_folder / 'inner-folder', inner_folder_different_files, '1\n2\n3\n'*16) - self.fill_folder(new_folder / 'inner-folder', inner_folder_different_files, '4\n3\n2\n'*16) - - # new inner folder - new_inner_folder_files = ['t.py', 'r.cpp', 'n.go'] - self.fill_folder(new_folder / 'new-inner-folder', new_inner_folder_files, '1\n2\n3\n'*16) - - changed_files = get_folders_diff(old_folder, new_folder) - assert len(changed_files) == len(different_files + inner_folder_different_files + new_inner_folder_files) - assert all(file in changed_files for file in different_files) - assert all(f'inner-folder/{file}' in changed_files for file in inner_folder_different_files) - assert all(f'new-inner-folder/{file}' in changed_files for file in new_inner_folder_files) - - def test_deep_structure_skip_folders(self, old_folder: Path, new_folder: Path) -> None: - # same files - for i in range(10): - self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - - # changed files in inner folders - inner_folder_different_files = ['o.py', 'p.cpp', 'q.go'] - self.fill_folder(old_folder / 'inner-folder', inner_folder_different_files, '1\n2\n3\n'*16) - self.fill_folder(new_folder / 'inner-folder', inner_folder_different_files, '4\n3\n2\n'*16) - - # changed files in inner folders - skip_inner_folder_different_files = ['a.py', 'b.cpp', 'c.go'] - self.fill_folder(old_folder / 'skip-inner-folder', skip_inner_folder_different_files, '1\n2\n3\n'*16) - self.fill_folder(new_folder / 'skip-inner-folder', skip_inner_folder_different_files, '4\n3\n2\n'*16) - - # changed files in inner folders - git_folder_different_files = ['aa.py', 'bb.cpp', 'cc.go'] - self.fill_folder(old_folder / '.git', git_folder_different_files, '1\n2\n3\n'*16) - self.fill_folder(new_folder / '.git', git_folder_different_files, '4\n3\n2\n'*16) - - changed_files = get_folders_diff(old_folder, new_folder, exclude_patterns=['.git', 'skip-inner-folder']) - assert sorted(changed_files) == sorted([f'inner-folder/{i}' for i in inner_folder_different_files]) - - def test_flat_public_folder_filtering(self, public_folder: Path, old_folder: Path, new_folder: Path) -> None: - # same files - for i in range(10): - self.fill_folder(old_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(new_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - self.fill_folder(public_folder, [f'{i}.py', f'{i}.cpp', f'{i}.go'], '1\n2\n3\n'*16) - - # new files in public not in old/new - new_files_in_public = ['new_in_public_a.py', 'new_in_public_b.cpp', 'new_in_public_c.go'] - self.fill_folder(public_folder, new_files_in_public, '1\n2\n3\n'*16) - - # totally new files in new - new_files_in_new = ['new_in_new_a.py', 'new_in_new_b.cpp', 'new_in_new_c.go'] - self.fill_folder(new_folder, new_files_in_new, '1\n2\n3\n'*16) - - # new in public and transfer in new - new_files_in_public_and_new = ['new_in_public_and_new_a.py', 'new_in_public_and_new_b.cpp', 'new_in_public_and_new_c.go'] - self.fill_folder(public_folder, new_files_in_public_and_new, '1\n2\n3\n'*16) - self.fill_folder(new_folder, new_files_in_public_and_new, '1\n2\n3\n'*16) - - # new in public than changes in new - new_files_in_public_and_new_changed = ['new_in_public_and_new_changed_a.py', 'new_in_public_and_new_changed_b.cpp', 'new_in_public_and_new_changed_c.go'] - self.fill_folder(public_folder, new_files_in_public_and_new_changed, '1\n2\n3\n'*16) - 
self.fill_folder(new_folder, new_files_in_public_and_new_changed, '4\n3\n2\n'*16) - - changed_files = get_folders_diff_except_public(public_folder, old_folder, new_folder) - print('\nchanged_files') - for i in changed_files: - print('-', i) - assert sorted(changed_files) == sorted(new_files_in_new + new_files_in_public_and_new_changed) diff --git a/tests/utils/test_git.py b/tests/utils/test_git.py deleted file mode 100644 index ca588d5..0000000 --- a/tests/utils/test_git.py +++ /dev/null @@ -1,20 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -from checker.utils import get_tracked_files_list - - -ROOT_DIR = Path(__file__).parent.parent.parent - - -class TestGitStats: - - def test_get_tracked_files_list(self) -> None: - current_file = Path(__file__).absolute().relative_to(ROOT_DIR) - main_file = (ROOT_DIR / 'checker' / '__main__.py').absolute().relative_to(ROOT_DIR) - git_tracked_files = get_tracked_files_list(ROOT_DIR) - - assert len(git_tracked_files) > 0 - assert str(current_file) in git_tracked_files - assert str(main_file) in git_tracked_files diff --git a/tests/utils/test_manytask.py b/tests/utils/test_manytask.py deleted file mode 100644 index 4cfcf4d..0000000 --- a/tests/utils/test_manytask.py +++ /dev/null @@ -1,202 +0,0 @@ -from __future__ import annotations - -import datetime - -import pytest -from pytest_mock import MockFixture - -from checker.exceptions import GetFailedError, PushFailedError -from checker.utils import get_score, push_report - - -BASE_URL = 'https://test.manytask.org' -TESTER_TOKEN = 'test_token' -TEST_NOW_DATETIME = datetime.datetime(2021, 1, 1, 0, 0, 0) -TEST_DEADLINE_DATETIME = TEST_NOW_DATETIME + datetime.timedelta(hours=1) -TEST_TASK_NAME = 'some_task' -TEST_USER_ID = 1 -TEST_USERNAME = 'username' -TEST_SCORE = 40.0 - - -@pytest.fixture -def mock_report_request(mocker: MockFixture) -> MockFixture: - def mock_side_effect(*args, **kwargs): - url = args[0] if len(args) > 0 else kwargs.get('url', '') - data = args[1] if len(args) > 1 else kwargs.get('data', dict()) - files = kwargs.get('files', dict()) - - if ( - url != f'{BASE_URL}/api/report' or - data.get('token', None) != TESTER_TOKEN or - data.get('task', None) != TEST_TASK_NAME or - data.get('user_id', None) != TEST_USER_ID - ): - mock_response = mocker.Mock() - mock_response.status_code = 400 - mock_response.text = 'Some error' - return mock_response - - commit_time = data.get('commit_time', None) - submit_time = commit_time + datetime.timedelta(minutes=5) if commit_time is not None else None - - mock_response = mocker.Mock() - mock_response.json.return_value = { - 'username': TEST_USERNAME, - 'commit_time': commit_time, - 'submit_time': submit_time, - 'demand_multiplier': 1, - 'score': 0.0 if data.get('check_deadline', True) and commit_time > TEST_DEADLINE_DATETIME else TEST_SCORE, - } - mock_response.status_code = 200 - return mock_response - - - mock = mocker.patch('requests.post') - mock.side_effect = mock_side_effect - return mock - - -class TestPushReport: - - def test_simple(self, mock_report_request: MockFixture) -> None: - username, score, result_commit_time, result_submit_time, demand_multiplier = push_report( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name=TEST_TASK_NAME, - user_id=TEST_USER_ID, - score=TEST_SCORE, - files=None, - send_time=TEST_NOW_DATETIME, - check_deadline=True, - use_demand_multiplier=False, - ) - assert username == TEST_USERNAME - assert score == TEST_SCORE - assert result_commit_time == TEST_NOW_DATETIME - assert 
result_submit_time > TEST_NOW_DATETIME - assert demand_multiplier == 1.0 - - mock_report_request.assert_called_once() - - @pytest.mark.parametrize('check_deadline', [True, False]) - def test_check_deadline(self, mock_report_request: MockFixture, check_deadline: bool) -> None: - username, score, result_commit_time, result_submit_time, demand_multiplier = push_report( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name=TEST_TASK_NAME, - user_id=TEST_USER_ID, - score=TEST_SCORE, - send_time=TEST_DEADLINE_DATETIME + datetime.timedelta(days=1), - check_deadline=check_deadline, - use_demand_multiplier=False, - ) - - if check_deadline: - assert score == 0.0 - else: - assert score == TEST_SCORE - - def test_wrong_tester_token(self, mock_report_request: MockFixture) -> None: - with pytest.raises(PushFailedError): - push_report( - report_base_url=BASE_URL, - tester_token='wrong_token', - task_name=TEST_TASK_NAME, - user_id=TEST_USER_ID, - score=TEST_SCORE, - send_time=TEST_NOW_DATETIME, - ) - - def test_wrong_task_name(self, mock_report_request: MockFixture) -> None: - with pytest.raises(PushFailedError): - push_report( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name='wrong_task_name', - user_id=TEST_USER_ID, - score=TEST_SCORE, - send_time=TEST_NOW_DATETIME, - ) - - def test_wrong_user_id(self, mock_report_request: MockFixture) -> None: - with pytest.raises(PushFailedError): - push_report( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name=TEST_TASK_NAME, - user_id=1000, - score=TEST_SCORE, - send_time=TEST_NOW_DATETIME, - ) - - -@pytest.fixture -def mock_score_request(mocker: MockFixture) -> MockFixture: - def mock_side_effect(*args, **kwargs): - url = args[0] if len(args) > 0 else kwargs.get('url', '') - data = args[1] if len(args) > 1 else kwargs.get('data', dict()) - - if ( - url != f'{BASE_URL}/api/score' or - data.get('token', None) != TESTER_TOKEN or - data.get('user_id', None) != TEST_USER_ID - ): - mock_response = mocker.Mock() - mock_response.status_code = 400 - mock_response.text = 'Some error' - return mock_response - - mock_response = mocker.Mock() - mock_response.json.return_value = { - 'score': TEST_SCORE if data.get('task') == TEST_TASK_NAME else None, - } - mock_response.status_code = 200 - return mock_response - - - mock = mocker.patch('requests.get') - mock.side_effect = mock_side_effect - return mock - - -class TestGetScore: - - def test_simple(self, mock_score_request: MockFixture) -> None: - score = get_score( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name=TEST_TASK_NAME, - user_id=TEST_USER_ID, - ) - assert score == TEST_SCORE - - mock_score_request.assert_called_once() - - def test_wrong_user_id(self, mock_score_request: MockFixture) -> None: - with pytest.raises(GetFailedError): - get_score( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name=TEST_TASK_NAME, - user_id=1000, - ) - - def test_wrong_task_name(self, mock_score_request: MockFixture) -> None: - score = get_score( - report_base_url=BASE_URL, - tester_token=TESTER_TOKEN, - task_name='wrong_task_name', - user_id=TEST_USER_ID, - ) - - assert score is None - - def test_wrong_tester_token(self, mock_score_request: MockFixture) -> None: - with pytest.raises(GetFailedError): - get_score( - report_base_url=BASE_URL, - tester_token='wrong_token', - task_name=TEST_TASK_NAME, - user_id=TEST_USER_ID, - ) diff --git a/tests/utils/test_print.py b/tests/utils/test_print.py deleted file mode 100644 index 5dd8435..0000000 --- 
a/tests/utils/test_print.py +++ /dev/null @@ -1,19 +0,0 @@ -from __future__ import annotations - -import pytest - -from checker.utils import print_info, print_task_info - - -class TestPrint: - def test_print_info(self, capsys: pytest.CaptureFixture): - print_info('123') - - captured = capsys.readouterr() - assert captured.err == '123\n' - - def test_print_task_info(self, capsys: pytest.CaptureFixture): - print_task_info('123') - - captured = capsys.readouterr() - assert '123' in captured.err diff --git a/tests/utils/test_template.py b/tests/utils/test_template.py deleted file mode 100644 index 57376c2..0000000 --- a/tests/utils/test_template.py +++ /dev/null @@ -1,265 +0,0 @@ -from __future__ import annotations - -from dataclasses import dataclass -from inspect import cleandoc -from pathlib import Path - -import pytest - -from checker.utils import create_template_from_gold_solution, cut_marked_code_from_string - - -def create_file(filename: Path, content: str) -> None: - with open(filename, 'w') as f: - content = cleandoc(content) - f.write(content) - - -@dataclass -class TemplateTestcase: - name: str - code: str - template: str - start_end: str | tuple[str, str] = ('TODO: CODE HERE', 'TODO: CODE HERE') - replace: str = 'TODO: CODE HERE' - - def __repr__(self) -> str: - return self.name - - -TEMPLATE_TEST_CASES = [ - TemplateTestcase( - name='simple', - code=cleandoc(""" - a = 1 - # TODO: CODE HERE - b = 2 - # TODO: CODE HERE - c = 3 - """), - template=cleandoc(""" - a = 1 - # TODO: CODE HERE - c = 3 - """), - start_end=('TODO: CODE HERE', 'TODO: CODE HERE'), - replace='TODO: CODE HERE', - ), - TemplateTestcase( - name='empty_template', - code=cleandoc(""" - a = 1 - # TODO: CODE HERE - # TODO: CODE HERE - c = 3 - """), - template=cleandoc(""" - a = 1 - # TODO: CODE HERE - c = 3 - """), - start_end='TODO: CODE HERE', - replace='TODO: CODE HERE', - ), - TemplateTestcase( - name='different_strings', - code=cleandoc(""" - a = 1 - # SOLUTION START - b = 2 - # SOLUTION END - c = 3 - """), - template=cleandoc(""" - a = 1 - # TODO: CODE HERE - c = 3 - """), - start_end=('# SOLUTION START', '# SOLUTION END'), - replace='# TODO: CODE HERE', - ), - TemplateTestcase( - name='2_templates', - code=cleandoc(""" - a = 1 - # TODO: CODE HERE - b = 2 - # TODO: CODE HERE - other = 1 - # TODO: CODE HERE - b = 2 - # TODO: CODE HERE - c = 3 - """), - template=cleandoc(""" - a = 1 - # TODO: CODE HERE - other = 1 - # TODO: CODE HERE - c = 3 - """), - start_end='TODO: CODE HERE', - replace='TODO: CODE HERE', - ), - TemplateTestcase( - name='intentions', - code=cleandoc(""" - def foo() -> int: - # TODO: CODE HERE - a = 1 - # TODO: CODE HERE - - with open('f.tmp') as f: - # TODO: CODE HERE - f.read() - # TODO: CODE HERE - """), - template=cleandoc(""" - def foo() -> int: - # TODO: CODE HERE - - with open('f.tmp') as f: - # TODO: CODE HERE - """), - start_end='TODO: CODE HERE', - replace='TODO: CODE HERE', - ), - TemplateTestcase( - name='complex_intentions', - code=cleandoc(""" - import os - - class A: - def foo(self) -> int: - # TODO: CODE HERE - return 1 - # TODO: CODE HERE - - def bar(self) -> int: - return 2 - - if __name__ == '__main__': - # TODO: CODE HERE - a = A() - print(a.foo()) - # TODO: CODE HERE - """), - template=cleandoc(""" - import os - - class A: - def foo(self) -> int: - # TODO: CODE HERE - - def bar(self) -> int: - return 2 - - if __name__ == '__main__': - # TODO: CODE HERE - """), - start_end='TODO: CODE HERE', - replace='TODO: CODE HERE', - ), - TemplateTestcase( - name='cpp_hello_world', - 
code=cleandoc(""" - #include - - int main() { - // TODO: CODE HERE - std::cout << "Hello World!"; - // TODO: CODE HERE - return 0; - } - """), - template=cleandoc(""" - #include - - int main() { - // TODO: CODE HERE - return 0; - } - """), - start_end='TODO: CODE HERE', - replace='TODO: CODE HERE', - ), -] - - -class TestTemplate: - @pytest.mark.parametrize('test_case', TEMPLATE_TEST_CASES, ids=repr) - def test_cut_marked_code(self, test_case: TemplateTestcase) -> None: - template = cut_marked_code_from_string( - content=test_case.code, - clear_mark=test_case.start_end, - clear_mark_replace=test_case.replace, - raise_not_found=False, - ) - assert template == test_case.template - - def test_cut_marked_code_wrong(self) -> None: - CODE = cleandoc(""" - a = 1 - # Start string - b = 2 - # Start string - c = 3 - """) - assert cut_marked_code_from_string( - content=CODE, - clear_mark=('Start string', 'End string'), - clear_mark_replace='Start string', - raise_not_found=False, - ) == CODE - with pytest.raises(AssertionError): - cut_marked_code_from_string( - content=CODE, - clear_mark=('Start string', 'End string'), - clear_mark_replace='Start string', - raise_not_found=True, - ) - - def test_file_template(self, tmp_path: Path) -> None: - test_case = TEMPLATE_TEST_CASES[0] - - tmp_file_code = tmp_path / f'{test_case.name}.py' - create_file(tmp_file_code, test_case.code) - - create_template_from_gold_solution(tmp_file_code) - - with open(tmp_file_code, 'r') as file: - content = file.read() - - assert content == cleandoc(test_case.template) - - def test_no_file(self, tmp_path: Path) -> None: - tmp_file_code = tmp_path / 'this_file_does_not_exist.py' - - with pytest.raises(AssertionError): - create_template_from_gold_solution(tmp_file_code) - - def test_no_mark(self, tmp_path: Path) -> None: - CODE = """ - a = 1 - """ - tmp_file_code = tmp_path / 'wrong.py' - create_file(tmp_file_code, CODE) - - assert not create_template_from_gold_solution(tmp_file_code) - assert not create_template_from_gold_solution(tmp_file_code, raise_not_found=False) - - with pytest.raises(AssertionError): - create_template_from_gold_solution(tmp_file_code, raise_not_found=True) - - def test_single_mark(self, tmp_path: Path) -> None: - CODE = """ - a = 1 - # TODO: CODE HERE - """ - tmp_file_code = tmp_path / 'wrong.py' - create_file(tmp_file_code, CODE) - - assert not create_template_from_gold_solution(tmp_file_code, raise_not_found=False) - - with pytest.raises(AssertionError): - create_template_from_gold_solution(tmp_file_code, raise_not_found=True)
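
Note on the new pipeline fail-mode tests above (test_fail_fast / test_fail_after_all): PipelineStageConfig.FailType.FAST is expected to stop executing the stages that follow a failed stage while still reporting their names, whereas FailType.AFTER_ALL keeps executing the remaining stages and only marks the run as failed at the end. The following is a minimal sketch of that control flow, written for illustration only; it is not the checker.pipeline.PipelineRunner implementation, and FailType / run_stages here are stand-in names.

from enum import Enum
from typing import Callable


class FailType(Enum):  # illustrative stand-in for PipelineStageConfig.FailType
    FAST = "fast"
    AFTER_ALL = "after_all"


def run_stages(stages: list[tuple[str, Callable[[], bool], FailType]]) -> bool:
    """Run (name, action, fail_type) stages; return True if the pipeline failed overall."""
    failed = False
    skip_rest = False
    for name, action, fail_type in stages:
        print(f"stage: {name}")  # stage names are reported even for stages that are skipped
        if skip_rest:
            continue  # after a FAST failure the remaining stages are announced but not executed
        if not action():
            failed = True
            if fail_type is FailType.FAST:
                skip_rest = True  # AFTER_ALL instead keeps executing the remaining stages
    return failed

With stages [("echo1", lambda: True, FailType.AFTER_ALL), ("fail", lambda: False, FailType.FAST), ("echo2", lambda: True, FailType.AFTER_ALL)], run_stages returns True, prints all three stage names, and never executes echo2, which mirrors what test_fail_fast asserts.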
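Note on the new ParametersResolver tests (tests/test_resolver.py above): resolve() is expected to substitute GitHub-Actions-style ${{ ... }} expressions from a context dict, keep the native type when the whole string is a single bare expression, recurse into lists and dicts, return non-template values unchanged, and raise BadConfig on malformed templates. Below is a minimal sketch of such a resolver, assuming a Jinja2 backend with custom delimiters; the actual checker.pipeline implementation may differ, and SimpleResolver is a hypothetical name used only here.

from typing import Any

import jinja2


class SimpleResolver:
    """Illustrative resolver for GitHub-Actions-style ${{ ... }} templates."""

    def __init__(self) -> None:
        self._env = jinja2.Environment(
            variable_start_string="${{",
            variable_end_string="}}",
            undefined=jinja2.StrictUndefined,  # unknown names fail instead of rendering as empty
        )

    def resolve(self, template: Any, context: dict[str, Any]) -> Any:
        if isinstance(template, str):
            stripped = template.strip()
            try:
                # keep the native type when the string is one bare expression, e.g. "${{ a }}" -> 2
                if stripped.startswith("${{") and stripped.endswith("}}") and stripped.count("${{") == 1:
                    return self._env.compile_expression(stripped[3:-2].strip())(**context)
                return self._env.from_string(template).render(**context)
            except jinja2.TemplateError as exc:
                # the real checker code raises checker.exceptions.BadConfig here
                raise ValueError(f"invalid template: {template!r}") from exc
        if isinstance(template, list):
            return [self.resolve(item, context) for item in template]
        if isinstance(template, dict):
            return {key: self.resolve(value, context) for key, value in template.items()}
        return template  # non-template values are returned unchanged

For example, SimpleResolver().resolve({"key": "${{ a }} + ${{ b }} = ${{ a + b }}"}, {"a": 2, "b": 3}) returns {"key": "2 + 3 = 5"}, matching the behaviour test_string_input and test_dict_input pin down.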