Reduce services.php by using attributes, and better structured config #3842

Workflow file for this run

# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ** Dynamic analysis **
#
# Dynamic analysis requires the Share community platform to run during evaluation. Currently, Catroweb's dynamic tests
# are mainly UI tests and a few unit tests; hence, the tests take multiple hours. To reduce the run time, the tests
# are executed in multiple independent jobs. However, all jobs use the same Catroweb Docker image to reduce build time.
#
name: 'Dynamic analysis'
# Run on every creation, update, and merge of a pull request.
# However, prevent checks from being initiated twice when the pull request comes from a branch on the same repository (e.g. Dependabot).
on:
pull_request:
branches:
- '**' # all branches
- '!master' # excludes master to prevent double test runs during release prep.
jobs:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Build:
#
# - In order to save computation time, the "app.catroweb" image is only built once, during this build phase.
#   Other jobs can reuse this image to reduce their build time. With several jobs plus the matrix build, the total
#   computation time for this workflow can be reduced significantly. This is important since we do not have unlimited
#   resources/machines to run the jobs.
#
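# Sketch of the image hand-off implemented by the jobs below (flow only, not an extra step;
# the artifact name "catroweb-image" matches the upload/download steps in this file):
#   build job:  docker save app.catroweb | gzip > catroweb-image.tar.gz  ->  actions/upload-artifact
#   test jobs:  actions/download-artifact  ->  gunzip -c catroweb-image.tar.gz | docker load
#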
build:
name: Build Catroweb Image
runs-on: ubuntu-latest
outputs:
image: ${{ steps.upload-artifact.outputs.artifact-url }}
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Cache Docker layers
uses: actions/cache@v4
with:
path: |
~/.docker/cache
~/.docker/buildx-cache
key: ${{ runner.os }}-docker-${{ github.sha }}
restore-keys: |
${{ runner.os }}-docker-
- name: Build Catroweb App Image
run: |
cd docker
docker compose -f docker-compose.test.yaml build app.catroweb
- name: Save Docker Image
run: |
cd docker
docker save app.catroweb | gzip > catroweb-image.tar.gz
- name: Upload Docker Image
uses: actions/upload-artifact@v4
id: upload-artifact
with:
name: catroweb-image
path: docker/catroweb-image.tar.gz
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# PhpUnit:
#
# - The tests are executed in our Docker setup since we have many integration tests which access the database, etc.
#   One might consider strictly separating integration and unit tests. Unit tests could be executed using
#   composer scripts alone, reducing the runtime to a few seconds: no build is needed, and dependencies can be cached easily.
#
# - A code coverage report is pushed to the artifacts, where it can be downloaded directly on GitHub.
#   Keep in mind that the report does not include the tests written for Behat.
#
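# For illustration: unit tests could be wired up as a composer script roughly like the snippet below.
# The "test:unit" script name and the "unit" test suite are assumptions, not part of this repository:
#   "scripts": {
#     "test:unit": "phpunit --testsuite unit"
#   }
# Running "composer test:unit" on a plain checkout would then skip the Docker build entirely.
#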
tests_phpunit:
name: PHPUnit
needs: build
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download Docker Image
uses: actions/download-artifact@v4
with:
name: catroweb-image
path: docker
- name: Load Docker Image
run: |
cd docker
gunzip -c catroweb-image.tar.gz | docker load
- name: Build and Start Docker Containers
run: |
cd docker
docker compose -f docker-compose.test.yaml up -d
sleep 60  # crude wait until the containers (DB, etc.) have finished starting up
- name: Run PHPUnit Tests
run: |
docker exec app.catroweb bin/phpunit --coverage-html tests/TestReports/CoverageReports/PhpUnit \
--coverage-clover=tests/TestReports/CoverageReports/PhpUnit/coverage.xml
- name: Upload PHPUnit Code Coverage Report
uses: actions/upload-artifact@v4
if: always()
with:
name: PhpUnitTestReport
path: tests/TestReports/CoverageReports/PhpUnit
- name: Upload Coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: tests/TestReports/CoverageReports/PhpUnit/coverage.xml
flags: phpunit # optional
name: codecov-umbrella # optional
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Behat:
#
# - This job runs all Behat suites in parallel using a matrix strategy. This is done since integration tests which
#   interact with a browser and the GUI are extremely slow. With a total run time of over an hour otherwise, this
#   approach reduces the run time drastically. The disadvantage: we can't easily create a coverage report
#   for the Behat scenarios. That is considered bad practice anyway, since Behat is intended to
#   deliver scenario automation in BDD.
#
# - Behat tests, and especially UI tests using Mink, tend to be flaky.
#   A test is only marked as failed if it fails more than 3 times in a row.
#   Flaky tests should be kept to a minimum in the codebase!
#
# - Behat only reruns failed tests - not pending/missing tests or those that threw exceptions!
#   A pending/missing test will NOT result in a failed pipeline!
#   This is why the explicit checks of the log file had to be added.
#
# - To ease the debugging, besides a log file, screenshots of failing tests are uploaded as artifacts.
#
# Notes:
# - Check behat.yaml when changing or creating suites
# - Suites will finish their work even if another suite fails (fail-fast: false)
#
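# Illustration of the rerun mechanism relied on below ("--rerun" is a standard Behat option that
# re-executes only the scenarios that failed in the previous run):
#   bin/behat -s web-general           # full suite run; failed scenarios are remembered
#   bin/behat -s web-general --rerun   # runs only the previously failed scenarios
#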
tests_behat:
name: Behat
needs: build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
testSuite:
- api-authentication
- api-mediapackage
- api-projects
- api-user
- api-notifications
- api-utility
- api-translation
- api-tag
- api-studio
- api-deprecated-general
- api-deprecated-project
- api-deprecated-profile
- api-deprecated-upload
- web-achievements
- web-admin
- web-apk-generation
- web-authentication
- web-code-statistics
- web-code-view
- web-comments
- web-general
- web-media-library
- web-notifications
- web-profile
- web-project
- web-project-details
- web-project-list
- web-reactions
- web-recommendations
- web-remix-system
- web-scratch-integration
- web-search
- web-studio
- web-top-bar
- web-translation
- web-system
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Download Docker Image
uses: actions/download-artifact@v4
with:
name: catroweb-image
path: docker
- name: Load Docker Image
run: |
cd docker
gunzip -c catroweb-image.tar.gz | docker load
- name: Build and Start Docker Containers
run: |
cd docker
docker compose -f docker-compose.test.yaml up -d
sleep 30  # crude wait until the containers have finished starting up
# Test Run
- name: Behat ${{ matrix.testSuite }} tests
id: test-run
continue-on-error: true
# - The output of the tests is piped both to stdout and into the log file.
# - A return code != 0 stops the execution of further commands in the pipeline.
# "tee" always returns 0, even if the behat test fails. Therefore, we need to exit with the first entry of
# the pipe status, which contains the correct exit code.
run: |
docker exec app.catroweb chmod -R 777 var
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} -f pretty \
|& tee tests/TestReports/Behat/${{ matrix.testSuite }}.log; \
TEST_RUN_EXIT_CODE=${PIPESTATUS[0]}
echo "test-run-exit-code=$TEST_RUN_EXIT_CODE" > $GITHUB_ENV
( exit $TEST_RUN_EXIT_CODE )
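# Minimal bash illustration of the exit-code handling above (assumed shell semantics, not a workflow step):
#   false |& tee out.log     # the pipeline exits with tee's status (0), masking the failure
#   exit ${PIPESTATUS[0]}    # restores the first command's status (1), failing the step as intended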
# Missing steps are not rerun by Behat; without this check they would be lost in the process.
# We must explicitly fail the pipeline if the log contains undefined steps.
- name: Check that suite has NO missing steps
if: always()
id: missing-check
run: |
if grep -q 'has missing steps. Define them with these snippets:' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
cat tests/TestReports/Behat/${{ matrix.testSuite }}.log
exit 1
fi
# Pending steps are not rerun by Behat; without this check they would be lost in the process.
# We must explicitly fail the pipeline if the log contains pending steps.
- name: Check that suite has NO pending steps
if: always()
id: pending-check
run: |
if grep -q 'pending)' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
cat tests/TestReports/Behat/${{ matrix.testSuite }}.log
exit 1
fi
# Chrome exceptions are problems that can't be fixed with a rerun. However, we can try to run the whole suite one more time.
- name: Check for Chrome Exceptions
if: always()
id: chrome-exception-check
run: |
if grep -q '\[DMore\\ChromeDriver\\StreamReadException\]' tests/TestReports/Behat/${{ matrix.testSuite }}.log; then
cat tests/TestReports/Behat/${{ matrix.testSuite }}.log
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} -f pretty
fi
- name: Upload Behat Test Artifacts
uses: actions/upload-artifact@v4
with:
name: logs_${{ matrix.testSuite }}
path: |
tests/TestReports/Behat/${{ matrix.testSuite }}.log
tests/TestReports/TestScreenshots
- name: Upload Coverage to Codecov
uses: codecov/codecov-action@v4
with:
files: tests/TestReports/CoverageReports/Behat/coverage.xml
flags: behat # optional
name: codecov-umbrella # optional
# fail_ci_if_error: true # optional (default = false)
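# The rerun attempts below are chained via $GITHUB_ENV: each attempt writes its exit code to
# "test-run-exit-code", and the following attempt only starts while that value is still non-zero.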
- name: Rerun Behat Tests (1st Attempt)
if: env.test-run-exit-code != '0'
id: test-rerun-1
continue-on-error: true
run: |
docker exec app.catroweb chmod -R 777 var
# "$?" after a failing command never runs under bash -e; capture the exit code via "||" instead.
TEST_RUN_EXIT_CODE=0
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} --rerun || TEST_RUN_EXIT_CODE=$?
echo "test-run-exit-code=$TEST_RUN_EXIT_CODE" >> $GITHUB_ENV
( exit $TEST_RUN_EXIT_CODE )
- name: Rerun Behat Tests (2nd Attempt)
if: env.test-run-exit-code != '0'
id: test-rerun-2
continue-on-error: true
run: |
TEST_RUN_EXIT_CODE=0
docker exec app.catroweb bin/behat -s ${{ matrix.testSuite }} --rerun || TEST_RUN_EXIT_CODE=$?
echo "test-run-exit-code=$TEST_RUN_EXIT_CODE" >> $GITHUB_ENV
( exit $TEST_RUN_EXIT_CODE )
- name: Rerun Behat Tests (3rd Attempt)
if: env.test-run-exit-code != '0'
id: test-rerun-3
run: |
docker exec app.catroweb bin/behat -f pretty -s ${{ matrix.testSuite }} --rerun
## Failure debugging
- name: Debug Failure
if: failure()
run: |
docker ps -a
echo "--- App Logs ---"
docker logs app.catroweb
echo "--- DB Logs ---"
docker logs db.catroweb.test
echo "--- Chrome Logs ---"
docker logs chrome.catroweb