diff --git a/.dockerignore b/.dockerignore index 567708f05..ff43d450d 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,7 @@ ci +cuebot/src/compiled_protobuf images samples sandbox/db-data tsc -venv \ No newline at end of file +venv diff --git a/.github/workflows/packaging-pipeline.yml b/.github/workflows/packaging-pipeline.yml index 7ff1568fe..c7ab1f85c 100644 --- a/.github/workflows/packaging-pipeline.yml +++ b/.github/workflows/packaging-pipeline.yml @@ -6,8 +6,25 @@ on: branches: [ master ] jobs: - build_components: + integration_test: + name: Run Integration Test runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Run test + run: ci/run_integration_test.sh + + - name: Archive log files + uses: actions/upload-artifact@v3 + if: ${{ always() }} + with: + name: test-logs + path: /tmp/opencue-test/*.log + + build_components: + needs: integration_test strategy: matrix: component: [cuebot, rqd] @@ -41,9 +58,10 @@ jobs: ARTIFACTS: cueadmin-${BUILD_ID}-all.tar.gz name: Build ${{ matrix.NAME }} + runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # Fetch all Git history, otherwise the current version number will # not be correctly calculated. 
@@ -58,6 +76,12 @@ jobs: role-to-assume: ${{ secrets.AWS_S3_ROLE }} role-duration-seconds: 1800 + - name: Login to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USER }} + password: ${{ secrets.DOCKER_PASS }} + - name: Set build ID run: | set -e @@ -66,13 +90,12 @@ jobs: echo "BUILD_ID=$(cat ./VERSION)" >> ${GITHUB_ENV} - name: Build Docker image - uses: docker/build-push-action@v1 + uses: docker/build-push-action@v3 with: - username: ${{ secrets.DOCKER_USER }} - password: ${{ secrets.DOCKER_PASS }} - dockerfile: ${{ matrix.component }}/Dockerfile - repository: opencuebuild/${{ matrix.component }} - tags: ${{ env.BUILD_ID }} + file: ${{ matrix.component }}/Dockerfile + tags: opencuebuild/${{ matrix.component }}:${{ env.BUILD_ID }} + context: . + push: true - name: Extract Artifacts run: | @@ -96,12 +119,12 @@ jobs: done create_other_artifacts: + name: Create Other Build Artifacts needs: build_components runs-on: ubuntu-latest - name: Create Other Build Artifacts steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # Fetch all Git history, otherwise the current version number will # not be correctly calculated. 
@@ -140,7 +163,7 @@ jobs: aws s3 cp LICENSE s3://${S3_BUCKET}/opencue/${BUILD_ID}/ aws s3 cp VERSION s3://${S3_BUCKET}/opencue/${BUILD_ID}/ aws s3 cp "${GITHUB_WORKSPACE}/artifacts/schema-${BUILD_ID}.sql" s3://${S3_BUCKET}/opencue/${BUILD_ID}/ - aws s3 cp "${GITHUB_WORKSPACE}/artifacts/demo_data-${BUILD_ID}.sql" s3://${S3_BUCKET}/opencue/${BUILD_ID}/ + aws s3 cp "${GITHUB_WORKSPACE}/artifacts/seed_data-${BUILD_ID}.sql" s3://${S3_BUCKET}/opencue/${BUILD_ID}/ aws s3 cp "${GITHUB_WORKSPACE}/artifacts/build_metadata.json" s3://${S3_BUCKET}/opencue/${BUILD_ID}/ - name: Display artifacts diff --git a/.github/workflows/release-pipeline.yml b/.github/workflows/release-pipeline.yml index 59f5da59d..48aeb9f5a 100644 --- a/.github/workflows/release-pipeline.yml +++ b/.github/workflows/release-pipeline.yml @@ -12,7 +12,7 @@ jobs: name: Preflight steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -45,7 +45,7 @@ jobs: name: Release ${{ matrix.component }} Docker image steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -61,14 +61,19 @@ jobs: set -e docker pull opencuebuild/${{ matrix.component }}:${BUILD_ID} - - name: Rebuild and push Docker image - uses: docker/build-push-action@v1 + - name: Login to Docker Hub + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USER }} password: ${{ secrets.DOCKER_PASS }} - dockerfile: ${{ matrix.component }}/Dockerfile - repository: opencue/${{ matrix.component }} - tags: ${{ env.BUILD_ID }}, latest + + - name: Rebuild and push Docker image + uses: docker/build-push-action@v3 + with: + file: ${{ matrix.component }}/Dockerfile + tags: opencue/${{ matrix.component }}:${{ env.BUILD_ID }},opencue/${{ matrix.component }}:latest + context: . 
+ push: true create_release: needs: preflight @@ -76,7 +81,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: fetch-depth: 0 @@ -103,7 +108,7 @@ jobs: run: | mkdir -p "${GITHUB_WORKSPACE}/artifacts/" aws s3 sync "s3://${S3_BUCKET}/opencue/${BUILD_ID}/" "${GITHUB_WORKSPACE}/artifacts/" - echo "::set-output name=filenames::$(ls "${GITHUB_WORKSPACE}/artifacts/" | xargs)" + echo "filenames=$(ls "${GITHUB_WORKSPACE}/artifacts/" | xargs)" >> ${GITHUB_OUTPUT} - name: List artifacts run: | @@ -114,9 +119,12 @@ jobs: run: | last_tagged_version=$(git describe --tags --abbrev=0 $(git rev-list --tags --skip=1 --max-count=1)) commits_since_last_release=$(git log --reverse --pretty="* %H %s" ${last_tagged_version}..HEAD) + # Use a delimiter to preserve the multiline string. # See https://github.community/t/set-output-truncates-multiline-strings/16852 - commits_since_last_release="${commits_since_last_release//$'\n'/'%0A'}" - echo "::set-output name=commits::${commits_since_last_release}" + delimiter="$(openssl rand -hex 8)" + echo "commits<<${delimiter}" >> ${GITHUB_OUTPUT} + echo "${commits_since_last_release}" >> ${GITHUB_OUTPUT} + echo "${delimiter}" >> ${GITHUB_OUTPUT} - name: Create release id: create_release @@ -161,8 +169,8 @@ jobs: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: upload_url: ${{ steps.create_release.outputs.upload_url }} - asset_path: ${{ github.workspace }}/artifacts/demo_data-${{ env.BUILD_ID }}.sql - asset_name: demo_data-${{ env.BUILD_ID }}.sql + asset_path: ${{ github.workspace }}/artifacts/seed_data-${{ env.BUILD_ID }}.sql + asset_name: seed_data-${{ env.BUILD_ID }}.sql asset_content_type: application/octet-stream - name: Upload Cuebot JAR diff --git a/.github/workflows/sonar-cloud-pipeline.yml b/.github/workflows/sonar-cloud-pipeline.yml index ec330da8c..1319f828b 100644 --- a/.github/workflows/sonar-cloud-pipeline.yml +++ b/.github/workflows/sonar-cloud-pipeline.yml @@ -12,7 
+12,7 @@ jobs: name: Analyze Python Components steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # Fetch all Git history, otherwise the current version number will # not be correctly calculated. @@ -33,7 +33,7 @@ jobs: name: Analyze Cuebot steps: - name: Checkout - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: # Fetch all Git history, otherwise the current version number will # not be correctly calculated. diff --git a/.github/workflows/testing-pipeline.yml b/.github/workflows/testing-pipeline.yml index 5d8acd88a..15967a4a9 100644 --- a/.github/workflows/testing-pipeline.yml +++ b/.github/workflows/testing-pipeline.yml @@ -7,55 +7,82 @@ on: branches: [ master ] jobs: - test_python_2019: - name: Run Python Unit Tests (CY2019) + test_python_2022: + name: Run Python Unit Tests (CY2022) runs-on: ubuntu-latest - container: aswf/ci-opencue:2019 + container: aswf/ci-opencue:2022 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Run Python Tests - run: ci/run_python_tests.sh + run: ci/run_python_tests.sh --no-gui - test_cuebot_2019: - name: Build Cuebot and Run Unit Tests (CY2019) + test_cuebot_2022: + name: Build Cuebot and Run Unit Tests (CY2022) runs-on: ubuntu-latest container: - image: aswf/ci-opencue:2019 + image: aswf/ci-opencue:2022 steps: - - uses: actions/checkout@v2 - - name: Build with Gradle - run: | - chown -R aswfuser:aswfgroup . - su -c "cd cuebot && ./gradlew build --stacktrace --info" aswfuser + - uses: actions/checkout@v3 + - name: Build with Gradle + run: | + chown -R aswfuser:aswfgroup . 
+ su -c "cd cuebot && ./gradlew build --stacktrace --info" aswfuser - test_python_2020: - name: Run Python Unit Tests (CY2020) + test_python_2023: + name: Run Python Unit Tests (CY2023) runs-on: ubuntu-latest - container: aswf/ci-opencue:2020 + container: aswf/ci-opencue:2023 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Run Python Tests - run: ci/run_python_tests.sh --lint + run: ci/run_python_tests.sh - test_cuebot_2020: - name: Build Cuebot and Run Unit Tests (CY2020) + test_cuebot_2023: + name: Build Cuebot and Run Unit Tests (CY2023) runs-on: ubuntu-latest container: - image: aswf/ci-opencue:2020 + image: aswf/ci-opencue:2023 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build with Gradle run: | chown -R aswfuser:aswfgroup . su -c "cd cuebot && ./gradlew build --stacktrace --info" aswfuser + test_python2: + name: Run Python Unit Tests using Python2 + runs-on: ubuntu-latest + container: aswf/ci-opencue:2019 + steps: + - uses: actions/checkout@v3 + - name: Run Python Tests + run: ci/run_python_tests.sh + + test_pyside6: + name: Run CueGUI Tests using PySide6 + runs-on: ubuntu-latest + container: almalinux:9 + steps: + - uses: actions/checkout@v2 + - name: Run CueGUI Tests + run: ci/test_pyside6.sh + + lint_python: + name: Lint Python Code + runs-on: ubuntu-latest + container: aswf/ci-opencue:2022 + steps: + - uses: actions/checkout@v3 + - name: Lint Python Code + run: ci/run_python_lint.sh + test_sphinx: name: Test Documentation Build runs-on: ubuntu-latest container: - image: aswf/ci-opencue:2020 + image: aswf/ci-opencue:2023 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Run Sphinx build run: ci/build_sphinx_docs.sh @@ -63,18 +90,18 @@ jobs: name: Check Changed Files runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Get Changed Files id: get_changed_files - uses: jitterbit/get-changed-files@v1 + uses: 
tj-actions/changed-files@v35 - name: Check for Version Change - run: ci/check_changed_files.py ${{ steps.get_changed_files.outputs.modified }} ${{ steps.get_changed_files.outputs.removed }} + run: ci/check_changed_files.py ${{ steps.get_changed_files.outputs.modified_files }} ${{ steps.get_changed_files.outputs.deleted_files }} check_migration_files: name: Check Database Migration Files runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Check Migration Files run: ci/check_database_migrations.py @@ -82,9 +109,9 @@ jobs: name: Check for Version Bump runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Get Changed Files id: get_changed_files - uses: jitterbit/get-changed-files@v1 + uses: tj-actions/changed-files@v35 - name: Check for Version Change - run: ci/check_version_bump.py ${{ steps.get_changed_files.outputs.all }} + run: ci/check_version_bump.py ${{ steps.get_changed_files.outputs.all_changed_and_modified_files }} diff --git a/.gitignore b/.gitignore index 9de17bdf1..12d65dce4 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,4 @@ venv*/ coverage.xml htmlcov/ /.env +.envrc diff --git a/CODEOWNERS b/CODEOWNERS index ff91076cd..09f526fc5 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1 @@ -* @bcipriano @gregdenton @jrray @smith1511 @larsbijl @DiegoTavares @IdrisMiles -/tsc/gsoc @bcipriano @gregdenton @shiva-kannan +* @bcipriano @gregdenton @jrray @smith1511 @larsbijl @DiegoTavares @IdrisMiles @splhack diff --git a/CREDITS b/CREDITS deleted file mode 100644 index 988a755e9..000000000 --- a/CREDITS +++ /dev/null @@ -1,37 +0,0 @@ -cue3bot - 3720 Matt Chambers - 167 John Welborn - 123 Michael Zhang - 50 J Robert Ray - 27 Bond-Jay Ting - 22 Kasra Faghihi - 14 Blair Zajac - 2 Kevin Coats - 1 Jordon Phillips - -rqd - 473 John Welborn - 25 J Robert Ray - 6 Yudi Xue - 2 Blair Zajac - 2 Michael Zhang - 2 Jordon Phillips - 1 Kasra Faghihi - -spi_cue - 252 John Welborn - 226 
Matt Chambers - 12 Jordon Phillips - 8 Yudi Xue - 5 J Robert Ray - 4 Michael Zhang - 3 Blair Zajac - -python_ice_server - 27 Blair Zajac - 8 Cottalango Leon - 5 J Robert Ray - 3 John Welborn - 2 Michael Zhang - 1 Geo Snelling - 1 Sam Richards diff --git a/README.md b/README.md index a198513c8..a556d1ae5 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ ![OpenCue](/images/opencue_logo_with_text.png) -[![Supported VFX Platform Versions](https://img.shields.io/badge/vfx%20platform-2019--2020-lightgrey.svg)](http://www.vfxplatform.com/) -![Supported Python Versions](https://img.shields.io/badge/python-2.7%2C%203.6%2C%203.7-blue.svg) +[![Supported VFX Platform Versions](https://img.shields.io/badge/vfx%20platform-2019--2021-lightgrey.svg)](http://www.vfxplatform.com/) +![Supported Python Versions](https://img.shields.io/badge/python-2.7%2C%203.6%2C%203.7%2C%203.8%2C%203.9-blue.svg) [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/2837/badge)](https://bestpractices.coreinfrastructure.org/projects/2837) - [Introduction](#Introduction) diff --git a/VERSION.in b/VERSION.in index ce609caf8..e34629406 100644 --- a/VERSION.in +++ b/VERSION.in @@ -1 +1 @@ -0.8 \ No newline at end of file +0.22 diff --git a/ci/extract_schema.sh b/ci/extract_schema.sh index c4365962e..ec38b6344 100755 --- a/ci/extract_schema.sh +++ b/ci/extract_schema.sh @@ -34,7 +34,7 @@ docker exec -t --user=${DB_USER} ${PG_CONTAINER} pg_dump --no-privileges --no-ow | tee "${ARTIFACT_DIRECTORY}/schema-${BUILD_ID}.sql" # The demo data gets its own build artifact too. 
-cp "${SCHEMA_DIRECTORY}/demo_data.sql" "${ARTIFACT_DIRECTORY}/demo_data-${BUILD_ID}.sql" +cp "${SCHEMA_DIRECTORY}/seed_data.sql" "${ARTIFACT_DIRECTORY}/seed_data-${BUILD_ID}.sql" docker kill ${FLYWAY_CONTAINER} docker kill ${PG_CONTAINER} diff --git a/ci/pylintrc_main b/ci/pylintrc_main index b4510f658..85edaf4b4 100644 --- a/ci/pylintrc_main +++ b/ci/pylintrc_main @@ -60,7 +60,8 @@ confidence= # --enable=similarities". If you want to run only the classes checker, but have # no Warning level messages displayed, use "--disable=all --enable=classes # --disable=W". -disable=duplicate-code, +disable=c-extension-no-member, + duplicate-code, fixme, invalid-name, locally-disabled, diff --git a/ci/pylintrc_test b/ci/pylintrc_test index e4f09c50d..4116f7e5f 100644 --- a/ci/pylintrc_test +++ b/ci/pylintrc_test @@ -70,6 +70,7 @@ disable=arguments-differ, no-self-use, protected-access, raise-missing-from, + too-many-arguments, too-many-lines, too-many-locals, too-many-public-methods, diff --git a/ci/run_gui_test.sh b/ci/run_gui_test.sh new file mode 100755 index 000000000..3c7d92a6d --- /dev/null +++ b/ci/run_gui_test.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Wrapper script for CueGUI tests. +# +# xvfb-run sometimes crashes on exit, we haven't been able to figure out why yet. +# This means that tests may pass but the xvfb-run crash will generate a non-zero exit code +# and cause our CI pipeline to fail. +# +# We work around this by capturing unit test output and looking for the text that indicates +# tests have passed: +# +# > Ran 209 tests in 4.394s +# > +# > OK +# + +py="$(command -v python3)" +if [[ -z "$py" ]]; then + py="$(command -v python)" +fi +echo "Using Python binary ${py}" + +test_log="/tmp/cuegui_result.log" +PYTHONPATH=pycue xvfb-run -d "${py}" cuegui/setup.py test | tee ${test_log} + +grep -Pz 'Ran \d+ tests in [0-9\.]+s\n\nOK' ${test_log} +if [ $? 
-eq 0 ]; then + echo "Detected passing tests" + exit 0 +fi + +echo "Detected test failure" +exit 1 diff --git a/ci/run_integration_test.sh b/ci/run_integration_test.sh new file mode 100755 index 000000000..8bde5335d --- /dev/null +++ b/ci/run_integration_test.sh @@ -0,0 +1,262 @@ +#!/bin/bash +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# OpenCue integration test script +# +# Stands up a clean environment using Docker compose and verifies all +# components are functioning as expected. +# +# Run with: +# ./run_integration_test.sh + +set -e + +RQD_ROOT="/tmp/rqd" +TEST_LOGS="/tmp/opencue-test" +DOCKER_COMPOSE_LOG="${TEST_LOGS}/docker-compose.log" +DB_DATA_DIR="sandbox/db-data" +VENV="/tmp/opencue-integration-venv" + +log() { + echo "$(date "+%Y-%m-%d %H:%M:%S") $1 $2" +} + +kill_descendant_processes() { + local pid="$1" + local and_self="${2:-false}" + if children="$(pgrep -P "$pid")"; then + for child in $children; do + kill_descendant_processes "$child" true + done + fi + if [[ "$and_self" == true ]]; then + kill "$pid" 2>/dev/null || true + fi +} + +verify_command_exists() { + if ! 
command -v $1 &> /dev/null; then + log ERROR "command \"$1\" was not found" + exit 1 + fi +} + +verify_no_database() { + if [ -e "${DB_DATA_DIR}" ]; then + log ERROR "Postgres data directory ${DB_DATA_DIR} already exists" + exit 1 + fi +} + +verify_no_containers() { + num_containers=$(docker compose ps --format json | jq length) + if [[ $num_containers -gt 0 ]]; then + log ERROR "Found ${num_containers} Docker compose containers, clean these up with \`docker compose rm\` before continuing" + exit 1 + fi +} + +create_rqd_root() { + if [ -e "$RQD_ROOT" ]; then + log ERROR "log root ${RQD_ROOT} already exists" + exit 1 + fi + + mkdir -p "${RQD_ROOT}/logs" + mkdir "${RQD_ROOT}/shots" +} + +wait_for_service_state() { + log INFO "Waiting for service \"$1\" to have state \"$2\"..." + while true; do + current_time=$(date +%s) + if [[ $current_time -gt $3 ]]; then + log ERROR "Timed out waiting for Docker compose to come up" + exit 1 + fi + container=$(docker compose ps --all --format json | jq ".[] | select(.Service==\"$1\")") + if [[ ${container} = "" ]]; then + log INFO "Service \"$1\": no container yet" + else + container_name=$(echo "$container" | jq -r '.Name') + current_state=$(echo "$container" | jq -r '.State') + log INFO "Service \"$1\": container \"${container_name}\" state = ${current_state}" + if [[ ${current_state} = $2 ]]; then + break + fi + fi + sleep 5 + done +} + +verify_flyway_success() { + container=$(docker compose ps --all --format json | jq '.[] | select(.Service=="flyway")') + container_name=$(echo "$container" | jq -r '.Name') + exit_code=$(echo "$container" | jq -r '.ExitCode') + if [[ ${exit_code} = 0 ]]; then + log INFO "Service \"flyway\": container \"${container_name}\" exit code = 0 (PASS)" + else + log ERROR "Service \"flyway\": container \"${container_name}\" exit code = ${exit_code} (FAIL)" + exit 1 + fi +} + +verify_migration_versions() { + migrations_in_db=$(docker compose exec -e PGUSER=cuebot db psql -Aqtc "SELECT COUNT(*) FROM 
flyway_schema_history") + migrations_in_code=$(ls cuebot/src/main/resources/conf/ddl/postgres/migrations/ | wc -l | tr -d ' ') + if [[ ${migrations_in_db} = ${migrations_in_code} ]]; then + log INFO "Database and code both contain ${migrations_in_db} migrations (PASS)" + else + log ERROR "Database contains ${migrations_in_db} migrations, code contains ${migrations_in_code} (FAIL)" + exit 1 + fi +} + +create_and_activate_venv() { + if [[ -d "${VENV}" ]]; then + rm -rf "${VENV}" + fi + python3 -m venv "${VENV}" + source "${VENV}/bin/activate" +} + +test_pycue() { + want_shows="['testing']" + got_shows=$(python -c 'import opencue; print([show.name() for show in opencue.api.getShows()])') + if [[ "${got_shows}" = "${want_shows}" ]]; then + log INFO "(pycue) Got expected show list (PASS)" + else + log ERROR "(pycue) Got unexpected show list (FAIL)" + log ERROR "got: ${got_shows}, want: ${want_shows}" + exit 1 + fi + + rqd_name=$(docker compose ps --format json | jq -r '.[] | select(.Service=="rqd") | .Name') + rqd_ip=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${rqd_name}") + want_hosts="['${rqd_ip}']" + got_hosts=$(python -c 'import opencue; print([host.name() for host in opencue.api.getHosts()])') + if [[ "${got_hosts}" = "${want_hosts}" ]]; then + log INFO "(pycue) Got expected host list (PASS)" + else + log ERROR "(pycue) Got unexpected host list (FAIL)" + log ERROR "got: ${got_hosts}, want: ${want_hosts}" + exit 1 + fi +} + +test_cueadmin() { + want_show="testing" + ls_response=$(cueadmin -ls) + got_show=$(echo "${ls_response}" | tail -n 1 | cut -d ' ' -f 1) + if [[ "${got_show}" = "${want_show}" ]]; then + log INFO "(cueadmin) Got expected -ls response (PASS)" + else + log ERROR "(cueadmin) Got unexpected -ls response (FAIL)" + log ERROR "got show: ${got_show}, want show: ${want_show}" + log ERROR "full response: ${ls_response}" + exit 1 + fi + + rqd_name=$(docker compose ps --format json | jq -r '.[] | select(.Service=="rqd") | 
.Name') + want_host=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' "${rqd_name}") + lh_response=$(cueadmin -lh) + got_host=$(echo "${lh_response}" | tail -n 1 | cut -d ' ' -f 1) + if [[ "${got_host}" = "${want_host}" ]]; then + log INFO "(cueadmin) Got expected -lh response (PASS)" + else + log ERROR "(cueadmin) Got unexpected -lh response (FAIL)" + log ERROR "got host: ${got_host}, want host: ${want_host}" + log ERROR "full response: ${lh_response}" + exit 1 + fi +} + +run_job() { + samples/pyoutline/basic_job.py + job_name="testing-shot01-${USER}_basic_job" + samples/pycue/wait_for_job.py "${job_name}" --timeout 300 + log INFO "Job succeeded (PASS)" +} + +cleanup() { + docker compose rm --stop --force >>"${DOCKER_COMPOSE_LOG}" 2>&1 + rm -rf "${RQD_ROOT}" || true + rm -rf "${DB_DATA_DIR}" || true + rm -rf "${VENV}" || true +} + +main() { + # Ensure all subshells in the background are terminated when the main script exits. + trap "{ kill_descendant_processes $$; exit; }" SIGINT SIGTERM EXIT + + mkdir -p "${TEST_LOGS}" + if [[ "${CI:-false}" == true ]]; then + log INFO "More logs can be found under the test-logs artifact attached to this workflow execution" + else + log INFO "More logs can be found at ${TEST_LOGS}" + fi + + CI_DIRECTORY=$(cd "$(dirname "${BASH_SOURCE[0]}")" &> /dev/null && pwd) + OPENCUE_ROOT=$(dirname "${CI_DIRECTORY}") + log INFO "OpenCue project is located at ${OPENCUE_ROOT}" + cd "${OPENCUE_ROOT}" + + verify_command_exists docker + verify_command_exists "docker compose" + verify_command_exists jq + verify_no_database + verify_no_containers + create_rqd_root + + log INFO "$(docker --version)" + log INFO "$(docker compose version)" + + log INFO "Building Cuebot image..." + docker build -t opencue/cuebot -f cuebot/Dockerfile . &>"${TEST_LOGS}/docker-build-cuebot.log" + log INFO "Building RQD image..." + docker build -t opencue/rqd -f rqd/Dockerfile . 
&>"${TEST_LOGS}/docker-build-rqd.log" + + log INFO "Starting Docker compose..." + docker compose up &>"${DOCKER_COMPOSE_LOG}" & + if [[ "$(uname -s)" == "Darwin" ]]; then + docker_timeout=$(date -v +5M +%s) + else + docker_timeout=$(date -d '5 min' +%s) + fi + wait_for_service_state "db" "running" $docker_timeout + wait_for_service_state "flyway" "exited" $docker_timeout + wait_for_service_state "cuebot" "running" $docker_timeout + wait_for_service_state "rqd" "running" $docker_timeout + + verify_flyway_success + verify_migration_versions + log INFO "Creating Python virtual environment..." + create_and_activate_venv + log INFO "Installing OpenCue Python libraries..." + install_log="${TEST_LOGS}/install-client-sources.log" + sandbox/install-client-sources.sh &>"${install_log}" + log INFO "Testing pycue library..." + test_pycue + log INFO "Testing cueadmin..." + test_cueadmin + + run_job + + cleanup + + log INFO "Success" +} + +main diff --git a/ci/run_python_lint.sh b/ci/run_python_lint.sh new file mode 100755 index 000000000..bd86c9188 --- /dev/null +++ b/ci/run_python_lint.sh @@ -0,0 +1,54 @@ +#!/bin/bash + +set -e + +python_version=$(python -V) +echo "Will run Python lint using ${python_version}" + +pip install --user -r requirements.txt -r requirements_gui.txt + +# Protos need to have their Python code generated in order for tests to pass. +python -m grpc_tools.protoc -I=proto/ --python_out=pycue/opencue/compiled_proto --grpc_python_out=pycue/opencue/compiled_proto proto/*.proto +python -m grpc_tools.protoc -I=proto/ --python_out=rqd/rqd/compiled_proto --grpc_python_out=rqd/rqd/compiled_proto proto/*.proto + +# Fix imports to work in both Python 2 and 3. See +# for more info. +2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py +2to3 -wn -f import rqd/rqd/compiled_proto/*_pb2*.py + +echo "Running lint for pycue/..." 
+cd pycue +python -m pylint --rcfile=../ci/pylintrc_main FileSequence +python -m pylint --rcfile=../ci/pylintrc_main opencue --ignore=opencue/compiled_proto +python -m pylint --rcfile=../ci/pylintrc_test tests +cd .. + +echo "Running lint for pyoutline/..." +cd pyoutline +PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_main outline +PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_test tests +cd .. + +echo "Running lint for cueadmin/..." +cd cueadmin +PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_main cueadmin +PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_test tests +cd .. + +echo "Running lint for cuegui/..." +cd cuegui +PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_main cuegui --ignore=cuegui/images,cuegui/images/crystal +PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_test tests +cd .. + +echo "Running lint for cuesubmit/..." +cd cuesubmit +PYTHONPATH=../pycue:../pyoutline python -m pylint --rcfile=../ci/pylintrc_main cuesubmit +PYTHONPATH=../pycue:../pyoutline python -m pylint --rcfile=../ci/pylintrc_test tests +cd .. + +echo "Running lint for rqd/..." +cd rqd +python -m pylint --rcfile=../ci/pylintrc_main rqd --ignore=rqd/compiled_proto +python -m pylint --rcfile=../ci/pylintrc_test tests +cd .. diff --git a/ci/run_python_tests.sh b/ci/run_python_tests.sh index db3f68fd0..5f1bfe294 100755 --- a/ci/run_python_tests.sh +++ b/ci/run_python_tests.sh @@ -1,7 +1,16 @@ #!/bin/bash +# Script for running OpenCue unit tests with PySide2. +# +# This script is written to be run within the OpenCue GitHub Actions environment. +# See `.github/workflows/testing-pipeline.yml`. + set -e +args=("$@") +python_version=$(python -V 2>&1) +echo "Will run tests using ${python_version}" + pip install --user -r requirements.txt -r requirements_gui.txt # Protos need to have their Python code generated in order for tests to pass. 
@@ -16,29 +25,10 @@ python -m grpc_tools.protoc -I=proto/ --python_out=rqd/rqd/compiled_proto --grpc python pycue/setup.py test PYTHONPATH=pycue python pyoutline/setup.py test PYTHONPATH=pycue python cueadmin/setup.py test -PYTHONPATH=pycue xvfb-run -d python cuegui/setup.py test PYTHONPATH=pycue:pyoutline python cuesubmit/setup.py test python rqd/setup.py test -# Some environments don't have pylint available, for ones that do they should pass this flag. -if [[ "$1" == "--lint" ]]; then - cd pycue && python -m pylint --rcfile=../ci/pylintrc_main FileSequence && cd .. - cd pycue && python -m pylint --rcfile=../ci/pylintrc_main opencue --ignore=opencue/compiled_proto && cd .. - cd pycue && python -m pylint --rcfile=../ci/pylintrc_test tests && cd .. - - cd pyoutline && PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_main outline && cd .. - cd pyoutline && PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_test tests && cd .. - - cd cueadmin && PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_main cueadmin && cd .. - cd cueadmin && PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_test tests && cd .. - - cd cuegui && PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_main cuegui --ignore=cuegui/images,cuegui/images/crystal && cd .. - cd cuegui && PYTHONPATH=../pycue python -m pylint --rcfile=../ci/pylintrc_test tests && cd .. - - cd cuesubmit && PYTHONPATH=../pycue:../pyoutline python -m pylint --rcfile=../ci/pylintrc_main cuesubmit && cd .. - cd cuesubmit && PYTHONPATH=../pycue:../pyoutline python -m pylint --rcfile=../ci/pylintrc_test tests && cd .. - - cd rqd && python -m pylint --rcfile=../ci/pylintrc_main rqd --ignore=rqd/compiled_proto && cd .. - cd rqd && python -m pylint --rcfile=../ci/pylintrc_test tests && cd .. +# Xvfb no longer supports Python 2. 
+if [[ "$python_version" =~ "Python 3" && ${args[0]} != "--no-gui" ]]; then + ci/run_gui_test.sh fi - diff --git a/ci/run_python_tests_pyside6.sh b/ci/run_python_tests_pyside6.sh new file mode 100755 index 000000000..384841cfe --- /dev/null +++ b/ci/run_python_tests_pyside6.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Script for running OpenCue unit tests with PySide6. +# +# This script is written to be run within the OpenCue GitHub Actions environment. +# See `.github/workflows/testing-pipeline.yml`. + +set -e + +python_version=$(python -V 2>&1) +echo "Will run tests using ${python_version}" + +# NOTE: To run this in an almalinux environment, install these packages: +# yum -y install \ +# dbus-libs \ +# fontconfig \ +# gcc \ +# libxkbcommon-x11 \ +# mesa-libEGL-devel \ +# python-devel \ +# which \ +# xcb-util-keysyms \ +# xcb-util-image \ +# xcb-util-renderutil \ +# xcb-util-wm \ +# Xvfb + +# Install Python requirements. +python3 -m pip install --user -r requirements.txt -r requirements_gui.txt +# Replace PySide2 with PySide6. +python3 -m pip uninstall -y PySide2 +python3 -m pip install --user PySide6==6.3.2 + +# Protos need to have their Python code generated in order for tests to pass. +python -m grpc_tools.protoc -I=proto/ --python_out=pycue/opencue/compiled_proto --grpc_python_out=pycue/opencue/compiled_proto proto/*.proto +python -m grpc_tools.protoc -I=proto/ --python_out=rqd/rqd/compiled_proto --grpc_python_out=rqd/rqd/compiled_proto proto/*.proto + +# Fix compiled proto code for Python 3. 
+2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py +2to3 -wn -f import rqd/rqd/compiled_proto/*_pb2*.py + +python pycue/setup.py test +PYTHONPATH=pycue python pyoutline/setup.py test +PYTHONPATH=pycue python cueadmin/setup.py test +PYTHONPATH=pycue:pyoutline python cuesubmit/setup.py test +python rqd/setup.py test + +ci/run_gui_test.sh diff --git a/ci/test_pyside6.sh b/ci/test_pyside6.sh new file mode 100755 index 000000000..05bd4c173 --- /dev/null +++ b/ci/test_pyside6.sh @@ -0,0 +1,36 @@ +#!/bin/bash + +# Script for testing CueGUI with PySide6. +# +# This script is written to be run within an almalinux environment in the OpenCue +# GitHub Actions environment. See .github/workflows/testing-pipeline.yml. + +set -e + +# Install needed packages. +yum -y install \ + dbus-libs \ + fontconfig \ + gcc \ + libxkbcommon-x11 \ + mesa-libEGL-devel \ + python-devel \ + which \ + xcb-util-keysyms \ + xcb-util-image \ + xcb-util-renderutil \ + xcb-util-wm \ + Xvfb + +# Install Python requirements. +python3 -m pip install --user -r requirements.txt -r requirements_gui.txt +# Replace PySide2 with PySide6. +python3 -m pip uninstall -y PySide2 +python3 -m pip install --user PySide6==6.3.2 + +# Fix compiled proto code for Python 3. +python3 -m grpc_tools.protoc -I=proto/ --python_out=pycue/opencue/compiled_proto --grpc_python_out=pycue/opencue/compiled_proto proto/*.proto +2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py + +# Run tests. 
+ci/run_gui_test.sh diff --git a/connectors/prometheus_metrics/metrics b/connectors/prometheus_metrics/metrics index 960132938..4db675a27 100755 --- a/connectors/prometheus_metrics/metrics +++ b/connectors/prometheus_metrics/metrics @@ -7,15 +7,22 @@ from prometheus_client import start_http_server from prometheus_client import Gauge -CLUE_HOSTS_HARDWARE = Gauge( - 'cue_hosts_hardware_total', 'hosts hardware status', ['status']) +WEB_SERVICE_PORT = 8302 +REPORT_INTERVAL_SECONDS = 30 + + +CLUE_HOSTS_HARDWARE = Gauge('cue_hosts_hardware_total', 'hosts hardware status', ['status']) CLUE_HOSTS_LOCK = Gauge('cue_hosts_lock_total', 'hosts lock status', ['status']) CLUE_PROCS = Gauge('cue_procs_total', 'number of Procs') CLUE_PROCS_USABLE = Gauge('cue_procs_usable_total', 'number of usable Procs') CLUE_PROCS_USED = Gauge('cue_procs_used_total', 'number of Procs currently in use') -CLUE_FRAMES = Gauge('cue_frames', 'number of frames ', ['status', 'show']) -CLUE_REMAIN = Gauge('cue_remain', 'remaining core seconds (estimated) ', ['show']) +CLUE_FRAMES_PER_SHOW = Gauge('cue_frames', 'number of frames ', ['status', 'show']) +CLUE_FRAMES_LIMIT_PER_SHOW = Gauge('cue_frames_limit', 'number of limits', ['limit', 'status', 'show']) +CLUE_REMAIN_FRAME_PER_SHOW = Gauge('cue_remain', 'remaining core seconds (estimated) ', ['show']) + +CLUE_LIMITS = Gauge('cue_limits', 'limit stats ', ['name', 'value']) +CLUE_LIMITS_CAPACITY = Gauge('cue_limits_capacity', 'limits capacity ', ['name']) MANAGE_WAITING = Gauge('cue_manage_waiting_total', '') MANAGE_REMAINING_CAPACITY = Gauge('cue_manage_remaining_capacity_total', '') @@ -55,20 +62,31 @@ STRANDED_CORES = Gauge('cue_stranded_cores_total', '') def main(): + default_frame_stats = { + 'pending': 0, + 'dead': 0, + 'eaten': 0, + 'succeeded': 0, + 'running': 0 + } + while True: jobs = opencue.api.getJobs() shows = {} shows_remaining = {} + limits = {} for job in jobs: show = job.show() if show not in shows: - shows[show] = {'pending': 0, 'dead': 
0, - 'eaten': 0, 'succeeded': 0, 'running': 0} + shows[show] = default_frame_stats.copy() if show not in shows_remaining: shows_remaining[show] = 0 + if show not in limits: + limits[show] = {} + shows[show]['pending'] += job.pendingFrames() shows[show]['dead'] += job.deadFrames() shows[show]['eaten'] += job.eatenFrames() @@ -77,12 +95,36 @@ def main(): shows_remaining[show] += job.coreSecondsRemaining() + show_limits = limits[show] + for layer in job.getLayers(): + for limit in layer.limits(): + if limit not in show_limits: + show_limits[limit] = default_frame_stats.copy() + + show_limits[limit]['pending'] += layer.pendingFrames() + show_limits[limit]['dead'] += layer.deadFrames() + show_limits[limit]['eaten'] += layer.eatenFrames() + show_limits[limit]['succeeded'] += layer.succeededFrames() + show_limits[limit]['running'] += layer.runningFrames() + for show in shows: - for k, v in shows[show].items(): - CLUE_FRAMES.labels(status=k, show=show).set(v) + for frame_status, frame_count in shows[show].items(): + CLUE_FRAMES_PER_SHOW.labels(status=frame_status, show=show).set(frame_count) + + for limit, frame_stats in limits[show].items(): + for status, frame_count in frame_stats.items(): + CLUE_FRAMES_LIMIT_PER_SHOW.labels(limit=limit, status=status, show=show).set(frame_count) for show in shows_remaining: - CLUE_REMAIN.labels(show=show).set(shows_remaining[show]) + CLUE_REMAIN_FRAME_PER_SHOW.labels(show=show).set(shows_remaining[show]) + + for limit in opencue.api.getLimits(): + limit_name = limit.name() + current_running = limit.currentRunning() + max_value = limit.maxValue() + CLUE_LIMITS.labels(name=limit_name, value='current_running').set(current_running) + CLUE_LIMITS.labels(name=limit_name, value='max').set(max_value) + CLUE_LIMITS_CAPACITY.labels(name=limit_name).set(current_running/(max_value or 1) * 100.) 
# Handle the Host information hosts = opencue.api.getHosts() @@ -174,9 +216,9 @@ def main(): PICKED_UP_CORES.set(system_stats.picked_up_cores) STRANDED_CORES.set(system_stats.stranded_cores) - time.sleep(30) + time.sleep(REPORT_INTERVAL_SECONDS) if __name__ == '__main__': - start_http_server(8302) + start_http_server(WEB_SERVICE_PORT) main() diff --git a/cueadmin/Dockerfile b/cueadmin/Dockerfile index 37caf1d5f..74b318d46 100644 --- a/cueadmin/Dockerfile +++ b/cueadmin/Dockerfile @@ -1,35 +1,14 @@ -# ----------------- -# BUILD -# ----------------- -FROM centos:7 as build - -# First line after FROM should be unique to avoid --cache-from weirdness. -RUN echo "CueAdmin build stage" +FROM python:3.9.14 WORKDIR /src -RUN yum -y install \ - epel-release \ - gcc \ - python-devel - -RUN yum -y install \ - python-pip \ - python36 \ - python36-devel \ - python36-pip - -RUN python -m pip install --upgrade 'pip<21' -RUN python3.6 -m pip install --upgrade pip - -RUN python -m pip install --upgrade 'setuptools<45' -RUN python3.6 -m pip install --upgrade setuptools +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools COPY LICENSE ./ COPY requirements.txt ./ -RUN python -m pip install -r requirements.txt -RUN python3.6 -m pip install -r requirements.txt +RUN python3 -m pip install -r requirements.txt COPY proto/ ./proto COPY pycue/README.md ./pycue/ @@ -37,7 +16,7 @@ COPY pycue/setup.py ./pycue/ COPY pycue/opencue ./pycue/opencue COPY pycue/FileSequence ./pycue/FileSequence -RUN python -m grpc_tools.protoc \ +RUN python3 -m grpc_tools.protoc \ -I=./proto \ --python_out=./pycue/opencue/compiled_proto \ --grpc_python_out=./pycue/opencue/compiled_proto \ @@ -55,32 +34,13 @@ COPY cueadmin/cueadmin ./cueadmin/cueadmin COPY VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION -RUN cd pycue && python setup.py install - -RUN cd pycue && python3.6 setup.py install - -# TODO(bcipriano) Lint the code here. 
(Issue #78) - -RUN cd cueadmin && python setup.py test - -RUN cd cueadmin && python3.6 setup.py test +RUN cd pycue && python3 setup.py install +RUN cd cueadmin && python3 setup.py test RUN cp LICENSE requirements.txt VERSION cueadmin/ RUN versioned_name="cueadmin-$(cat ./VERSION)-all" \ - && mv cueadmin $versioned_name \ - && tar -cvzf $versioned_name.tar.gz $versioned_name/* - - -# ----------------- -# RUN -# ----------------- -FROM centos:7 - -# First line after FROM should be unique to avoid --cache-from weirdness. -RUN echo "CueAdmin runtime stage" - -WORKDIR /opt/opencue - -COPY --from=build /src/cueadmin-*-all.tar.gz ./ - + && mv cueadmin "${versioned_name}" \ + && tar -cvzf "${versioned_name}.tar.gz" ${versioned_name}/* \ + && mkdir -p /opt/opencue \ + && cp "${versioned_name}.tar.gz" /opt/opencue/ diff --git a/cueadmin/setup.py b/cueadmin/setup.py index 6b6ec92e6..3b829a089 100644 --- a/cueadmin/setup.py +++ b/cueadmin/setup.py @@ -46,6 +46,8 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], packages=find_packages(), entry_points={ diff --git a/cuebot/Dockerfile b/cuebot/Dockerfile index 56ae9acb1..d82b861cc 100644 --- a/cuebot/Dockerfile +++ b/cuebot/Dockerfile @@ -1,19 +1,16 @@ # ----------------- # BUILD # ----------------- -FROM gradle:6.0.1-jdk11 AS build +FROM gradle:6.0.1-jdk13 AS build USER gradle -# First line after FROM should be unique to avoid --cache-from weirdness. 
-RUN echo "Cuebot build stage" - COPY --chown=gradle:gradle ./cuebot /home/gradle/cuebot/ COPY --chown=gradle:gradle ./proto /home/gradle/proto/ WORKDIR /home/gradle/cuebot -RUN gradle build --stacktrace +RUN gradle build --info --stacktrace COPY --chown=gradle:gradle VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION @@ -25,12 +22,8 @@ RUN mv ./build/libs/cuebot.jar ./build/libs/cuebot-$(cat ./VERSION)-all.jar # ----------------- FROM jc21/rpmbuild-centos7:latest AS rpm -# Random first line after from - USER rpmbuilder -RUN echo "Cuebot RPM Stage" - COPY --chown=rpmbuilder:rpmbuilder LICENSE ./ COPY --from=build \ @@ -49,10 +42,7 @@ RUN chmod +x create_rpm.sh && ./create_rpm.sh cuebot "$(cat VERSION)" # ----------------- # RUN # ----------------- -FROM openjdk:11-jre-slim - -# First line after FROM should be unique to avoid --cache-from weirdness. -RUN echo "Cuebot runtime stage" +FROM openjdk:18-slim-buster ARG CUEBOT_GRPC_CUE_PORT=8443 ARG CUEBOT_GRPC_RQD_PORT=8444 diff --git a/cuebot/build.gradle b/cuebot/build.gradle index 2af5f04d4..6715a0ae6 100644 --- a/cuebot/build.gradle +++ b/cuebot/build.gradle @@ -1,4 +1,7 @@ +import org.gradle.api.tasks.testing.logging.TestExceptionFormat +import org.gradle.api.tasks.testing.logging.TestLogEvent + buildscript { repositories { mavenCentral() @@ -44,7 +47,7 @@ dependencies { compile group: 'com.google.guava', name: 'guava', version: '26.0-android' compile group: 'com.sun.mail', name: 'mailapi', version: '1.5.4' compile group: 'commons-lang', name: 'commons-lang', version: '2.6' - compile group: 'io.grpc', name: 'grpc-all', version: '1.14.0' + compile group: 'io.grpc', name: 'grpc-all', version: '1.47.0' compile group: 'org.apache.activemq', name: 'activemq-pool', version: activemqVersion compile group: 'org.apache.velocity', name: 'velocity', version: '1.7' compile group: 'org.jdom', name: 'jdom', version: '1.1.3' @@ -54,17 +57,21 @@ dependencies { compile group: 
'org.springframework', name: 'spring-jms' compile group: 'org.quartz-scheduler', name: 'quartz', version: '2.2.1', { exclude group: 'c3p0', module: 'c3p0' } compile group: 'org.postgresql', name: 'postgresql', version: '42.2.2' - compile group: 'com.google.protobuf', name: 'protobuf-java', version: '3.13.0' - compile group: 'log4j', name: 'log4j', version: '1.2.17' - compile group: 'org.slf4j', name: 'slf4j-log4j12', version: '1.7.26' + compile group: 'com.google.protobuf', name: 'protobuf-java', version: '3.21.2' + compile group: 'org.apache.logging.log4j', name: 'log4j-api', version: '2.16.0' + compile group: 'org.apache.logging.log4j', name: 'log4j-core', version: '2.16.0' + compile group: 'org.apache.logging.log4j', name: 'log4j-slf4j-impl', version: '2.16.0' protobuf fileTree("../proto/") testCompile group: 'junit', name: 'junit', version: '4.12' testCompile group: 'org.springframework.boot', name: 'spring-boot-starter-test' testCompile group: 'org.assertj', name: 'assertj-core', version: '3.8.0' - testCompile group: 'com.opentable.components', name: 'otj-pg-embedded', version: '0.13.3' + testCompile group: 'io.zonky.test', name: 'embedded-postgres', version: '1.3.1' testCompile group: 'org.flywaydb', name: 'flyway-core', version: '5.2.0' + + // Use newer version of Postgres for tests: https://github.com/zonkyio/embedded-postgres/issues/78 + implementation enforcedPlatform('io.zonky.test.postgres:embedded-postgres-binaries-bom:11.13.0') } compileJava { @@ -78,12 +85,12 @@ compileTestJava { protobuf { protoc { // The protoc compiler - artifact = 'com.google.protobuf:protoc:3.13.0' + artifact = 'com.google.protobuf:protoc:3.21.2' } plugins { grpc { // Generate gRPC stubs. - artifact = 'io.grpc:protoc-gen-grpc-java:1.7.0' + artifact = 'io.grpc:protoc-gen-grpc-java:1.47.0' } } generateProtoTasks { @@ -130,12 +137,7 @@ jacocoTestReport { fileTree(dir: it, exclude: [ // Exclude proto files' generated Java code. 
- 'com/imageworks/spcue/grpc/**', - // Exclude Oracle classes; our standard test environment isn't currently - // able to run Oracle unit tests, so coverage of those files will always - // read as 0% even though tests do exist. - 'com/imageworks/spcue/dao/oracle/**', - 'com/imageworks/spcue/dao/criteria/oracle/**',]) + 'com/imageworks/spcue/grpc/**']) })) } } @@ -146,11 +148,6 @@ sonarqube { property "sonar.organization", "academysoftwarefoundation" property "sonar.projectKey", "AcademySoftwareFoundation_OpenCue_Cuebot" property "sonar.projectName", "OpenCue Cuebot" - // SonarCloud will pick up the JaCoCo report automatically, but has its own options - // for excluding files. We don't need to exclude generated code here as it isn't - // checked into the repository, so SonarCloud doesn't even know it exists. - property "sonar.coverage.exclusions", "src/main/java/com/imageworks/spcue/dao/oracle/**," + - "src/main/java/com/imageworks/spcue/dao/criteria/oracle/**" // NOTE: sonar.login must be provided manually, like: // ./gradlew sonarqube -Dsonar.login= @@ -173,3 +170,26 @@ tasks.withType(AbstractArchiveTask) { preserveFileTimestamps = false reproducibleFileOrder = true } + +tasks.withType(Test) { + // Configure logging when running Gradle with --info or --debug. + testLogging { + info { + // Don't show STANDARD_OUT messages, these clutter up the output + // and make it hard to find actual failures. + events TestLogEvent.FAILED + exceptionFormat TestExceptionFormat.FULL + showStandardStreams false + } + debug { + // Show everything. 
+ events TestLogEvent.STARTED, + TestLogEvent.FAILED, + TestLogEvent.PASSED, + TestLogEvent.SKIPPED, + TestLogEvent.STANDARD_ERROR, + TestLogEvent.STANDARD_OUT + exceptionFormat TestExceptionFormat.FULL + } + } +} diff --git a/cuebot/oracle/xe/.gitignore b/cuebot/oracle/xe/.gitignore deleted file mode 100644 index 110868150..000000000 --- a/cuebot/oracle/xe/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/docker_setup \ No newline at end of file diff --git a/cuebot/oracle/xe/README.md b/cuebot/oracle/xe/README.md deleted file mode 100644 index 80cae882b..000000000 --- a/cuebot/oracle/xe/README.md +++ /dev/null @@ -1,43 +0,0 @@ - -NOTE: This is a stop gap only intended to be used until the postgres swap is ready. - - -### Initial Setup -To install and run Oracle XE, you'll need to clone the `docker-images` repo from Oracle. You'll also -need to download the Oracle XE rpm from Oracle and copy it into the `docker_setup` folder. -The following commands detail the steps including cleanup. Please note that the `docker_setup` folder is in the -`.gitignore`. - -``` -mkdir docker_setup -cd docker_setup -git clone https://github.com/oracle/docker-images.git -cp docker-images/OracleDatabase/SingleInstance/dockerfiles/11.2.0.2/* ./ -cp ../oracle-xe-11.2.0-1.0.x86_64.rpm.zip ./ -rm -rf docker-images -``` - -### To Build -Oracle requires more shared mem than what Docker is provided by default, make sure to include the "--shm-size" flag. -`docker build --shm-size=1G -t oracle-xe Dockerfile.xe .` - -### To Run -Set the password to whatever is the password for the application. -`docker run --shm-size=1G -p 1521:1521 -e ORACLE_PWD= oracle-xe` - -### To Connect -Oracle SID: `XE` -Port: `1521` -Use `sys as sysdba` user with provided password -Failing to set a password in the docker run command will generate a random password - -### Script to build -The above steps can all be accomplished by running the `./run_db_container.sh` script. 
-It requires two arguments, the system password for the database and a password to create for the cue user. - -### To Run: -`export PROJECT_ID='YOUR GCP PROJECT ID'` -`./run_db_container.sh [--build-prod]` - -### Populating the Schema -Using the `--build-prod` will apply the db schema from `src/main/resources/conf/ddl/oracle/schema.sql`. diff --git a/cuebot/oracle/xe/apply_schema.py b/cuebot/oracle/xe/apply_schema.py deleted file mode 100644 index a6e2cbe20..000000000 --- a/cuebot/oracle/xe/apply_schema.py +++ /dev/null @@ -1,40 +0,0 @@ - -import argparse -import cx_Oracle - - -SPLITTER_KEY = '-- SPLIT HERE!' - - -def get_statements(sql_file): - with open(sql_file) as file_handle: - statement = '' - for line in file_handle: - if line.startswith(SPLITTER_KEY): - yield statement - statement = '' - else: - statement += line - - -def main(user, pwd, sql_file, sql_data_file=None): - print "CONNECTING: {} {} {}".format(user, pwd, sql_file) - connection = cx_Oracle.connect(user, pwd) - cursor = connection.cursor() - for statement in get_statements(sql_file): - cursor.execute(statement) - if sql_data_file: - print 'APPLYING DATA FILE: {}'.format(sql_data_file) - for statement in get_statements(sql_data_file): - cursor.execute(statement) - cursor.close() - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('-u', '--user', help='db user', default='cue') - parser.add_argument('-p', '--pwd', help='db password to connect to cue user') - parser.add_argument('-s', '--sql', help='path to SQL schema file') - parser.add_argument('-d', '--sql-data', help='path to SQL file with inital data to populate') - args = parser.parse_args() - main(args.user, args.pwd, args.sql, args.sql_data) diff --git a/cuebot/oracle/xe/apply_schema.sh b/cuebot/oracle/xe/apply_schema.sh deleted file mode 100755 index 1a088480a..000000000 --- a/cuebot/oracle/xe/apply_schema.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash - -echo "Applying Database schema..." 
- -# Install pip -curl "https://bootstrap.pypa.io/get-pip.py" -o "/tmp/get-pip.py" -python /tmp/get-pip.py -rm /tmp/get-pip.py - -# install cx_Oracle -pip install cx_Oracle - -su oracle -c "LD_LIBRARY_PATH=/u01/app/oracle/product/11.2.0/xe/lib/ bash -c \"python /tmp/apply_schema.py -p $1 -u $2 -s $3 -d $4\"" -echo "Finished applying schema." diff --git a/cuebot/oracle/xe/build_oracle_base_image.sh b/cuebot/oracle/xe/build_oracle_base_image.sh deleted file mode 100644 index 507c0659d..000000000 --- a/cuebot/oracle/xe/build_oracle_base_image.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/sh - -set -e - -timestamp=$(date +%Y%m%d%H%M%S) - - -INSTANCE_NAME="oracle-build-$timestamp" -ZONE="us-central1-c" -SSH_OPTS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" -IMAGE_NAME="oracle-base-$timestamp" - -echo "Starting instance $INSTANCE_NAME..." - -gcloud --project $PROJECT_ID compute instances create $INSTANCE_NAME \ - --machine-type=n1-standard-2 --network=default --zone=$ZONE \ - --image-project=eip-images --image-family=centos-7-drawfork - -INSTANCE_IP=$(gcloud --project $PROJECT_ID compute instances describe $INSTANCE_NAME \ - --zone $ZONE --format="value(networkInterfaces[0].accessConfigs[0].natIP)") - -echo "Waiting for instance to become available..." 
- -sleep 60 - -ssh $SSH_OPTS $INSTANCE_IP sudo yum install -y yum-utils device-mapper-persistent-data lvm2 -ssh $SSH_OPTS $INSTANCE_IP sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo -ssh $SSH_OPTS $INSTANCE_IP sudo yum install -y docker-ce - -script_path=$(dirname $0)/setupDB.sh -script_dest="/etc/opencue/" -ssh $SSH_OPTS $INSTANCE_IP sudo mkdir $script_dest -ssh $SSH_OPTS $INSTANCE_IP sudo chmod 777 $script_dest -scp $script_path $INSTANCE_IP:$script_dest - -gcloud --project $PROJECT_ID --quiet compute instances delete $INSTANCE_NAME \ - --zone=$ZONE --keep-disks=boot -gcloud --project $PROJECT_ID compute images create $IMAGE_NAME \ - --family=cue-oracle-base --source-disk=$INSTANCE_NAME --source-disk-zone=$ZONE - -echo "Image $IMAGE_NAME created." - diff --git a/cuebot/oracle/xe/run_db_container.sh b/cuebot/oracle/xe/run_db_container.sh deleted file mode 100755 index fd307bd02..000000000 --- a/cuebot/oracle/xe/run_db_container.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/sh - -set -e - -cd $(dirname $0) - -DEP_BUCKET="GCS BUCKET NAME" # Where the oracle rpm was copied. - -XE_DIRECTORY=`pwd` -CUEBOT_ROOT_DIRECTORY=$(dirname $(dirname $XE_DIRECTORY)) -ORACLE_RPM="oracle-xe-11.2.0-1.0.x86_64.rpm.zip" -ORACLE_DOCKER_REPO="https://github.com/oracle/docker-images.git" -DOCKER_NAME="oracle-xe" -ORACLE_SQL_FILE='/tmp/oracle_ddl/schema.sql' -ORACLE_SQL_DATA_FILE='/tmp/oracle_ddl/demo_data.sql' -CUE_DB_USER='CUE' - - -if (( $# < 2 )); then - echo "Please pass a password as the first argument!" - echo "Usage:" - echo " ./run_db_container.sh sys_db_password cue_db_password [--build-prod]" - exit 1 -fi - -if [ ! 
-d "./docker_setup" ]; then - mkdir docker_setup - cd docker_setup - gsutil -m cp "${DEP_BUCKET}/${ORACLE_RPM}" ./ - git clone "${ORACLE_DOCKER_REPO}" - cp docker-images/OracleDatabase/SingleInstance/dockerfiles/11.2.0.2/* ./ - rm -rf docker-images -else - cd docker_setup -fi - -echo "Attempting to stop any running docker images" -docker stop "${DOCKER_NAME}" || : -docker rm "${DOCKER_NAME}" || : -echo "Building new docker container" -docker build --shm-size=4g -t "${DOCKER_NAME}" -f Dockerfile.xe . -echo "Running new docker container" -docker run -itd --shm-size=1g --name "${DOCKER_NAME}" -p 1521:1521 -p 8080:8080 -e ORACLE_PWD=$1 "${DOCKER_NAME}" - -echo "Waiting for DB to come up..." -sleep 90 - -echo "Configuring DB..." -docker cp ../setup_db.sh oracle-xe:/tmp/setup_db.sh -docker exec oracle-xe /bin/bash -c "/tmp/setup_db.sh $CUE_DB_USER $2" - - -if [ "$3" = "--build-prod" ]; then - echo "Applying Schema..." - docker exec oracle-xe /bin/bash -c "mkdir $(dirname $ORACLE_SQL_FILE)" - docker cp ${CUEBOT_ROOT_DIRECTORY}/src/main/resources/conf/ddl/oracle/schema.sql oracle-xe:$ORACLE_SQL_FILE - docker cp ${CUEBOT_ROOT_DIRECTORY}/src/test/resources/conf/ddl/oracle/demo_data.sql oracle-xe:$ORACLE_SQL_DATA_FILE - docker cp ${CUEBOT_ROOT_DIRECTORY}/oracle/xe/apply_schema.sh oracle-xe:/tmp/ - docker cp ${CUEBOT_ROOT_DIRECTORY}/oracle/xe/apply_schema.py oracle-xe:/tmp/ - docker exec oracle-xe /bin/bash -c "/tmp/apply_schema.sh $2 $CUE_DB_USER $ORACLE_SQL_FILE $ORACLE_SQL_DATA_FILE" -fi diff --git a/cuebot/oracle/xe/setup_db.sh b/cuebot/oracle/xe/setup_db.sh deleted file mode 100755 index 7b6803c9d..000000000 --- a/cuebot/oracle/xe/setup_db.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -echo "Setting up Database settings..." 
-echo "CREATING USER: $1 - $2" -results=`su -p oracle -c "sqlplus system/$1 as sysdba" << EOF - alter system set processes=300 scope=spfile; - alter system reset sessions scope=spfile sid='*'; - alter system set local_listener='(DESCRIPTION=(ADDRESS=(PROTOCOL=IPC)(KEY=EXTPROC_FOR_XE)))' SCOPE=BOTH; - CREATE USER $1 IDENTIFIED BY $2; - GRANT CONNECT, RESOURCE, DBA TO $1; - shutdown immediate; - startup; - exit; -EOF` -echo $results -echo "Finished configuring database." diff --git a/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java b/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java index 69c448b11..31ebeb12e 100644 --- a/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java +++ b/cuebot/src/main/java/com/imageworks/common/spring/remoting/CueServerInterceptor.java @@ -7,13 +7,14 @@ import io.grpc.ServerCallHandler; import io.grpc.ServerInterceptor; import io.grpc.Status; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class CueServerInterceptor implements ServerInterceptor { - private static final Logger logger = Logger.getLogger(CueServerInterceptor.class); - private static final Logger accessLogger = Logger.getLogger("API"); + private static final Logger logger = LogManager.getLogger(CueServerInterceptor.class); + private static final Logger accessLogger = LogManager.getLogger("API"); @Override public ServerCall.Listener interceptCall( diff --git a/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java b/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java index 37c1c408d..a5038f82c 100644 --- a/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java +++ b/cuebot/src/main/java/com/imageworks/common/spring/remoting/GrpcServer.java @@ -6,7 +6,8 @@ import io.grpc.Server; import io.grpc.ServerBuilder; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.BeansException; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; @@ -40,7 +41,7 @@ public class GrpcServer implements ApplicationContextAware { - private static final Logger logger = Logger.getLogger(GrpcServer.class); + private static final Logger logger = LogManager.getLogger(GrpcServer.class); private static final String DEFAULT_NAME = "CueGrpcServer"; private static final String DEFAULT_PORT = "8443"; diff --git a/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java b/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java index cfa60ed60..2c9d213a9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/BuildableJob.java @@ -37,6 +37,12 @@ public class BuildableJob { */ public JobDetail detail; + /** + * Maximum CPU cores and GPU units overrides. 
+ */ + public Integer maxCoresOverride = null; + public Integer maxGpusOverride = null; + /** * List of layers */ @@ -70,12 +76,12 @@ public void addBuildableLayer(BuildableLayer layer) { } /** - * Add a key/value pair environement var to job + * Add a key/value pair environment var to job * * @param key * @param value */ - public void addEnvironementVariable(String key, String value) { + public void addEnvironmentVariable(String key, String value) { env.put(key, value); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java b/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java index a5747e558..6ef64080c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java +++ b/cuebot/src/main/java/com/imageworks/spcue/CuebotApplication.java @@ -16,18 +16,49 @@ */ - package com.imageworks.spcue; +import java.util.Arrays; +import java.util.Optional; +import java.util.stream.Stream; + +import org.apache.commons.lang.StringUtils; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.boot.SpringApplication; import org.springframework.boot.autoconfigure.SpringBootApplication; @SpringBootApplication public class CuebotApplication extends SpringApplication { + private static String[] checkArgs(String[] args) { + Optional deprecatedFlag = Arrays.stream(args) + .filter(arg -> arg.startsWith("--log.frame-log-root=")).findFirst(); + if (deprecatedFlag.isPresent()) { + // Log a deprecation warning. + Logger warning_logger = LogManager.getLogger(CuebotApplication.class); + warning_logger.warn("`--log.frame-log-root` is deprecated and will be removed in an " + + "upcoming release. It has been replaced with `--log.frame-log-root.default_os`. " + + "See opencue.properties for details on OpenCue's new OS-dependent root directories."); + // If new flags are not present, swap in the value provided using the new flag. + // If the new flags are already present, don't do anything. 
+ Optional newFlags = Arrays.stream(args) + .filter(arg -> arg.startsWith("--log.frame-log-root.")).findAny(); + if (!newFlags.isPresent()) { + String fixedFlag = "--log.frame-log-root.default_os=" + + StringUtils.substringAfter(deprecatedFlag.get(), "="); + args = Stream.concat( + Arrays.stream(args).filter(arg -> !arg.startsWith("--log.frame-log-root=")), + Stream.of(fixedFlag)) + .toArray(String[]::new); + } + } + return args; + } public static void main(String[] args) { // Cuebot startup - SpringApplication.run(CuebotApplication.class, args); + String[] filteredArgs = checkArgs(args); + SpringApplication.run(CuebotApplication.class, filteredArgs); } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java b/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java index 51d252352..fe518ef20 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DeedEntity.java @@ -25,20 +25,6 @@ public class DeedEntity extends Entity { public String host; public String show; - /** - * The owner can set a black out time for booking where Cue will not - * automatically book the cores, even if NIMBY locked. - * - * This is measured in seconds past midnight. - */ - public int blackoutStart = 0; - public int blackoutStop = 0; - - /** - * Quickly disable and enable the current black out time settings. 
- */ - public boolean isBlackoutEnabled = false; - public String getName() { return String.format("%s.%s", owner, host); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java index db946b90c..781401165 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchFrame.java @@ -43,7 +43,9 @@ public class DispatchFrame extends FrameEntity implements FrameInterface { public int maxCores; public boolean threadable; public long minMemory; - public long minGpu; + public int minGpus; + public int maxGpus; + public long minGpuMemory; public String services; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java b/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java index e1b3cc8f2..495d0a9b1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/DispatchHost.java @@ -35,13 +35,16 @@ public class DispatchHost extends Entity public int cores; public int idleCores; + public int gpus; + public int idleGpus; + // Basically an 0 = auto, 1 = all. public int threadMode; public long memory; public long idleMemory; - public long gpu; - public long idleGpu; + public long gpuMemory; + public long idleGpuMemory; public String tags; public String os; @@ -53,11 +56,13 @@ public class DispatchHost extends Entity * booked to this host. 
*/ public int strandedCores = 0; + public int strandedGpus = 0; // To reserve resources for future gpu job long idleMemoryOrig = 0; int idleCoresOrig = 0; - long idleGpuOrig = 0; + long idleGpuMemoryOrig = 0; + int idleGpusOrig = 0; public String getHostId() { return id; @@ -72,7 +77,7 @@ public String getFacilityId() { } @Override - public boolean hasAdditionalResources(int minCores, long minMemory, long minGpu) { + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, long minGpuMemory) { if (idleCores < minCores) { return false; @@ -80,7 +85,10 @@ public boolean hasAdditionalResources(int minCores, long minMemory, long minGpu) else if (idleMemory < minMemory) { return false; } - else if (idleGpu < minGpu) { + else if (idleGpus < minGpus) { + return false; + } + else if (idleGpuMemory < minGpuMemory) { return false; } @@ -88,10 +96,11 @@ else if (idleGpu < minGpu) { } @Override - public void useResources(int coreUnits, long memory, long gpu) { + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { idleCores = idleCores - coreUnits; idleMemory = idleMemory - memory; - idleGpu = idleGpu - gpu; + idleGpus = idleGpus - gpuUnits; + idleGpuMemory = idleGpuMemory - gpuMemory; } /** @@ -99,14 +108,16 @@ public void useResources(int coreUnits, long memory, long gpu) { * */ public void removeGpu() { - if (idleGpu > 0 && idleGpuOrig == 0) { + if (idleGpuMemory > 0 && idleGpuMemoryOrig == 0) { idleMemoryOrig = idleMemory; idleCoresOrig = idleCores; - idleGpuOrig = idleGpu; + idleGpuMemoryOrig = idleGpuMemory; + idleGpusOrig = idleGpus; idleMemory = idleMemory - Math.min(CueUtil.GB4, idleMemory); idleCores = idleCores - Math.min(100, idleCores); - idleGpu = 0; + idleGpuMemory = idleGpuMemory - Math.min(CueUtil.GB4, idleGpuMemory); + idleGpus = idleGpus - Math.min(1, idleGpus); } } @@ -115,14 +126,16 @@ public void removeGpu() { * */ public void restoreGpu() { - if (idleGpuOrig > 0) { + if (idleGpuMemoryOrig > 0) { 
idleMemory = idleMemoryOrig; idleCores = idleCoresOrig; - idleGpu = idleGpuOrig; + idleGpuMemory = idleGpuMemoryOrig; + idleGpus = idleGpusOrig; idleMemoryOrig = 0; idleCoresOrig = 0; - idleGpuOrig = 0; + idleGpuMemoryOrig = 0; + idleGpusOrig = 0; } } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java b/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java index a13529ad8..afe85121a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ExecutionSummary.java @@ -28,6 +28,9 @@ public class ExecutionSummary { public long coreTime; public long coreTimeSuccess; public long coreTimeFail; + public long gpuTime; + public long gpuTimeSuccess; + public long gpuTimeFail; public long highMemoryKb; public long getHighMemoryKb() { @@ -69,5 +72,29 @@ public long getCoreTimeFail() { public void setCoreTimeFail(long coreTimeFail) { this.coreTimeFail = coreTimeFail; } + + public long getGpuTime() { + return gpuTime; + } + + public void setGpuTime(long gpuTime) { + this.gpuTime = gpuTime; + } + + public long getGpuTimeSuccess() { + return gpuTimeSuccess; + } + + public void setGpuTimeSuccess(long gpuTimeSuccess) { + this.gpuTimeSuccess = gpuTimeSuccess; + } + + public long getGpuTimeFail() { + return gpuTimeFail; + } + + public void setGpuTimeFail(long gpuTimeFail) { + this.gpuTimeFail = gpuTimeFail; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java b/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java index 60c07a030..891523805 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameDetail.java @@ -30,7 +30,7 @@ public class FrameDetail extends FrameEntity implements FrameInterface { public int dependCount; public int retryCount; public int exitStatus; - public int maxRss; + public long maxRss; public int dispatchOrder; public String lastResource; diff --git 
a/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java b/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java index eff22768f..945685444 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java +++ b/cuebot/src/main/java/com/imageworks/spcue/FrameInterface.java @@ -1,4 +1,3 @@ - /* * Copyright Contributors to the OpenCue Project * diff --git a/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java b/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java index b67a53b77..cd9f8a998 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/GroupDetail.java @@ -23,11 +23,16 @@ public class GroupDetail extends Entity implements GroupInterface, DepartmentInt public int jobMinCores = -1; public int jobMaxCores = -1; + public int jobMinGpus = -1; + public int jobMaxGpus = -1; public int jobPriority = -1; public int minCores = -1; public int maxCores = -1; + public int minGpus = -1; + public int maxGpus = -1; + public String parentId = null; public String showId; public String deptId; diff --git a/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java b/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java index 96defaf61..5a019e8f1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/HostEntity.java @@ -36,10 +36,12 @@ public class HostEntity extends Entity implements HostInterface { public int procs; public int cores; public int idleCores; - public int memory; - public int idleMemory; - public int gpu; - public int idleGpu; + public long memory; + public long idleMemory; + public int gpus; + public int idleGpus; + public long gpuMemory; + public long idleGpuMemory; public boolean unlockAtBoot; @@ -57,10 +59,12 @@ public HostEntity(Host grpcHost) { this.nimbyEnabled = grpcHost.getNimbyEnabled(); this.cores = (int) grpcHost.getCores(); this.idleCores = (int) grpcHost.getIdleCores(); - this.memory = 
(int) grpcHost.getMemory(); - this.idleMemory = (int) grpcHost.getIdleMemory(); - this.gpu = (int) grpcHost.getGpu(); - this.idleGpu = (int) grpcHost.getIdleGpu(); + this.memory = grpcHost.getMemory(); + this.idleMemory = grpcHost.getIdleMemory(); + this.gpus = (int) grpcHost.getGpus(); + this.idleGpus = (int) grpcHost.getIdleGpus(); + this.gpuMemory = grpcHost.getGpuMemory(); + this.idleGpuMemory = grpcHost.getIdleGpuMemory(); } public String getHostId() { diff --git a/cuebot/src/main/java/com/imageworks/spcue/Inherit.java b/cuebot/src/main/java/com/imageworks/spcue/Inherit.java index 73651c33d..1fdb23336 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/Inherit.java +++ b/cuebot/src/main/java/com/imageworks/spcue/Inherit.java @@ -28,6 +28,8 @@ public enum Inherit { Priority, MinCores, MaxCores, + MinGpus, + MaxGpus, All } diff --git a/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java b/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java index 29286ffe3..dad6f8a6d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/JobDetail.java @@ -46,12 +46,15 @@ public class JobDetail extends JobEntity implements JobInterface, DepartmentInte public int priority = 1; public int minCoreUnits = 100; public int maxCoreUnits = 200000; + public int minGpuUnits = 0; + public int maxGpuUnits = 1000; public boolean isLocal = false; public String localHostName; public int localMaxCores; - public int localMaxMemory; + public long localMaxMemory; public int localThreadNumber; - public int localMaxGpu; + public int localMaxGpus; + public long localMaxGpuMemory; public String getDepartmentId() { return deptId; diff --git a/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java b/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java index 3b473f8c1..565995d9d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LayerDetail.java @@ 
-32,9 +32,11 @@ public class LayerDetail extends LayerEntity implements LayerInterface { public LayerType type; public int minimumCores; public int maximumCores; + public int minimumGpus; + public int maximumGpus; public boolean isThreadable; public long minimumMemory; - public long minimumGpu; + public long minimumGpuMemory; public int chunkSize; public int timeout; public int timeout_llu; @@ -116,12 +118,20 @@ public void setMinimumMemory(long minimumMemory) { this.minimumMemory = minimumMemory; } - public long getMinimumGpu() { - return minimumGpu; + public int getMinimumGpus() { + return minimumGpus; } - public void setMinimumGpu(long minimumGpu) { - this.minimumGpu = minimumGpu; + public void setMinimumGpus(int minimumGpus) { + this.minimumGpus = minimumGpus; + } + + public long getMinimumGpuMemory() { + return minimumGpuMemory; + } + + public void setMinimumGpuMemory(long minimumGpuMemory) { + this.minimumGpuMemory = minimumGpuMemory; } public int getChunkSize() { diff --git a/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java b/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java index cc6287253..3e073fa73 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java +++ b/cuebot/src/main/java/com/imageworks/spcue/LocalHostAssignment.java @@ -35,11 +35,13 @@ public class LocalHostAssignment extends Entity private int idleCoreUnits; private long idleMemory; - private long idleGpu; + private int idleGpuUnits; + private long idleGpuMemory; private long maxMemory; - private long maxGpu; + private long maxGpuMemory; private int maxCoreUnits; + private int maxGpuUnits; private int threads; @@ -52,15 +54,16 @@ public class LocalHostAssignment extends Entity public LocalHostAssignment() { } - public LocalHostAssignment(int maxCores, int threads, long maxMemory, long maxGpu) { + public LocalHostAssignment(int maxCores, int threads, long maxMemory, int maxGpus, long maxGpuMemory) { this.maxCoreUnits = maxCores; 
this.threads = threads; this.maxMemory = maxMemory; - this.maxGpu = maxGpu; + this.maxGpuUnits = maxGpus; + this.maxGpuMemory = maxGpuMemory; } @Override - public boolean hasAdditionalResources(int minCores, long minMemory, long minGpu) { + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, long minGpuMemory) { if (idleCoreUnits < minCores) { return false; @@ -68,7 +71,10 @@ public boolean hasAdditionalResources(int minCores, long minMemory, long minGpu) else if (idleMemory < minMemory) { return false; } - else if (idleGpu < minGpu) { + else if (idleGpuUnits < minGpus) { + return false; + } + else if (idleGpuMemory < minGpuMemory) { return false; } @@ -76,10 +82,11 @@ else if (idleGpu < minGpu) { } @Override - public void useResources(int coreUnits, long memory, long gpu) { + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory) { idleCoreUnits = idleCoreUnits - coreUnits; idleMemory = idleMemory - memory; - idleGpu = idleGpu - gpu; + idleGpuUnits = idleGpuUnits - gpuUnits; + idleGpuMemory = idleGpuMemory - gpuMemory; } public int getThreads() { @@ -110,16 +117,24 @@ public long getIdleMemory() { return this.idleMemory; } - public long getMaxGpu() { - return maxGpu; + public int getMaxGpuUnits() { + return maxGpuUnits; + } + + public void setMaxGpuUnits(int maxGpuUnits) { + this.maxGpuUnits = maxGpuUnits; + } + + public long getMaxGpuMemory() { + return maxGpuMemory; } - public void setMaxGpu(long maxGpu) { - this.maxGpu = maxGpu; + public void setMaxGpuMemory(long maxGpuMemory) { + this.maxGpuMemory = maxGpuMemory; } - public long getIdleGpu() { - return this.idleGpu; + public long getIdleGpuMemory() { + return this.idleGpuMemory; } public int getIdleCoreUnits() { @@ -134,8 +149,16 @@ public void setIdleMemory(long idleMemory) { this.idleMemory = idleMemory; } - public void setIdleGpu(long idleGpu) { - this.idleGpu = idleGpu; + public int getIdleGpuUnits() { + return this.idleGpuUnits; + } + + public void 
setIdleGpuUnits(int idleGpuUnits) { + this.idleGpuUnits = idleGpuUnits; + } + + public void setIdleGpuMemory(long idleGpuMemory) { + this.idleGpuMemory = idleGpuMemory; } public String getHostId() { diff --git a/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java b/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java index aae8921e4..b45af0838 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ResourceUsage.java @@ -25,9 +25,10 @@ public class ResourceUsage { private final long coreTimeSeconds; + private final long gpuTimeSeconds; private final long clockTimeSeconds; - public ResourceUsage(long clockTime, int corePoints) { + public ResourceUsage(long clockTime, int corePoints, int gpuPoints) { if (clockTime < 1) { clockTime = 1; @@ -38,14 +39,21 @@ public ResourceUsage(long clockTime, int corePoints) { coreTime = 1; } + long gpuTime = clockTime * gpuPoints; + clockTimeSeconds = clockTime; coreTimeSeconds = coreTime; + gpuTimeSeconds = gpuTime; } public long getCoreTimeSeconds() { return coreTimeSeconds; } + public long getGpuTimeSeconds() { + return gpuTimeSeconds; + } + public long getClockTimeSeconds() { return clockTimeSeconds; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java index 373877e69..0bb47c02c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ServiceEntity.java @@ -40,6 +40,17 @@ public class ServiceEntity extends Entity { */ public int maxCores = 0; + /** + * Determines the default minimum gpus per frame. + */ + public int minGpus = 0; + + /** + * Determines the default minimum gpus per frame. 0 indicates + * the feature is disabled. + */ + public int maxGpus = 0; + /** * Determines the default minimum memory per frame. 
*/ @@ -48,7 +59,7 @@ public class ServiceEntity extends Entity { /** * Determines the default minimum gpu per frame. */ - public long minGpu = Dispatcher.GPU_RESERVED_DEFAULT; + public long minGpuMemory = Dispatcher.MEM_GPU_RESERVED_DEFAULT; /** * Determines the default tags. @@ -59,5 +70,7 @@ public class ServiceEntity extends Entity { public int timeout_llu = 0; + public long minMemoryIncrease = Dispatcher.MINIMUM_MEMORY_INCREASE; + } diff --git a/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java b/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java index 8a4d768af..1d2f675e1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java +++ b/cuebot/src/main/java/com/imageworks/spcue/ShowEntity.java @@ -25,6 +25,8 @@ public class ShowEntity extends Entity implements ShowInterface { public boolean paused; public int defaultMinCores; public int defaultMaxCores; + public int defaultMinGpus; + public int defaultMaxGpus; public String[] commentMail; public String getShowId() { diff --git a/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java b/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java index 855b95857..f13fbaae2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java +++ b/cuebot/src/main/java/com/imageworks/spcue/SortableShow.java @@ -24,11 +24,12 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; public class SortableShow implements Comparable { - private static final Logger logger = Logger.getLogger(SortableShow.class); + private static final Logger logger = LogManager.getLogger(SortableShow.class); private String show; private float tier; @@ -77,7 +78,9 @@ public boolean isSkipped(AllocationInterface a) { } public void skip(String tags, long cores, long memory) { - failed.put(tags, new long[] { cores, memory}); + if (tags != null) { + failed.put(tags, new long[] { 
cores, memory}); + } } /** diff --git a/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java b/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java index 4316b708d..a9032eb43 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/VirtualProc.java @@ -29,6 +29,7 @@ public class VirtualProc extends FrameEntity implements ProcInterface { public String frameId; public String hostName; public String os; + public byte[] childProcesses; public int coresReserved; public long memoryReserved; @@ -36,7 +37,11 @@ public class VirtualProc extends FrameEntity implements ProcInterface { public long memoryMax; public long virtualMemoryUsed; public long virtualMemoryMax; - public long gpuReserved; + + public int gpusReserved; + public long gpuMemoryReserved; + public long gpuMemoryUsed; + public long gpuMemoryMax; public boolean unbooked; public boolean usageRecorded = false; @@ -91,8 +96,8 @@ public static final VirtualProc build(DispatchHost host, DispatchFrame frame) { proc.coresReserved = frame.minCores; proc.memoryReserved = frame.minMemory; - // This reserves all the gpu memory on a host for one frame - proc.gpuReserved = (frame.minGpu > 0) ? 
host.idleGpu : 0; + proc.gpusReserved = frame.minGpus; + proc.gpuMemoryReserved = frame.minGpuMemory; /* * Frames that are announcing cores less than 100 are not multi-threaded @@ -208,7 +213,8 @@ public static final VirtualProc build(DispatchHost host, proc.coresReserved = lja.getThreads() * 100; proc.memoryReserved = frame.minMemory; - proc.gpuReserved = frame.minGpu; + proc.gpusReserved = frame.minGpus; + proc.gpuMemoryReserved = frame.minGpuMemory; int wholeCores = (int) (Math.floor(host.idleCores / 100.0)); if (wholeCores == 0) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java b/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java index 6aea9b48f..adf6e0368 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/AppConfig.java @@ -40,17 +40,11 @@ "classpath:conf/spring/applicationContext-grpcServer.xml", "classpath:conf/spring/applicationContext-service.xml", "classpath:conf/spring/applicationContext-jms.xml", - "classpath:conf/spring/applicationContext-trackit.xml", "classpath:conf/spring/applicationContext-criteria.xml"}) @EnableConfigurationProperties @PropertySource({"classpath:opencue.properties"}) public class AppConfig { - @Configuration - @Conditional(OracleDatabaseCondition.class) - @ImportResource({"classpath:conf/spring/applicationContext-dao-oracle.xml"}) - static class OracleEngineConfig {} - @Configuration @Conditional(PostgresDatabaseCondition.class) @ImportResource({"classpath:conf/spring/applicationContext-dao-postgres.xml"}) @@ -63,12 +57,6 @@ public DataSource cueDataSource() { return DataSourceBuilder.create().build(); } - @Bean - @ConfigurationProperties(prefix="datasource.trackit-data-source") - public DataSource trackitDataSource() { - return DataSourceBuilder.create().build(); - } - @Bean public ServletRegistrationBean jobLaunchServlet() { ServletRegistrationBean b = new ServletRegistrationBean<>(); @@ -77,6 +65,5 @@ 
public ServletRegistrationBean jobLaunchServlet() { b.setServlet(new JobLaunchServlet()); return b; } - } diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java b/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java index 71a059305..0ae36a296 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java +++ b/cuebot/src/main/java/com/imageworks/spcue/config/DatabaseEngine.java @@ -1,14 +1,9 @@ package com.imageworks.spcue.config; public enum DatabaseEngine { - ORACLE, POSTGRES; public static DatabaseEngine fromEnv() { - String envValue = System.getenv("CUEBOT_DB_ENGINE"); - if (envValue == null) { - return POSTGRES; - } - return DatabaseEngine.valueOf(envValue.toUpperCase()); + return POSTGRES; } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/config/OracleDatabaseCondition.java b/cuebot/src/main/java/com/imageworks/spcue/config/OracleDatabaseCondition.java deleted file mode 100644 index 7739ec885..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/config/OracleDatabaseCondition.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.imageworks.spcue.config; - -import org.springframework.context.annotation.Condition; -import org.springframework.context.annotation.ConditionContext; -import org.springframework.core.type.AnnotatedTypeMetadata; - -public class OracleDatabaseCondition implements Condition { - - @Override - public boolean matches(ConditionContext context, AnnotatedTypeMetadata metadata) { - String dbEngine = System.getenv("CUEBOT_DB_ENGINE"); - if (dbEngine == null) { - return false; - } - DatabaseEngine selectedDatabaseEngine = DatabaseEngine.valueOf(dbEngine.toUpperCase()); - return selectedDatabaseEngine.equals(DatabaseEngine.ORACLE); - } - -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java index 4b362f000..f3bb09915 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java 
+++ b/cuebot/src/main/java/com/imageworks/spcue/dao/BookingDao.java @@ -38,6 +38,15 @@ public interface BookingDao { */ boolean updateMaxCores(LocalHostAssignment l, int maxCoreUnits); + /** + * Updates the maximum number of gpus the given local + * host assignment should use. + * + * @param l + * @return + */ + boolean updateMaxGpus(LocalHostAssignment l, int gpus); + /** * Updates the maximum amount of memory a given local host * assignment should use. @@ -54,7 +63,7 @@ public interface BookingDao { * @param l * @return */ - boolean updateMaxGpu(LocalHostAssignment l, long maxGpu); + boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory); /** * Create a new LocalHostAssignment attached to the given job. @@ -126,14 +135,6 @@ void insertLocalHostAssignment(HostInterface host, FrameInterface frame, */ boolean hasActiveLocalJob(HostInterface host); - /** - * Return true if the host is in blackout time. - * - * @param h - * @return - */ - boolean isBlackoutTime(HostInterface h); - /** * Delete the given LocalHostAssignment. * @@ -158,6 +159,16 @@ void insertLocalHostAssignment(HostInterface host, FrameInterface frame, */ int getCoreUsageDifference(LocalHostAssignment l, int coreUnits); + /** + * Return the difference between the number of assigned gpus and + * the given gpuUnits. + * + * @param l + * @param gpuUnits + * @return + */ + int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits); + /** * Allocate additional cores from the given host. * @@ -176,6 +187,24 @@ void insertLocalHostAssignment(HostInterface host, FrameInterface frame, */ boolean deallocateCoresFromHost(HostInterface h, int cores); + /** + * Allocate additional gpus from the given host. + * + * @param h + * @param gpus + * @return + */ + boolean allocateGpusFromHost(HostInterface h, int gpus); + + /** + * Deallocate gpu from the given host, returning them to its pool. 
+ * + * @param h + * @param gpus + * @return + */ + boolean deallocateGpusFromHost(HostInterface h, int gpus); + /** * Return true if the Host has a resource deficit. A * deficit can occur if there are more resources in use than the diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java index 5568bb2fd..08bea59b1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/CommentDao.java @@ -23,6 +23,8 @@ import com.imageworks.spcue.HostInterface; import com.imageworks.spcue.JobInterface; +import java.util.List; + public interface CommentDao { /** @@ -32,6 +34,26 @@ public interface CommentDao { */ public void deleteComment(String id); + /** + * Deletes comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return boolean: returns true if one or more comments where deleted + */ + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); + + /** + * Get comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return List + */ + public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject); + /** * Retrieves the specified comment. * diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java index 555e6beeb..b02b58222 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DeedDao.java @@ -66,22 +66,6 @@ public interface DeedDao { */ List getDeeds(OwnerEntity owner); - /** - * Enable/Disable the blackout time. - * - * @param value - */ - void updateBlackoutTimeEnabled(DeedEntity deed, boolean value); - - /** - * Set blackout times. During blackout times, machines - * cannot be booked. 
- * - * @param start - * @param stop - */ - void setBlackoutTime(DeedEntity deed, int startSeconds, int stopSeconds); - /** * * diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java index 8ca8b07e4..ce74224b1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/DispatcherDao.java @@ -82,7 +82,7 @@ public interface DispatcherDao { * @param numJobs * @return */ - Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); + List findDispatchJobsForAllShows(DispatchHost host, int numJobs); /** * Return a list of jobs which could use resources of the specified @@ -92,7 +92,7 @@ public interface DispatcherDao { * @param numJobs * @return */ - Set findDispatchJobs(DispatchHost host, int numJobs); + List findDispatchJobs(DispatchHost host, int numJobs); /** * Return a list of jobs which could use resources of the specified @@ -102,7 +102,7 @@ public interface DispatcherDao { * @param numJobs * @return */ - Set findDispatchJobs(DispatchHost host, GroupInterface g); + List findDispatchJobs(DispatchHost host, GroupInterface g); /** * Finds an under proced job if one exists and returns it, @@ -131,7 +131,7 @@ public interface DispatcherDao { * @param numJobs * @return */ - Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); + List findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); /** * Find a list of local dispatch jobs. @@ -162,6 +162,40 @@ List findNextDispatchFrames(LayerInterface layer, VirtualProc pro */ List findNextDispatchFrames(LayerInterface layer, DispatchHost host, int limit); + + /** + * Return Scheduling Mode selected + * + * @return + */ + SchedulingMode getSchedulingMode(); + + /** + * Set Scheduling Mode. 
+ * + * @param schedulingMode + */ + void setSchedulingMode(SchedulingMode schedulingMode); + + /** + * - PRIORITY_ONLY: Sort by priority only + * - FIFO: Whether or not to enable FIFO scheduling in the same priority. + * - BALANCED: Use a rank formula that takes into account time waiting, and number + * of cores required: rank = priority + (100 * (1 - (job.cores/job.int_min_cores))) + age in days + */ + enum SchedulingMode { + PRIORITY_ONLY, + FIFO, + BALANCED + } + + /** + * Clear bookableShows cache + * + * @return + */ + void clearCache(); } + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java index e576a0f59..4dbb0e987 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/FrameDao.java @@ -202,6 +202,21 @@ boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatu * @return */ boolean updateFrameCleared(FrameInterface frame); + /** + * Sets a frame exitStatus to EXIT_STATUS_MEMORY_FAILURE + * + * @param frame + * @return whether the frame has been updated + */ + boolean updateFrameMemoryError(FrameInterface frame); + + /** + * Sets a frame to an unreserved waiting state. + * + * @param frame + * @return + */ + boolean updateFrameHostDown(FrameInterface frame); /** * Returns a DispatchFrame object from the frame's uinique ID. @@ -316,20 +331,7 @@ boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatu ResourceUsage getResourceUsage(FrameInterface f); /** - * Update Frame usage values for the given frame. The - * frame must be in the Running state. If the frame - * is locked by another thread, the process is aborted because - * we'll most likely get a new update one minute later. - * - * @param f - * @param lluTime - * @throws FrameReservationException if the frame is locked - * by another thread. 
- */ - void updateFrameUsage(FrameInterface f, long lluTime); - - /** - * Update memory usage values for the given frame. The + * Update memory usage values and LLU time for the given frame. The * frame must be in the Running state. If the frame * is locked by another thread, the process is aborted because * we'll most likely get a new update one minute later. @@ -337,10 +339,11 @@ boolean updateFrameStopped(FrameInterface frame, FrameState state, int exitStatu * @param f * @param maxRss * @param rss + * @param lluTime * @throws FrameReservationException if the frame is locked * by another thread. */ - void updateFrameMemoryUsage(FrameInterface f, long maxRss, long rss); + void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, long lluTime); /** * Attempt to put a exclusive row lock on the given diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java index 181062df6..87cd950d0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/GroupDao.java @@ -137,6 +137,40 @@ public interface GroupDao { */ public void updateMinCores(GroupInterface group, int value); + + /** + * Sets the maximum number of gpus the group should be running. + * + * @param group + * @param value + */ + void updateDefaultJobMaxGpus(GroupInterface group, int value); + + /** + * Sets the minimum number of gpus the group should be running. 
+ * + * @param group + * @param value + */ + void updateDefaultJobMinGpus(GroupInterface group, int value); + + /** + * Sets the maximum number of gpus for this group + * + * @param group + * @param value + */ + public void updateMaxGpus(GroupInterface group, int value); + + /** + * Set the minimum number of gpus for this group + * + * @param group + * @param value + */ + + public void updateMinGpus(GroupInterface group, int value); + /** * Renames the group * diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java index 31b7d475d..94ba316b1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/HostDao.java @@ -65,6 +65,11 @@ public interface HostDao { */ void deleteHost(HostInterface host); + /** + * deletes the down state hosts + */ + void deleteDownHosts(); + /** * updates a host with the passed hardware state * @@ -73,6 +78,14 @@ public interface HostDao { */ void updateHostState(HostInterface host, HardwareState state); + /** + * updates a host with the passed free temporary directory + * + * @param host + * @param freeTempDir + */ + void updateHostFreeTempDir(HostInterface host, Long freeTempDir); + /** * returns a full host detail * @@ -239,15 +252,6 @@ public interface HostDao { */ void updateThreadMode(HostInterface host, ThreadMode mode); - /** - * When a host is in kill mode that means its 256MB+ into the swap and the - * the worst memory offender is killed. - * - * @param h HostInterface - * @return boolean - */ - boolean isKillMode(HostInterface h); - /** * Update the specified host's hardware information. 
* @@ -258,8 +262,8 @@ public interface HostDao { * @param freeSwap long * @param totalMcp long * @param freeMcp long - * @param totalGpu long - * @param freeGpu long + * @param totalGpuMemory long + * @param freeGpuMemory long * @param load int * @param os String */ @@ -267,7 +271,7 @@ void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, long freeSwap, long totalMcp, long freeMcp, - long totalGpu, long freeGpu, + long totalGpuMemory, long freeGpuMemory, int load, Timestamp bootTime, String os); /** @@ -288,6 +292,16 @@ void updateHostStats(HostInterface host, */ int getStrandedCoreUnits(HostInterface h); + /** + * Return the number of whole stranded gpus on this host. The must have + * less than Dispacher.MEM_STRANDED_THRESHHOLD for the gpus to be + * considered stranded. + * + * @param h HostInterface + * @return int + */ + int getStrandedGpus(HostInterface h); + /** * Return true if the host is preferring a particular show. * diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java index 4ffaf2f43..11df75ffe 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/JobDao.java @@ -59,6 +59,24 @@ public interface JobDao { */ public void updateMinCores(GroupInterface g, int cores); + /** + * Updates all jobs in the speficed group to the + * max gpu value. + * + * @param g + * @param gpu + */ + public void updateMaxGpus(GroupInterface g, int gpus); + + /** + * Updates all jobs in the specifid group to the + * min gpu value. + * + * @param g + * @param gpu + */ + public void updateMinGpus(GroupInterface g, int gpus); + /** * Updates all jobs in the specified group to the * set priority. 
@@ -255,6 +273,31 @@ public interface JobDao { */ boolean isOverMaxCores(JobInterface job, int coreUnits); + /** + * returns true if job is over max gpus + * + * @param job + * @return + */ + boolean isOverMaxGpus(JobInterface job); + + /** + * returns true if job is at its max gpus + * + * @param job + * @return + */ + boolean isAtMaxGpus(JobInterface job); + + /** + * Return true if adding given gpus to the job + * will set the job over its max gpus value. + * + * @param job + * @param gpus + * @return + */ + boolean isOverMaxGpus(JobInterface job, int gpus); /** * sets the jobs new priority value @@ -280,6 +323,22 @@ public interface JobDao { */ void updateMaxCores(JobInterface j, int v); + /** + * sets the jobs new min gpu value + * + * @param j + * @param v + */ + void updateMinGpus(JobInterface j, int v); + + /** + * sets the jobs new max gpu value + * + * @param j + * @param v + */ + void updateMaxGpus(JobInterface j, int v); + /** * Update a job's paused state * diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java index 7843d8e8d..9343c3aa0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/LayerDao.java @@ -124,13 +124,22 @@ public interface LayerDao { LayerInterface findLayer(JobInterface job, String name); /** - * update the number of cores the layer requires + * update the number of min cores the layer requires * * @param layer * @param val */ void updateLayerMinCores(LayerInterface layer, int val); + + /** + * update the number of gpus the layer requires + * + * @param layer + * @param val + */ + void updateLayerMinGpus(LayerInterface layer, int val); + /** * update the amount of memory required by all subsequent * running frames in the specified layer. 
@@ -147,7 +156,7 @@ public interface LayerDao { * @param layer * @param val */ - void updateLayerMinGpu(LayerInterface layer, long gpu); + void updateLayerMinGpuMemory(LayerInterface layer, long val); /** * Update a layer with new host tags. @@ -167,7 +176,7 @@ public interface LayerDao { void insertLayerEnvironment(LayerInterface layer, String key, String value); /** - * Insert a map key/value pairs into the layer environement + * Insert a map key/value pairs into the layer environment * * @param layer * @param env @@ -207,9 +216,9 @@ public interface LayerDao { * value is larger than the current value * * @param layer - * @param gpu + * @param val */ - void increaseLayerMinGpu(LayerInterface layer, long gpu); + void increaseLayerMinGpuMemory(LayerInterface layer, long val); /** * Tries to find a max RSS value for layer in the specified job. The @@ -256,10 +265,20 @@ public interface LayerDao { * job with the new gpu requirement. * * @param job - * @param gpu + * @param mem * @param type */ - void updateMinGpu(JobInterface job, long gpu, LayerType type); + void updateMinGpuMemory(JobInterface job, long mem, LayerType type); + + /** + * Update all layers of the set type in the specified job + * with the new max cores requirement. + * + * @param job + * @param cores + * @param type + */ + void updateMaxCores(JobInterface job, int cores, LayerType type); /** * Update all layers of the set type in the specified job @@ -271,6 +290,16 @@ public interface LayerDao { */ void updateMinCores(JobInterface job, int cores, LayerType type); + /** + * Update all layers of the set type in the specified job + * with the new min cores requirement. + * + * @param job + * @param gpus + * @param type + */ + void updateMinGpus(JobInterface job, int gpus, LayerType type); + /** * Update a layer's max cores value, which limits how * much threading can go on. 
@@ -395,6 +424,16 @@ public interface LayerDao { */ void updateLayerMaxCores(LayerInterface layer, int val); + /** + * Set the layer's max gpus value to the given int. The + * max gpu value will not allow the dispatcher to + * book over the given number of gpu. + * + * @param layer + * @param val + */ + void updateLayerMaxGpus(LayerInterface layer, int val); + /** * Add a limit to the given layer. * diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java index 5efdd01d2..206b19e22 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ProcDao.java @@ -54,7 +54,7 @@ public interface ProcDao { * @return */ - long getReservedGpu(ProcInterface proc); + long getReservedGpuMemory(ProcInterface proc); /** * Return the proc that has exceeded its reserved memory by the largest factor. @@ -150,7 +150,8 @@ public interface ProcDao { * @param maxKb */ void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, - long vsize, long maxVsize); + long vsize, long maxVsize, long usedGpuMemory, + long maxUsedGpuMemory, byte[] children); /** * get aq virual proc from its unique id diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java b/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java index 1853662a5..b12c0b097 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/ShowDao.java @@ -81,6 +81,21 @@ public interface ShowDao { */ void updateShowDefaultMaxCores(ShowInterface s, int val); + /** + * + * @param s + * @param val + */ + void updateShowDefaultMinGpus(ShowInterface s, int val); + + /** + * + * @param s + * @param val + */ + void updateShowDefaultMaxGpus(ShowInterface s, int val); + + /** * Disabling this would stop new proc assignement. 
The show would get no new * procs, but any procs already assigned to a job would continue to @@ -131,5 +146,11 @@ public interface ShowDao { * @param emails */ void updateShowCommentEmail(ShowInterface s, String[] emails); + + /** + * Scheduled task to update shows. Set show as inactive if it has at + * least 1 job in job_history service th + */ + void updateShowsStatus(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java index b2a30b7b0..7a957bdd9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/FrameSearchFactory.java @@ -30,14 +30,7 @@ public class FrameSearchFactory { private DatabaseEngine dbEngine; public FrameSearchInterface create() { - if (dbEngine.equals(DatabaseEngine.POSTGRES)) { - return new FrameSearch(); - } else if (dbEngine.equals(DatabaseEngine.ORACLE)) { - return new com.imageworks.spcue.dao.criteria.oracle.FrameSearch(); - } else { - throw new RuntimeException( - "current database engine is not supported by FrameSearchFactory"); - } + return new FrameSearch(); } public FrameSearchInterface create(List frameIds) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java index a1f7fe6f5..d11093810 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/HostSearchFactory.java @@ -27,15 +27,7 @@ public class HostSearchFactory { private DatabaseEngine dbEngine; public HostSearchInterface create(HostSearchCriteria criteria) { - if (dbEngine.equals(DatabaseEngine.POSTGRES)) { - return new HostSearch(criteria); - } else if (dbEngine.equals(DatabaseEngine.ORACLE)) { - return new 
com.imageworks.spcue.dao.criteria.oracle.HostSearch(criteria); - } else { - throw new RuntimeException( - "current database engine is not supported by HostSearchFactory"); - } - + return new HostSearch(criteria); } public HostSearchInterface create(AllocationEntity allocEntity) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java index df3b7635c..a9468a098 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/JobSearchFactory.java @@ -26,14 +26,7 @@ public class JobSearchFactory { private DatabaseEngine dbEngine; public JobSearchInterface create() { - if (dbEngine.equals(DatabaseEngine.POSTGRES)) { - return new JobSearch(); - } else if (dbEngine.equals(DatabaseEngine.ORACLE)) { - return new com.imageworks.spcue.dao.criteria.oracle.JobSearch(); - } else { - throw new RuntimeException( - "current database engine is not supported by JobSearchFactory"); - } + return new JobSearch(); } public JobSearchInterface create(JobSearchCriteria criteria) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java index 58ace1be5..d754dce13 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/ProcSearchFactory.java @@ -25,14 +25,7 @@ public class ProcSearchFactory { private DatabaseEngine dbEngine; public ProcSearchInterface create() { - if (dbEngine.equals(DatabaseEngine.POSTGRES)) { - return new ProcSearch(); - } else if (dbEngine.equals(DatabaseEngine.ORACLE)) { - return new com.imageworks.spcue.dao.criteria.oracle.ProcSearch(); - } else { - throw new RuntimeException( - "current database engine is not supported by ProcSearchFactory"); - } + return new 
ProcSearch(); } public ProcSearchInterface create(ProcSearchCriteria criteria) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/Criteria.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/Criteria.java deleted file mode 100644 index 393848454..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/Criteria.java +++ /dev/null @@ -1,304 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.imageworks.spcue.dao.criteria.oracle; - -import java.security.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.Set; -import java.util.stream.Collectors; - -import com.google.common.collect.ImmutableList; - -import com.imageworks.spcue.dao.criteria.CriteriaInterface; -import com.imageworks.spcue.dao.criteria.Phrase; -import com.imageworks.spcue.dao.criteria.Sort; -import com.imageworks.spcue.grpc.criterion.EqualsFloatSearchCriterion; -import com.imageworks.spcue.grpc.criterion.EqualsIntegerSearchCriterion; -import com.imageworks.spcue.grpc.criterion.GreaterThanFloatSearchCriterion; -import com.imageworks.spcue.grpc.criterion.GreaterThanIntegerSearchCriterion; -import com.imageworks.spcue.grpc.criterion.InRangeFloatSearchCriterion; -import com.imageworks.spcue.grpc.criterion.InRangeIntegerSearchCriterion; -import com.imageworks.spcue.grpc.criterion.LessThanFloatSearchCriterion; -import com.imageworks.spcue.grpc.criterion.LessThanIntegerSearchCriterion; - -public abstract class Criteria implements CriteriaInterface { - - List chunks = new ArrayList(12); - List values = new ArrayList(32); - - boolean built = false; - private int firstResult = 1; - private int maxResults = 0; - private ArrayList order = new ArrayList(); - - abstract void buildWhereClause(); - - public String toString() { return this.getWhereClause(); } - - public void setFirstResult(int firstResult) { - this.firstResult = Math.max(firstResult, 1); - } - - public void setMaxResults(int maxResults) { - this.maxResults = maxResults; - } - - public void addSort(Sort sort) { - this.order.add(sort); - } - - public List getValues() { - return values; - } - - public Object[] getValuesArray() { - return values.toArray(); - } - - public String getWhereClause() { - build(); - return generateWhereClause(); - } - - public String getFilteredQuery(String query) { - build(); - return queryWithPaging(query); - } - - private void build() { 
- if (!built) { - buildWhereClause(); - } - built = true; - } - - private String generateWhereClause() { - return chunks.stream() - .map(StringBuilder::toString) - .collect(Collectors.joining(" AND ")); - } - - private String queryWithPaging(String query) { - if (firstResult > 1 || maxResults > 0) { - if (order.size() == 0) { - query = query.replaceFirst("SELECT ", "SELECT ROWNUM AS RN,"); - } else { - query = query.replaceFirst("SELECT ", "SELECT row_number() OVER (" + getOrder() + ") AS RN, "); - } - } - - StringBuilder sb = new StringBuilder(4096); - if (maxResults > 0 || firstResult > 1) { - sb.append("SELECT * FROM ( "); - } - - sb.append(query); - sb.append(" "); - if (chunks.size() > 0) { - sb.append("AND "); - sb.append( - chunks.stream() - .map(StringBuilder::toString) - .collect(Collectors.joining(" AND "))); - } - - if (firstResult > 1 || maxResults > 0) { - sb.append(") WHERE "); - } - - if (firstResult > 1) { - sb.append (" RN >= ? "); - values.add(firstResult); - } - - if (maxResults > 0) { - if (firstResult > 1) { - sb.append(" AND "); - } - sb.append(" RN < ? 
"); - values.add(firstResult + maxResults); - } - - return sb.toString(); - } - - private String getOrder() { - if (order.size() < 1) { - return ""; - } - return " ORDER BY " + order.stream() - .map(sort -> sort.getColumn() + " " + sort.getDirection().toString()) - .collect(Collectors.joining(", ")); - } - - void addPhrase(String col, Collection s) { - if (s == null || s.size() == 0) { return; } - - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w: s) { - sb.append(col); - sb.append("=?"); - sb.append(" OR "); - values.add(w); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addPhrases(Collection phrases, String inclusion) { - if (phrases.size() == 0) { return; } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (Phrase p: phrases) { - sb.append(p.getColumn()); - sb.append(p.getComparison()); - sb.append("?"); - sb.append(" "); - sb.append(inclusion); - sb.append(" "); - values.add(p.getValue()); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addPhrase(String col, String v) { - if (v == null) { return; } - addPhrase(col, ImmutableList.of(v)); - } - - void addRegexPhrase(String col, Set s) { - if (s == null) { return; } - if (s.size() == 0) { return; } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w: s) { - sb.append(String.format("REGEXP_LIKE(%s,?)", col)); - sb.append(" OR "); - values.add(w); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addLikePhrase(String col, Set s) { - if (s == null) { return; } - if (s.size() == 0) { return; } - StringBuilder sb = new StringBuilder(1024); - sb.append("("); - for (String w: s) { - sb.append(col); - sb.append(" LIKE ?"); - sb.append(" OR "); - values.add("%" + w + "%"); - } - sb.delete(sb.length()-4, sb.length()); - sb.append(")"); - chunks.add(sb); - } - - void addGreaterThanTimestamp(String col, Timestamp 
timestamp) { - if (timestamp == null) { return; } - StringBuilder sb = new StringBuilder(128); - sb.append("("); - sb.append(col); - sb.append(" > ?"); - sb.append(") "); - values.add(timestamp); - chunks.add(sb); - } - - void addLessThanTimestamp(String col, Timestamp timestamp) { - if (timestamp == null) { return; } - StringBuilder sb = new StringBuilder(128); - sb.append("("); - sb.append(col); - sb.append(" < ?"); - sb.append(") "); - values.add(timestamp); - chunks.add(sb); - } - - void addRangePhrase(String col, EqualsIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " = ?"); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, LessThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + "<=? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, GreaterThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, InRangeIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - void addRangePhrase(String col, EqualsFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " = ?"); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, LessThanFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " <= ? "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, GreaterThanFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? 
"); - chunks.add(sb); - values.add(criterion.getValue()); - } - - void addRangePhrase(String col, InRangeFloatSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" " + col + " >= ? "); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - boolean isValid(String v) { - return v != null && !v.isEmpty(); - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/FrameSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/FrameSearch.java deleted file mode 100644 index f04e1de71..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/FrameSearch.java +++ /dev/null @@ -1,217 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.imageworks.spcue.dao.criteria.oracle; - -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import java.util.stream.Collectors; - -import com.google.common.collect.ImmutableList; -import org.apache.log4j.Logger; - -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.dao.criteria.FrameSearchInterface; -import com.imageworks.spcue.grpc.job.FrameSearchCriteria; -import com.imageworks.spcue.grpc.job.FrameState; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.FrameSet; - -public class FrameSearch extends Criteria implements FrameSearchInterface { - private static final int MAX_RESULTS = 1000; - private static final Logger logger = Logger.getLogger(FrameSearch.class); - private static final Pattern PATTERN_SINGLE_FRAME = Pattern.compile("^([0-9]+)$"); - private static final Pattern PATTERN_RANGE = Pattern.compile("^([0-9]+)\\-([0-9]+)$"); - private static final Pattern PATTERN_FLOAT_RANGE = Pattern.compile("^([0-9\\.]+)\\-([0-9\\.]+)$"); - private static final int RANGE_MAX_SIZE = 1000; - - private FrameSearchCriteria criteria; - private String sortedQuery; - - public FrameSearch() { - criteria = FrameSearchInterface.criteriaFactory(); - } - - @Override - public FrameSearchCriteria getCriteria() { - return criteria; - } - - @Override - public void setCriteria(FrameSearchCriteria criteria) { - this.criteria = criteria; - } - - @Override - public String getSortedQuery(String query) { - if (built) { - return sortedQuery; - } - - int limit = criteria.getLimit(); - int page = criteria.getPage(); - - if (limit <= 0 || limit >= MAX_RESULTS) { - criteria = criteria.toBuilder().setLimit(MAX_RESULTS).build(); - } - if (page <= 0) { - page = 1; - } - - StringBuilder sb = new StringBuilder(query.length() + 256); - sb.append("SELECT * FROM ("); - sb.append(getFilteredQuery(query)); - 
sb.append(" ) WHERE row_number > ?"); - sb.append(" AND row_number <= ?"); - values.add((page - 1) * limit); - values.add(page * limit); - sortedQuery = sb.toString(); - return sortedQuery; - } - - @Override - public void filterByFrameIds(List frameIds) { - criteria = criteria.toBuilder().addAllIds(frameIds).build(); - } - - @Override - public void filterByJob(JobInterface job) { - addPhrase("job.pk_job", job.getJobId()); - } - - @Override - public void filterByFrame(FrameInterface frame) { - filterByFrameIds(ImmutableList.of(frame.getFrameId())); - } - - @Override - public void filterByLayer(LayerInterface layer) { - addPhrase("layer.pk_layer", layer.getLayerId()); - } - - @Override - public void filterByLayers(List layers) { - addPhrase( - "layer.pk_layer", - layers.stream().map(LayerInterface::getLayerId).collect(Collectors.toList())); - } - - @Override - public void filterByFrameStates(List frameStates) { - addPhrase( - "frame.str_state", - frameStates.stream().map(FrameState::toString).collect(Collectors.toSet())); - } - - @Override - public void filterByFrameSet(String frameSet) { - StringBuilder sb = new StringBuilder(8096); - Matcher matchRange = PATTERN_RANGE.matcher(frameSet); - Matcher matchSingle = PATTERN_SINGLE_FRAME.matcher(frameSet); - - if (matchSingle.matches()) { - sb.append("frame.int_number=?"); - values.add(Integer.valueOf(matchSingle.group(1))); - } else if (matchRange.matches()) { - sb.append(" ( frame.int_number >= ? AND "); - sb.append(" frame.int_number <= ? )"); - values.add(Integer.valueOf(matchRange.group(1))); - values.add(Integer.valueOf(matchRange.group(2))); - } else { - FrameSet set = new FrameSet(frameSet); - int num_frames = set.size(); - if (num_frames <= RANGE_MAX_SIZE) { - sb.append("("); - for (int i=0; i= ? AND frame.int_mem_max_used <= ?) "); - } - else { - values.add(CueUtil.GB * Float.valueOf(range)); - sb.append(" frame.int_mem_max_used >= ? 
"); - } - } catch (RuntimeException e) { - logger.warn("Failed to convert float range: " + range + "," + e); - } - chunks.add(sb); - } - - @Override - public void filterByDurationRange(String range) { - StringBuilder sb = new StringBuilder(128); - Matcher matchRange = PATTERN_FLOAT_RANGE.matcher(range); - try { - if (matchRange.matches()) { - values.add((int) (3600 * Float.valueOf(matchRange.group(1)))); - values.add((int) (3600 * Float.valueOf(matchRange.group(2)))); - sb.append(" (frame.str_state != 'Waiting' "); - sb.append(" AND find_duration(frame.ts_started, frame.ts_stopped) "); - sb.append(" BETWEEN ? AND ? )"); - } - else { - values.add((int) (3600 * Float.valueOf(range))); - sb.append(" (frame.str_state != 'Waiting' AND "); - sb.append("find_duration(frame.ts_started, frame.ts_stopped) >= ?) "); - } - } catch (RuntimeException e) { - logger.warn("Failed to convert float range: " + range + "," + e); - // a cast failed, ignore for now. - } - chunks.add(sb); - } - - @Override - public void filterByChangeDate(int changeDate) { - StringBuilder sb = new StringBuilder(); - sb.append("frame.ts_updated > ?"); - chunks.add(sb); - values.add(new java.sql.Timestamp( changeDate * 1000L)); - } - - @Override - void buildWhereClause() { - addPhrase("frame.pk_frame", criteria.getIdsList()); - - addPhrase("frame.str_name", criteria.getFramesList()); - addPhrase("layer.str_name", criteria.getLayersList()); - filterByFrameStates(criteria.getStates().getFrameStatesList()); - if (isValid(criteria.getFrameRange())) { filterByFrameSet(criteria.getFrameRange()); } - if (isValid(criteria.getMemoryRange())) { filterByMemoryRange(criteria.getMemoryRange()); } - if (isValid(criteria.getDurationRange())) { filterByDurationRange(criteria.getDurationRange()); } - if (criteria.getChangeDate() > 0) { filterByChangeDate(criteria.getChangeDate()); } - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/HostSearch.java 
b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/HostSearch.java deleted file mode 100644 index b7720537d..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/HostSearch.java +++ /dev/null @@ -1,56 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.imageworks.spcue.dao.criteria.oracle; - -import java.util.HashSet; -import java.util.Set; - -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.dao.criteria.HostSearchInterface; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.host.HostSearchCriteria; - -public class HostSearch extends Criteria implements HostSearchInterface { - private HostSearchCriteria criteria; - - public HostSearch(HostSearchCriteria criteria) { - this.criteria = criteria; - } - - public HostSearchCriteria getCriteria() { - return this.criteria; - } - - public void filterByAlloc(AllocationInterface alloc) { - addPhrase("host.pk_alloc", alloc.getAllocationId()); - } - - @Override - public void buildWhereClause() { - addPhrase("host.pk_host", criteria.getIdsList()); - addPhrase("host.str_name", criteria.getHostsList()); - addPhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); - addRegexPhrase("host.str_name", new HashSet<>(criteria.getRegexList())); - addPhrase("alloc.str_name", criteria.getAllocsList()); - Set items = new 
HashSet<>(criteria.getStates().getStateCount()); - for (HardwareState w: criteria.getStates().getStateList()) { - items.add(w.toString()); - } - addPhrase("host_stat.str_state", items); - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/JobSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/JobSearch.java deleted file mode 100644 index c22631274..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/JobSearch.java +++ /dev/null @@ -1,63 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.imageworks.spcue.dao.criteria.oracle; - -import java.util.HashSet; - -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.criteria.JobSearchInterface; -import com.imageworks.spcue.grpc.job.JobSearchCriteria; - -public final class JobSearch extends Criteria implements JobSearchInterface { - private JobSearchCriteria criteria; - - public JobSearch() { - criteria = JobSearchInterface.criteriaFactory(); - } - - @Override - public JobSearchCriteria getCriteria() { - return criteria; - } - - @Override - public void setCriteria(JobSearchCriteria criteria) { - this.criteria = criteria; - } - - @Override - public void filterByShow(ShowInterface show) { - addPhrase("job.pk_show", show.getShowId()); - } - - @Override - void buildWhereClause() { - addPhrase("job.pk_job", criteria.getIdsList()); - addPhrase("job.str_name", criteria.getJobsList()); - addLikePhrase("job.str_name", new HashSet<>(criteria.getSubstrList())); - addRegexPhrase("job.str_name", new HashSet<>(criteria.getRegexList())); - addPhrase("job.str_shot", criteria.getShotsList()); - addPhrase("show.str_name", criteria.getShowsList()); - addPhrase("job.str_user", criteria.getUsersList()); - if (criteria.getIncludeFinished()) { - chunks.add(new StringBuilder(" ROWNUM < 200 ")); - } else { - addPhrase("job.str_state", "Pending"); - } - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/ProcSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/ProcSearch.java deleted file mode 100644 index 078b90cb6..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/oracle/ProcSearch.java +++ /dev/null @@ -1,127 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.imageworks.spcue.dao.criteria.oracle; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; - -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.dao.criteria.Phrase; -import com.imageworks.spcue.dao.criteria.ProcSearchInterface; -import com.imageworks.spcue.dao.criteria.Sort; -import com.imageworks.spcue.grpc.criterion.GreaterThanIntegerSearchCriterion; -import com.imageworks.spcue.grpc.criterion.InRangeIntegerSearchCriterion; -import com.imageworks.spcue.grpc.criterion.LessThanIntegerSearchCriterion; -import com.imageworks.spcue.grpc.host.ProcSearchCriteria; - -public class ProcSearch extends Criteria implements ProcSearchInterface { - - private ProcSearchCriteria criteria; - private Set notJobs = new HashSet<>(); - private Set notGroups = new HashSet<>(); - - public ProcSearch() { - criteria = ProcSearchInterface.criteriaFactory(); - } - - public ProcSearchCriteria getCriteria() { - return criteria; - } - - public void setCriteria(ProcSearchCriteria criteria) { - this.criteria = criteria; - } - - public void notJobs(List jobs) { - for (JobInterface job: jobs) { - notJobs.add(new Phrase("proc.pk_job","!=", job.getJobId())); - } - } - - public void notGroups(List groups) { - for (GroupInterface group: groups) { - notGroups.add(new Phrase("folder.pk_folder","!=", group.getGroupId())); - } - } - - public void filterByDurationRange(LessThanIntegerSearchCriterion criterion) { - StringBuilder sb = new 
StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) <= ?) "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - public void filterByDurationRange(GreaterThanIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) >= ?) "); - chunks.add(sb); - values.add(criterion.getValue()); - } - - public void filterByDurationRange(InRangeIntegerSearchCriterion criterion) { - StringBuilder sb = new StringBuilder(128); - sb.append(" (find_duration(proc.ts_dispatched, null) BETWEEN ? AND ? )"); - chunks.add(sb); - values.add(criterion.getMin()); - values.add(criterion.getMax()); - } - - public void filterByHost(HostInterface host) { - addPhrase("host.pk_host", host.getHostId()); - } - - public void sortByHostName() { - addSort(Sort.asc("host.str_name")); - } - - public void sortByDispatchedTime() { - addSort(Sort.asc("proc.ts_dispatched")); - } - - public void sortByBookedTime() { - addSort(Sort.asc("proc.ts_booked")); - } - - @Override - void buildWhereClause() { - addPhrases(notJobs, "AND"); - addPhrases(notGroups, "AND"); - - addPhrase("host.str_name", criteria.getHostsList()); - addPhrase("job.str_name", criteria.getJobsList()); - addPhrase("layer.str_name", criteria.getLayersList()); - addPhrase("show.str_name", criteria.getShowsList()); - addPhrase("alloc.str_name", criteria.getAllocsList()); - - if (criteria.getMemoryRangeCount() > 0) { - addRangePhrase("proc.int_mem_reserved", criteria.getMemoryRange(0)); - } - - if (criteria.getDurationRangeCount() > 0) { - filterByDurationRange(criteria.getDurationRange(0)); - } - - setFirstResult(criteria.getFirstResult()); - if (criteria.getMaxResultsCount() > 0) { - setMaxResults(criteria.getMaxResults(0)); - } - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java index ee3a1f841..de33e29cd 
100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/FrameSearch.java @@ -23,7 +23,8 @@ import java.util.stream.Collectors; import com.google.common.collect.ImmutableList; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.imageworks.spcue.FrameInterface; import com.imageworks.spcue.JobInterface; @@ -36,7 +37,7 @@ public class FrameSearch extends Criteria implements FrameSearchInterface { private static final int MAX_RESULTS = 1000; - private static final Logger logger = Logger.getLogger(FrameSearch.class); + private static final Logger logger = LogManager.getLogger(FrameSearch.class); private static final Pattern PATTERN_SINGLE_FRAME = Pattern.compile("^([0-9]+)$"); private static final Pattern PATTERN_RANGE = Pattern.compile("^([0-9]+)\\-([0-9]+)$"); private static final Pattern PATTERN_FLOAT_RANGE = Pattern.compile("^([0-9\\.]+)\\-([0-9\\.]+)$"); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java index 5d8de8586..9e84d3ed0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/criteria/postgres/HostSearch.java @@ -44,7 +44,7 @@ public void filterByAlloc(AllocationInterface alloc) { public void buildWhereClause() { addPhrase("host.pk_host", criteria.getIdsList()); addPhrase("host.str_name", criteria.getHostsList()); - addPhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); + addLikePhrase("host.str_name", new HashSet<>(criteria.getSubstrList())); addRegexPhrase("host.str_name", new HashSet<>(criteria.getRegexList())); addPhrase("alloc.str_name", criteria.getAllocsList()); Set items = new HashSet<>(criteria.getStates().getStateCount()); diff --git 
a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ActionDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ActionDaoJdbc.java deleted file mode 100644 index a393caa82..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ActionDaoJdbc.java +++ /dev/null @@ -1,164 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.ActionEntity; -import com.imageworks.spcue.ActionInterface; -import com.imageworks.spcue.FilterInterface; -import com.imageworks.spcue.SpcueRuntimeException; -import com.imageworks.spcue.dao.ActionDao; -import com.imageworks.spcue.grpc.filter.ActionType; -import com.imageworks.spcue.grpc.filter.ActionValueType; -import com.imageworks.spcue.util.SqlUtil; - -public class ActionDaoJdbc extends JdbcDaoSupport implements ActionDao { - - public static final String INSERT_ACTION = - "INSERT INTO " + - "action " + - "(" + - "pk_action,pk_filter,str_action,str_value_type,b_stop" + - ") VALUES (?,?,?,?,?)"; - - public void createAction(ActionEntity action) { - action.id = SqlUtil.genKeyRandom(); - boolean stopAction = 
ActionType.STOP_PROCESSING.equals(action.type); - getJdbcTemplate().update(INSERT_ACTION, - action.id, action.filterId,action.type.toString(), - action.valueType.toString(), stopAction); - updateAction(action); - } - - private static final String GET_ACTION = - "SELECT "+ - "action.*," + - "filter.pk_show "+ - "FROM " + - "action,"+ - "filter " + - "WHERE " + - "action.pk_filter = filter.pk_filter"; - - public ActionEntity getAction(String id) { - return getJdbcTemplate().queryForObject( - GET_ACTION + " AND pk_action=?", - ACTION_DETAIL_MAPPER, id); - } - - public ActionEntity getAction(ActionInterface action) { - return getJdbcTemplate().queryForObject( - GET_ACTION + " AND pk_action=?", - ACTION_DETAIL_MAPPER, action.getActionId()); - } - - public List getActions(FilterInterface filter) { - return getJdbcTemplate().query( - GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC", - ACTION_DETAIL_MAPPER, filter.getFilterId()); - } - - public void updateAction(ActionEntity action) { - if (action.isNew()) { - throw new SpcueRuntimeException("unable to update action that is not already commited"); - } - - // first we clear out all values - - getJdbcTemplate().update( - "UPDATE action SET str_value=NULL,int_value=NULL,b_value=NULL,float_value=NULL WHERE pk_action=?", - action.getActionId()); - - StringBuilder query = new StringBuilder(1024); - query.append("UPDATE action SET str_action=?,str_value_type=?"); - - List args = new ArrayList(4); - args.add(action.type.toString()); - args.add(action.valueType.toString()); - - switch(action.valueType) { - case GROUP_TYPE: - query.append(",pk_folder=? WHERE pk_action=?"); - args.add(action.groupValue); - break; - - case STRING_TYPE: - query.append(",str_value=? WHERE pk_action=?"); - args.add(action.stringValue); - break; - - case INTEGER_TYPE: - query.append(",int_value=? WHERE pk_action=?"); - args.add(action.intValue); - break; - - case FLOAT_TYPE: - query.append(",float_value=? 
WHERE pk_action=?"); - args.add(action.floatValue); - break; - - case BOOLEAN_TYPE: - query.append(",b_value=? WHERE pk_action=?"); - args.add(action.booleanValue); - break; - - case NONE_TYPE: - query.append(" WHERE pk_action=?"); - break; - - default: - throw new SpcueRuntimeException("invalid action value type: " + action.valueType); - } - - args.add(action.id); - getJdbcTemplate().update(query.toString(), - args.toArray()); - - } - - public void deleteAction(ActionInterface action) { - getJdbcTemplate().update("DELETE FROM action WHERE pk_action=?",action.getActionId()); - } - - public static final RowMapper ACTION_DETAIL_MAPPER = new RowMapper() { - public ActionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ActionEntity action = new ActionEntity(); - action.id = rs.getString("pk_action"); - action.showId = rs.getString("pk_show"); - action.filterId = rs.getString("pk_filter"); - action.booleanValue = rs.getBoolean("b_value"); - action.groupValue = rs.getString("pk_folder"); - action.intValue = rs.getLong("int_value"); - action.floatValue = rs.getFloat("float_value"); - action.type = ActionType.valueOf(rs.getString("str_action")); - action.valueType = ActionValueType.valueOf(rs.getString("str_value_type")); - action.stringValue = rs.getString("str_value"); - return action; - } - }; -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/AllocationDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/AllocationDaoJdbc.java deleted file mode 100644 index 2f4a44687..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/AllocationDaoJdbc.java +++ /dev/null @@ -1,205 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.CallableStatement; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Map; -import java.util.regex.Pattern; - -import org.springframework.dao.DataIntegrityViolationException; -import org.springframework.jdbc.core.CallableStatementCreator; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.SqlParameter; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.AllocationEntity; -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.EntityRemovalError; -import com.imageworks.spcue.FacilityInterface; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.util.SqlUtil; - -public class AllocationDaoJdbc extends JdbcDaoSupport implements AllocationDao { - - public static RowMapper ALLOC_MAPPER = new RowMapper() { - public AllocationEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - AllocationEntity alloc = new AllocationEntity(); - alloc.id = rs.getString("pk_alloc"); - alloc.facilityId = rs.getString("pk_facility"); - alloc.name = rs.getString("str_name"); - alloc.tag = rs.getString("str_tag"); - return alloc; - } - }; - - private static final String GET_ALLOCATION = - "SELECT " + - "alloc.pk_facility,"+ - "alloc.pk_alloc, " + - "alloc.str_name, "+ - "alloc.str_tag, " + - "facility.str_name AS facility_name " + - "FROM " + - "alloc, " + - "facility " + - "WHERE " + - 
"alloc.pk_facility = facility.pk_facility "; - - public AllocationEntity getAllocationEntity(String id) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND pk_alloc=?", - ALLOC_MAPPER, id); - } - - public AllocationEntity findAllocationEntity(String facility, String name) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.str_name=?", - ALLOC_MAPPER, String.format("%s.%s", facility, name)); - } - - @Override - public AllocationEntity findAllocationEntity(String name) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.str_name=?", - ALLOC_MAPPER, name); - } - - private static final String INSERT_ALLOCATION = - "INSERT INTO " + - "alloc " + - "(" + - "pk_alloc,"+ - "pk_facility,"+ - "str_name, "+ - "str_tag "+ - ") VALUES (?,?,?,?)"; - - public void insertAllocation(FacilityInterface facility, AllocationEntity detail) { - - String new_alloc_name = String.format("%s.%s", - facility.getName(), detail.getName()); - /* - * Checks if the allocation already exits. 
- */ - if (getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM alloc WHERE str_name=?", - Integer.class, new_alloc_name) > 0) { - - getJdbcTemplate().update( - "UPDATE alloc SET b_enabled=1 WHERE str_name=?", - new_alloc_name); - } - else { - detail.id = SqlUtil.genKeyRandom(); - detail.name = new_alloc_name; - getJdbcTemplate().update(INSERT_ALLOCATION, - detail.id, facility.getFacilityId(), - detail.name, detail.tag); - } - } - - public void deleteAllocation(AllocationInterface a) { - if (getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE pk_alloc=?", Integer.class, - a.getAllocationId()) > 0) { - throw new EntityRemovalError("allocation still contains hosts", a); - } - - if (getJdbcTemplate().queryForObject( - "SELECT b_default FROM alloc WHERE pk_alloc=?", Integer.class, - a.getAllocationId()) > 0) { - throw new EntityRemovalError("you cannot delete the default allocation", a); - } - - /* - * Allocations are logged in historical data so once they are used you - * can't specifically delete them. They are disabled instead. - */ - try { - getJdbcTemplate().update("DELETE FROM alloc WHERE pk_alloc=?", - a.getAllocationId()); - } catch (DataIntegrityViolationException e) { - getJdbcTemplate().update("UPDATE alloc SET b_enabled = 0 WHERe pk_alloc = ?", - a.getAllocationId()); - } - } - - public void updateAllocationName(AllocationInterface a, String name) { - - if (!Pattern.matches("^\\w+$", name)) { - throw new IllegalArgumentException("The new allocation name" + - "must be alpha numeric and not contain the facility prefix."); - } - - String[] parts = a.getName().split("\\.", 2); - String new_name = String.format("%s.%s", parts[0], name); - - getJdbcTemplate().update( - "UPDATE alloc SET str_name=? WHERE pk_alloc=?", - new_name, a.getAllocationId()); - } - - public void updateAllocationTag(AllocationInterface a, String tag) { - getJdbcTemplate().update("UPDATE alloc SET str_tag=? 
WHERE pk_alloc=?", - tag, a.getAllocationId()); - - getJdbcTemplate().update("UPDATE host_tag SET str_tag=? WHERE " + - "host_tag.str_tag_type='Alloc' AND pk_host IN " + - "(SELECT pk_host FROM host WHERE host.pk_alloc=?)", tag, - a.getAllocationId()); - - for (Map e: getJdbcTemplate().queryForList( - "SELECT pk_host FROM host WHERE pk_alloc=?",a.getAllocationId())) { - final String pk_host = (String) e.get("pk_host"); - getJdbcTemplate().call(new CallableStatementCreator() { - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recalculate_tags(?) }"); - c.setString(1, pk_host); - return c; - } - }, new ArrayList()); - } - } - - public void setDefaultAllocation(AllocationInterface a) { - getJdbcTemplate().update("UPDATE alloc SET b_default = 0 WHERE b_default = 1"); - getJdbcTemplate().update("UPDATE alloc SET b_default = 1 WHERe pk_alloc=?", - a.getAllocationId()); - } - - public AllocationEntity getDefaultAllocationEntity() { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.b_default = 1 AND ROWNUM = 1", - ALLOC_MAPPER); - } - - @Override - public void updateAllocationBillable(AllocationInterface alloc, boolean value) { - getJdbcTemplate().update( - "UPDATE alloc SET b_billable = ? WHERE pk_alloc = ?", - value, alloc.getAllocationId()); - - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/BookingDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/BookingDaoJdbc.java deleted file mode 100644 index d44423045..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/BookingDaoJdbc.java +++ /dev/null @@ -1,418 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Calendar; -import java.util.List; - -import org.springframework.dao.DataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.dao.BookingDao; -import com.imageworks.spcue.dispatcher.ResourceReservationFailureException; -import com.imageworks.spcue.grpc.renderpartition.RenderPartitionType; -import com.imageworks.spcue.util.SqlUtil; - -public class BookingDaoJdbc extends - JdbcDaoSupport implements BookingDao { - - private static final String INSERT_LOCAL_JOB_ASSIGNMENT = - "INSERT INTO " + - "host_local " + - "(" + - "pk_host_local,"+ - "pk_job,"+ - "pk_layer,"+ - "pk_frame,"+ - "str_type,"+ - "pk_host,"+ - "int_mem_max,"+ - "int_mem_idle,"+ - "int_cores_max,"+ - "int_cores_idle,"+ - "int_gpu_idle,"+ - "int_gpu_max,"+ - "int_threads "+ - ") " + - "VALUES " + - "(?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), job.getName()); - l.setHostId(h.getHostId()); - l.setJobId(job.getJobId()); - 
l.setType(RenderPartitionType.JOB_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpu(l.getMaxGpu()); - - getJdbcTemplate().update( - INSERT_LOCAL_JOB_ASSIGNMENT, - l.id, - job.getJobId(), - l.getLayerId(), - l.getFrameId(), - l.getType().toString(), - h.getHostId(), - l.getMaxMemory(), - l.getMaxMemory(), - l.getMaxCoreUnits(), - l.getMaxCoreUnits(), - l.getMaxGpu(), - l.getMaxGpu(), - l.getThreads()); - } - - @Override - public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), layer.getName()); - l.setHostId(h.getHostId()); - l.setJobId(layer.getJobId()); - l.setLayerId(layer.getLayerId()); - l.setType(RenderPartitionType.LAYER_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpu(l.getMaxGpu()); - - getJdbcTemplate().update( - INSERT_LOCAL_JOB_ASSIGNMENT, - l.id, - l.getJobId(), - l.getLayerId(), - l.getFrameId(), - l.getType().toString(), - h.getHostId(), - l.getMaxMemory(), - l.getMaxMemory(), - l.getMaxCoreUnits(), - l.getMaxCoreUnits(), - l.getMaxGpu(), - l.getMaxGpu(), - l.getThreads()); - } - - @Override - public void insertLocalHostAssignment(HostInterface h, FrameInterface frame, LocalHostAssignment l) { - l.id = SqlUtil.genKeyRandom(); - l.name = String.format("%s->%s", h.getName(), frame.getName()); - l.setHostId(h.getHostId()); - l.setJobId(frame.getJobId()); - l.setLayerId(frame.getLayerId()); - l.setFrameId(frame.getFrameId()); - l.setType(RenderPartitionType.FRAME_PARTITION); - l.setIdleCoreUnits(l.getMaxCoreUnits()); - l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpu(l.getMaxGpu()); - - getJdbcTemplate().update( - INSERT_LOCAL_JOB_ASSIGNMENT, - l.id, - l.getJobId(), - l.getLayerId(), - l.getFrameId(), - l.getType().toString(), - h.getHostId(), - l.getMaxMemory(), - l.getMaxMemory(), - l.getMaxCoreUnits(), - 
l.getMaxCoreUnits(), - l.getMaxGpu(), - l.getMaxGpu(), - l.getThreads()); - } - public static final RowMapper LJA_MAPPER = - new RowMapper() { - public LocalHostAssignment mapRow(final ResultSet rs, int rowNum) throws SQLException { - LocalHostAssignment l = new LocalHostAssignment(); - l.id = rs.getString("pk_host_local"); - l.setMaxCoreUnits(rs.getInt("int_cores_max")); - l.setMaxMemory(rs.getLong("int_mem_max")); - l.setMaxGpu(rs.getLong("int_gpu_max")); - l.setThreads(rs.getInt("int_threads")); - l.setIdleCoreUnits(rs.getInt("int_cores_idle")); - l.setIdleMemory(rs.getLong("int_mem_idle")); - l.setIdleGpu(rs.getLong("int_gpu_idle")); - l.setJobId(rs.getString("pk_job")); - l.setLayerId(rs.getString("pk_layer")); - l.setFrameId(rs.getString("pk_frame")); - l.setHostId(rs.getString("pk_host")); - l.setType(RenderPartitionType.valueOf(rs.getString("str_type"))); - return l; - } - }; - - private static final String QUERY_FOR_LJA = - "SELECT " + - "pk_host_local,"+ - "pk_job,"+ - "pk_layer," + - "pk_frame,"+ - "pk_host,"+ - "int_mem_idle,"+ - "int_mem_max,"+ - "int_cores_idle,"+ - "int_cores_max,"+ - "int_gpu_idle,"+ - "int_gpu_max,"+ - "int_threads, "+ - "str_type " + - "FROM " + - "host_local "; - - @Override - public List getLocalJobAssignment(HostInterface host) { - return getJdbcTemplate().query( - QUERY_FOR_LJA + - "WHERE " + - "host_local.pk_host = ? ", - LJA_MAPPER, host.getHostId()); - } - - @Override - public LocalHostAssignment getLocalJobAssignment(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_LJA + " WHERE pk_host_local = ?", - LJA_MAPPER, id); - } - - @Override - public LocalHostAssignment getLocalJobAssignment(String hostId, String jobId) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_LJA + " WHERE pk_host = ? 
and pk_job = ?", - LJA_MAPPER, hostId, jobId); - } - - @Override - public boolean deleteLocalJobAssignment(LocalHostAssignment l) { - return getJdbcTemplate().update( - "DELETE FROM host_local WHERE pk_host_local = ?", - l.getId()) > 0; - } - - private static final String HAS_LOCAL_JOB = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host_local " + - "WHERE " + - "host_local.pk_host = ? "; - - @Override - public boolean hasLocalJob(HostInterface host) { - return getJdbcTemplate().queryForObject(HAS_LOCAL_JOB, - Integer.class, host.getHostId()) > 0; - } - - private static final String HAS_ACTIVE_LOCAL_JOB = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host_local, " + - "proc " + - "WHERE " + - "host_local.pk_host = proc.pk_host " + - "AND " + - "proc.b_local = 1 " + - "AND " + - "host_local.pk_host = ? "; - - @Override - public boolean hasActiveLocalJob(HostInterface host) { - return getJdbcTemplate().queryForObject(HAS_ACTIVE_LOCAL_JOB, - Integer.class, host.getHostId()) > 0; - } - - private static final String IS_BLACKOUT_TIME = - "SELECT " + - "int_blackout_start,"+ - "int_blackout_duration " + - "FROM " + - "host,"+ - "deed "+ - "WHERE " + - "host.pk_host = deed.pk_host " + - "AND " + - "deed.b_blackout = 1 " + - "AND " + - "host.pk_host = ? 
"; - - public static final RowMapper BLACKOUT_MAPPER = - new RowMapper() { - public Boolean mapRow(final ResultSet rs, int rowNum) throws SQLException { - - int startTimeSeconds = rs.getInt("int_backout_start"); - int stopTimeSeconds = rs.getInt("int_blackout_stop"); - if (stopTimeSeconds <= startTimeSeconds) { - stopTimeSeconds = stopTimeSeconds + 86400; - } - - Calendar startTime = Calendar.getInstance(); - startTime.set(Calendar.HOUR_OF_DAY, 0); - startTime.set(Calendar.MINUTE, 0); - startTime.set(Calendar.SECOND, 0); - startTime.add(Calendar.SECOND, startTimeSeconds); - - Calendar stopTime = Calendar.getInstance(); - stopTime.set(Calendar.HOUR_OF_DAY, 0); - stopTime.set(Calendar.MINUTE, 0); - stopTime.set(Calendar.SECOND, 0); - stopTime.add(Calendar.SECOND, stopTimeSeconds); - - Calendar now = Calendar.getInstance(); - if (now.compareTo(startTime) >= 0 && now.compareTo(stopTime) <= 0) { - return true; - } - - return false; - } - }; - - @Override - public boolean isBlackoutTime(HostInterface h) { - try { - return getJdbcTemplate().queryForObject(IS_BLACKOUT_TIME, - BLACKOUT_MAPPER, h.getHostId()); - } catch (Exception e) { - return false; - } - } - - @Override - public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { - return getJdbcTemplate().queryForObject( - "SELECT ? - int_cores_max FROM host_local WHERE pk_host_local=?", - Integer.class, coreUnits, l.getId()); - } - - private static final String UPDATE_MAX_CORES = - "UPDATE " + - "host_local " + - "SET " + - "int_cores_idle = int_cores_idle + (? - int_cores_max), " + - "int_cores_max = ? "+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxCores(LocalHostAssignment l, int coreUnits) { - return getJdbcTemplate().update(UPDATE_MAX_CORES, - coreUnits, coreUnits, l.getId()) > 0; - } - - private static final String UPDATE_MAX_MEMORY = - "UPDATE " + - "host_local " + - "SET " + - "int_mem_idle = int_mem_idle + (? - int_mem_max), " + - "int_mem_max = ? 
"+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxMemory(LocalHostAssignment l, long maxMemory) { - return getJdbcTemplate().update( - UPDATE_MAX_MEMORY, maxMemory, maxMemory, l.getId()) > 0; - } - - private static final String UPDATE_MAX_GPU = - "UPDATE " + - "host_local " + - "SET " + - "int_gpu_idle = int_gpu_idle + (? - int_gpu_max), " + - "int_gpu_max = ? "+ - "WHERE " + - "pk_host_local = ? "; - - @Override - public boolean updateMaxGpu(LocalHostAssignment l, long maxGpu) { - return getJdbcTemplate().update( - UPDATE_MAX_GPU, maxGpu, maxGpu, l.getId()) > 0; - } - - @Override - public boolean deactivate(LocalHostAssignment l) { - return getJdbcTemplate().update( - "UPDATE host_local SET b_active = 0 WHERE " + - "pk_host_local = ? AND b_active = 1", - l.getId()) > 0; - } - - /** - * - * @param id - * @param cores - * @return - */ - @Override - public boolean allocateCoresFromHost(HostInterface h, int cores) { - - try { - return getJdbcTemplate().update( - "UPDATE host SET int_cores_idle = int_cores_idle - ? " + - "WHERE pk_host = ?", - cores, h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException("Failed to allocate " + - cores + " from host, " + e); - } - - } - - /** - * - * @param id - * @param cores - * @return - */ - @Override - public boolean deallocateCoresFromHost(HostInterface h, int cores) { - try { - return getJdbcTemplate().update( - "UPDATE host SET int_cores_idle = int_cores_idle + ? 
WHERE pk_host = ?", - cores, h.getHostId()) > 0; - } catch (DataAccessException e) { - throw new ResourceReservationFailureException("Failed to de-allocate " + - cores + " from host, " + e); - } - } - - @Override - public boolean hasResourceDeficit(HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host_local WHERE " + - "(int_cores_max < int_cores_max - int_cores_idle OR " + - "int_gpu_max < int_gpu_max - int_gpu_idle OR " + - "int_mem_max < int_mem_max - int_mem_idle) AND " + - "host_local.pk_host= ?", - Integer.class, host.getHostId()) > 0; - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/CommentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/CommentDaoJdbc.java deleted file mode 100644 index 3da40621b..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/CommentDaoJdbc.java +++ /dev/null @@ -1,134 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Map; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.CommentDetail; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.dao.CommentDao; -import com.imageworks.spcue.util.SqlUtil; - -public class CommentDaoJdbc extends JdbcDaoSupport implements CommentDao { - - public void deleteComment(String id) { - /* - * Checks what type of comment we have. - */ - Map type = getJdbcTemplate().queryForMap( - "SELECT pk_job, pk_host FROM comments WHERE pk_comment=?",id); - - /* - * If the comment is deleted successfully, check if we need to unset - * the b_comment boolean flag. - */ - if (getJdbcTemplate().update( - "DELETE FROM comments WHERE pk_comment=?",id) > 0) { - if (type.get("pk_job") != null) { - getJdbcTemplate().update("UPDATE job SET b_comment=0 WHERE job.pk_job = ? AND " + - "(SELECT COUNT(1) FROM comments c WHERE c.pk_job = job.pk_job) = 0",type.get("pk_job")); - } - else if (type.get("pk_host") != null) { - getJdbcTemplate().update("UPDATE host SET b_comment=0 WHERE host.pk_host = ? 
AND " + - "(SELECT COUNT(1) FROM comments c WHERE c.pk_host = host.pk_host) = 0",type.get("pk_host")); - } - } - } - - private static final RowMapper COMMENT_DETAIL_MAPPER = - new RowMapper() { - public CommentDetail mapRow(ResultSet rs, int row) throws SQLException { - CommentDetail d = new CommentDetail(); - d.id = rs.getString("pk_comment"); - d.message = rs.getString("str_message"); - d.subject = rs.getString("str_subject"); - d.timestamp = rs.getTimestamp("ts_created"); - d.user = rs.getString("str_user"); - return d; - } - }; - - public CommentDetail getCommentDetail(String id) { - return getJdbcTemplate().queryForObject( - "SELECT * FROM comments WHERE pk_comment=?", - COMMENT_DETAIL_MAPPER, id); - } - - public void updateComment(CommentDetail comment) { - getJdbcTemplate().update( - "UPDATE comments SET str_message=?,str_subject=? WHERE pk_comment=?", - comment.message, comment.subject, comment.id); - } - - public void updateCommentMessage(String id, String message) { - getJdbcTemplate().update( - "UPDATE comments SET str_message=? WHERE pk_comment=?", - message,id); - } - - public void updateCommentSubject(String id, String subject) { - getJdbcTemplate().update( - "UPDATE comments SET str_subject=? 
WHERE pk_comment=?", - subject,id); - } - - private static final String INSERT_JOB_COMMENT = - "INSERT INTO " + - "comments " + - "(" + - "pk_comment,pk_job,str_user,str_subject,str_message"+ - ") VALUES (?,?,?,?,?)"; - - public void insertComment(JobInterface job, CommentDetail comment) { - comment.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_COMMENT, - comment.id, job.getJobId(), comment.user, - comment.subject, comment.message); - getJdbcTemplate().update( - "UPDATE job SET b_comment=1 WHERE pk_job=?", - job.getJobId()); - } - - private static final String INSERT_HOST_COMMENT = - "INSERT INTO " + - "comments " + - "(" + - "pk_comment,pk_host,str_user,str_subject,str_message"+ - ") VALUES (?,?,?,?,?)"; - - - public void insertComment(HostInterface host, CommentDetail comment) { - comment.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_HOST_COMMENT, - comment.id, host.getHostId(), comment.user, - comment.subject, comment.message); - getJdbcTemplate().update( - "UPDATE host SET b_comment=1 WHERE pk_host=?", - host.getHostId()); - } - -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DeedDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DeedDaoJdbc.java deleted file mode 100644 index 80adf6db5..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DeedDaoJdbc.java +++ /dev/null @@ -1,140 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.DeedEntity; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.OwnerEntity; -import com.imageworks.spcue.dao.DeedDao; -import com.imageworks.spcue.util.SqlUtil; - -public class DeedDaoJdbc extends JdbcDaoSupport implements DeedDao { - - public static final RowMapper - DEED_MAPPER = new RowMapper() { - public DeedEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - DeedEntity o = new DeedEntity(); - o.id = rs.getString("pk_deed"); - o.owner = rs.getString("str_username"); - o.host = rs.getString("str_hostname"); - o.isBlackoutEnabled = rs.getBoolean("b_blackout"); - o.blackoutStart = rs.getInt("int_blackout_start"); - o.blackoutStop = rs.getInt("int_blackout_stop"); - return o; - } - }; - - @Override - public boolean deleteDeed(DeedEntity deed) { - return getJdbcTemplate().update( - "DELETE FROM deed WHERE pk_deed = ?", - deed.getId()) > 0; - } - - @Override - public boolean deleteDeed(HostInterface host) { - return getJdbcTemplate().update( - "DELETE FROM deed WHERE pk_host = ?", - host.getHostId()) > 0; - } - - @Override - public void deleteDeeds(OwnerEntity owner) { - getJdbcTemplate().update( - "DELETE FROM deed WHERE pk_owner = ?", - owner.getId()); - } - - private static final String INSERT_DEED = - "INSERT INTO " + - "deed " + - "("+ - "pk_deed,"+ - "pk_owner,"+ - "pk_host " + - ") "+ - "VALUES (?,?,?)"; - - public DeedEntity insertDeed(OwnerEntity owner, HostInterface host) { - DeedEntity deed = new DeedEntity(); - deed.id = SqlUtil.genKeyRandom(); - deed.host = host.getName(); - deed.owner = owner.name; - - 
getJdbcTemplate().update(INSERT_DEED, - deed.getId(), owner.getId(), host.getId()); - - return deed; - } - - private static final String QUERY_FOR_DEED = - "SELECT " + - "deed.pk_deed, "+ - "deed.b_blackout,"+ - "deed.int_blackout_start,"+ - "deed.int_blackout_stop, " + - "host.str_name as str_hostname, " + - "owner.str_username " + - "FROM " + - "deed,"+ - "host,"+ - "owner " + - "WHERE " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "deed.pk_host = host.pk_host "; - - @Override - public DeedEntity getDeed(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_DEED + " AND pk_deed = ?", - DEED_MAPPER, id); - } - - @Override - public List getDeeds(OwnerEntity owner) { - return getJdbcTemplate().query( - QUERY_FOR_DEED + " AND owner.pk_owner = ?", - DEED_MAPPER, owner.getId()); - } - - @Override - public void setBlackoutTime(DeedEntity deed, int startSeconds, int stopSeconds) { - getJdbcTemplate().update( - "UPDATE deed SET int_blackout_start = ?, " + - "int_blackout_stop = ? WHERE deed.pk_deed = ?", - startSeconds, stopSeconds, deed.getId()); - } - - @Override - public void updateBlackoutTimeEnabled(DeedEntity deed, boolean bool) { - getJdbcTemplate().update( - "UPDATE deed SET b_blackout = ? WHERE deed.pk_deed = ?", - bool, deed.getId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DepartmentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DepartmentDaoJdbc.java deleted file mode 100644 index 32ddaaa28..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DepartmentDaoJdbc.java +++ /dev/null @@ -1,84 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.DepartmentEntity; -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.util.SqlUtil; - -public class DepartmentDaoJdbc extends JdbcDaoSupport implements DepartmentDao { - - public static final RowMapper DEPARTMENT_MAPPER = new RowMapper() { - public DepartmentInterface mapRow(ResultSet rs, int rowNum) throws SQLException { - DepartmentEntity d = new DepartmentEntity(); - d.id = rs.getString("pk_dept"); - d.name = rs.getString("str_name"); - return d; - } - }; - - @Override - public boolean departmentExists(String name) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM dept WHERE str_name=?", - Integer.class, name) > 0; - } - - @Override - public DepartmentInterface findDepartment(String name) { - return getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE str_name=?", - DEPARTMENT_MAPPER, name); - } - - @Override - public DepartmentInterface getDefaultDepartment() { - return getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE b_default=1", - DEPARTMENT_MAPPER); - } - - @Override - public DepartmentInterface getDepartment(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_dept, str_name FROM dept WHERE pk_dept=?", - DEPARTMENT_MAPPER, 
id); - } - - @Override - public void deleteDepartment(DepartmentInterface d) { - getJdbcTemplate().update("DELETE FROM dept WHERE pk_dept=?", - d.getDepartmentId()); - } - - @Override - public void insertDepartment(String name) { - getJdbcTemplate().update("INSERT INTO dept (pk_dept,str_name) VALUES (?,?)", - SqlUtil.genKeyRandom(), name); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DependDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DependDaoJdbc.java deleted file mode 100644 index 72c03da34..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DependDaoJdbc.java +++ /dev/null @@ -1,751 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LightweightDependency; -import com.imageworks.spcue.dao.DependDao; -import com.imageworks.spcue.depend.DependException; -import com.imageworks.spcue.depend.FrameByFrame; -import com.imageworks.spcue.depend.FrameOnFrame; -import com.imageworks.spcue.depend.FrameOnJob; -import com.imageworks.spcue.depend.FrameOnLayer; -import com.imageworks.spcue.depend.JobOnFrame; -import com.imageworks.spcue.depend.JobOnJob; -import com.imageworks.spcue.depend.JobOnLayer; -import com.imageworks.spcue.depend.LayerOnFrame; -import com.imageworks.spcue.depend.LayerOnJob; -import com.imageworks.spcue.depend.LayerOnLayer; -import com.imageworks.spcue.depend.PreviousFrame; -import com.imageworks.spcue.grpc.depend.DependTarget; -import com.imageworks.spcue.grpc.depend.DependType; -import com.imageworks.spcue.util.SqlUtil; - -public class DependDaoJdbc extends JdbcDaoSupport implements DependDao { - - public static final RowMapper DEPEND_MAPPER = new RowMapper() { - public LightweightDependency mapRow(ResultSet rs, int row) throws SQLException { - LightweightDependency d = new LightweightDependency(); - d.id = rs.getString("pk_depend"); - d.type = DependType.valueOf(rs.getString("str_type")); - d.target = DependTarget.valueOf(rs.getString("str_target")); - d.anyFrame = rs.getBoolean("b_any"); - d.parent = rs.getString("pk_parent"); - d.active = rs.getBoolean("b_active"); - d.dependErFrameId = rs.getString("pk_frame_depend_er"); - d.dependOnFrameId = rs.getString("pk_frame_depend_on"); - d.dependErLayerId = rs.getString("pk_layer_depend_er"); - d.dependOnLayerId 
=rs.getString("pk_layer_depend_on"); - d.dependOnJobId = rs.getString("pk_job_depend_on"); - d.dependErJobId = rs.getString("pk_job_depend_er"); - return d; - } - }; - - private static final String INSERT_DEPEND = - "INSERT INTO " + - "depend " + - "(" + - "pk_depend,"+ - "pk_parent,"+ - "pk_job_depend_er," + - "pk_layer_depend_er," + - "pk_frame_depend_er," + - "pk_job_depend_on," + - "pk_layer_depend_on," + - "pk_frame_depend_on," + - "str_type," + - "b_any, " + - "str_target, " + - "b_active, " + - "str_signature, "+ - "b_composite " + - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertDepend(JobOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErJob().getJobId(), - null, - null, - d.getDependOnJob().getJobId(), - null, - null, - DependType.JOB_ON_JOB.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(JobOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErJob().getJobId(), - null, - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.JOB_ON_LAYER.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(JobOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErJob().getJobId(), - null, - null, - d.getDependOnFrame().getJobId(), - d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), - DependType.JOB_ON_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(LayerOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - 
getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnJob().getJobId(), - null, - null, - DependType.LAYER_ON_JOB.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(LayerOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.LAYER_ON_LAYER.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(LayerOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnFrame().getJobId(), - d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), - DependType.LAYER_ON_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameOnJob d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), - d.getDependErFrame().getFrameId(), - d.getDependOnJob().getJobId(), - null, - null, - DependType.FRAME_ON_JOB.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameOnLayer d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), - 
d.getDependErFrame().getFrameId(), - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.FRAME_ON_LAYER.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(PreviousFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.PREVIOUS_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameOnFrame d) { - d.setId(SqlUtil.genKeyRandom()); - String parentId = null; - if (d.getParent() != null) { - parentId =d.getParent().getId(); - } - - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - parentId, - d.getDependErFrame().getJobId(), - d.getDependErFrame().getLayerId(), - d.getDependErFrame().getFrameId(), - d.getDependOnFrame().getJobId(), - d.getDependOnFrame().getLayerId(), - d.getDependOnFrame().getFrameId(), - DependType.FRAME_ON_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - @Override - public void insertDepend(FrameByFrame d) { - d.setId(SqlUtil.genKeyRandom()); - getJdbcTemplate().update(INSERT_DEPEND, - d.getId(), - null, - d.getDependErLayer().getJobId(), - d.getDependErLayer().getLayerId(), - null, - d.getDependOnLayer().getJobId(), - d.getDependOnLayer().getLayerId(), - null, - DependType.FRAME_BY_FRAME.toString(), - d.isAnyFrame(), - d.getTarget().toString(), - d.isActive(), - d.getSignature(), - d.isComposite()); - } - - private static final String UPDATE_FRAME_STATE = - "UPDATE " + - "frame " + - "SET " + - "str_state='DEPEND' " + - "WHERE " + - "int_depend_count != 0 " + - "AND " + - "frame.str_state 
NOT IN ('SUCCEEDED','EATEN','RUNNING','DEPEND') " + - "AND " + - "frame.pk_frame = ?"; - - @Override - public void updateFrameState(FrameInterface f) { - getJdbcTemplate().update(UPDATE_FRAME_STATE, - f.getFrameId()); - } - - private static final String UPDATE_DEPEND_COUNT = - "UPDATE " + - "frame " + - "SET " + - "int_depend_count = int_depend_count + 1 " + - "WHERE " + - "pk_frame = ?"; - - @Override - public void incrementDependCount(FrameInterface f) { - int result = getJdbcTemplate().update(UPDATE_DEPEND_COUNT, - f.getFrameId()); - if (result == 0) { - throw new DependException("updating the depend count for " + - " the frame " + f.getName() + " in job " + f.getJobId() + - "failed."); - } - } - - private static final String DECREMENT_DEPEND_COUNT = - "UPDATE " + - "frame " + - "SET " + - "int_depend_count = int_depend_count -1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_depend_count > 0"; - - @Override - public boolean decrementDependCount(FrameInterface f) { - return getJdbcTemplate().update(DECREMENT_DEPEND_COUNT, - f.getFrameId()) == 1; - } - - private static final String[] DELETE_DEPEND = { - "DELETE FROM depend WHERE pk_parent=?", - "DELETE FROM depend WHERE pk_depend=?" 
- }; - - @Override - public void deleteDepend(LightweightDependency depend) { - if (depend.type.equals(DependType.FRAME_BY_FRAME)) { - getJdbcTemplate().update(DELETE_DEPEND[0], depend.getId()); - } - getJdbcTemplate().update(DELETE_DEPEND[1], depend.getId()); - } - - private static final String GET_LIGHTWEIGHT_DEPEND = - "SELECT * FROM depend WHERE pk_depend=?"; - - @Override - public LightweightDependency getDepend(String id) { - return getJdbcTemplate().queryForObject( - GET_LIGHTWEIGHT_DEPEND, - DEPEND_MAPPER, id); - } - - private static final String GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE = - "SELECT * FROM depend WHERE str_signature=?"; - - @Override - public LightweightDependency getDependBySignature(String s) { - return getJdbcTemplate().queryForObject( - GET_LIGHTWEIGHT_DEPEND_BY_SIGNATURE, - DEPEND_MAPPER, s); - } - - private static final String GET_WHAT_DEPENDS_ON_JOB = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "pk_job_depend_on=? 
" + - "AND " + - "b_active = 1 " + - "AND " + - "str_type IN (?,?,?)"; - - @Override - public List getWhatDependsOn(JobInterface job) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB, - DEPEND_MAPPER, job.getJobId(), - DependType.JOB_ON_JOB.toString(), - DependType.LAYER_ON_JOB.toString(), - DependType.FRAME_ON_JOB.toString()); - } - - private static final String GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "pk_job_depend_on=? " + - "AND " + - "b_active = 1 " + - "AND " + - "str_target = ? " + - "AND " + - "str_type IN (?,?,?)"; - - @Override - public List getWhatDependsOn(JobInterface job, DependTarget target) { - if (target.equals(DependTarget.ANY_TARGET)) { - return getWhatDependsOn(job); - } - else { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_JOB_WITH_TARGET, - DEPEND_MAPPER, job.getJobId(), target.toString(), - DependType.JOB_ON_JOB.toString(), - DependType.LAYER_ON_JOB.toString(), - DependType.FRAME_ON_JOB.toString()); - } - } - - private static final String GET_WHAT_DEPENDS_ON_LAYER = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "pk_job_depend_on=? " + - "AND " + - "pk_layer_depend_on=? " + - "AND " + - "str_type IN (?,?,?) 
" + - "AND " + - "b_active = ?"; - - @Override - public List getWhatDependsOn(LayerInterface layer) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, - DEPEND_MAPPER, layer.getJobId(), layer.getLayerId(), - DependType.JOB_ON_LAYER.toString(), - DependType.LAYER_ON_LAYER.toString(), - DependType.FRAME_ON_LAYER.toString(), - true); - } - - @Override - public List getWhatDependsOn(LayerInterface layer, boolean active) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_LAYER, - DEPEND_MAPPER, layer.getJobId(), layer.getLayerId(), - DependType.JOB_ON_LAYER.toString(), - DependType.LAYER_ON_LAYER.toString(), - DependType.FRAME_ON_LAYER.toString(), - active); - } - - - private static final String GET_WHAT_DEPENDS_ON_FRAME = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "b_active = ? " + - "AND " + - "pk_job_depend_on = ? " + - "AND " + - "(pk_frame_depend_on = ? " + - "AND " + - "str_type IN (?,?,?)) " + - "OR " + - "(pk_layer_depend_on = ? AND str_type = ? 
AND b_any = 1)"; - - @Override - public List getWhatDependsOn(FrameInterface frame) { - return getWhatDependsOn(frame, true); - } - - @Override - public List getWhatDependsOn(FrameInterface frame, boolean active) { - return getJdbcTemplate().query(GET_WHAT_DEPENDS_ON_FRAME, - DEPEND_MAPPER, active, frame.getJobId(), frame.getFrameId(), - DependType.FRAME_ON_FRAME.toString(), - DependType.LAYER_ON_FRAME.toString(), - DependType.JOB_ON_FRAME.toString(), - frame.getLayerId(), - DependType.LAYER_ON_LAYER.toString()); - } - - private static final String SET_INACTIVE = - "UPDATE " + - "depend " + - "SET " + - "b_active=0,"+ - "ts_satisfied=systimestamp,"+ - "str_signature=pk_depend "+ - "WHERE " + - "pk_depend = ? " + - "AND " + - "b_active = 1 " + - "AND " + - "b_composite = 0"; - - @Override - public boolean setInactive(LightweightDependency depend) { - depend.active = getJdbcTemplate().update(SET_INACTIVE, depend.getId()) == 1; - return depend.active; - } - - private static final String SET_ACTIVE = - "UPDATE " + - "depend " + - "SET " + - "b_active=1 "+ - "WHERE " + - "pk_depend=? " + - "AND "+ - "b_active=0"; - - @Override - public boolean setActive(LightweightDependency depend) { - if (!depend.type.equals(DependType.FRAME_ON_FRAME) - && !depend.type.equals(DependType.LAYER_ON_LAYER)) { - return false; - } - depend.active = getJdbcTemplate().update( - SET_ACTIVE, depend.getId()) == 1; - return depend.active; - } - - private static final String GET_CHILD_DEPENDS = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_job_depend_er = ? " + - "AND " + - "depend.pk_job_depend_on = ? " + - "AND " + - "depend.pk_parent = ? 
" + - "AND " + - "depend.b_active = 1 "; - - @Override - public List getChildDepends(LightweightDependency depend) { - return getJdbcTemplate().query(GET_CHILD_DEPENDS, DEPEND_MAPPER, - depend.dependErJobId, depend.dependOnJobId, depend.id); - } - - private static final String GET_WHAT_THIS_JOB_DEPENDS_ON = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_job_depend_er=? " + - "AND " + - "depend.b_active=1 " + - "AND " + - "depend.pk_parent IS NULL "; - - @Override - public List getWhatThisDependsOn(JobInterface job, DependTarget target) { - String query = GET_WHAT_THIS_JOB_DEPENDS_ON; - Object[] values = new Object[] { job.getJobId() }; - if (!target.equals(DependTarget.ANY_TARGET)) { - query = query + " AND depend.str_target = ?"; - values = new Object[] { job.getJobId(), target.toString() }; - } - return getJdbcTemplate().query(query,DEPEND_MAPPER, values); - - } - - private static final String GET_WHAT_THIS_LAYER_DEPENDS_ON = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_layer_depend_er=? " + - "AND " + - "depend.b_active=1 " + - "AND " + - "depend.pk_parent IS NULL " + - "AND " + - "depend.str_type IN (?,?,?,?) 
"; - - @Override - public List getWhatThisDependsOn(LayerInterface layer, DependTarget target) { - if (!target.equals(DependTarget.ANY_TARGET)) { - String query = GET_WHAT_THIS_LAYER_DEPENDS_ON + " AND str_target = ?"; - return getJdbcTemplate().query(query, DEPEND_MAPPER, - layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), - DependType.FRAME_BY_FRAME.toString(), target.toString()); - } - else { - return getJdbcTemplate().query(GET_WHAT_THIS_LAYER_DEPENDS_ON, DEPEND_MAPPER, - layer.getLayerId(), DependType.LAYER_ON_JOB.toString(), - DependType.LAYER_ON_LAYER.toString(), DependType.LAYER_ON_FRAME.toString(), - DependType.FRAME_BY_FRAME.toString()); - } - } - - private static final String GET_WHAT_THIS_FRAME_DEPENDS_ON = - "SELECT " + - "depend.pk_depend," + - "depend.str_type," + - "depend.str_target,"+ - "depend.b_any,"+ - "depend.pk_parent,"+ - "depend.b_active," + - "depend.pk_frame_depend_er,"+ - "depend.pk_frame_depend_on,"+ - "depend.pk_layer_depend_er,"+ - "depend.pk_layer_depend_on,"+ - "depend.pk_job_depend_er,"+ - "depend.pk_job_depend_on "+ - "FROM " + - "depend " + - "WHERE " + - "depend.pk_frame_depend_er=? " + - "AND " + - "depend.b_active=1 " + - "AND " + - "depend.str_type IN (?,?,?) 
"; - - @Override - public List getWhatThisDependsOn(FrameInterface frame, DependTarget target) { - if (!target.equals(DependTarget.ANY_TARGET)) { - String query = GET_WHAT_THIS_FRAME_DEPENDS_ON + " AND depend.str_target = ?"; - return getJdbcTemplate().query(query, DEPEND_MAPPER, - frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), - DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString(), - target.toString()); - } - else { - return getJdbcTemplate().query(GET_WHAT_THIS_FRAME_DEPENDS_ON, DEPEND_MAPPER, - frame.getFrameId(), DependType.FRAME_ON_JOB.toString(), - DependType.FRAME_ON_LAYER.toString(), DependType.FRAME_ON_FRAME.toString()); - } - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DispatchQuery.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DispatchQuery.java deleted file mode 100644 index 1577921d7..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DispatchQuery.java +++ /dev/null @@ -1,1293 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -public class DispatchQuery { - - public static final String FIND_JOBS_BY_SHOW = - "/* FIND_JOBS_BY_SHOW */ " + - "SELECT pk_job, int_priority, rank FROM ( " + - "SELECT " + - "ROW_NUMBER() OVER (ORDER BY job_resource.int_priority DESC) AS rank, " + - "job.pk_job, " + - "job_resource.int_priority " + - "FROM " + - "job , " + - "job_resource , " + - "folder , " + - "folder_resource, " + - "point , " + - "layer , " + - "layer_stat , " + - "host " + - "WHERE " + - "job.pk_job = job_resource.pk_job " + - "AND job.pk_folder = folder.pk_folder " + - "AND folder.pk_folder = folder_resource.pk_folder " + - "AND folder.pk_dept = point.pk_dept " + - "AND folder.pk_show = point.pk_show " + - "AND job.pk_job = layer.pk_job " + - "AND job_resource.pk_job = job.pk_job " + - "AND (CASE WHEN layer_stat.int_waiting_count > 0 THEN layer_stat.pk_layer ELSE NULL END) = layer.pk_layer " + - "AND " + - "(" + - "folder_resource.int_max_cores = -1 " + - "OR " + - "folder_resource.int_cores < folder_resource.int_max_cores " + - ") " + - "AND job.str_state = 'PENDING' " + - "AND job.b_paused = 0 " + - "AND job.pk_show = ? " + - "AND job.pk_facility = ? " + - "AND (job.str_os = ? OR job.str_os IS NULL)" + - "AND (CASE WHEN layer_stat.int_waiting_count > 0 THEN 1 ELSE NULL END) = 1 " + - "AND layer.int_cores_min <= ? " + - "AND layer.int_mem_min <= ? " + - "AND layer.b_threadable >= ? " + - "AND layer.int_gpu_min BETWEEN ? AND ? " + - "AND job_resource.int_cores + layer.int_cores_min < job_resource.int_max_cores " + - "AND CATSEARCH(host.str_tags, layer.str_tags, ?) 
> 0 " + - "AND layer.pk_layer IN (" + - "SELECT " + - "l.pk_layer " + - "FROM " + - "layer l " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = l.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE rank < ?"; - - - public static final String FIND_JOBS_BY_GROUP = - FIND_JOBS_BY_SHOW - .replace( - "FIND_JOBS_BY_SHOW", - "FIND_JOBS_BY_GROUP") - .replace( - "AND job.pk_show = ? ", - "AND job.pk_folder = ? "); - - - /** - * Dispatch a host in local booking mode. - */ - public static final String FIND_JOBS_BY_LOCAL = - "/* FIND_JOBS_BY_LOCAL */ SELECT pk_job,float_tier, rank FROM ( " + - "SELECT " + - "ROW_NUMBER() OVER (ORDER BY " + - "host_local.float_tier ASC " + - ") AS rank, " + - "job.pk_job, " + - "host_local.float_tier " + - "FROM " + - "job, " + - "host_local " + - "WHERE " + - "job.pk_job = host_local.pk_job " + - "AND " + - "host_local.pk_host = ? " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "job.b_paused = 0 " + - "AND " + - "job.pk_facility = ? " + - "AND " + - "(job.str_os = ? 
OR job.str_os IS NULL)" + - "AND " + - "job.pk_job IN ( " + - "SELECT " + - "l.pk_job " + - "FROM " + - "job j, " + - "layer l, " + - "layer_stat lst, " + - "host h, " + - "host_local " + - "WHERE " + - "j.pk_job = l.pk_job " + - "AND " + - "j.pk_job = host_local.pk_job " + - "AND " + - "h.pk_host = host_local.pk_host " + - "AND " + - "h.pk_host = ? " + - "AND " + - "j.str_state = 'PENDING' " + - "AND " + - "j.b_paused = 0 " + - "AND " + - "j.pk_facility = ? " + - "AND " + - "(j.str_os = ? OR j.str_os IS NULL)" + - "AND " + - "(CASE WHEN lst.int_waiting_count > 0 THEN lst.pk_layer ELSE NULL END) = l.pk_layer " + - "AND " + - "(CASE WHEN lst.int_waiting_count > 0 THEN 1 ELSE NULL END) = 1 " + - "AND " + - "l.int_mem_min <= host_local.int_mem_idle " + - "AND " + - "l.int_gpu_min <= host_local.int_gpu_idle " + - "AND " + - "l.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") " + - ") WHERE rank < 5"; - - /** - * This query is run before a proc is dispatched to the next frame. - * It checks to see if there is another job someplace that is - * under its minimum and can take the proc. - * - * The current job the proc is on is excluded. 
This should only be run - * if the excluded job is actually over its min proc. - * - * Does not unbook for Utility frames - * - */ - public static final String FIND_UNDER_PROCED_JOB_BY_FACILITY = - "SELECT " + - "1 " + - "FROM " + - "job, " + - "job_resource, " + - "folder, " + - "folder_resource " + - "WHERE " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "(folder_resource.int_max_cores = -1 OR folder_resource.int_cores < folder_resource.int_max_cores) " + - "AND " + - "job_resource.float_tier < 1.00 " + - "AND " + - "job_resource.int_cores < job_resource.int_min_cores " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "job.b_paused = 0 " + - "AND " + - "job.pk_show = ? " + - "AND " + - "job.pk_facility = ? " + - "AND " + - "(job.str_os = ? OR job.str_os IS NULL)" + - "AND " + - "job.pk_job IN ( " + - "SELECT /* index (h i_str_host_tag) */ " + - "l.pk_job " + - "FROM " + - "job j, " + - "layer l, " + - "layer_stat lst, " + - "host h " + - "WHERE " + - "j.pk_job = l.pk_job " + - "AND " + - "j.str_state = 'PENDING' " + - "AND " + - "j.b_paused = 0 " + - "AND " + - "j.pk_show = ? " + - "AND " + - "j.pk_facility = ? " + - "AND " + - "(j.str_os = ? OR j.str_os IS NULL)" + - "AND " + - "(CASE WHEN lst.int_waiting_count > 0 THEN lst.pk_layer ELSE NULL END) = l.pk_layer " + - "AND " + - "(CASE WHEN lst.int_waiting_count > 0 THEN 1 ELSE NULL END) = 1 " + - "AND " + - "l.int_cores_min <= ? " + - "AND " + - "l.int_mem_min <= ? " + - "AND " + - "l.int_gpu_min = ? " + - "AND " + - "CATSEARCH(h.str_tags, l.str_tags, ?) 
> 0" + - "AND " + - "l.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") " + - "AND ROWNUM < 2 "; - - /** - * This query is run before a proc is dispatched to the next frame. - * It checks to see if there is another job someplace that is - * at a higher priority and can take the proc. - * - * The current job the proc is on is excluded. This should only be run - * if the excluded job is actually over its min proc. - * - * Does not unbook for Utility frames - * - */ - public static final String HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS = - "SELECT " + - "1 " + - "FROM " + - "job, " + - "job_resource, " + - "folder, " + - "folder_resource " + - "WHERE " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "(folder_resource.int_max_cores = -1 OR folder_resource.int_cores < folder_resource.int_max_cores) " + - "AND " + - "job_resource.int_priority > ?" + - "AND " + - "job_resource.int_cores < job_resource.int_max_cores " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "job.b_paused = 0 " + - "AND " + - "job.pk_facility = ? " + - "AND " + - "(job.str_os = ? 
OR job.str_os IS NULL)" + - "AND " + - "job.pk_job IN ( " + - "SELECT /* index (h i_str_host_tag) */ " + - "l.pk_job " + - "FROM " + - "job j, " + - "layer l, " + - "layer_stat lst, " + - "host h " + - "WHERE " + - "j.pk_job = l.pk_job " + - "AND " + - "j.str_state = 'PENDING' " + - "AND " + - "j.b_paused = 0 " + - "AND " + - "j.pk_facility = ? " + - "AND " + - "(j.str_os = ? OR j.str_os IS NULL)" + - "AND " + - "(CASE WHEN lst.int_waiting_count > 0 THEN lst.pk_layer ELSE NULL END) = l.pk_layer " + - "AND " + - "(CASE WHEN lst.int_waiting_count > 0 THEN 1 ELSE NULL END) = 1 " + - "AND " + - "l.int_cores_min <= ? " + - "AND " + - "l.int_mem_min <= ? " + - "AND " + - "l.int_gpu_min = ? " + - "AND " + - "CATSEARCH(h.str_tags, l.str_tags, ?) > 0" + - "AND " + - "l.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") " + - "AND ROWNUM < 2 "; - - /** - * Finds the next frame in a job for a proc. 
- */ - public static final String FIND_DISPATCH_FRAME_BY_JOB_AND_PROC = - "SELECT "+ - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - "str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "b_threadable,"+ - "int_cores_min,"+ - "int_cores_max,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - "frame.int_version, " + - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.b_threadable,"+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_cores_min <= ? " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.int_gpu_min BETWEEN ? AND ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "job.pk_job=? "+ - "AND layer.pk_layer IN ( " + - "SELECT /*+ index (h i_str_host_tag) */ " + - "pk_layer " + - "FROM " + - "layer l,"+ - "host h " + - "WHERE " + - "l.pk_job= ? " + - "AND " + - "CATSEARCH(h.str_tags, l.str_tags, ?) 
> 0 "+ - ") " + - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - /** - * Find the next frame in a job for a host. - */ - public static final String FIND_DISPATCH_FRAME_BY_JOB_AND_HOST = - "SELECT " + - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - "str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "int_cores_min,"+ - "int_cores_max,"+ - "b_threadable,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, 
"+ - "frame.int_version, "+ - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.b_threadable,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_cores_min <= ? " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.b_threadable >= ? " + - "AND " + - "layer.int_gpu_min BETWEEN ? AND ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "job.pk_job=? "+ - "AND " + - "layer.pk_layer IN ( " + - "SELECT /*+ index (h i_str_host_tag) */ " + - "pk_layer " + - "FROM " + - "layer l,"+ - "host h " + - "WHERE " + - "l.pk_job=? " + - "AND " + - "CATSEARCH(h.str_tags, l.str_tags,?) > 0 "+ - ") " + - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - - public static final String FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC = - "SELECT "+ - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - 
"str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "b_threadable,"+ - "int_cores_min,"+ - "int_cores_max,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - "frame.int_version, " + - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.b_threadable,"+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.int_gpu_min <= ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "job.pk_job=? 
"+ - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - /** - * Find the next frame in a job for a host. - */ - public static final String FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST = - "SELECT " + - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - "str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "int_cores_min,"+ - "int_cores_max,"+ - "b_threadable,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - 
"frame.int_version, "+ - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.b_threadable,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.int_gpu_min <= ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "job.pk_job=? "+ - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - - /**** LAYER DISPATCHING **/ - - /** - * Finds the next frame in a job for a proc. 
- */ - public static final String FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC = - - "SELECT "+ - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - "str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "b_threadable,"+ - "int_cores_min,"+ - "int_cores_max,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - "frame.int_version, " + - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.b_threadable,"+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_cores_min <= ? " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.int_gpu_min = ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "job.pk_layer=? "+ - "AND layer.pk_layer IN ( " + - "SELECT /*+ index (h i_str_host_tag) */ " + - "pk_layer " + - "FROM " + - "layer l,"+ - "host h " + - "WHERE " + - "l.pk_layer= ? " + - "AND " + - "CATSEARCH(h.str_tags, l.str_tags, ?) 
> 0 "+ - ") " + - "AND layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL) " + - ") WHERE LINENUM <= ?"; - - /** - * Find the next frame in a job for a host. - */ - public static final String FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST = - "SELECT " + - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - "str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "int_cores_min,"+ - "int_cores_max,"+ - "b_threadable,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - 
"frame.int_version, "+ - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.b_threadable,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_cores_min <= ? " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.b_threadable >= ? " + - "AND " + - "layer.int_gpu_min <= ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "layer.pk_layer=? "+ - "AND " + - "layer.pk_layer IN ( " + - "SELECT /*+ index (h i_str_host_tag) */ " + - "pk_layer " + - "FROM " + - "layer l,"+ - "host h " + - "WHERE " + - "l.pk_layer=? " + - "AND " + - "CATSEARCH(h.str_tags, l.str_tags,?) > 0 "+ - ") " + - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - - public static final String FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC = - "SELECT "+ - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - 
"str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "b_threadable,"+ - "int_cores_min,"+ - "int_cores_max,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - "frame.int_version, " + - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.b_threadable,"+ - "layer.int_cores_min,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.int_cores_max,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services " + - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.int_gpu_min <= ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "layer.pk_layer =? 
"+ - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - /** - * Find the next frame in a job for a host. - */ - public static final String FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST = - "SELECT " + - "show_name, "+ - "job_name, " + - "pk_job,"+ - "pk_show,"+ - "pk_facility,"+ - "str_name,"+ - "str_shot,"+ - "str_user,"+ - "int_uid,"+ - "str_log_dir,"+ - "frame_name, "+ - "frame_state, "+ - "pk_frame, "+ - "pk_layer, "+ - "int_retries, "+ - "int_version, " + - "layer_name, " + - "layer_type, "+ - "int_cores_min,"+ - "int_cores_max,"+ - "b_threadable,"+ - "int_mem_min,"+ - "int_gpu_min,"+ - "str_cmd, "+ - "str_range,"+ - "int_chunk_size, "+ - "str_services " + - "FROM (SELECT " + - "ROW_NUMBER() OVER ( ORDER BY " + - "frame.int_dispatch_order ASC, " + - "frame.int_layer_order ASC " + - ") LINENUM, " + - "job.str_show AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - 
"frame.int_version, "+ - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.b_threadable,"+ - "layer.int_mem_min,"+ - "layer.int_gpu_min,"+ - "layer.str_cmd, "+ - "layer.str_range, "+ - "layer.int_chunk_size, "+ - "layer.str_services "+ - "FROM " + - "job,"+ - "frame," + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.int_mem_min <= ? " + - "AND " + - "layer.int_gpu_min <= ? " + - "AND " + - "frame.str_state='WAITING' " + - "AND " + - "layer.pk_layer=? "+ - "AND " + - "layer.pk_layer IN (" + - "SELECT " + - "la.pk_layer " + - "FROM " + - "layer la " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = la.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) AS sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ") " + - ") WHERE LINENUM <= ?"; - - /** - * Looks for shows that are under their burst for a particular - * type of proc. The show has to be at least one whole proc - * under their burst to be considered for booking. - */ - public static final String FIND_SHOWS = - "SELECT " + - "vs_waiting.pk_show,"+ - "s.float_tier, " + - "s.int_burst " + - "FROM " + - "subscription s,"+ - "vs_waiting " + - "WHERE "+ - "vs_waiting.pk_show = s.pk_show " + - "AND " + - "s.pk_alloc = ? 
" + - "AND " + - "s.int_burst > 0 " + - "AND " + - "s.int_burst - s.int_cores >= 100 " + - "AND " + - "s.int_cores < s.int_burst "; - -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DispatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DispatcherDaoJdbc.java deleted file mode 100644 index ff56fe35a..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/DispatcherDaoJdbc.java +++ /dev/null @@ -1,412 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Collections; -import java.util.LinkedHashSet; -import java.util.LinkedList; -import java.util.List; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; - -import org.apache.log4j.Logger; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.DispatchFrame; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.SortableShow; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.dao.DispatcherDao; -import com.imageworks.spcue.grpc.host.ThreadMode; -import com.imageworks.spcue.util.CueUtil; - -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_DISPATCH_FRAME_BY_JOB_AND_HOST; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_DISPATCH_FRAME_BY_JOB_AND_PROC; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_JOBS_BY_GROUP; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_JOBS_BY_LOCAL; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_JOBS_BY_SHOW; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST; -import 
static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_SHOWS; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.FIND_UNDER_PROCED_JOB_BY_FACILITY; -import static com.imageworks.spcue.dao.oracle.DispatchQuery.HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS; - - - -/** - * Dispatcher DAO - * - * @category DAO - */ -public class DispatcherDaoJdbc extends JdbcDaoSupport implements DispatcherDao { - - private static final Logger logger = Logger.getLogger(DispatcherDaoJdbc.class); - - public static final RowMapper PKJOB_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("pk_job"); - } - }; - - private static final RowMapper SHOW_MAPPER = new RowMapper() { - public SortableShow mapRow(ResultSet rs, int rowNum) throws SQLException { - return new SortableShow( - rs.getString("pk_show"), - rs.getFloat("float_tier")); - } - }; - - private int threadMode(int mode) { - if (mode == ThreadMode.ALL_VALUE) - return mode; - return ThreadMode.AUTO_VALUE; - } - - /** - * Number of milliseconds before the show cache expires and - * a new show cache is created. - */ - private static final long SHOW_CACHE_EXPIRE_TIME_SEC = 8000; - - /** - * Wraps a list of SortableShows along with an expiration time. - */ - private class ShowCache { - final private long expireTime = System.currentTimeMillis() + SHOW_CACHE_EXPIRE_TIME_SEC; - final private List shows; - - public ShowCache(List shows) { - this.shows = shows; - Collections.sort(this.shows); - } - - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; - } - - public List getShows() { - return shows; - } - } - - /** - * A cache of SortableShows keyed on host tags. 
- */ - private final ConcurrentHashMap bookableShows = - new ConcurrentHashMap(); - - /** - * Returns a sorted list of shows that have pending jobs - * which could benefit from the specified allocation. - * - * @param alloc - * ' - * @return a sorted list of shows. - */ - private List getBookableShows(AllocationInterface alloc) { - String key = alloc.getAllocationId(); - - ShowCache cached = bookableShows.get(key); - if (cached == null) { - bookableShows.put(key, new ShowCache(getJdbcTemplate().query( - FIND_SHOWS, - SHOW_MAPPER, alloc.getAllocationId()))); - } - else if (cached.isExpired()) { - bookableShows.put(key, new ShowCache(getJdbcTemplate().query( - FIND_SHOWS, - SHOW_MAPPER, alloc.getAllocationId()))); - } - return bookableShows.get(key).shows; - } - - private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { - LinkedHashSet result = new LinkedHashSet(); - List shows = new LinkedList(getBookableShows(host)); - // shows were sorted. If we want it in random sequence, we need to shuffle it. - if (shuffleShows) { - if (!shows.isEmpty()) - shows.remove(0); - Collections.shuffle(shows); - } - - for (SortableShow s: shows) { - - if (s.isSkipped(host.tags, (long) host.cores, host.memory)) { - logger.info("skipping show " + s.getShowId()); - continue; - } - - if (s.isSkipped(host)) { - logger.info("skipping show " + s.getShowId() + ", over its subscription."); - continue; - } - - /** - * Check if the show is over its subscription because we're using - * cached SortableShows, we don't pull a fresh list of shows for - * a while. If the show is over its subscription the alloc - * gets add to the SortableShow skipped alloc set. - */ - if (getJdbcTemplate().queryForObject( - "SELECT int_burst - int_cores FROM subscription WHERE pk_show=? 
AND pk_alloc=?", - Integer.class, s.getShowId(), host.getAllocationId()) < 100) { - s.skip(host); - continue; - } - - result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_SHOW, - PKJOB_MAPPER, - s.getShowId(), host.getFacilityId(), host.os, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, - hostString(host.getName()), numJobs * 10)); - - if (result.size() < 1) { - if (host.gpu == 0) { - s.skip(host.tags, host.idleCores, host.idleMemory); - } - } - else { - return result; - } - } - return result; - - } - - @Override - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { - return findDispatchJobs(host, numJobs, true); - } - - @Override - public Set findDispatchJobs(DispatchHost host, int numJobs) { - return findDispatchJobs(host, numJobs, false); - } - - @Override - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { - LinkedHashSet result = new LinkedHashSet(5); - result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_GROUP, - PKJOB_MAPPER, - g.getGroupId(),host.getFacilityId(), host.os, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, - hostString(host.getName()), 50)); - - return result; - } - - @Override - public List findNextDispatchFrames(JobInterface job, - VirtualProc proc, int limit) { - - if (proc.isLocalDispatch) { - return getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.memoryReserved, - proc.gpuReserved, - job.getJobId(), - limit); - } - else { - return getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_JOB_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.coresReserved, - proc.memoryReserved, - (proc.gpuReserved > 0) ? 
1: 0, proc.gpuReserved, - job.getJobId(), job.getJobId(), - hostString(proc.hostName), limit); - } - } - - @Override - public List findNextDispatchFrames(JobInterface job, - DispatchHost host, int limit) { - - if (host.isLocalDispatch) { - return getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleMemory, host.idleGpu, job.getJobId(), - limit); - - } else { - return getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_JOB_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, - job.getJobId(), job.getJobId(), - hostString(host.getName()), limit); - } - } - - - @Override - public List findNextDispatchFrames(LayerInterface layer, - VirtualProc proc, int limit) { - - if (proc.isLocalDispatch) { - return getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.memoryReserved, proc.gpuReserved, - layer.getLayerId(), - limit); - } - else { - return getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.coresReserved, proc.memoryReserved, - proc.gpuReserved, - layer.getLayerId(), layer.getLayerId(), - hostString(proc.hostName), limit); - } - } - - @Override - public List findNextDispatchFrames(LayerInterface layer, - DispatchHost host, int limit) { - - if (host.isLocalDispatch) { - return getJdbcTemplate().query( - FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleMemory, host.idleGpu, layer.getLayerId(), - limit); - - } else { - return getJdbcTemplate().query( - FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST, - FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - host.idleGpu, layer.getLayerId(), layer.getLayerId(), - hostString(host.getName()), limit); - } - } - - - @Override - public DispatchFrame 
findNextDispatchFrame(JobInterface job, VirtualProc proc) { - return findNextDispatchFrames(job, proc, 1).get(0); - } - - @Override - public DispatchFrame findNextDispatchFrame(JobInterface job, DispatchHost host) { - return findNextDispatchFrames(job, host, 1).get(0); - } - - @Override - public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { - long start = System.currentTimeMillis(); - try { - return getJdbcTemplate().queryForObject( - FIND_UNDER_PROCED_JOB_BY_FACILITY, - Integer.class, excludeJob.getShowId(), proc.getFacilityId(), - proc.os, excludeJob.getShowId(), - proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, - hostString(proc.hostName)) > 0; - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - return false; - } - finally { - logger.trace("findUnderProcedJob(Job excludeJob, VirtualProc proc) " + CueUtil.duration(start)); - } - } - - @Override - public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { - long start = System.currentTimeMillis(); - try { - return getJdbcTemplate().queryForObject( - HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS, - Boolean.class, baseJob.priority, proc.getFacilityId(), - proc.os, proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, - hostString(proc.hostName)); - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - return false; - } - finally { - logger.trace("higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) " + CueUtil.duration(start)); - } - } - - @Override - public Set findDispatchJobs(DispatchHost host, - ShowInterface show, int numJobs) { - LinkedHashSet result = new LinkedHashSet(numJobs); - - result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_SHOW, - PKJOB_MAPPER, - show.getShowId(), host.getFacilityId(), host.os, - host.idleCores, host.idleMemory, - threadMode(host.threadMode), - (host.idleGpu > 0) ? 
1: 0, host.idleGpu, - hostString(host.getName()), numJobs * 10)); - - return result; - } - - @Override - public Set findLocalDispatchJobs(DispatchHost host) { - LinkedHashSet result = new LinkedHashSet(5); - result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_LOCAL, - PKJOB_MAPPER, - host.getHostId(), host.getFacilityId(), - host.os, host.getHostId(), host.getFacilityId(), host.os)); - - return result; - } - - private static final String hostString(String name) { - return "str_name='" + name + "'"; - } -} - - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FacilityDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FacilityDaoJdbc.java deleted file mode 100644 index 1f77d6e3c..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FacilityDaoJdbc.java +++ /dev/null @@ -1,88 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.FacilityEntity; -import com.imageworks.spcue.FacilityInterface; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.util.SqlUtil; - -public class FacilityDaoJdbc extends JdbcDaoSupport implements FacilityDao { - - public static final RowMapper FACILITY_MAPPER = new RowMapper() { - public FacilityInterface mapRow(ResultSet rs, int rowNum) throws SQLException { - FacilityEntity facility = new FacilityEntity(); - facility.id = rs.getString("pk_facility"); - facility.name = rs.getString("str_name"); - return facility; - } - }; - - public FacilityInterface getDefaultFacility() { - return getJdbcTemplate().queryForObject( - "SELECT pk_facility,str_name FROM facility WHERE b_default=1 AND ROWNUM < 2", - FACILITY_MAPPER); - } - - public FacilityInterface getFacility(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_facility, str_name FROM facility WHERE pk_facility=? 
" + - "OR str_name=?", FACILITY_MAPPER, id, id); - } - - public boolean facilityExists(String name) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM facility WHERE str_name=?", - Integer.class, name) > 0; - - } - - public FacilityInterface insertFacility(FacilityEntity facility) { - facility.id = SqlUtil.genKeyRandom(); - - getJdbcTemplate().update( - "INSERT INTO facility (pk_facility, str_name) VALUES (?,?)", - facility.getId(), facility.getName()); - - return facility; - } - - @Override - public int deleteFacility(FacilityInterface facility) { - return getJdbcTemplate().update( - "DELETE FROM facility WHERE pk_facility = ?", - facility.getFacilityId()); - } - - @Override - public int updateFacilityName(FacilityInterface facility, String name) { - return getJdbcTemplate().update( - "UPDATE facility SET str_name=? WHERE pk_facility = ?", - name, facility.getFacilityId()); - } - -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FilterDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FilterDaoJdbc.java deleted file mode 100644 index 3f7d99733..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FilterDaoJdbc.java +++ /dev/null @@ -1,199 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.CallableStatement; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import org.springframework.jdbc.core.CallableStatementCreator; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.SqlParameter; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.FilterEntity; -import com.imageworks.spcue.FilterInterface; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.FilterDao; -import com.imageworks.spcue.grpc.filter.FilterType; -import com.imageworks.spcue.util.SqlUtil; - -/** - * A DAO class for loading Filters, Actions, and Matchers. Part of the - * job filtering system. - * - * @category DAO - */ -public class FilterDaoJdbc extends JdbcDaoSupport implements FilterDao { - - private static final String GET_FILTER = - "SELECT " + - "filter.* " + - "FROM "+ - "filter "; - - private static final String GET_ACTIVE_FILTERS = - "SELECT " + - "filter.*" + - "FROM " + - "filter " + - "WHERE " + - "b_enabled = 1 "+ - "AND " + - "pk_show=? " + - "ORDER BY " + - "f_order ASC"; - - private static final String GET_FILTERS = - "SELECT " + - "filter.*" + - "FROM " + - "filter " + - "WHERE " + - "pk_show=? 
" + - "ORDER BY " + - "f_order ASC"; - - public static final RowMapper FILTER_DETAIL_MAPPER = new RowMapper() { - public FilterEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - FilterEntity d = new FilterEntity(); - d.type = FilterType.valueOf(rs.getString("str_type")); - d.id = rs.getString("pk_filter"); - d.name = rs.getString("str_name"); - d.showId = rs.getString("pk_show"); - d.enabled = rs.getBoolean("b_enabled"); - d.order = rs.getFloat("f_order"); - return d; - } - }; - - public List getActiveFilters(ShowInterface show) { - return getJdbcTemplate().query( - GET_ACTIVE_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); - } - - public List getFilters(ShowInterface show) { - return getJdbcTemplate().query( - GET_FILTERS, FILTER_DETAIL_MAPPER, show.getShowId()); - } - - public void deleteFilter(FilterInterface f) { - getJdbcTemplate().update( - "DELETE FROM action WHERE pk_filter=?",f.getFilterId()); - getJdbcTemplate().update( - "DELETE FROM matcher WHERE pk_filter=?",f.getFilterId()); - getJdbcTemplate().update( - "DELETE FROM filter WHERE pk_filter=?",f.getFilterId()); - reorderFilters(f); - } - - private static final String INSERT_FILTER = - "INSERT INTO " + - "filter "+ - "(" + - "pk_filter," + - "pk_show,"+ - "str_name,"+ - "str_type,"+ - "f_order "+ - ") VALUES (?,?,?,?,(SELECT COALESCE(MAX(f_order)+1,1.0) FROM filter WHERE pk_show=?))"; - - public void insertFilter(FilterEntity f) { - f.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_FILTER, - f.id, f.getShowId(),f.name, f.type.toString(), f.getShowId()); - reorderFilters(f); - } - - public void updateSetFilterEnabled(FilterInterface f, boolean enabled) { - getJdbcTemplate().update( - "UPDATE filter SET b_enabled=? WHERE pk_filter=?", - enabled, f.getFilterId()); - } - - public void updateSetFilterName(FilterInterface f, String name) { - getJdbcTemplate().update( - "UPDATE filter SET str_name=? 
WHERE pk_filter=?", - name, f.getFilterId()); - } - - public void updateSetFilterOrder(FilterInterface f, double order) { - getJdbcTemplate().update( - "UPDATE filter SET f_order=? - 0.1 WHERE pk_filter=?", - order, f.getFilterId()); - reorderFilters(f); - } - - public void lowerFilterOrder(FilterInterface f, int by) { - double lower_by = by + 0.1; - getJdbcTemplate().update( - "UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", - lower_by, f.getFilterId()); - reorderFilters(f); - } - - public void raiseFilterOrder(FilterInterface f, int by) { - double raise_by = (by * -1) - 0.1; - getJdbcTemplate().update( - "UPDATE filter SET f_order=f_order + ? WHERE pk_filter=?", - raise_by, f.getFilterId()); - reorderFilters(f); - } - - public void updateSetFilterType(FilterInterface f, FilterType type) { - getJdbcTemplate().update( - "UPDATE filter SET str_type=? WHERE pk_filter=?", - type.toString(), f.getFilterId()); - } - - public void reorderFilters(final ShowInterface s) { - getJdbcTemplate().update("LOCK TABLE filter IN SHARE MODE"); - getJdbcTemplate().call(new CallableStatementCreator() { - - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call reorder_filters(?) }"); - c.setString(1, s.getShowId()); - return c; - } - }, new ArrayList()); - } - - public FilterEntity findFilter(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " WHERE pk_show=? 
AND str_name=?", - FILTER_DETAIL_MAPPER, show.getShowId(), name); - } - - public FilterEntity getFilter(String id) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " WHERE pk_filter=?", - FILTER_DETAIL_MAPPER, id); - } - - public FilterEntity getFilter(FilterInterface filter) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " WHERE pk_filter=?", - FILTER_DETAIL_MAPPER, filter.getFilterId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FrameDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FrameDaoJdbc.java deleted file mode 100644 index fd3a421d1..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/FrameDaoJdbc.java +++ /dev/null @@ -1,1104 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.EnumSet; -import java.util.List; -import java.sql.Timestamp; -import java.util.Optional; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.DispatchFrame; -import com.imageworks.spcue.FrameDetail; -import com.imageworks.spcue.FrameEntity; -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LightweightDependency; -import com.imageworks.spcue.ResourceUsage; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.dao.FrameDao; -import com.imageworks.spcue.dao.criteria.FrameSearchInterface; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.dispatcher.FrameReservationException; -import com.imageworks.spcue.grpc.depend.DependType; -import com.imageworks.spcue.grpc.job.CheckpointState; -import com.imageworks.spcue.grpc.job.FrameExitStatus; -import com.imageworks.spcue.grpc.job.FrameState; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.grpc.job.LayerType; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.FrameSet; -import com.imageworks.spcue.util.SqlUtil; - -public class FrameDaoJdbc extends JdbcDaoSupport implements FrameDao { - - private static final String UPDATE_FRAME_STOPPED_NORSS = - "UPDATE "+ - "frame "+ - "SET " + - "str_state=?, "+ - "int_exit_status = ?, " + - "ts_stopped = systimestamp, " + - "ts_updated = systimestamp, " + - "int_version = int_version + 1, " + - "int_total_past_core_time = int_total_past_core_time + " + - "round(INTERVAL_TO_SECONDS(systimestamp - ts_started) * int_cores / 100) " + - "WHERE " + - "frame.pk_frame = ? 
" + - "AND " + - "frame.str_state = ? " + - "AND " + - "frame.int_version = ? "; - - @Override - public boolean updateFrameStopped(FrameInterface frame, FrameState state, - int exitStatus) { - return getJdbcTemplate().update(UPDATE_FRAME_STOPPED_NORSS, - state.toString(), exitStatus, frame.getFrameId(), - FrameState.RUNNING.toString(), frame.getVersion()) == 1; - } - - private static final String UPDATE_FRAME_STOPPED = - "UPDATE "+ - "frame "+ - "SET " + - "str_state=?, "+ - "int_exit_status = ?, " + - "ts_stopped = systimestamp + interval '1' second, " + - "ts_updated = systimestamp, " + - "int_mem_max_used = ?, " + - "int_version = int_version + 1, " + - "int_total_past_core_time = int_total_past_core_time + " + - "round(INTERVAL_TO_SECONDS(systimestamp + interval '1' second - ts_started) * int_cores / 100) " + - "WHERE " + - "frame.pk_frame = ? " + - "AND " + - "frame.str_state = ? " + - "AND " + - "frame.int_version = ? "; - - @Override - public boolean updateFrameStopped(FrameInterface frame, FrameState state, - int exitStatus, long maxRss) { - - - return getJdbcTemplate().update(UPDATE_FRAME_STOPPED, - state.toString(), exitStatus, maxRss, - frame.getFrameId(), FrameState.RUNNING.toString(), - frame.getVersion()) == 1; - } - - private static final String UPDATE_FRAME_CLEARED = - "UPDATE "+ - "frame "+ - "SET " + - "str_state = ?, "+ - "int_exit_status = ?, " + - "ts_stopped = systimestamp, " + - "ts_updated = systimestamp, " + - "int_version = int_version + 1 " + - "WHERE " + - "frame.pk_frame = ? 
" + - "AND " + - "frame.pk_frame NOT IN " + - "(SELECT proc.pk_frame FROM " + - "proc WHERE proc.pk_frame=?)"; - - @Override - public boolean updateFrameCleared(FrameInterface frame) { - - int result = getJdbcTemplate().update( - UPDATE_FRAME_CLEARED, - FrameState.WAITING.toString(), - Dispatcher.EXIT_STATUS_FRAME_CLEARED, - frame.getFrameId(), - frame.getFrameId()); - - return result > 0; - } - - private static final String UPDATE_FRAME_STARTED = - "UPDATE "+ - "frame "+ - "SET " + - "str_state = ?,"+ - "str_host=?, " + - "int_cores=?, "+ - "int_mem_reserved = ?, " + - "int_gpu_reserved = ?, " + - "ts_updated = systimestamp,"+ - "ts_started = systimestamp,"+ - "ts_stopped = null, "+ - "int_version = int_version + 1 "+ - "WHERE " + - "pk_frame = ? " + - "AND " + - "str_state = ? " + - "AND " + - "int_version = ? " + - "AND " + - "frame.pk_layer IN (" + - "SELECT " + - "layer.pk_layer " + - "FROM " + - "layer " + - "LEFT JOIN layer_limit ON layer_limit.pk_layer = layer.pk_layer " + - "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + - "LEFT JOIN (" + - "SELECT " + - "limit_record.pk_limit_record, " + - "SUM(layer_stat.int_running_count) AS int_sum_running " + - "FROM " + - "layer_limit " + - "LEFT JOIN limit_record ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN layer_stat ON layer_stat.pk_layer = layer_limit.pk_layer " + - "GROUP BY limit_record.pk_limit_record) sum_running " + - "ON limit_record.pk_limit_record = sum_running.pk_limit_record " + - "WHERE " + - "sum_running.int_sum_running < limit_record.int_max_value " + - "OR sum_running.int_sum_running IS NULL " + - ")"; - - private static final String UPDATE_FRAME_RETRIES = - "UPDATE " + - "frame " + - "SET " + - "int_retries = int_retries + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_exit_status NOT IN (?,?,?) 
"; - - @Override - public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { - - lockFrameForUpdate(frame, FrameState.WAITING); - - int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, - FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, - proc.memoryReserved, proc.gpuReserved, frame.getFrameId(), - FrameState.WAITING.toString(), frame.getVersion()); - - if (result == 0) { - String error_msg = "the frame " + - frame + " was updated by another thread."; - throw new FrameReservationException(error_msg); - } - - /* - * Frames that were killed via nimby or hardware errors not attributed to - * the software do not increment the retry counter. - */ - getJdbcTemplate().update(UPDATE_FRAME_RETRIES, - frame.getFrameId(), -1, FrameExitStatus.SKIP_RETRY_VALUE, - Dispatcher.EXIT_STATUS_FRAME_CLEARED); - } - - private static final String UPDATE_FRAME_FIXED = - "UPDATE "+ - "frame "+ - "SET " + - "str_state = ?,"+ - "str_host=?, " + - "int_cores=?, "+ - "int_mem_reserved = ?, " + - "int_gpu_reserved = ?, " + - "ts_updated = systimestamp,"+ - "ts_started = systimestamp,"+ - "ts_stopped = null, "+ - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? 
" + - "AND " + - "str_state = 'RUNNING'"; - - @Override - public boolean updateFrameFixed(VirtualProc proc, FrameInterface frame) { - return getJdbcTemplate().update(UPDATE_FRAME_FIXED, - FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, - proc.memoryReserved, proc.gpuReserved, frame.getFrameId()) == 1; - } - - @Override - public DispatchFrame getDispatchFrame(String uuid) { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_FRAME, DISPATCH_FRAME_MAPPER, uuid); - } - - static final RowMapper DISPATCH_FRAME_MAPPER = new RowMapper() { - public DispatchFrame mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchFrame frame = new DispatchFrame(); - frame.id = rs.getString("pk_frame"); - frame.name = rs.getString("frame_name"); - frame.layerId = rs.getString("pk_layer"); - frame.jobId = rs.getString("pk_job"); - frame.showId = rs.getString("pk_show"); - frame.facilityId = rs.getString("pk_facility"); - frame.retries = rs.getInt("int_retries"); - frame.state = FrameState.valueOf(rs.getString("frame_state")); - frame.command = rs.getString("str_cmd"); - frame.jobName = rs.getString("job_name"); - frame.layerName = rs.getString("layer_name"); - frame.chunkSize = rs.getInt("int_chunk_size"); - frame.range = rs.getString("str_range"); - frame.logDir = rs.getString("str_log_dir"); - frame.shot = rs.getString("str_shot"); - frame.show = rs.getString("show_name"); - frame.owner = rs.getString("str_user"); - int uid = rs.getInt("int_uid"); - frame.uid = rs.wasNull() ? 
Optional.empty() : Optional.of(uid); - frame.state = FrameState.valueOf(rs.getString("frame_state")); - frame.minCores = rs.getInt("int_cores_min"); - frame.maxCores = rs.getInt("int_cores_max"); - frame.threadable = rs.getBoolean("b_threadable"); - frame.minMemory = rs.getLong("int_mem_min"); - frame.minGpu = rs.getLong("int_gpu_min"); - frame.version = rs.getInt("int_version"); - frame.services = rs.getString("str_services"); - return frame; - } - }; - - private static final String GET_DISPATCH_FRAME = - "SELECT " + - "show.str_name AS show_name, "+ - "job.str_name AS job_name, " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_user,"+ - "job.int_uid,"+ - "job.str_log_dir,"+ - "frame.str_name AS frame_name, "+ - "frame.str_state AS frame_state, "+ - "frame.pk_frame, "+ - "frame.pk_layer, "+ - "frame.int_retries, "+ - "frame.int_version, " + - "layer.str_name AS layer_name, " + - "layer.str_type AS layer_type, "+ - "layer.str_cmd, "+ - "layer.int_cores_min,"+ - "layer.int_cores_max,"+ - "layer.b_threadable,"+ - "layer.int_mem_min, "+ - "layer.int_gpu_min, "+ - "layer.str_range, "+ - "layer.int_chunk_size, " + - "layer.str_services " + - "FROM " + - "layer, " + - "job, "+ - "show, " + - "frame LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + - "WHERE " + - "job.pk_show = show.pk_show "+ - "AND " + - "frame.pk_job = job.pk_job " + - "AND " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_frame = ?"; - - private static final String GET_FRAME_DETAIL = - "SELECT " + - "frame.*, " + - "job.pk_facility," + - "job.pk_show " + - "FROM " + - "frame," + - "layer," + - "job," + - "show " + - "WHERE "+ - "frame.pk_job = job.pk_job " + - "AND " + - "frame.pk_layer = layer.pk_layer " + - "AND "+ - "job.pk_show = show.pk_show "; - - private static final String GET_MINIMAL_FRAME = - "SELECT " + - "frame.pk_frame," + - "frame.str_name, " + - "frame.pk_job, " + - "frame.pk_layer, "+ - "frame.str_state, " 
+ - "frame.int_version, "+ - "job.pk_show, " + - "job.pk_facility "+ - "FROM " + - "frame," + - "layer," + - "job," + - "show " + - "WHERE "+ - "frame.pk_job = job.pk_job " + - "AND " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "job.pk_show = show.pk_show "; - - private static final RowMapper FRAME_MAPPER = - new RowMapper() { - public FrameEntity mapRow(ResultSet rs, - int rowNum) throws SQLException { - FrameEntity frame = new FrameEntity(); - frame.id = rs.getString("pk_frame"); - frame.name = rs.getString("str_name"); - frame.jobId = rs.getString("pk_job"); - frame.layerId = rs.getString("pk_layer"); - frame.showId = rs.getString("pk_show"); - frame.facilityId = rs.getString("pk_facility"); - frame.version = rs.getInt("int_version"); - return frame; - } - }; - - private static final RowMapper FRAME_DETAIL_MAPPER = new RowMapper() { - public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameDetail frame = new FrameDetail(); - frame.id = rs.getString("pk_frame"); - frame.dependCount = rs.getInt("int_depend_count"); - frame.exitStatus = rs.getInt("int_exit_status"); - frame.jobId = rs.getString("pk_job"); - frame.layerId = rs.getString("pk_layer"); - frame.showId = rs.getString("pk_show"); - frame.maxRss = rs.getInt("int_mem_max_used"); - frame.name = rs.getString("str_name"); - frame.number = rs.getInt("int_number"); - frame.dispatchOrder = rs.getInt("int_dispatch_order"); - frame.retryCount = rs.getInt("int_retries"); - frame.dateStarted = rs.getTimestamp("ts_started"); - frame.dateStopped = rs.getTimestamp("ts_stopped"); - frame.dateUpdated = rs.getTimestamp("ts_updated"); - frame.version = rs.getInt("int_version"); - - if (rs.getString("str_host") != null) { - frame.lastResource = String.format("%s/%d",rs.getString("str_host"),rs.getInt("int_cores")); - } - else { - frame.lastResource = ""; - } - frame.state = FrameState.valueOf(rs.getString("str_state")); - - return frame; - } - }; - - public static final String 
FIND_ORPHANED_FRAMES = - "SELECT " + - "frame.pk_frame, " + - "frame.pk_layer, " + - "frame.str_name, " + - "frame.int_version, " + - "job.pk_job, " + - "job.pk_show, " + - "job.pk_facility " + - "FROM " + - "frame, " + - "job " + - "WHERE " + - "job.pk_job = frame.pk_job " + - "AND " + - "frame.str_state='RUNNING' " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + - "AND " + - "systimestamp - frame.ts_updated > interval '300' second"; - - @Override - public List getOrphanedFrames() { - return getJdbcTemplate().query(FIND_ORPHANED_FRAMES, - FRAME_MAPPER); - } - - private static final String IS_ORPHAN = - "SELECT " + - "COUNT(1) " + - "FROM " + - "frame " + - "WHERE " + - "frame.pk_frame = ? " + - "AND " + - "frame.str_state = 'RUNNING' " + - "AND " + - "(SELECT COUNT(1) FROM proc WHERE proc.pk_frame = frame.pk_frame) = 0 " + - "AND " + - "systimestamp - frame.ts_updated > interval '300' second"; - - @Override - public boolean isOrphan(FrameInterface frame) { - return getJdbcTemplate().queryForObject(IS_ORPHAN, Integer.class, - frame.getFrameId()) == 1; - } - - private static final String INSERT_FRAME = - "INSERT INTO " + - "frame " + - "("+ - "pk_frame, " + - "pk_layer, " + - "pk_job, " + - "str_name, " + - "str_state, " + - "int_number, " + - "int_dispatch_order, " + - "int_layer_order, "+ - "ts_updated "+ - ") " + - "VALUES (?,?,?,?,?,?,?,?,systimestamp)"; - - @Override - public void insertFrames(LayerDetail layer, List frames) { - - int count = 0; - for (int frame: frames) { - getJdbcTemplate().update(INSERT_FRAME, - SqlUtil.genKeyRandom(), - layer.getLayerId(), - layer.getJobId(), - CueUtil.buildFrameName(layer, frame), - FrameState.SETUP.toString(), - frame, - count, - layer.dispatchOrder); - count++; - } - } - - @Override - public List getDependentFrames(LightweightDependency depend) { - - /* - * Compound depends are handled in the DependManager. 
- */ - - String key = null; - StringBuilder sb = new StringBuilder(4096); - sb.append(GET_MINIMAL_FRAME); - sb.append(" AND frame.int_depend_count > 0 "); - - if (EnumSet.of( - DependType.JOB_ON_JOB, - DependType.JOB_ON_LAYER, - DependType.JOB_ON_FRAME).contains(depend.type)) { - sb.append("AND job.pk_job = ?"); - key = depend.dependErJobId; - } - else if (EnumSet.of( - DependType.LAYER_ON_FRAME, - DependType.LAYER_ON_LAYER, - DependType.LAYER_ON_JOB).contains(depend.type)) { - sb.append("AND layer.pk_layer = ?"); - key = depend.dependErLayerId; - } - else if (EnumSet.of( - DependType.FRAME_ON_JOB, - DependType.FRAME_ON_LAYER, - DependType.FRAME_ON_FRAME).contains(depend.type)) { - sb.append("AND frame.pk_frame = ?"); - key = depend.dependErFrameId; - } - else { - return new ArrayList(1); - } - - return getJdbcTemplate().query( - sb.toString(), FRAME_MAPPER, - new Object[] { key }); - } - - @Override - public FrameInterface findFrame(LayerInterface layer, int number) { - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.pk_layer=? AND int_number=?", - FRAME_MAPPER, layer.getLayerId(), number); - } - - @Override - public FrameDetail getFrameDetail(FrameInterface frame) { - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND pk_frame=?", - FRAME_DETAIL_MAPPER, frame.getFrameId()); - } - - @Override - public FrameDetail getFrameDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND pk_frame=?", - FRAME_DETAIL_MAPPER, id); - } - - @Override - public FrameDetail findFrameDetail(JobInterface job, String name) { - //Uses C_FRAME_STR_NAME_UNQ - return getJdbcTemplate().queryForObject( - GET_FRAME_DETAIL + " AND frame.str_name=? 
AND frame.pk_job=?", - FRAME_DETAIL_MAPPER, name, job.getJobId()); - } - - @Override - public List findFrameDetails(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_FRAME_DETAIL), - FRAME_DETAIL_MAPPER, r.getValuesArray()); - } - - @Override - public List findFrames(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_MINIMAL_FRAME), - FRAME_MAPPER, r.getValuesArray()); - } - - private static final String FIND_LONGEST_FRAME = - "SELECT " + - "pk_frame " + - "FROM (" + - "SELECT " + - "pk_frame,"+ - "ts_stopped - ts_started AS duration " + - "FROM " + - "frame, " + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_job = ? "+ - "AND " + - "str_state=? "+ - "AND " + - "layer.str_type=? " + - "ORDER BY "+ - "duration DESC "+ - ") WHERE ROWNUM = 1"; - - @Override - public FrameDetail findLongestFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_LONGEST_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString(), LayerType.RENDER.toString()); - return getFrameDetail(pk_frame); - } - - private static final String FIND_SHORTEST_FRAME = - "SELECT " + - "pk_frame " + - "FROM (" + - "SELECT " + - "pk_frame,"+ - "ts_stopped - ts_started AS duration " + - "FROM " + - "frame, " + - "layer " + - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_job = ? "+ - "AND " + - "frame.str_state=? "+ - "AND " + - "layer.str_type=? 
" + - "ORDER BY "+ - "duration ASC "+ - ") WHERE ROWNUM = 1"; - - @Override - public FrameDetail findShortestFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_SHORTEST_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString(),LayerType.RENDER.toString()); - return getFrameDetail(pk_frame); - } - - @Override - public FrameInterface getFrame(String id) { - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.pk_frame=?", - FRAME_MAPPER, id); - } - - @Override - public FrameInterface findFrame(JobInterface job, String name) { - //Uses C_FRAME_STR_NAME_UNQ - return getJdbcTemplate().queryForObject( - GET_MINIMAL_FRAME + " AND frame.str_name=? AND frame.pk_job=?", - FRAME_MAPPER, name, job.getJobId()); - } - - @Override - public void checkRetries(FrameInterface frame) { - int max_retries = getJdbcTemplate().queryForObject( - "SELECT int_max_retries FROM job WHERE pk_job=?", Integer.class, - frame.getJobId()); - - if (getJdbcTemplate().queryForObject( - "SELECT int_retries FROM frame WHERE pk_frame=?", Integer.class, - frame.getFrameId()) >= max_retries) { - getJdbcTemplate().update( - "UPDATE frame SET str_state=? WHERE pk_frame=?", - FrameState.DEAD.toString(), frame.getFrameId()); - } - } - - public static final String GET_FRAME_ID = - "SELECT " + - "frame.pk_frame "+ - "FROM " + - "frame,"+ - "layer,"+ - "job "+ - "WHERE " + - "frame.pk_layer = layer.pk_layer " + - "AND " + - "frame.pk_job = job.pk_job "; - - - private static final String UPDATE_FRAME_STATE = - "UPDATE " + - "frame "+ - "SET " + - "str_state=?, " + - "ts_updated = systimestamp, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_version = ? 
"; - - @Override - public boolean updateFrameState(FrameInterface frame, FrameState state) { - if (getJdbcTemplate().update(UPDATE_FRAME_STATE, - state.toString(), - frame.getFrameId(), - frame.getVersion()) == 1) { - logger.info("The frame " + frame + " state changed to " + - state.toString()); - return true; - } - logger.info("Failed to change the frame " + frame + " state to " + - state.toString()); - return false; - } - - private static final String MARK_AS_WAITING = - "UPDATE " + - "frame "+ - "SET " + - "str_state=?, " + - "ts_updated = systimestamp, " + - "int_depend_count = 0, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_version = ? " + - "AND " + - "str_state = ? "; - - @Override - public void markFrameAsWaiting(FrameInterface frame) { - getJdbcTemplate().update( - MARK_AS_WAITING, - FrameState.WAITING.toString(), - frame.getFrameId(), - frame.getVersion(), - FrameState.DEPEND.toString()); - } - - private static final String MARK_AS_DEPEND = - "UPDATE " + - "frame "+ - "SET " + - "str_state=?, " + - "int_depend_count = ?, "+ - "ts_updated = systimestamp, " + - "int_version = int_version + 1 " + - "WHERE " + - "pk_frame = ? " + - "AND " + - "int_version = ? " + - "AND " + - "str_state = ? "; - - private static final String GET_FRAME_DEPEND_COUNT = - "SELECT " + - "COUNT(1) " + - "FROM " + - "depend " + - "WHERE " + - " ( " + - "(pk_job_depend_er = ? AND str_type LIKE 'JOB#_ON%' ESCAPE '#') " + - "OR " + - "pk_layer_depend_er=? " + - "OR " + - "pk_frame_depend_er=? " + - " ) " + - "AND " + - "depend.b_active = 1 " + - "AND " + - "depend.b_composite = 0 "; - - public void markFrameAsDepend(FrameInterface frame) { - // We need to full depend count in this case to reset the - // frames's depend count accurately. 
- int depend_count = getJdbcTemplate().queryForObject( - GET_FRAME_DEPEND_COUNT, Integer.class, - frame.getJobId(),frame.getLayerId(),frame.getFrameId()); - - if (depend_count > 0) { - getJdbcTemplate().update( - MARK_AS_DEPEND, - FrameState.DEPEND.toString(), - depend_count, - frame.getFrameId(), - frame.getVersion(), - FrameState.WAITING.toString()); - } - } - - private static final String FIND_HIGHEST_MEM_FRAME = - "SELECT " + - "pk_frame " + - "FROM (" + - "SELECT " + - "pk_frame " + - "FROM " + - "frame " + - "WHERE " + - "pk_job = ? "+ - "AND " + - "str_state=? "+ - "ORDER BY "+ - "int_mem_max_used DESC "+ - ") WHERE ROWNUM = 1"; - - @Override - public FrameDetail findHighestMemoryFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_HIGHEST_MEM_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString()); - return getFrameDetail(pk_frame); - } - - private static final String FIND_LOWEST_MEM_FRAME = - "SELECT " + - "pk_frame " + - "FROM (" + - "SELECT " + - "pk_frame " + - "FROM " + - "frame " + - "WHERE " + - "pk_job = ? "+ - "AND " + - "str_state=? "+ - "ORDER BY "+ - "int_mem_max_used ASC "+ - ") WHERE ROWNUM = 1"; - - @Override - public FrameDetail findLowestMemoryFrame(JobInterface job) { - String pk_frame = getJdbcTemplate().queryForObject( - FIND_LOWEST_MEM_FRAME, String.class, job.getJobId(), - FrameState.SUCCEEDED.toString()); - return getFrameDetail(pk_frame); - } - - @Override - public void reorderFramesFirst(LayerInterface layer, FrameSet frameSet) { - int start; - int size = frameSet.size(); - int min = getJdbcTemplate().queryForObject( - "SELECT MIN(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, - layer.getLayerId()); - - start = min - size; - for (int frameIdx=0; frameIdx < size; frameIdx++) { - getJdbcTemplate().update( - "UPDATE frame SET int_dispatch_order=? WHERE str_name=? 
AND pk_job=?", - start, CueUtil.buildFrameName(layer, frameSet.get(frameIdx)), layer.getJobId()); - - logger.info("reordering " + CueUtil.buildFrameName(layer, frameSet.get(frameIdx)) + " to " + - start); - start++; - } - } - - @Override - public void reorderFramesLast(LayerInterface layer, FrameSet frameSet) { - int start; - int size = frameSet.size(); - List frames = new ArrayList(size); - int max = getJdbcTemplate().queryForObject( - "SELECT MAX(int_dispatch_order) FROM frame WHERE pk_layer=?", Integer.class, - layer.getLayerId()); - - start = max + 1; - for (int i=0; i <= size; i++) { - frames.add(new Object[] { start + i, CueUtil.buildFrameName(layer, i), layer.getJobId() }); - } - - if (frames.size() > 0) { - getJdbcTemplate().batchUpdate( - "UPDATE frame SET int_dispatch_order=? WHERE str_name=? AND pk_job=?", frames); - } - } - - @Override - public void reorderLayerReverse(LayerInterface layer, FrameSet frameSet) { - - int size = frameSet.size(); - List frames = new ArrayList(size); - - for (int i=0; i< size; i++) { - if (i >= size - i -1) { break; } - try { - int a = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", Integer.class, - CueUtil.buildFrameName(layer,frameSet.get(i)), layer.getJobId(), layer.getLayerId()); - - int b = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", Integer.class, - CueUtil.buildFrameName(layer,frameSet.get(size-i-1)), layer.getJobId(), layer.getLayerId()); - - frames.add(new Object[] { a, layer.getLayerId(), CueUtil.buildFrameName(layer,frameSet.get(size-i-1)) }); - frames.add(new Object[] { b, layer.getLayerId(), CueUtil.buildFrameName(layer,frameSet.get(i)) }); - - } catch (Exception e) { - logger.info("frame not found while attempting to reverse layer, skipping"); - } - } - - if (frames.size() > 0) { - getJdbcTemplate().batchUpdate( - "UPDATE frame SET int_dispatch_order=? 
WHERE pk_layer=? and str_name=?", frames); - } - } - - @Override - public void staggerLayer(LayerInterface layer, String frameRange, int stagger) { - - /* - * If the layer is only 1 frame we don't stagger it. - */ - if (getJdbcTemplate().queryForObject( - "SELECT int_total_count FROM layer_stat WHERE pk_layer=?", Integer.class, - layer.getLayerId()) == 1) { - return; - } - - logger.info("staggering: " + layer.getName() + " range: " + frameRange - + " on " + stagger); - - FrameSet frameSet = null; - FrameSet range = null; - - try { - frameSet = new FrameSet(frameRange + ":" + stagger); - range = new FrameSet(frameRange); - } catch (Exception e) { - logger.warn("failed to stagger layer: " + layer.getName() + ", " + e); - return; - } - - /* - * Find the dispatch order of the first frame we're working with and base - * our other staggers of this value. - */ - int first = getJdbcTemplate().queryForObject( - "SELECT int_dispatch_order FROM frame WHERE str_name=? AND pk_job=? AND pk_layer=?", Integer.class, - CueUtil.buildFrameName(layer, range.get(0)), layer.getJobId(), layer.getLayerId()); - - int size = range.size(); - for (int i=0; i < size; i++) { - int frame = range.get(i); - int newDispatchOrder = frameSet.index(frame) + first; - - getJdbcTemplate().update( - "UPDATE frame SET int_dispatch_order=? WHERE pk_layer=? 
and str_name=?", - newDispatchOrder, layer.getLayerId(), CueUtil.buildFrameName(layer, frame)); - } - } - - @Override - public boolean isFrameComplete(FrameInterface f) { - - String state = getJdbcTemplate().queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, f.getFrameId()); - - if (state.equals(FrameState.SUCCEEDED.toString()) || - state.equals(FrameState.EATEN.toString())) { - return true; - } - - return false; - } - - private static final - RowMapper RESOURCE_USAGE_MAPPER = - new RowMapper() { - public ResourceUsage mapRow(ResultSet rs, - int rowNum) throws SQLException { - return new ResourceUsage( - rs.getLong("int_clock_time"), - rs.getInt("int_cores")); - } - - }; - - @Override - public ResourceUsage getResourceUsage(FrameInterface f) { - /* - * Using systimestamp = ts_started here because ts_stopped is not set. - * Stopping the frame allows it to be dispatched again, which could - * blow away the ts_stopped time. - */ - return getJdbcTemplate().queryForObject( - "SELECT " + - "NVL(interval_to_seconds(systimestamp - ts_started),1) " + - "AS int_clock_time, " + - "NVL(int_cores,100) AS int_cores " + - "FROM " + - "frame " + - "WHERE " + - "pk_frame = ?", RESOURCE_USAGE_MAPPER, f.getFrameId()); - } - - private static final String UPDATE_FRAME_IO_USAGE = - "UPDATE " + - "frame " + - "SET " + - "ts_updated = current_timestamp," + - "ts_llu = ? " + - "WHERE " + - "pk_frame = ? "; - - @Override - public void updateFrameUsage(FrameInterface f, long lluTime) { - getJdbcTemplate().update(UPDATE_FRAME_IO_USAGE, - new Timestamp(lluTime * 1000l), f.getFrameId()); - } - - private static final String UPDATE_FRAME_MEMORY_USAGE = - "UPDATE " + - "frame " + - "SET " + - "ts_updated = systimestamp," + - "int_mem_max_used = ?," + - "int_mem_used = ? " + - "WHERE " + - "pk_frame = ? 
"; - - @Override - public void updateFrameMemoryUsage(FrameInterface f, long maxRss, long rss) { - getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE, - maxRss, rss, f.getFrameId()); - } - - /** - * Attempt a SELECT FOR UPDATE NOWAIT on the frame record. If - * the frame is being modified by another transaction or if - * the version has been incremented a FrameReservationException - * is thrown. - * - * @param frame - * @param state - */ - @Override - public void lockFrameForUpdate(FrameInterface frame, FrameState state) { - try { - getJdbcTemplate().queryForObject( - "SELECT pk_frame FROM frame WHERE pk_frame=? AND " + - "str_state=? AND int_version =? FOR UPDATE NOWAIT", - String.class, frame.getFrameId(), - state.toString(), frame.getVersion()); - } catch (Exception e) { - String error_msg = "the frame " + - frame + " was updated by another thread."; - throw new FrameReservationException(error_msg, e); - } - } - - @Override - public boolean updateFrameCheckpointState(FrameInterface frame, CheckpointState state) { - - logger.info("Setting checkpoint state to: " + state.toString()); - - boolean result = false; - - if (state.equals(CheckpointState.COMPLETE)) { - /* - * Only update the checkpoint state to complete if the state - * is either Copying or Enabled. - */ - result = getJdbcTemplate().update( - "UPDATE frame SET str_checkpoint_state=?, " + - "int_checkpoint_count=int_checkpoint_count + 1 WHERE " + - "pk_frame=? AND str_checkpoint_state IN (?, ?)", - CheckpointState.COMPLETE.toString(), - frame.getFrameId(), - CheckpointState.COPYING.toString(), - CheckpointState.ENABLED.toString()) == 1; - } - else { - result = getJdbcTemplate().update( - "UPDATE frame SET str_checkpoint_state=? WHERE pk_frame=?", - state.toString(), frame.getFrameId()) == 1; - } - - /* - * If the checkpoint state is complete or disabled then set the frame - * state back to waiting, if and only if the frame state is currently - * in the checkpoint state. 
- */ - if ((state.equals(CheckpointState.DISABLED)) || - state.equals(CheckpointState.COMPLETE) && result) { - getJdbcTemplate().update( - "UPDATE frame SET str_state=? WHERE pk_frame=? AND str_state=?", - FrameState.WAITING.toString(), frame.getFrameId(), - FrameState.CHECKPOINT.toString()); - } - - return result; - } - - @Override - public List getStaleCheckpoints(int cutoffTimeSec) { - return getJdbcTemplate().query( - GET_MINIMAL_FRAME + - " AND job.str_state=? " + - " AND frame.str_state=? " + - " AND systimestamp - frame.ts_stopped > interval '" + cutoffTimeSec + "' second", - FRAME_MAPPER, - JobState.PENDING.toString(), - FrameState.CHECKPOINT.toString()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/GroupDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/GroupDaoJdbc.java deleted file mode 100644 index 1656da1f5..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/GroupDaoJdbc.java +++ /dev/null @@ -1,430 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.CallableStatement; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.List; - -import org.springframework.jdbc.core.CallableStatementCreator; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.SqlParameter; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.EntityCreationError; -import com.imageworks.spcue.EntityModificationError; -import com.imageworks.spcue.EntityRemovalError; -import com.imageworks.spcue.GroupDetail; -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.GroupDao; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.SqlUtil; - -public class GroupDaoJdbc extends JdbcDaoSupport implements GroupDao { - - private static final int MAX_NESTING_LEVEL = 10; - - @Override - public String getRootGroupId(ShowInterface show) { - return getJdbcTemplate().queryForObject( - "SELECT pk_folder FROM folder WHERE pk_show=? AND pk_parent_folder IS NULL", - String.class, show.getShowId()); - } - - @Override - public void deleteGroup(GroupInterface group) { - - if (childGroupCount(group) > 0) { - throw new EntityRemovalError("failed to delete group " + group.getName() + - ", still has sub groups"); - } - - if (childJobCount(group) > 0) { - throw new EntityRemovalError("failed to delete group " + group.getName() + - ", still has sub jobs"); - } - - // reparent all jobs to root group - getJdbcTemplate().update( - "UPDATE job SET pk_folder=? 
WHERE pk_folder=?", - getRootGroupId(group), group.getId()); - - getJdbcTemplate().update( - "DELETE FROM folder WHERE pk_parent_folder IS NOT NULL AND pk_folder=?", group.getId()); - } - - public static final String INSERT_GROUP = - "INSERT INTO " + - "folder " + - "( " + - "pk_folder," + - "pk_parent_folder,"+ - "pk_show, " + - "pk_dept,"+ - "str_name " + - ") " + - "VALUES (?,?,?,?,?)"; - - @Override - public void insertGroup(GroupDetail group) { - group.id = SqlUtil.genKeyRandom(); - String parentId = group.parentId; - try { - getJdbcTemplate().update(INSERT_GROUP, - group.id, parentId, group.showId, group.deptId, group.name); - } catch (Exception e) { - throw new EntityCreationError("error creating group, " + e); - } - } - - @Override - public void insertGroup(GroupDetail group, GroupInterface parent) { - if (parent != null) { - group.parentId = parent.getGroupId(); - } - insertGroup(group); - } - - @Override - public void updateGroupParent(GroupInterface group, GroupInterface dest) { - - if (group.getGroupId().equals(dest.getGroupId())) { - throw new EntityModificationError("error moving group, " + - "cannot move group into itself"); - } - - if (!group.getShowId().equals(dest.getShowId())) { - throw new EntityModificationError("error moving group, " + - "cannot move groups between shows"); - } - - int recurse = 0; - String destParent = dest.getGroupId(); - while (true) { - destParent = getJdbcTemplate().queryForObject( - "SELECT pk_parent_folder FROM folder WHERE pk_folder=?", - String.class, destParent); - if (destParent == null) { break; } - if (destParent.equals(group.getGroupId())) { - throw new EntityModificationError("error moving group, you cannot move a group " + - "into one of its sub groups"); - } - recurse++; - if (recurse > MAX_NESTING_LEVEL) { - throw new EntityModificationError("error moving group, cannot tell " + - "if your moving a group into one of its sub groups"); - } - } - - int result = getJdbcTemplate().update( - "UPDATE folder SET 
pk_parent_folder=? WHERE pk_folder=? AND pk_parent_folder IS NOT NULL", - dest.getId(), group.getId()); - - recurseParentChange(group.getId(), dest.getId()); - if (result == 0) { - throw new EntityModificationError("error moving group, " - + group.getName() + ", the group does not exist or its the top level group"); - } - } - - @Override - public void updateName(GroupInterface group, String value) { - getJdbcTemplate().update( - "UPDATE folder SET str_name=? WHERE pk_folder=?", - value, group.getId()); - } - - @Override - public void updateDepartment(GroupInterface group, DepartmentInterface dept) { - getJdbcTemplate().update( - "UPDATE folder SET pk_dept=? WHERE pk_folder=?", - dept.getDepartmentId(), group.getId()); - } - - @Override - public void updateDefaultJobMaxCores(GroupInterface group, int value) { - if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The default max cores for a job must " + - "be greater than a single core"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE folder SET int_job_max_cores=? WHERE pk_folder=?", - value, group.getId()); - } - - @Override - public void updateDefaultJobMinCores(GroupInterface group, int value) { - if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The default min cores for a job must " + - "be greater than a single core"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE folder SET int_job_min_cores=? 
WHERE pk_folder=?", - value, group.getId()); - } - - @Override - public void updateMaxCores(GroupInterface group, int value) { - if (value < 0) { value = CueUtil.FEATURE_DISABLED; } - if (value < CueUtil.ONE_CORE && value != CueUtil.FEATURE_DISABLED) { - String msg = "The group max cores feature must " + - "be a whole core or greater, pass in: " + value; - throw new IllegalArgumentException(msg); - } - - getJdbcTemplate().update( - "UPDATE folder_resource SET int_max_cores=? WHERE pk_folder=?", - value, group.getId()); - } - - @Override - public void updateMinCores(GroupInterface group, int value) { - if (value < 0) { value = 0; } - getJdbcTemplate().update( - "UPDATE folder_resource SET int_min_cores=? WHERE pk_folder=?", - value, group.getId()); - } - - private static final String IS_OVER_MIN_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job,"+ - "folder_resource fr "+ - "WHERE " + - "job.pk_folder = fr.pk_folder " + - "AND " + - "fr.int_cores > fr.int_min_cores " + - "AND "+ - "job.pk_job = ?"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, - Integer.class, job.getJobId()) > 0; - } - - @Override - public void updateDefaultJobPriority(GroupInterface group, int value) { - if (value < 0) { value = CueUtil.FEATURE_DISABLED; } - getJdbcTemplate().update( - "UPDATE folder SET int_job_priority=? WHERE pk_folder=?", - value, group.getId()); - if (value != CueUtil.FEATURE_DISABLED) { - getJdbcTemplate().update( - "UPDATE job_resource SET int_priority=? 
WHERE pk_job IN (" + - "SELECT pk_job from job WHERE pk_folder=?)", - value, group.getId()); - } - } - - private static final String GET_GROUP_DETAIL = - "SELECT " + - "folder.pk_folder, " + - "folder.int_job_max_cores,"+ - "folder.int_job_min_cores,"+ - "folder.int_job_priority,"+ - "folder.str_name,"+ - "folder.pk_parent_folder,"+ - "folder.pk_show,"+ - "folder.pk_dept,"+ - "folder_level.int_level, " + - "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores " + - "FROM " + - "folder, "+ - "folder_level, " + - "folder_resource " + - "WHERE " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder"; - - private static final String GET_GROUP_DETAIL_BY_JOB = - "SELECT " + - "folder.pk_folder, " + - "folder.int_job_max_cores,"+ - "folder.int_job_min_cores,"+ - "folder.int_job_priority,"+ - "folder.str_name,"+ - "folder.pk_parent_folder,"+ - "folder.pk_show,"+ - "folder.pk_dept,"+ - "folder_level.int_level, " + - "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores " + - "FROM " + - "folder, "+ - "folder_level, " + - "folder_resource, " + - "job "+ - "WHERE " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "job.pk_job = ?"; - - @Override - public GroupDetail getGroupDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_GROUP_DETAIL + " AND folder.pk_folder=?", GROUP_DETAIL_MAPPER, id); - } - - @Override - public GroupDetail getGroupDetail(JobInterface job) { - return getJdbcTemplate().queryForObject(GET_GROUP_DETAIL_BY_JOB, - GROUP_DETAIL_MAPPER, job.getId()); - } - - @Override - public GroupDetail getRootGroupDetail(ShowInterface show) { - return getJdbcTemplate().queryForObject( - GET_GROUP_DETAIL + " AND folder.pk_show=? 
AND pk_parent_folder IS NULL", - GROUP_DETAIL_MAPPER, show.getShowId()); - } - - @Override - public GroupInterface getGroup(String id) { - return getJdbcTemplate().queryForObject( - "SELECT pk_show, pk_folder,str_name FROM folder WHERE pk_folder=?", - GROUP_MAPPER, id); - } - - @Override - public List getGroups(List idl) { - return getJdbcTemplate().query( - "SELECT pk_show, pk_folder, str_name FROM folder WHERE " + - SqlUtil.buildBindVariableArray("pk_folder", idl.size()), - GROUP_MAPPER, idl.toArray()); - } - - @Override - public List getChildrenRecursive(GroupInterface group) { - List groups = new ArrayList(32); - GroupInterface current = group; - for (GroupInterface g: getChildren(current)) { - current = g; - groups.add(current); - groups.addAll(getChildrenRecursive(current)); - } - return groups; - } - - @Override - public List getChildren(GroupInterface group) { - return getJdbcTemplate().query( - "SELECT pk_show, pk_folder, str_name FROM folder WHERE pk_parent_folder = ?", - GROUP_MAPPER, group.getGroupId()); - } - - private static final String IS_MANAGED = - "SELECT " + - "COUNT(1) " + - "FROM " + - "folder, " + - "point " + - "WHERE " + - "folder.pk_show = point.pk_show " + - "AND " + - "folder.pk_dept = point.pk_dept " + - "AND " + - "folder.b_exclude_managed = 0 " + - "AND " + - "point.b_managed = 1 " + - "AND " + - "folder.pk_folder = ?"; - - @Override - public boolean isManaged(GroupInterface group) { - return getJdbcTemplate().queryForObject(IS_MANAGED, - Integer.class, group.getGroupId()) > 0; - } - - public static final RowMapper GROUP_MAPPER = - new RowMapper() { - public GroupInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new GroupInterface() { - String id = rs.getString("pk_folder"); - String show = rs.getString("pk_show"); - String name = rs.getString("str_name"); - public String getGroupId() { return id; } - public String getShowId() { return show; } - public String getId() { return id; } - public String 
getName() { return name; } - }; - } - }; - - public static final RowMapper GROUP_DETAIL_MAPPER = - new RowMapper() { - public GroupDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - GroupDetail group = new GroupDetail(); - group.id = rs.getString("pk_folder"); - group.jobMaxCores = rs.getInt("int_job_max_cores"); - group.jobMinCores = rs.getInt("int_job_min_cores"); - group.jobPriority = rs.getInt("int_job_priority"); - group.name = rs.getString("str_name"); - group.parentId = rs.getString("pk_parent_folder"); - group.showId = rs.getString("pk_show"); - group.deptId = rs.getString("pk_dept"); - return group; - } - }; - - - private int childGroupCount(GroupInterface group) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_parent_folder=?", - Integer.class, group.getId()); - } - - private int childJobCount(GroupInterface group) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(*) FROM job WHERE pk_folder=? AND str_state=?", - Integer.class, group.getId(), JobState.PENDING.toString()); - } - - private void recurseParentChange(final String folderId, final String newParentId) { - getJdbcTemplate().call(new CallableStatementCreator() { - - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recurse_folder_parent_change(?,?) 
}"); - c.setString(1, folderId); - c.setString(2, newParentId); - return c; - } - }, new ArrayList()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/HistoricalDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/HistoricalDaoJdbc.java deleted file mode 100644 index ec30f7700..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/HistoricalDaoJdbc.java +++ /dev/null @@ -1,52 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.util.List; - -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.dao.HistoricalDao; -import com.imageworks.spcue.grpc.job.JobState; - -public class HistoricalDaoJdbc extends JdbcDaoSupport implements HistoricalDao { - - private static final String GET_FINISHED_JOBS = - JobDaoJdbc.GET_JOB + - "WHERE " + - "job.str_state=? 
" + - "AND " + - "systimestamp - job.ts_stopped > "; - - public List getFinishedJobs(int cutoffHours) { - String interval = "interval '" + cutoffHours + "' hour"; - return getJdbcTemplate().query(GET_FINISHED_JOBS + interval, - JobDaoJdbc.JOB_MAPPER, JobState.FINISHED.toString()); - } - - public void transferJob(JobInterface job) { - /** - * All of the historical transfer happens inside of triggers - */ - getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", job.getJobId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/HostDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/HostDaoJdbc.java deleted file mode 100644 index a6ef7a079..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/HostDaoJdbc.java +++ /dev/null @@ -1,706 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.sql.CallableStatement; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.CallableStatementCreator; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.SqlParameter; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.EntityCreationError; -import com.imageworks.spcue.HostEntity; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.Source; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.dispatcher.ResourceReservationFailureException; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.host.HostTagType; -import com.imageworks.spcue.grpc.host.LockState; -import com.imageworks.spcue.grpc.host.ThreadMode; -import com.imageworks.spcue.grpc.report.HostReport; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.SqlUtil; - - -public class HostDaoJdbc extends JdbcDaoSupport implements HostDao { - - public static final RowMapper HOST_DETAIL_MAPPER = new RowMapper() { - public HostEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - HostEntity host = new HostEntity(); - host.facilityId = rs.getString("pk_facility"); - host.allocId = rs.getString("pk_alloc"); - host.id = rs.getString("pk_host"); - host.lockState = 
LockState.valueOf(rs.getString("str_lock_state")); - host.name = rs.getString("str_name"); - host.nimbyEnabled = rs.getBoolean("b_nimby"); - host.state = HardwareState.valueOf(rs.getString("str_state")); - host.unlockAtBoot = rs.getBoolean("b_unlock_boot"); - host.cores = rs.getInt("int_cores"); - host.idleCores = rs.getInt("int_cores_idle"); - host.memory = rs.getInt("int_mem"); - host.idleMemory = rs.getInt("int_mem_idle"); - host.gpu = rs.getInt("int_gpu"); - host.idleGpu = rs.getInt("int_gpu_idle"); - host.dateBooted = rs.getDate("ts_booted"); - host.dateCreated = rs.getDate("ts_created"); - host.datePinged = rs.getDate("ts_ping"); - return host; - } - }; - - public static final RowMapper HOST_MAPPER = new RowMapper() { - public HostInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new HostInterface() { - final String id = rs.getString("pk_host"); - final String allocid = rs.getString("pk_alloc"); - final String name = rs.getString("str_name"); - final String facility = rs.getString("pk_facility"); - - public String getHostId() { return id; } - public String getAllocationId() { return allocid; } - public String getId() { return id; } - public String getName() { return name; } - public String getFacilityId() { return facility; }; - }; - } - }; - - private static final String GET_HOST_DETAIL = - "SELECT " + - "host.pk_host, " + - "host.pk_alloc,"+ - "host.str_lock_state,"+ - "host.b_nimby,"+ - "host.b_unlock_boot,"+ - "host.int_cores,"+ - "host.int_cores_idle,"+ - "host.int_mem,"+ - "host.int_mem_idle,"+ - "host.int_gpu,"+ - "host.int_gpu_idle,"+ - "host.ts_created,"+ - "host.str_name, " + - "host_stat.str_state,"+ - "host_stat.ts_ping,"+ - "host_stat.ts_booted, "+ - "alloc.pk_facility " + - "FROM " + - "host, " + - "alloc, " + - "host_stat " + - "WHERE " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc "; - - @Override - public void lockForUpdate(HostInterface host) { - try { - 
getJdbcTemplate().queryForObject( - "SELECT pk_host FROM host WHERE pk_host=? " + - "FOR UPDATE NOWAIT", - String.class, host.getHostId()); - } catch (Exception e) { - throw new ResourceReservationFailureException("unable to lock host " + - host.getName() + ", the host was locked by another thread.", e); - } - } - - @Override - public HostEntity getHostDetail(HostInterface host) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", - HOST_DETAIL_MAPPER, host.getHostId()); - } - - @Override - public HostEntity getHostDetail(String id) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.pk_host=?", - HOST_DETAIL_MAPPER, id); - } - - @Override - public HostEntity findHostDetail(String name) { - return getJdbcTemplate().queryForObject(GET_HOST_DETAIL + " AND host.str_name=?", - HOST_DETAIL_MAPPER, name); - } - - private static final String GET_HOST= - "SELECT " + - "host.pk_host, " + - "host.pk_alloc,"+ - "host.str_name, " + - "alloc.pk_facility " + - "FROM " + - "host," + - "alloc " + - "WHERE " + - "host.pk_alloc = alloc.pk_alloc " ; - - @Override - public HostInterface getHost(String id) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host=?", - HOST_MAPPER, id); - } - - @Override - public HostInterface getHost(LocalHostAssignment l) { - return getJdbcTemplate().queryForObject(GET_HOST + " AND host.pk_host = ("+ - "SELECT pk_host FROM host_local WHERE pk_host_local=?)", - HOST_MAPPER, l.getId()); - } - - @Override - public HostInterface findHost(String name) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND (host.str_name=? 
OR host.str_fqdn=?)", - HOST_MAPPER, name, name); - } - - public static final RowMapper DISPATCH_HOST_MAPPER = - new RowMapper() { - public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchHost host = new DispatchHost(); - host.id = rs.getString("pk_host"); - host.allocationId = rs.getString("pk_alloc"); - host.facilityId = rs.getString("pk_facility"); - host.name = rs.getString("str_name"); - host.lockState = LockState.valueOf(rs.getString("str_lock_state")); - host.memory = rs.getInt("int_mem"); - host.cores = rs.getInt("int_cores"); - host.gpu= rs.getInt("int_gpu"); - host.idleMemory= rs.getInt("int_mem_idle"); - host.idleCores = rs.getInt("int_cores_idle"); - host.idleGpu= rs.getInt("int_gpu_idle"); - host.isNimby = rs.getBoolean("b_nimby"); - host.threadMode = rs.getInt("int_thread_mode"); - host.tags = rs.getString("str_tags"); - host.os = rs.getString("str_os"); - host.hardwareState = - HardwareState.valueOf(rs.getString("str_state")); - return host; - } - }; - - public static final String GET_DISPATCH_HOST = - "SELECT " + - "host.pk_host,"+ - "host.pk_alloc,"+ - "host.str_name," + - "host.str_lock_state, " + - "host.int_cores, "+ - "host.int_cores_idle, " + - "host.int_mem,"+ - "host.int_mem_idle, "+ - "host.int_gpu,"+ - "host.int_gpu_idle, "+ - "host.b_nimby, "+ - "host.int_thread_mode, "+ - "host.str_tags, " + - "host_stat.str_os, " + - "host_stat.str_state, " + - "alloc.pk_facility " + - "FROM " + - "host " + - "INNER JOIN host_stat " + - "ON (host.pk_host = host_stat.pk_host) " + - "INNER JOIN alloc " + - "ON (host.pk_alloc = alloc.pk_alloc) "; - - @Override - public DispatchHost findDispatchHost(String name) { - try { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_HOST + - "WHERE (host.str_name=? 
OR host.str_fqdn=?)", - DISPATCH_HOST_MAPPER, name, name); - } catch (EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException( - "Failed to find host " + name, 1); - } - } - - @Override - public DispatchHost getDispatchHost(String id) { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_HOST + - "WHERE host.pk_host=?", - DISPATCH_HOST_MAPPER, id); - } - - private static final String[] INSERT_HOST_DETAIL = - { - "INSERT INTO " + - "host " + - "("+ - "pk_host, " + - "pk_alloc, " + - "str_name, " + - "b_nimby, " + - "str_lock_state, " + - "int_procs,"+ - "int_cores, " + - "int_cores_idle, " + - "int_mem,"+ - "int_mem_idle,"+ - "int_gpu,"+ - "int_gpu_idle,"+ - "str_fqdn, " + - "int_thread_mode "+ - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", - - "INSERT INTO " + - "host_stat " + - "("+ - "pk_host_stat," + - "pk_host,"+ - "int_mem_total, " + - "int_mem_free,"+ - "int_gpu_total, " + - "int_gpu_free,"+ - "int_swap_total, " + - "int_swap_free,"+ - "int_mcp_total, " + - "int_mcp_free,"+ - "int_load, " + - "ts_booted, " + - "str_state, " + - "str_os " + - ") "+ - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)" - - }; - - @Override - public void insertRenderHost(RenderHost host, AllocationInterface a, boolean useLongNames) { - - ThreadMode threadMode = ThreadMode.AUTO; - if (host.getNimbyEnabled()) { - threadMode = ThreadMode.ALL; - } - - long memUnits = convertMemoryUnits(host); - if (memUnits < Dispatcher.MEM_RESERVED_MIN) { - throw new EntityCreationError("could not create host " + host.getName() + ", " + - " must have at least " + Dispatcher.MEM_RESERVED_MIN + " free memory."); - } - - String fqdn; - String name = host.getName(); - try { - fqdn = InetAddress.getByName(host.getName()).getCanonicalHostName(); - // If the provided host name matches the pinged name, use the pinged name. - // Otherwise use the provided name. - // If the host lookup fails, use the provided name. - // In all cases attempt to strip off the domain when setting the name. 
- if (fqdn.equals(host.getName())) { - name = getHostNameFromFQDN(fqdn, useLongNames); - } - else { - name = getHostNameFromFQDN(host.getName(), useLongNames); - fqdn = host.getName(); - } - } catch (UnknownHostException e) { - logger.warn(e); - fqdn = host.getName(); - name = getHostNameFromFQDN(name, useLongNames); - } - - String hid = SqlUtil.genKeyRandom(); - int coreUnits = host.getNumProcs() * host.getCoresPerProc(); - String os = host.getAttributesMap().get("SP_OS"); - if (os == null) { - os = Dispatcher.OS_DEFAULT; - } - - long totalGpu; - if (host.getAttributesMap().containsKey("totalGpu")) - totalGpu = Integer.parseInt(host.getAttributesMap().get("totalGpu")); - else - totalGpu = 0; - - long freeGpu; - if (host.getAttributesMap().containsKey("freeGpu")) - freeGpu = Integer.parseInt(host.getAttributesMap().get("freeGpu")); - else - freeGpu = 0; - - getJdbcTemplate().update(INSERT_HOST_DETAIL[0], - hid, a.getAllocationId(), name, host.getNimbyEnabled(), - LockState.OPEN.toString(), host.getNumProcs(), coreUnits, coreUnits, - memUnits, memUnits, totalGpu, totalGpu, - fqdn, threadMode.getNumber()); - - getJdbcTemplate().update(INSERT_HOST_DETAIL[1], - hid, hid, host.getTotalMem(), host.getFreeMem(), - totalGpu, freeGpu, - host.getTotalSwap(), host.getFreeSwap(), - host.getTotalMcp(), host.getFreeMcp(), - host.getLoad(), new Timestamp(host.getBootTime() * 1000l), - host.getState().toString(), os); - } - - @Override - public void recalcuateTags(final String id) { - getJdbcTemplate().call(new CallableStatementCreator() { - public CallableStatement createCallableStatement(Connection con) throws SQLException { - CallableStatement c = con.prepareCall("{ call recalculate_tags(?) 
}"); - c.setString(1, id); - return c; - } - }, new ArrayList()); - } - - private static final String UPDATE_RENDER_HOST = - "UPDATE " + - "host_stat " + - "SET " + - "int_mem_total=?, " + - "int_mem_free=?, " + - "int_swap_total=?, " + - "int_swap_free=?, "+ - "int_mcp_total=?, " + - "int_mcp_free=?, " + - "int_gpu_total=?, " + - "int_gpu_free=?, " + - "int_load=?," + - "ts_booted = ?, " + - "ts_ping = systimestamp, "+ - "str_os=? " + - "WHERE " + - "pk_host=?"; - - @Override - public void updateHostStats(HostInterface host, - long totalMemory, long freeMemory, - long totalSwap, long freeSwap, - long totalMcp, long freeMcp, - long totalGpu, long freeGpu, - int load, Timestamp bootTime, - String os) { - - if (os == null) { - os = Dispatcher.OS_DEFAULT; - } - - getJdbcTemplate().update(UPDATE_RENDER_HOST, - totalMemory, freeMemory, totalSwap, - freeSwap, totalMcp, freeMcp, totalGpu, freeGpu, load, - bootTime, os, host.getHostId()); - } - - @Override - public boolean hostExists(String hostname) { - try { - return getJdbcTemplate().queryForObject( - "SELECT 1 FROM host WHERE (str_fqdn=? OR str_name=?)", - Integer.class, hostname, hostname) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - @Override - public void updateHostResources(HostInterface host, HostReport report) { - - long memory = convertMemoryUnits(report.getHost()); - int cores = report.getHost().getNumProcs() * report.getHost().getCoresPerProc(); - - long totalGpu; - if (report.getHost().getAttributesMap().containsKey("totalGpu")) - totalGpu = Integer.parseInt(report.getHost().getAttributesMap().get("totalGpu")); - else - totalGpu = 0; - - getJdbcTemplate().update( - "UPDATE " + - "host " + - "SET " + - "b_nimby=?,"+ - "int_cores=?," + - "int_cores_idle=?," + - "int_mem=?," + - "int_mem_idle=?, " + - "int_gpu=?," + - "int_gpu_idle=? " + - "WHERE " + - "pk_host=? 
"+ - "AND " + - "int_cores = int_cores_idle " + - "AND " + - "int_mem = int_mem_idle", - report.getHost().getNimbyEnabled(), cores, cores, - memory, memory, totalGpu, totalGpu, host.getId()); - } - - @Override - public void updateHostLock(HostInterface host, LockState state, Source source) { - getJdbcTemplate().update( - "UPDATE host SET str_lock_state=?, str_lock_source=? WHERE pk_host=?", - state.toString(), source.toString(), host.getHostId()); - } - - @Override - public void updateHostRebootWhenIdle(HostInterface host, boolean enabled) { - getJdbcTemplate().update("UPDATE host SET b_reboot_idle=? WHERE pk_host=?", - enabled, host.getHostId()); - } - - @Override - public void deleteHost(HostInterface host) { - getJdbcTemplate().update( - "DELETE FROM comments WHERE pk_host=?",host.getHostId()); - getJdbcTemplate().update( - "DELETE FROM host WHERE pk_host=?",host.getHostId()); - } - - @Override - public void updateHostState(HostInterface host, HardwareState state) { - getJdbcTemplate().update( - "UPDATE host_stat SET str_state=? WHERE pk_host=?", - state.toString(), host.getHostId()); - } - - @Override - public void updateHostSetAllocation(HostInterface host, AllocationInterface alloc) { - - String tag = getJdbcTemplate().queryForObject( - "SELECT str_tag FROM alloc WHERE pk_alloc=?", - String.class, alloc.getAllocationId()); - getJdbcTemplate().update( - "UPDATE host SET pk_alloc=? WHERE pk_host=?", - alloc.getAllocationId(), host.getHostId()); - - removeTagsByType(host, HostTagType.ALLOC); - tagHost(host, tag, HostTagType.ALLOC); - } - - @Override - public boolean isHostLocked(HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE pk_host=? 
AND str_lock_state!=?", - Integer.class, host.getHostId(), LockState.OPEN.toString()) > 0; - } - - private static final String INSERT_TAG = - "INSERT INTO " + - "host_tag " + - "(" + - "pk_host_tag,"+ - "pk_host,"+ - "str_tag,"+ - "str_tag_type, " + - "b_constant " + - ") VALUES (?,?,?,?,?)"; - - - @Override - public void tagHost(String id, String tag, HostTagType type) { - boolean constant = false; - if (type.equals(HostTagType.ALLOC)) - constant = true; - - getJdbcTemplate().update(INSERT_TAG, - SqlUtil.genKeyRandom(), id, tag.trim(), type.toString(), constant); - } - - @Override - public void tagHost(HostInterface host, String tag, HostTagType type) { - tagHost(host.getHostId(), tag, type); - } - - @Override - public void removeTagsByType(HostInterface host, HostTagType type) { - getJdbcTemplate().update("DELETE FROM host_tag WHERE pk_host=? AND str_tag_type=?", - host.getHostId(), type.toString()); - } - - @Override - public void removeTag(HostInterface host, String tag) { - getJdbcTemplate().update( - "DELETE FROM host_tag WHERE pk_host=? AND str_tag=? AND b_constant=0", - host.getHostId(), tag); - } - - @Override - public void renameTag(HostInterface host, String oldTag, String newTag) { - getJdbcTemplate().update( - "UPDATE host_tag SET str_tag=? WHERE pk_host=? AND str_tag=? AND b_constant=0", - newTag, host.getHostId(), oldTag); - } - - @Override - public void updateThreadMode(HostInterface host, ThreadMode mode) { - getJdbcTemplate().update( - "UPDATE host SET int_thread_mode=? WHERE pk_host=?", - mode.getNumber(), host.getHostId()); - } - - @Override - public void updateHostOs(HostInterface host, String os) { - getJdbcTemplate().update( - "UPDATE host_stat SET str_os=? WHERE pk_host=?", - os, host.getHostId()); - } - - @Override - public boolean isKillMode(HostInterface h) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host_stat WHERE pk_host = ? " + - "AND int_swap_total - int_swap_free > ? 
AND int_mem_free < ?", - Integer.class, h.getHostId(), Dispatcher.KILL_MODE_SWAP_THRESHOLD, - Dispatcher.KILL_MODE_MEM_THRESHOLD) > 0; - } - - @Override - public int getStrandedCoreUnits(HostInterface h) { - try { - int idle_cores = getJdbcTemplate().queryForObject( - "SELECT int_cores_idle FROM host WHERE pk_host = ? AND int_mem_idle <= ?", - Integer.class, h.getHostId(), - Dispatcher.MEM_STRANDED_THRESHHOLD); - return (int) (Math.floor(idle_cores / 100.0)) * 100; - } catch (EmptyResultDataAccessException e) { - return 0; - } - } - - private static final String IS_HOST_UP = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host_stat "+ - "WHERE " + - "host_stat.str_state = ? " + - "AND " + - "host_stat.pk_host = ? "; - - @Override - public boolean isHostUp(HostInterface host) { - return getJdbcTemplate().queryForObject(IS_HOST_UP, - Integer.class, HardwareState.UP.toString(), - host.getHostId()) == 1; - } - - private static final String IS_PREFER_SHOW = - "SELECT " + - "COUNT(1) " + - "FROM " + - "host," + - "owner," + - "deed "+ - "WHERE " + - "host.pk_host = deed.pk_host " + - "AND " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "host.pk_host = ?"; - - @Override - public boolean isPreferShow(HostInterface h) { - return getJdbcTemplate().queryForObject(IS_PREFER_SHOW, - Integer.class, h.getHostId()) > 0; - } - - @Override - public boolean isNimbyHost(HostInterface h) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host WHERE b_nimby=1 AND pk_host=?", - Integer.class, h.getHostId()) > 0; - } - - /** - * Checks if the passed in name looks like a fully qualified domain name. - * If so, returns the hostname without the domain. Otherwise returns the passed - * in name unchanged. 
- * @param fqdn - String - * @return String - hostname - */ - private String getHostNameFromFQDN(String fqdn, Boolean useLongNames) { - String hostName; - Pattern ipPattern = Pattern.compile("^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$"); - Matcher ipMatcher = ipPattern.matcher(fqdn); - if (ipMatcher.matches()){ - hostName = fqdn; - } - else if (useLongNames) { - hostName = fqdn; - Pattern domainPattern = Pattern.compile( - ".*(\\.(.*)\\.(co(m|.[a-z]{2})|biz|edu|info|net|org|cn|de|eu|nl))$"); - Matcher domainMatcher = domainPattern.matcher(fqdn); - if (domainMatcher.matches()){ - hostName = fqdn.replace(domainMatcher.group(1), ""); - } - } - else { - hostName = fqdn.split("\\.")[0]; - } - return hostName; - - } - - /** - * Converts the amount of memory reported by the machine - * to a modificed value which takes into account the - * operating system and the possibility of user applications. - * - * @param host - * @return - */ - private long convertMemoryUnits(RenderHost host) { - - long memUnits; - if (host.getTagsList().contains("64bit")) { - memUnits = CueUtil.convertKbToFakeKb64bit(host.getTotalMem()); - } - else { - memUnits = CueUtil.convertKbToFakeKb32bit(host.getTotalMem()); - } - - /* - * If this is a desktop, we'll just cut the memory - * so we don't annoy the user. - */ - if (host.getNimbyEnabled()) { - memUnits = (long) (memUnits / 1.5) + Dispatcher.MEM_RESERVED_SYSTEM; - } - - return memUnits; - } - -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/JobDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/JobDaoJdbc.java deleted file mode 100644 index 0f591a350..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/JobDaoJdbc.java +++ /dev/null @@ -1,966 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.math.BigDecimal; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.springframework.dao.DataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.BuildableJob; -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.DispatchJob; -import com.imageworks.spcue.EntityModificationError; -import com.imageworks.spcue.ExecutionSummary; -import com.imageworks.spcue.FacilityInterface; -import com.imageworks.spcue.FrameStateTotals; -import com.imageworks.spcue.GroupDetail; -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.Inherit; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.ResourceUsage; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.TaskEntity; -import com.imageworks.spcue.dao.JobDao; -import com.imageworks.spcue.grpc.job.FrameState; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.JobLogUtil; -import com.imageworks.spcue.util.SqlUtil; - -public class JobDaoJdbc extends JdbcDaoSupport 
implements JobDao { - private static final Pattern LAST_JOB_STRIP_PATTERN = Pattern.compile("_v*([_0-9]*$)"); - - /* - * Maps a row to a DispatchJob object - */ - public static final RowMapper DISPATCH_JOB_MAPPER = new RowMapper() { - public DispatchJob mapRow(ResultSet rs, int rowNum) throws SQLException { - DispatchJob job = new DispatchJob(); - job.id = rs.getString("pk_job"); - job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_facility"); - job.name = rs.getString("str_name"); - job.state = JobState.valueOf(rs.getString("str_state")); - job.maxRetries = rs.getInt("int_max_retries"); - job.paused = rs.getBoolean("b_paused"); - job.autoEat = rs.getBoolean("b_autoeat"); - job.autoBook = rs.getBoolean("b_auto_book"); - job.autoUnbook = rs.getBoolean("b_auto_unbook"); - return job; - } - }; - - /* - * Maps a row to minimal job. - */ - public static final RowMapper JOB_MAPPER = new RowMapper() { - public JobInterface mapRow(final ResultSet rs, int rowNum) throws SQLException { - return new JobInterface() { - final String jobid = rs.getString("pk_job"); - final String showid = rs.getString("pk_show"); - final String name = rs.getString("str_name"); - final String facility = rs.getString("pk_facility"); - public String getJobId() { return jobid; } - public String getShowId() { return showid; } - public String getId() { return jobid; } - public String getName() { return name; } - public String getFacilityId() { return facility; } - }; - } - }; - - /* - * Maps a row to a JobDetail object - */ - private static final RowMapper JOB_DETAIL_MAPPER = - new RowMapper() { - public JobDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - JobDetail job = new JobDetail(); - job.id = rs.getString("pk_job"); - job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_show"); - job.deptId = rs.getString("pk_dept"); - job.groupId = rs.getString("pk_folder"); - job.logDir = rs.getString("str_log_dir"); - job.maxCoreUnits = 
rs.getInt("int_max_cores"); - job.minCoreUnits = rs.getInt("int_min_cores"); - job.name = rs.getString("str_name"); - job.priority = rs.getInt("int_priority"); - job.shot = rs.getString("str_shot"); - job.state = JobState.valueOf(rs.getString("str_state")); - int uid = rs.getInt("int_uid"); - job.uid = rs.wasNull() ? Optional.empty() : Optional.of(uid); - job.user = rs.getString("str_user"); - job.email = rs.getString("str_email"); - job.totalFrames = rs.getInt("int_frame_count"); - job.totalLayers = rs.getInt("int_layer_count"); - job.isPaused = rs.getBoolean("b_paused"); - job.maxRetries = rs.getInt("int_max_retries"); - job.showName = rs.getString("show_name"); - job.facilityName = rs.getString("facility_name"); - job.deptName = rs.getString("dept_name"); - return job; - } - }; - - private static final String GET_DISPATCH_JOB = - "SELECT " + - "job.pk_job, " + - "job.pk_facility, " + - "job.pk_show, " + - "job.str_name, "+ - "job.str_show, " + - "job.str_state, "+ - "job.b_paused, "+ - "job.int_max_retries, " + - "job.b_autoeat, " + - "job.b_auto_book,"+ - "job.b_auto_unbook " + - "FROM " + - "job "+ - "WHERE " + - "pk_job = ?"; - - @Override - public DispatchJob getDispatchJob(String uuid) { - return getJdbcTemplate().queryForObject( - GET_DISPATCH_JOB, DISPATCH_JOB_MAPPER, uuid); - } - - private static final String IS_JOB_COMPLETE = - "SELECT " + - "SUM (" + - "int_waiting_count + " + - "int_running_count + " + - "int_dead_count + " + - "int_depend_count + " + - "int_checkpoint_count " + - ") " + - "FROM " + - "job_stat " + - "WHERE " + - "pk_job=?"; - - @Override - public boolean isJobComplete(JobInterface job) { - if (isLaunching(job)) { - return false; - } - return getJdbcTemplate().queryForObject(IS_JOB_COMPLETE, - Integer.class, job.getJobId()) == 0; - } - - public static final String GET_JOB= - "SELECT " + - "job.pk_job, "+ - "job.pk_show, "+ - "job.pk_dept,"+ - "job.pk_facility,"+ - "job.str_name " + - "FROM " + - "job "; - - private static final String 
GET_JOB_DETAIL = - "SELECT " + - "job.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility,"+ - "job.pk_dept,"+ - "job.pk_folder,"+ - "job.str_log_dir,"+ - "job.str_name,"+ - "job.str_shot,"+ - "job.str_state,"+ - "job.int_uid,"+ - "job.str_user,"+ - "job.str_email,"+ - "job.int_frame_count,"+ - "job.int_layer_count,"+ - "job.b_paused,"+ - "job.int_max_retries,"+ - "job_resource.int_max_cores,"+ - "job_resource.int_min_cores,"+ - "job_resource.int_priority,"+ - "show.str_name AS show_name, " + - "dept.str_name AS dept_name, "+ - "facility.str_name AS facility_name "+ - "FROM " + - "job, " + - "job_resource, "+ - "show, " + - "dept, "+ - "facility "+ - "WHERE " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.pk_show = show.pk_show " + - "AND " + - "job.pk_dept = dept.pk_dept " + - "AND " + - "job.pk_facility = facility.pk_facility "; - - private static final String GET_JOB_BY_ID = - GET_JOB_DETAIL + "AND job.pk_job=?"; - - private static final String FIND_JOB_BY_NAME = - GET_JOB_DETAIL + "AND job.str_visible_name=? "; - - @Override - public JobDetail getJobDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_JOB_BY_ID, JOB_DETAIL_MAPPER, id); - } - - @Override - public JobDetail findLastJob(String name) { - Matcher matcher = LAST_JOB_STRIP_PATTERN.matcher(name); - name = matcher.replaceAll("%"); - - return getJdbcTemplate().queryForObject( - "SELECT * FROM (" + GET_JOB_DETAIL + - " AND job.str_state = 'FINISHED' AND job.str_name LIKE ? 
ORDER BY job.ts_stopped DESC) " + - "WHERE ROWNUM = 1", JOB_DETAIL_MAPPER, name); - } - - @Override - public JobInterface getJob(String id) { - return getJdbcTemplate().queryForObject( - GET_JOB + " WHERE pk_job=?", JOB_MAPPER, id); - } - - - public static final String GET_JOBS_BY_TASK = - "SELECT " + - "job.pk_job, "+ - "job.pk_show, "+ - "job.pk_dept,"+ - "job.pk_facility,"+ - "job.str_name " + - "FROM " + - "job," + - "folder " + - "WHERE " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "folder.b_exclude_managed = 0 " + - "AND " + - "job.str_state = ? " + - "AND " + - "job.pk_dept = ? " + - "AND " + - "job.str_shot = ? " + - "ORDER BY "+ - "ts_started ASC "; - - @Override - public List getJobs(TaskEntity t) { - return getJdbcTemplate().query(GET_JOBS_BY_TASK, - JOB_MAPPER, JobState.PENDING.toString(), t.deptId, t.shot); - } - - @Override - public JobDetail findJobDetail(String name) { - return getJdbcTemplate().queryForObject( - FIND_JOB_BY_NAME, JOB_DETAIL_MAPPER, name); - } - - @Override - public JobInterface findJob(String name) { - return getJdbcTemplate().queryForObject( - GET_JOB + " WHERE job.str_visible_name=?", JOB_MAPPER, name); - } - - @Override - public List findJobs(ShowInterface show) { - return getJdbcTemplate().query( - GET_JOB_DETAIL + " AND job.pk_show=?", JOB_DETAIL_MAPPER, show.getShowId()); - } - - @Override - public List findJobs(GroupInterface group) { - return getJdbcTemplate().query( - GET_JOB_DETAIL + " AND job.pk_folder=?", JOB_DETAIL_MAPPER, group.getId()); - } - - @Override - public void deleteJob(JobInterface j) { - /* See trigger before_delete_job */ - getJdbcTemplate().update("DELETE FROM job WHERE pk_job=?", j.getId()); - } - - @Override - public void updatePriority(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? 
WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updatePriority(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_priority=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE job.pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMinCores(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMaxCores(GroupInterface g, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE " + - "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", - v, g.getGroupId()); - } - - @Override - public void updateMinCores(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_min_cores=? WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updateMaxCores(JobInterface j, int v) { - getJdbcTemplate().update("UPDATE job_resource SET int_max_cores=? WHERE pk_job=?", - v, j.getJobId()); - } - - @Override - public void updatePaused(JobInterface j, boolean b) { - getJdbcTemplate().update("UPDATE job SET b_paused=? WHERE pk_job=?", - b, j.getJobId()); - } - - @Override - public void updateAutoEat(JobInterface j, boolean b) { - int maxRetries = 1; - if (b) { - maxRetries = 0; - } - getJdbcTemplate().update("UPDATE job SET b_autoeat=?, int_max_retries=? WHERE pk_job=?", - b, maxRetries, j.getJobId()); - } - - @Override - public void updateState(JobInterface job, JobState state) { - getJdbcTemplate().update("UPDATE job SET str_state=? WHERE pk_job=?", - state.toString(), job.getJobId()); - } - - @Override - public void updateLogPath(JobInterface job, String path) { - getJdbcTemplate().update("UPDATE job SET str_log_dir=? 
WHERE pk_job=?", - path, job.getJobId()); - } - - @Override - public void updateMaxRSS(JobInterface job, long value) { - getJdbcTemplate().update( - "UPDATE job_mem SET int_max_rss=? WHERE pk_job=? AND int_max_rss < ?", - value, job.getJobId(), value); - } - - private static final String UPDATE_JOB_FINISHED = - "UPDATE " + - "job " + - "SET " + - "str_state = ?, "+ - "str_visible_name=NULL, " + - "ts_stopped = systimestamp "+ - "WHERE " + - "str_state = 'PENDING'" + - "AND " + - "pk_job = ?"; - - @Override - public boolean updateJobFinished(JobInterface job) { - // Only return true if this thread was the one who actually - // set the job state to finished. - if(getJdbcTemplate().update(UPDATE_JOB_FINISHED, - JobState.FINISHED.toString(), job.getJobId()) == 1) { - return true; - } - return false; - } - - private static final String INSERT_JOB = - "INSERT INTO " + - "job " + - "(" + - "pk_job," + - "pk_show," + - "pk_folder,"+ - "pk_facility,"+ - "pk_dept,"+ - "str_name," + - "str_visible_name,"+ - "str_show,"+ - "str_shot," + - "str_user," + - "str_email,"+ - "str_state," + - "str_log_dir," + - "str_os, "+ - "int_uid," + - "b_paused," + - "b_autoeat,"+ - "int_max_retries " + - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertJob(JobDetail j, JobLogUtil jobLogUtil) { - j.id = SqlUtil.genKeyRandom(); - j.logDir = jobLogUtil.getJobLogPath(j); - if (j.minCoreUnits < 100) { j.minCoreUnits = 100; } - - getJdbcTemplate().update(INSERT_JOB, - j.id, j.showId, j.groupId, j.facilityId, j.deptId, - j.name, j.name, j.showName, j.shot, j.user, j.email, j.state.toString(), - j.logDir, j.os, j.uid.orElse(null), j.isPaused, j.isAutoEat, j.maxRetries); - } - - private static final String JOB_EXISTS = - "SELECT " + - "1 " + - "FROM " + - "job " + - "WHERE " + - "str_name = ? 
" + - "AND " + - "str_state='PENDING' " + - "AND " + - "ROWNUM = 1"; - - @Override - public boolean exists(String name) { - try { - return (getJdbcTemplate().queryForObject(JOB_EXISTS, - Integer.class, name) >= 1); - } catch (Exception e) { - return false; - } - } - - private static final String IS_LAUNCHING = - "SELECT " + - "str_state " + - "FROM " + - "job " + - "WHERE " + - "pk_job=?"; - - @Override - public boolean isLaunching(JobInterface j) { - return getJdbcTemplate().queryForObject( - IS_LAUNCHING, String.class, j.getJobId()).equals( - JobState.STARTUP.toString()); - } - - @Override - public void activateJob(JobInterface job, JobState jobState) { - - int[] jobTotals = { 0, 0 }; // Depend, Waiting - - /* - * Sets all frames in the setup state to Waiting. Frames with a depend - * count > 0 are automatically updated to Depend via the - * update_frame_wait_to_dep trigger. - */ - getJdbcTemplate().update("UPDATE frame SET str_state=? WHERE pk_job=? AND str_state=?", - FrameState.WAITING.toString(), job.getId(), FrameState.SETUP.toString()); - - List> layers = getJdbcTemplate().queryForList( - "SELECT pk_layer, str_state, count(1) AS c FROM frame " + - "WHERE pk_job=? GROUP BY pk_layer, str_state", job.getId()); - - for (Map row: layers) { - String layer = (String) row.get("pk_layer"); - FrameState state = FrameState.valueOf((String) row.get("str_state")); - int count = ((BigDecimal) row.get("c")).intValue(); - - if (count == 0 || state == null) { continue; } - - switch (state) { - case DEPEND: - jobTotals[0] = jobTotals[0] + count; - getJdbcTemplate().update( - "UPDATE layer_stat SET int_depend_count=?,int_total_count=int_total_count + ? WHERE pk_layer=?", - count, count, layer); - break; - case WAITING: - jobTotals[1] = jobTotals[1] + count; - getJdbcTemplate().update( - "UPDATE layer_stat SET int_waiting_count=?,int_total_count=int_total_count + ? 
WHERE pk_layer=?", - count, count, layer); - break; - } - } - - getJdbcTemplate().update( - "UPDATE job_stat SET int_depend_count=?,int_waiting_count=? WHERE pk_job=?", - jobTotals[0], jobTotals[1], job.getJobId()); - - getJdbcTemplate().update( - "UPDATE job SET int_frame_count=?, int_layer_count=? WHERE pk_job=?", - jobTotals[0] + jobTotals[1], layers.size(), job.getJobId()); - - getJdbcTemplate().update( - "UPDATE show SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", - jobTotals[0] + jobTotals[1], job.getShowId()); - - updateState(job, jobState); - } - - private static final String HAS_PENDING_FRAMES = - "SELECT " + - "int_waiting_count " + - "FROM " + - "job,"+ - "job_stat " + - "WHERE " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "job.b_paused = 0 " + - "AND " + - "job.b_auto_book = 1 " + - "AND " + - "job.pk_job = ?"; - - @Override - public boolean hasPendingFrames(JobInterface job) { - try { - return getJdbcTemplate().queryForObject(HAS_PENDING_FRAMES, - Integer.class, job.getJobId()) > 0; - } catch (DataAccessException e) { - return false; - } - } - - private static final String IS_JOB_OVER_MIN_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_cores > job_resource.int_min_cores"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MIN_CORES, - Integer.class, job.getJobId()) > 0; - } - - private static final String IS_JOB_OVER_MAX_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_cores + ? 
> job_resource.int_max_cores"; - - @Override - public boolean isOverMaxCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, - Integer.class, job.getJobId(), 0) > 0; - } - - @Override - public boolean isOverMaxCores(JobInterface job, int coreUnits) { - return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_CORES, - Integer.class, job.getJobId(), coreUnits) > 0; - } - - - private static final String IS_JOB_AT_MAX_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job_resource " + - "WHERE " + - "job_resource.pk_job = ? " + - "AND " + - "job_resource.int_cores >= job_resource.int_max_cores "; - - @Override - public boolean isAtMaxCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_CORES, - Integer.class, job.getJobId()) > 0; - } - - @Override - public void updateMaxFrameRetries(JobInterface j, int max_retries) { - if (max_retries < 0) { - throw new IllegalArgumentException("max retries must be greater than 0"); - } - - int max_max_retries = getJdbcTemplate().queryForObject( - "SELECT int_value FROM config WHERE str_key=?", - Integer.class, "MAX_FRAME_RETRIES"); - - if (max_retries > max_max_retries) { - throw new IllegalArgumentException("max retries must be less than " - + max_max_retries); - } - - getJdbcTemplate().update( - "UPDATE job SET int_max_retries=? 
WHERE pk_job=?", - max_retries, j.getJobId()); - } - - private static final String GET_FRAME_STATE_TOTALS = - "SELECT " + - "job.int_frame_count," + - "job_stat.* " + - "FROM " + - "job," + - "job_stat " + - "WHERE " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.pk_job=?"; - - public FrameStateTotals getFrameStateTotals(JobInterface job) { - return getJdbcTemplate().queryForObject( - GET_FRAME_STATE_TOTALS, - new RowMapper() { - public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameStateTotals t = new FrameStateTotals(); - t.dead = rs.getInt("int_dead_count"); - t.depend = rs.getInt("int_depend_count"); - t.eaten = rs.getInt("int_eaten_count"); - t.running = rs.getInt("int_running_count"); - t.succeeded = rs.getInt("int_succeeded_count"); - t.waiting = rs.getInt("int_waiting_count"); - t.total = rs.getInt("int_frame_count"); - return t; - } - },job.getJobId()); - } - - private static final String GET_EXECUTION_SUMMARY = - "SELECT " + - "job_usage.int_core_time_success,"+ - "job_usage.int_core_time_fail," + - "job_mem.int_max_rss " + - "FROM " + - "job," + - "job_usage, "+ - "job_mem " + - "WHERE " + - "job.pk_job = job_usage.pk_job "+ - "AND " + - "job.pk_job = job_mem.pk_job " + - "AND " + - "job.pk_job = ?"; - - public ExecutionSummary getExecutionSummary(JobInterface job) { - return getJdbcTemplate().queryForObject( - GET_EXECUTION_SUMMARY, - new RowMapper() { - public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { - ExecutionSummary e = new ExecutionSummary(); - e.coreTimeSuccess = rs.getLong("int_core_time_success"); - e.coreTimeFail = rs.getLong("int_core_time_fail"); - e.coreTime = e.coreTimeSuccess + e.coreTimeFail; - e.highMemoryKb = rs.getLong("int_max_rss"); - - return e; - } - }, job.getJobId()); - } - - private static final String INSERT_JOB_ENV = - "INSERT INTO " + - "job_env " + - "(" + - "pk_job_env, pk_job, str_key, str_value " + - ") " + - "VALUES (?,?,?,?)"; - - @Override - public 
void insertEnvironment(JobInterface job, Map env) { - for (Map.Entry e: env.entrySet()) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_ENV, - pk, job.getJobId(), e.getKey(), e.getValue()); - } - } - - @Override - public void insertEnvironment(JobInterface job, String key, String value) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_JOB_ENV, - pk, job.getJobId(), key, value); - } - - @Override - public Map getEnvironment(JobInterface job) { - Map result = new HashMap(); - - List> _result = getJdbcTemplate().queryForList( - "SELECT str_key, str_value FROM job_env WHERE pk_job=?", job.getJobId()); - - for (Map o: _result) { - result.put((String) o.get("str_key"), (String) o.get("str_value")); - } - - return result; - } - - @Override - public void updateParent(JobInterface job, GroupDetail dest) { - updateParent(job, dest, new Inherit[] { Inherit.All }); - } - - @Override - public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) { - - if (!job.getShowId().equals(dest.getShowId())) { - throw new EntityModificationError("error moving job, " + - "cannot move jobs between shows"); - } - - StringBuilder query = new StringBuilder(1024); - query.append("UPDATE job_resource SET "); - List values= new ArrayList(); - - Set inheritSet = new HashSet(inherits.length); - inheritSet.addAll(Arrays.asList(inherits)); - - for (Inherit i: inheritSet) { - switch(i) { - case Priority: - if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { - query.append("int_priority=?,"); - values.add(dest.jobPriority); - } - break; - - case MinCores: - if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { - query.append("int_min_cores=?,"); - values.add(dest.jobMinCores); - } - break; - - case MaxCores: - if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { - query.append("int_max_cores=?,"); - values.add(dest.jobMaxCores); - } - break; - - case All: - if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { - 
query.append("int_priority=?,"); - values.add(dest.jobPriority); - } - - if (dest.jobMinCores != CueUtil.FEATURE_DISABLED) { - query.append("int_min_cores=?,"); - values.add(dest.jobMinCores); - } - - if (dest.jobMaxCores != CueUtil.FEATURE_DISABLED) { - query.append("int_max_cores=?,"); - values.add(dest.jobMaxCores); - } - break; - } - } - - getJdbcTemplate().update( - "UPDATE job SET pk_folder=?, pk_dept=? WHERE pk_job=?", - dest.getGroupId(), dest.getDepartmentId(), job.getJobId()); - - getJdbcTemplate().update( - "UPDATE job_history SET pk_dept=? WHERE pk_job=?", - dest.getDepartmentId(), job.getJobId()); - - if (values.size() > 0) { - query.deleteCharAt(query.length()-1); - query.append(" WHERE pk_job=?"); - values.add(job.getJobId()); - getJdbcTemplate().update(query.toString(), values.toArray()); - } - } - - private static final String HAS_PENDING_JOBS = - "SELECT " + - "job.pk_job " + - "FROM " + - "job,"+ - "job_stat, " + - "job_resource "+ - "WHERE " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.str_state='PENDING' " + - "AND " + - "job.b_paused = 0 " + - "AND " + - "job.b_auto_book = 1 " + - "AND " + - "job_stat.int_waiting_count != 0" + - "AND " + - "job_resource.int_cores < job_resource.int_max_cores " + - "AND " + - "job.pk_facility = ? " + - "AND "+ - "ROWNUM = 1"; - - @Override - public boolean cueHasPendingJobs(FacilityInterface f) { - return getJdbcTemplate().queryForList( - HAS_PENDING_JOBS, f.getFacilityId()).size() > 0; - } - - @Override - public void enableAutoBooking(JobInterface job, boolean value) { - getJdbcTemplate().update( - "UPDATE job SET b_auto_book=? WHERE pk_job=?", value, job.getJobId()); - } - - @Override - public void enableAutoUnBooking(JobInterface job, boolean value) { - getJdbcTemplate().update( - "UPDATE job SET b_auto_unbook=? 
WHERE pk_job=?", value, job.getJobId()); - } - - public static final String MAP_POST_JOB = - "INSERT INTO " + - "job_post " + - "(pk_job_post, pk_job, pk_post_job) " + - "VALUES (?,?,?)"; - - @Override - public void mapPostJob(BuildableJob job) { - getJdbcTemplate().update(MAP_POST_JOB, - SqlUtil.genKeyRandom(), job.detail.id, job.getPostJob().detail.id); - } - - public static final String ACTIVATE_POST_JOB = - "UPDATE " + - "job " + - "SET " + - "str_state=? " + - "WHERE " + - "pk_job IN (SELECT pk_post_job FROM job_post WHERE pk_job = ?)"; - - @Override - public void activatePostJob(JobInterface job) { - getJdbcTemplate().update(ACTIVATE_POST_JOB, - JobState.PENDING.toString(), job.getJobId()); - getJdbcTemplate().update("DELETE FROM job_post WHERE pk_job=?",job.getJobId()); - } - - @Override - public void updateDepartment(GroupInterface group, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_folder=?", - dept.getDepartmentId(), group.getGroupId()); - } - - @Override - public void updateDepartment(JobInterface job, DepartmentInterface dept) { - getJdbcTemplate().update("UPDATE job SET pk_dept=? WHERE pk_job=?", - dept.getDepartmentId(), job.getJobId()); - } - - - public void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus) { - - if (exitStatus == 0) { - - getJdbcTemplate().update( - "UPDATE " + - "job_usage " + - "SET " + - "int_core_time_success = int_core_time_success + ?," + - "int_clock_time_success = int_clock_time_success + ?,"+ - "int_frame_success_count = int_frame_success_count + 1 " + - "WHERE " + - "pk_job = ? ", - usage.getCoreTimeSeconds(), - usage.getClockTimeSeconds(), - job.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "job_usage " + - "SET " + - "int_clock_time_high = ? " + - "WHERE " + - "pk_job = ? 
" + - "AND " + - "int_clock_time_high < ?", - usage.getClockTimeSeconds(), - job.getJobId(), - usage.getClockTimeSeconds()); - } - else { - - getJdbcTemplate().update( - "UPDATE " + - "job_usage " + - "SET " + - "int_core_time_fail = int_core_time_fail + ?," + - "int_clock_time_fail = int_clock_time_fail + ?,"+ - "int_frame_fail_count = int_frame_fail_count + 1 " + - "WHERE " + - "pk_job = ? ", - usage.getCoreTimeSeconds(), - usage.getClockTimeSeconds(), - job.getJobId()); - } - } - - public void updateEmail(JobInterface job, String email) { - getJdbcTemplate().update( - "UPDATE job SET str_email=? WHERE pk_job=?", - email, job.getJobId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/LayerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/LayerDaoJdbc.java deleted file mode 100644 index f189af1d9..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/LayerDaoJdbc.java +++ /dev/null @@ -1,847 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; - -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import org.apache.commons.lang.StringUtils; -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.ExecutionSummary; -import com.imageworks.spcue.FrameStateTotals; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerDetail; -import com.imageworks.spcue.LayerEntity; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LimitEntity; -import com.imageworks.spcue.LimitInterface; -import com.imageworks.spcue.ResourceUsage; -import com.imageworks.spcue.ThreadStats; -import com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.grpc.job.LayerType; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.SqlUtil; - -public class LayerDaoJdbc extends JdbcDaoSupport implements LayerDao { - - private static final String INSERT_OUTPUT_PATH = - "INSERT INTO " + - "layer_output " + - "( " + - "pk_layer_output,"+ - "pk_layer,"+ - "pk_job,"+ - "str_filespec " + - ") VALUES (?,?,?,?)"; - - @Override - public void insertLayerOutput(LayerInterface layer, String filespec) { - getJdbcTemplate().update( - INSERT_OUTPUT_PATH, UUID.randomUUID().toString(), - layer.getLayerId(), layer.getJobId(), - filespec); - } - - private static final String GET_OUTPUT = - "SELECT " + - "str_filespec " + - "FROM " + - "layer_output " + - "WHERE " + - "pk_layer = ?"; - - private static final RowMapper OUTPUT_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, 
int rowNum) throws SQLException { - return rs.getString("str_filespec"); - } - }; - - @Override - public List getLayerOutputs(LayerInterface layer) { - return getJdbcTemplate().query(GET_OUTPUT, - OUTPUT_MAPPER, layer.getLayerId()); - } - - private static final String IS_LAYER_DISPATCHABLE = - "SELECT " + - "int_waiting_count " + - "FROM " + - "layer_stat " + - "WHERE " + - "pk_layer=?"; - - @Override - public boolean isLayerDispatchable(LayerInterface l ) { - return getJdbcTemplate().queryForObject(IS_LAYER_DISPATCHABLE, - Integer.class, l.getLayerId()) > 0; - } - - private static final String IS_LAYER_COMPLETE = - "SELECT " + - "SUM ( " + - "int_waiting_count + " + - "int_running_count + " + - "int_dead_count + " + - "int_depend_count "+ - ") " + - "FROM " + - "layer_stat " + - "WHERE " + - "pk_layer=?"; - - public boolean isLayerComplete(LayerInterface l) { - if (isLaunching(l)) { - return false; - } - return getJdbcTemplate().queryForObject(IS_LAYER_COMPLETE, - Integer.class, l.getLayerId()) == 0; - } - - private static final String IS_LAUNCHING = - "SELECT " + - "str_state " + - "FROM " + - "job " + - "WHERE " + - "pk_job=?"; - - @Override - public boolean isLaunching(LayerInterface l) { - return getJdbcTemplate().queryForObject( - IS_LAUNCHING, String.class, l.getJobId()).equals( - JobState.STARTUP.toString()); - } - - private static final String IS_THREADABLE = - "SELECT " + - "b_threadable " + - "FROM " + - "layer " + - "WHERE " + - "pk_layer = ?"; - - @Override - public boolean isThreadable(LayerInterface l) { - return getJdbcTemplate().queryForObject(IS_THREADABLE, - Integer.class, l.getLayerId()) > 0; - } - - /** - * Query for layers table. 
Where clauses are appended later - */ - public static final String GET_LAYER_DETAIL = - "SELECT " + - "layer.*, " + - "job.pk_show, "+ - "job.pk_facility " + - "FROM " + - "layer," + - "job," + - "show " + - "WHERE " + - "layer.pk_job = job.pk_job " + - "AND " + - "job.pk_show = show.pk_show "; - - private static final String GET_LAYER = - "SELECT " + - "layer.pk_layer,"+ - "layer.pk_job,"+ - "job.pk_show,"+ - "job.pk_facility, " + - "layer.str_name "+ - "FROM " + - "layer," + - "job," + - "show " + - "WHERE " + - "layer.pk_job = job.pk_job " + - "AND " + - "job.pk_show = show.pk_show "; - - /** - * Maps a ResultSet to a LayerDetail - */ - public static final RowMapper LAYER_DETAIL_MAPPER = new RowMapper() { - public LayerDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - LayerDetail layer = new LayerDetail(); - layer.chunkSize = rs.getInt("int_chunk_size"); - layer.command = rs.getString("str_cmd"); - layer.dispatchOrder = rs.getInt("int_dispatch_order"); - layer.id = rs.getString("pk_layer"); - layer.jobId = rs.getString("pk_job"); - layer.showId = rs.getString("pk_show"); - layer.facilityId =rs.getString("pk_facility"); - layer.name = rs.getString("str_name"); - layer.range = rs.getString("str_range"); - layer.minimumCores = rs.getInt("int_cores_min"); - layer.minimumMemory = rs.getLong("int_mem_min"); - layer.minimumGpu = rs.getLong("int_gpu_min"); - layer.type = LayerType.valueOf(rs.getString("str_type")); - layer.tags = Sets.newHashSet( - rs.getString("str_tags").replaceAll(" ", "").split("\\|")); - layer.services.addAll( - Lists.newArrayList(rs.getString("str_services").split(","))); - return layer; - } - }; - - /** - * Maps a ResultSet to a LayerDetail - */ - private static final RowMapper LAYER_MAPPER = new RowMapper() { - public LayerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LayerEntity layer = new LayerEntity(); - layer.id = rs.getString("pk_layer"); - layer.jobId = rs.getString("pk_job"); - layer.showId = 
rs.getString("pk_show"); - layer.facilityId = rs.getString("pk_facility"); - layer.name = rs.getString("str_name"); - return layer; - } - }; - - - @Override - public LayerDetail getLayerDetail(String id) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject(GET_LAYER_DETAIL + - " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, id); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerDetail getLayerDetail(LayerInterface layer) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject(GET_LAYER_DETAIL + - " AND layer.pk_layer=?", LAYER_DETAIL_MAPPER, layer.getLayerId()); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerDetail findLayerDetail(JobInterface job, String name) { - LayerDetail layerDetail = getJdbcTemplate().queryForObject( - GET_LAYER_DETAIL + " AND layer.pk_job=? AND layer.str_name=?", - LAYER_DETAIL_MAPPER, job.getJobId(), name); - layerDetail.limits.addAll(getLimitNames(layerDetail)); - return layerDetail; - } - - @Override - public LayerInterface findLayer(JobInterface job, String name) { - try { - return getJdbcTemplate().queryForObject( - GET_LAYER + " AND layer.pk_job=? 
AND layer.str_name=?", - LAYER_MAPPER, job.getJobId(), name); - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException("The layer " + - name + " was not found in " + job.getName() + e, 0); - } - } - - @Override - public List getLayerDetails(JobInterface job) { - List layers = getJdbcTemplate().query( - GET_LAYER_DETAIL + " AND layer.pk_job=?", - LAYER_DETAIL_MAPPER, job.getJobId()); - layers.stream() - .forEach(layerDetail -> layerDetail.limits.addAll(getLimitNames(layerDetail))); - return layers; - } - - @Override - public List getLayers(JobInterface job) { - return getJdbcTemplate().query( - GET_LAYER + " AND layer.pk_job=?", - LAYER_MAPPER, job.getJobId()); - } - - @Override - public LayerInterface getLayer(String id) { - return getJdbcTemplate().queryForObject( - GET_LAYER + " AND layer.pk_layer=?", - LAYER_MAPPER, id); - } - - private static final String INSERT_LAYER = - "INSERT INTO " + - "layer " + - "("+ - "pk_layer, " + - "pk_job, "+ - "str_name, " + - "str_cmd, " + - "str_range, " + - "int_chunk_size, " + - "int_dispatch_order, " + - "str_tags, " + - "str_type," + - "int_cores_min, "+ - "int_cores_max, "+ - "b_threadable, " + - "int_mem_min, " + - "int_gpu_min, " + - "str_services " + - ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; - - @Override - public void insertLayerDetail(LayerDetail l) { - l.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER, - l.id, l.jobId, l.name, l.command, - l.range, l.chunkSize, l.dispatchOrder, - StringUtils.join(l.tags," | "), l.type.toString(), - l.minimumCores, l.maximumCores, l.isThreadable, - l.minimumMemory, l.minimumGpu, StringUtils.join(l.services,",")); - } - - @Override - public void updateLayerMinMemory(LayerInterface layer, long val) { - if (val < Dispatcher.MEM_RESERVED_MIN) { - val = Dispatcher.MEM_RESERVED_MIN; - } - getJdbcTemplate().update("UPDATE layer SET int_mem_min=? 
WHERE pk_layer=?", - val, layer.getLayerId()); - } - - @Override - public void updateLayerMinGpu(LayerInterface layer, long gpu) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_min=? WHERE pk_layer=?", - gpu, layer.getLayerId()); - } - - private static final String BALANCE_MEM = - "UPDATE " + - "layer " + - "SET " + - "int_mem_min = ? " + - "WHERE " + - "pk_layer = ? " + - "AND " + - "int_mem_min > ? " + - "AND " + - "b_optimize = 1"; - - @Override - public boolean balanceLayerMinMemory(LayerInterface layer, long frameMaxRss) { - - /** - * Lowers the memory value on the frame when the maxrss is lower than - * the memory requirement. - */ - long maxrss = getJdbcTemplate().queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getLayerId()); - - if (maxrss < frameMaxRss) { - maxrss = frameMaxRss; - } - if (maxrss < Dispatcher.MEM_RESERVED_MIN) { - maxrss = Dispatcher.MEM_RESERVED_MIN; - } else { - maxrss = maxrss + CueUtil.MB256; - } - - boolean result = getJdbcTemplate().update(BALANCE_MEM, - maxrss, layer.getLayerId(), maxrss) == 1; - if (result) { - logger.info(layer.getName() + " was balanced to " + maxrss); - } - return result; - } - - @Override - public void increaseLayerMinMemory(LayerInterface layer, long val) { - getJdbcTemplate().update("UPDATE layer SET int_mem_min=? WHERE pk_layer=? AND int_mem_min < ?", - val, layer.getLayerId(), val); - } - - @Override - public void increaseLayerMinGpu(LayerInterface layer, long gpu) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_min=? WHERE pk_layer=? AND int_gpu_min < ?", - gpu, layer.getLayerId(), gpu); - } - - @Override - public void updateLayerMinCores(LayerInterface layer, int val) { - if (val < Dispatcher.CORE_POINTS_RESERVED_MIN) { - val = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; - } - getJdbcTemplate().update("UPDATE layer SET int_cores_min=? 
WHERE pk_layer=?", - val, layer.getLayerId()); - } - - @Override - public void updateLayerMaxCores(LayerInterface layer, int val) { - getJdbcTemplate().update("UPDATE layer SET int_cores_max=? WHERE pk_layer=?", - val, layer.getLayerId()); - } - - private static final String UPDATE_LAYER_MAX_RSS = - "UPDATE " + - "layer_mem " + - "SET " + - "int_max_rss=? " + - "WHERE " + - "pk_layer=?"; - - @Override - public void updateLayerMaxRSS(LayerInterface layer, long val, boolean force) { - StringBuilder sb = new StringBuilder(UPDATE_LAYER_MAX_RSS); - Object[] options; - if (!force) { - options = new Object[] { val, layer.getLayerId(), val}; - sb.append (" AND int_max_rss < ?"); - } - else { - options = new Object[] { val, layer.getLayerId() }; - } - getJdbcTemplate().update(sb.toString(), options); - } - - @Override - public void updateLayerTags(LayerInterface layer, Set tags) { - if (tags.size() == 0) { - throw new IllegalArgumentException( - "Layers must have at least one tag."); - } - StringBuilder sb = new StringBuilder(128); - for (String t: tags) { - if (t == null) { continue; } - if (t.length() < 1) { continue; } - sb.append(t + " | "); - } - sb.delete(sb.length()-3, sb.length()); - if (sb.length() == 0) { - throw new IllegalArgumentException( - "Invalid layer tags, cannot contain null tags or " + - "tags of zero length."); - } - getJdbcTemplate().update( - "UPDATE layer SET str_tags=? WHERE pk_layer=?", - sb.toString(), layer.getLayerId()); - } - - @Override - public void appendLayerTags(LayerInterface layer, String val) { - String appendTag = " | " + val; - String matchTag = "%" + val + "%"; - - getJdbcTemplate().update("UPDATE layer SET str_tags = str_tags || ? " + - "WHERE pk_layer=? 
AND str_tags NOT LIKE ?", - appendTag, layer.getLayerId(), matchTag); - } - - public FrameStateTotals getFrameStateTotals(LayerInterface layer) { - return getJdbcTemplate().queryForObject( - "SELECT * FROM layer_stat WHERE pk_layer=?", - new RowMapper() { - public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { - FrameStateTotals t = new FrameStateTotals(); - t.dead = rs.getInt("int_dead_count"); - t.depend = rs.getInt("int_depend_count"); - t.eaten = rs.getInt("int_eaten_count"); - t.running = rs.getInt("int_running_count"); - t.succeeded = rs.getInt("int_succeeded_count"); - t.waiting = rs.getInt("int_waiting_count"); - t.total = rs.getInt("int_total_count"); - return t; - } - },layer.getLayerId()); - } - - private static final String GET_EXECUTION_SUMMARY = - "SELECT " + - "layer_usage.int_core_time_success,"+ - "layer_usage.int_core_time_fail," + - "layer_usage.int_clock_time_success," + - "layer_mem.int_max_rss " + - "FROM " + - "layer," + - "layer_usage, "+ - "layer_mem " + - "WHERE " + - "layer.pk_layer = layer_usage.pk_layer "+ - "AND " + - "layer.pk_layer = layer_mem.pk_layer " + - "AND " + - "layer.pk_layer = ?"; - - @Override - public ExecutionSummary getExecutionSummary(LayerInterface layer) { - return getJdbcTemplate().queryForObject( - GET_EXECUTION_SUMMARY, - new RowMapper() { - public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { - ExecutionSummary e = new ExecutionSummary(); - e.coreTimeSuccess = rs.getLong("int_core_time_success"); - e.coreTimeFail = rs.getLong("int_core_time_fail"); - e.coreTime = e.coreTimeSuccess + e.coreTimeFail; - e.highMemoryKb = rs.getLong("int_max_rss"); - return e; - } - }, layer.getLayerId()); - } - - private static final String INSERT_LAYER_ENV = - "INSERT INTO " + - "layer_env " + - "(" + - "pk_layer_env, pk_layer, pk_job, str_key, str_value " + - ") " + - "VALUES (?,?,?,?,?)"; - - @Override - public void insertLayerEnvironment(LayerInterface layer, Map env) { - for 
(Map.Entry e: env.entrySet()) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER_ENV, - pk, layer.getLayerId(), layer.getJobId(), e.getKey(), e.getValue()); - } - } - - @Override - public void insertLayerEnvironment(LayerInterface layer, String key, String value) { - String pk = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_LAYER_ENV, - pk, layer.getLayerId(), layer.getJobId(), key, value); - } - - @Override - public Map getLayerEnvironment(LayerInterface layer) { - Map result = new HashMap(); - List> _result = getJdbcTemplate().queryForList( - "SELECT str_key, str_value FROM layer_env WHERE pk_layer=?", layer.getLayerId()); - - for (Map o: _result) { - result.put((String) o.get("str_key"), (String) o.get("str_value")); - } - return result; - } - - private static final String FIND_PAST_MAX_RSS = - "SELECT "+ - "layer_mem.int_max_rss " + - "FROM " + - "layer, " + - "layer_mem, "+ - "layer_stat "+ - "WHERE " + - "layer.pk_layer = layer_stat.pk_layer " + - "AND " + - "layer.pk_layer = layer_mem.pk_layer " + - "AND " + - "layer.pk_job = ? " + - "AND " + - "layer.str_name = ? " + - "AND " + - "layer_stat.int_succeeded_count >= ceil(layer_stat.int_total_count * .5) "; - - @Override - public long findPastMaxRSS(JobInterface job, String name) { - try { - long maxRss = getJdbcTemplate().queryForObject(FIND_PAST_MAX_RSS, - Long.class, job.getJobId(), name); - if (maxRss >= Dispatcher.MEM_RESERVED_MIN) { - return maxRss; - } - else { - return Dispatcher.MEM_RESERVED_MIN; - } - } catch (EmptyResultDataAccessException e) { - // Actually want to return 0 here, which means - // there is no past history. - return 0; - } - } - - @Override - public void updateTags(JobInterface job, String tags, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET str_tags=? WHERE pk_job=? 
AND str_type=?", - tags, job.getJobId(), type.toString()); - } - - @Override - public void updateMinMemory(JobInterface job, long mem, LayerType type) { - if (mem < Dispatcher.MEM_RESERVED_MIN) { - mem = Dispatcher.MEM_RESERVED_MIN; - } - getJdbcTemplate().update( - "UPDATE layer SET int_mem_min=? WHERE pk_job=? AND str_type=?", - mem, job.getJobId(), type.toString()); - } - - @Override - public void updateMinGpu(JobInterface job, long gpu, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET int_gpu_min=? WHERE pk_job=? AND str_type=?", - gpu, job.getJobId(), type.toString()); - } - - @Override - public void updateMinCores(JobInterface job, int cores, LayerType type) { - getJdbcTemplate().update( - "UPDATE layer SET int_cores_min=? WHERE pk_job=? AND str_type=?", - cores, job.getJobId(), type.toString()); - } - - @Override - public void updateThreadable(LayerInterface layer, boolean threadable) { - getJdbcTemplate().update( - "UPDATE layer SET b_threadable=? WHERE pk_layer=?", - threadable, layer.getLayerId()); - } - - @Override - public void updateTimeout(LayerInterface layer, int timeout){ - getJdbcTemplate().update( - "UPDATE layer SET int_timeout=? WHERE pk_layer=?", - timeout, layer.getLayerId()); - } - - @Override - public void updateTimeoutLLU(LayerInterface layer, int timeout_llu){ - getJdbcTemplate().update( - "UPDATE layer SET int_timeout_llu=? WHERE pk_layer=?", - timeout_llu, layer.getLayerId()); - } - - @Override - public void enableMemoryOptimizer(LayerInterface layer, boolean value) { - getJdbcTemplate().update( - "UPDATE layer SET b_optimize=? WHERE pk_layer=?", - value, layer.getLayerId()); - } - - private static final String IS_OPTIMIZABLE = - "SELECT " + - "COUNT(1) "+ - "FROM " + - "layer, " + - "layer_stat, " + - "layer_usage " + - "WHERE " + - "layer.pk_layer = layer_stat.pk_layer " + - "AND " + - "layer.pk_layer = layer_usage.pk_layer " + - "AND " + - "layer.pk_layer = ? 
" + - "AND " + - "layer.int_cores_min = 100 " + - "AND " + - "str_tags LIKE '%general%' " + - "AND " + - "str_tags NOT LIKE '%util%' " + - "AND " + - "layer_stat.int_succeeded_count >= ? " + - "AND " + - "(layer_usage.int_core_time_success / layer_stat.int_succeeded_count) <= ?"; - - @Override - public boolean isOptimizable(LayerInterface l, int succeeded, float avg) { - if (succeeded < 1) { - throw new IllegalArgumentException("Succeeded frames option " + - "must be greater than zero"); - } - return getJdbcTemplate().queryForObject(IS_OPTIMIZABLE, - Integer.class, l.getLayerId(), succeeded, avg) > 0; - } - - private static final String THREAD_STATS = - "SELECT " + - "avg(interval_to_seconds(ts_stopped - ts_started)) AS avg, " + - "int_cores " + - "FROM " + - "frame " + - "WHERE " + - "frame.pk_layer = ? " + - "AND " + - "frame.int_checkpoint_count = 0 " + - "AND " + - "int_cores > 0 " + - "GROUP BY " + - "int_cores " + - "ORDER BY " + - "int_cores DESC "; - - @Override - public List getThreadStats(LayerInterface layer) { - - return getJdbcTemplate().query(THREAD_STATS, - new RowMapper() { - public ThreadStats mapRow(ResultSet rs, int rowNum) throws SQLException { - ThreadStats s = new ThreadStats(); - s.setThreads(rs.getInt("int_cores") / 100); - s.setAvgFrameTime(rs.getInt("avg")); - return s; - } - }, layer.getLayerId()); - } - - @Override - public void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatus) { - - if (exitStatus == 0) { - - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_core_time_success = int_core_time_success + ?," + - "int_clock_time_success = int_clock_time_success + ?,"+ - "int_frame_success_count = int_frame_success_count + 1 " + - "WHERE " + - "pk_layer = ? ", - usage.getCoreTimeSeconds(), - usage.getClockTimeSeconds(), - layer.getLayerId()); - - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_clock_time_high = ? " + - "WHERE " + - "pk_layer = ? 
" + - "AND " + - "int_clock_time_high < ?", - usage.getClockTimeSeconds(), - layer.getLayerId(), - usage.getClockTimeSeconds()); - - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_clock_time_low = ? " + - "WHERE " + - "pk_layer = ? " + - "AND " + - "(? < int_clock_time_low OR int_clock_time_low = 0)", - usage.getClockTimeSeconds(), - layer.getLayerId(), - usage.getClockTimeSeconds()); - } - else { - getJdbcTemplate().update( - "UPDATE " + - "layer_usage " + - "SET " + - "int_core_time_fail = int_core_time_fail + ?," + - "int_clock_time_fail = int_clock_time_fail + ?,"+ - "int_frame_fail_count = int_frame_fail_count + 1 " + - "WHERE " + - "pk_layer = ? ", - usage.getCoreTimeSeconds(), - usage.getClockTimeSeconds(), - layer.getLayerId()); - } - } - - private static final String INSERT_LIMIT = - "INSERT INTO " + - "layer_limit " + - "( " + - "pk_layer_limit,"+ - "pk_layer,"+ - "pk_limit_record,"+ - ") VALUES (?,?,?)"; - - private static final String GET_LIMITS = - "SELECT " + - "limit_record.pk_limit_record," + - "limit_record.str_name," + - "limit_record.int_max_value " + - "FROM " + - "layer_limit," + - "limit_record " + - "WHERE " + - "layer_limit.pk_layer = ? " + - "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; - - private static final String GET_LIMIT_NAMES = - "SELECT " + - "limit_record.str_name " + - "FROM " + - "layer_limit," + - "limit_record " + - "WHERE " + - "layer_limit.pk_layer = ? 
" + - "AND limit_record.pk_limit_record = layer_limit.pk_limit_record"; - - private static final RowMapper LIMIT_MAPPER = - new RowMapper() { - public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LimitEntity limit = new LimitEntity(); - limit.id = rs.getString("pk_limit_record"); - limit.name = rs.getString("str_name"); - limit.maxValue = rs.getInt("int_max_value"); - return limit; - } - }; - - private static final RowMapper LIMIT_NAME_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_name"); - } - }; - - @Override - public void addLimit(LayerInterface layer, String limitId) { - getJdbcTemplate().update(INSERT_LIMIT, UUID.randomUUID().toString(), layer.getLayerId(), - limitId); - } - - @Override - public void dropLimit(LayerInterface layer, String limitId) { - getJdbcTemplate().update( - "DELETE FROM layer_limit WHERE pk_limit_record = ? AND pk_layer = ?", - limitId, - layer.getLayerId()); - } - - @Override - public List getLimits(LayerInterface layer) { - return getJdbcTemplate().query(GET_LIMITS, - LIMIT_MAPPER, layer.getLayerId()); - } - - @Override - public List getLimitNames(LayerInterface layer) { - return getJdbcTemplate().query(GET_LIMIT_NAMES, - LIMIT_NAME_MAPPER, layer.getLayerId()); - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/LimitDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/LimitDaoJdbc.java deleted file mode 100644 index 96acf8af0..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/LimitDaoJdbc.java +++ /dev/null @@ -1,110 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.LimitEntity; -import com.imageworks.spcue.LimitInterface; -import com.imageworks.spcue.dao.LimitDao; -import com.imageworks.spcue.util.SqlUtil; - -public class LimitDaoJdbc extends JdbcDaoSupport implements LimitDao { - - public static final RowMapper LIMIT_MAPPER = new RowMapper() { - public LimitEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - LimitEntity limit = new LimitEntity(); - limit.id = rs.getString("pk_limit_record"); - limit.name = rs.getString("str_name"); - limit.maxValue = rs.getInt("int_max_value"); - limit.currentRunning = rs.getInt("int_current_running"); - return limit; - } - }; - - @Override - public String createLimit(String name, int maxValue) { - String limitId = SqlUtil.genKeyRandom(); - getJdbcTemplate().update( - "INSERT INTO limit_record (pk_limit_record,str_name, int_max_value) VALUES (?,?,?)", - limitId, name, maxValue); - return limitId; - } - - @Override - public void deleteLimit(LimitInterface limit) { - getJdbcTemplate().update("DELETE FROM limit_record WHERE pk_limit_recor=?", - limit.getId()); - } - - @Override - public LimitEntity findLimit(String name){ - String findLimitQuery = GET_LIMIT_BASE + - "WHERE " + - "limit-record.str_name=? 
" + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); - } - - @Override - public LimitEntity getLimit(String id) { - String getLimitQuery = GET_LIMIT_BASE + - "WHERE " + - "limit_record.pk_limit_record=? " + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); - } - - @Override - public void setLimitName(LimitInterface limit, String name) { - getJdbcTemplate().update("UPDATE limit_record SET str_name = ? WHERE pk_limit_record = ?", - name, limit.getId()); - } - - public void setMaxValue(LimitInterface limit, int maxValue) { - getJdbcTemplate().update("UPDATE limit_record SET int_max_value = ? WHERE pk_limit_record = ?", - maxValue, limit.getId()); - } - - private static final String GET_LIMIT_BASE = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value," + - "SUM(layer_stat.int_running_count) AS int_current_running " + - "FROM " + - "limit_record " + - "LEFT JOIN " + - "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN " + - "layer ON layer.pk_layer = layer_limit.pk_layer " + - "LEFT JOIN " + - "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/MaintenanceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/MaintenanceDaoJdbc.java deleted file mode 100644 index 820804f50..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/MaintenanceDaoJdbc.java +++ /dev/null @@ -1,90 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.MaintenanceTask; -import com.imageworks.spcue.dao.MaintenanceDao; -import com.imageworks.spcue.grpc.host.HardwareState; - - -public class MaintenanceDaoJdbc extends JdbcDaoSupport implements MaintenanceDao { - - private static final String HOST_DOWN_INTERVAL = "interval '300' second"; - - private static final String UPDATE_HOSTS_DOWN = - "UPDATE " + - "host_stat " + - "SET " + - "str_state=? " + - "WHERE " + - "str_state='UP' " + - "AND " + - "systimestamp - ts_ping > " + HOST_DOWN_INTERVAL; - - public int setUpHostsToDown() { - return getJdbcTemplate().update(UPDATE_HOSTS_DOWN, - HardwareState.DOWN.toString()); - } - - public static final String LOCK_TASK = - "UPDATE " + - "task_lock " + - "SET " + - "int_lock = ?, " + - "ts_lastrun = systimestamp " + - "WHERE " + - "str_name= ? "+ - "AND " + - "(int_lock = ? OR ? - int_lock > int_timeout)"; - - public boolean lockTask(MaintenanceTask task) { - long now = System.currentTimeMillis(); - return getJdbcTemplate().update(LOCK_TASK, - now, task.toString(), 0, now) == 1; - } - - public static final String LOCK_TASK_MIN = - "UPDATE " + - "task_lock " + - "SET " + - "int_lock = ?, " + - "ts_lastrun = systimestamp " + - "WHERE " + - "str_name= ? "+ - "AND " + - "int_lock = ? " + - "AND " + - "interval_to_seconds(systimestamp - ts_lastrun) > ? 
"; - - public boolean lockTask(MaintenanceTask task, int minutes) { - long now = System.currentTimeMillis(); - return getJdbcTemplate().update(LOCK_TASK_MIN, - now, task.toString(), 0, minutes * 60) == 1; - } - - - public void unlockTask(MaintenanceTask task) { - getJdbcTemplate().update( - "UPDATE task_lock SET int_lock = 0 WHERE str_name=?", task.toString()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/MatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/MatcherDaoJdbc.java deleted file mode 100644 index f55a0cdf1..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/MatcherDaoJdbc.java +++ /dev/null @@ -1,109 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.FilterInterface; -import com.imageworks.spcue.MatcherEntity; -import com.imageworks.spcue.MatcherInterface; -import com.imageworks.spcue.dao.MatcherDao; -import com.imageworks.spcue.grpc.filter.MatchSubject; -import com.imageworks.spcue.grpc.filter.MatchType; -import com.imageworks.spcue.util.SqlUtil; - -public class MatcherDaoJdbc extends JdbcDaoSupport implements MatcherDao { - - private static final String INSERT_MATCHER = - "INSERT INTO " + - "matcher " + - "( " + - "pk_matcher,pk_filter,str_subject,str_match,str_value"+ - ") VALUES (?,?,?,?,?)"; - - public void insertMatcher(MatcherEntity matcher) { - matcher.id = SqlUtil.genKeyRandom(); - - getJdbcTemplate().update(INSERT_MATCHER, - matcher.id, matcher.getFilterId(), matcher.subject.toString(), - matcher.type.toString(), matcher.value); - } - - public void deleteMatcher(MatcherInterface matcher) { - getJdbcTemplate().update( - "DELETE FROM matcher WHERE pk_matcher=?", - matcher.getMatcherId()); - } - - private static final String GET_MATCHER = - "SELECT " + - "matcher.*, " + - "filter.pk_show "+ - "FROM " + - "matcher, " + - "filter " + - "WHERE " + - "matcher.pk_filter = filter.pk_filter"; - - public MatcherEntity getMatcher(String id) { - return getJdbcTemplate().queryForObject( - GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_DETAIL_MAPPER, id); - } - - public MatcherEntity getMatcher(MatcherInterface matcher) { - return getJdbcTemplate().queryForObject( - GET_MATCHER + " AND matcher.pk_matcher=?", MATCHER_DETAIL_MAPPER, - matcher.getMatcherId()); - } - - public List getMatchers(FilterInterface filter) { - return getJdbcTemplate().query( - GET_MATCHER + " AND filter.pk_filter=? 
ORDER BY ts_created ASC", - MATCHER_DETAIL_MAPPER, filter.getFilterId()); - } - - - public void updateMatcher(MatcherEntity matcher) { - getJdbcTemplate().update( - "UPDATE matcher SET str_subject=?,str_match=?,str_value=? WHERE pk_matcher=?", - matcher.subject.toString(), matcher.type.toString(), matcher.value, matcher.getMatcherId()); - } - - public static final RowMapper MATCHER_DETAIL_MAPPER = new RowMapper() { - public MatcherEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - MatcherEntity matcher = new MatcherEntity(); - matcher.id = rs.getString("pk_matcher"); - matcher.showId = rs.getString("pk_show"); - matcher.filterId = rs.getString("pk_filter"); - matcher.name = null; - matcher.subject = MatchSubject.valueOf(rs.getString("str_subject")); - matcher.type = MatchType.valueOf(rs.getString("str_match")); - matcher.value = rs.getString("str_value"); - return matcher; - } - }; -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/NestedWhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/NestedWhiteboardDaoJdbc.java deleted file mode 100644 index 693d99fbf..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/NestedWhiteboardDaoJdbc.java +++ /dev/null @@ -1,485 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.NestedWhiteboardDao; -import com.imageworks.spcue.grpc.host.NestedHost; -import com.imageworks.spcue.grpc.host.NestedHostSeq; -import com.imageworks.spcue.grpc.host.NestedProc; -import com.imageworks.spcue.grpc.job.GroupStats; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.grpc.job.JobStats; -import com.imageworks.spcue.grpc.job.NestedGroup; -import com.imageworks.spcue.grpc.job.NestedGroupSeq; -import com.imageworks.spcue.grpc.job.NestedJob; -import com.imageworks.spcue.util.Convert; -import com.imageworks.spcue.util.CueUtil; - -public class NestedWhiteboardDaoJdbc extends JdbcDaoSupport implements NestedWhiteboardDao { - - private class CachedJobWhiteboardMapper { - public final long time; - public NestedJobWhiteboardMapper mapper; - - public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { - this.mapper = result; - this.time = System.currentTimeMillis(); - } - } - - private static final int CACHE_TIMEOUT = 5000; - private final ConcurrentHashMap jobCache = - new ConcurrentHashMap(20); - - public static final String GET_NESTED_GROUPS = - "SELECT " + - "show.pk_show, " + - "show.str_name AS str_show, " + - "facility.str_name AS facility_name, " + - "dept.str_name AS dept_name, " + - "folder.pk_folder, " + - "folder.pk_parent_folder, " + - "folder.str_name AS group_name, " + - "folder.int_job_priority as int_def_job_priority, " + - "folder.int_job_min_cores as int_def_job_min_cores, " + - "folder.int_job_max_cores as int_def_job_max_cores, " + - 
"folder_resource.int_min_cores AS folder_min_cores, " + - "folder_resource.int_max_cores AS folder_max_cores, " + - "folder_level.int_level, " + - "job.pk_job, " + - "job.str_name, " + - "job.str_shot, " + - "job.str_user, " + - "job.str_state, " + - "job.str_log_dir, " + - "job.int_uid, " + - "job_resource.int_priority, " + - "job.ts_started, " + - "job.ts_stopped, " + - "job.ts_updated, " + - "job.b_paused, " + - "job.b_autoeat, " + - "job.b_comment, " + - "job.str_os, " + - "job.int_frame_count, " + - "job.int_layer_count, " + - "job_stat.int_waiting_count, " + - "job_stat.int_running_count, " + - "job_stat.int_dead_count, " + - "job_stat.int_eaten_count," + - "job_stat.int_depend_count, " + - "job_stat.int_succeeded_count, " + - "job_usage.int_core_time_success, " + - "job_usage.int_core_time_fail, " + - "job_usage.int_frame_success_count, " + - "job_usage.int_frame_fail_count, " + - "job_usage.int_clock_time_high, " + - "job_usage.int_clock_time_success, " + - "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores, " + - "job_resource.int_min_cores, " + - "job_resource.int_max_cores, " + - "job_mem.int_max_rss " + - "FROM " + - "show, " + - "dept, " + - "folder_level, " + - "folder_resource, " + - "folder " + - "LEFT JOIN " + - "job " + - "ON " + - " (folder.pk_folder = job.pk_folder AND job.str_state='PENDING') " + - "LEFT JOIN " + - "facility " + - "ON " + - "(job.pk_facility = facility.pk_facility) " + - "LEFT JOIN " + - "job_stat " + - "ON " + - "(job.pk_job = job_stat.pk_job) " + - "LEFT JOIN " + - "job_resource " + - "ON " + - "(job.pk_job = job_resource.pk_job) " + - "LEFT JOIN " + - "job_usage " + - "ON " + - "(job.pk_job = job_usage.pk_job) " + - "LEFT JOIN " + - "job_mem " + - "ON " + - "(job.pk_job = job_mem.pk_job) " + - "WHERE " + - "show.pk_show = folder.pk_show " + - "AND " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "folder.pk_dept = dept.pk_dept 
"; - - class NestedJobWhiteboardMapper implements RowMapper { - - public Map groups = new HashMap(50); - public Map> childrenMap = new HashMap>(); - public String rootGroupID; - - @Override - public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { - - String groupId = rs.getString("pk_folder"); - NestedGroup group; - if (!groups.containsKey(groupId)) { - group = NestedGroup.newBuilder() - .setId(rs.getString("pk_folder")) - .setName(rs.getString("group_name")) - .setDefaultJobPriority(rs.getInt("int_def_job_priority")) - .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_min_cores"))) - .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_max_cores"))) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("folder_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("folder_min_cores"))) - .setLevel(rs.getInt("int_level")) - .setDepartment(rs.getString("dept_name")) - .build(); - - String parentGroupId = rs.getString("pk_parent_folder"); - if (parentGroupId != null) { - List children = childrenMap.get(parentGroupId); - if (children == null) { - children = new ArrayList<>(); - childrenMap.put(parentGroupId, children); - } - children.add(groupId); - } - else { - rootGroupID = rs.getString("pk_folder"); - } - groups.put(groupId, group); - } - else { - group = groups.get(groupId); - } - if (rs.getString("pk_job") != null) { - GroupStats oldStats = group.getStats(); - JobStats jobStats = WhiteboardDaoJdbc.mapJobStats(rs); - GroupStats groupStats = GroupStats.newBuilder() - .setDeadFrames(oldStats.getDeadFrames() + jobStats.getDeadFrames()) - .setRunningFrames(oldStats.getRunningFrames() + jobStats.getRunningFrames()) - .setWaitingFrames(oldStats.getWaitingFrames() + jobStats.getWaitingFrames()) - .setDependFrames(oldStats.getDependFrames() + jobStats.getDependFrames()) - .setReservedCores(oldStats.getReservedCores() + jobStats.getReservedCores()) - .setPendingJobs(oldStats.getPendingJobs() + 1).build(); - - 
group = group.toBuilder() - .setStats(groupStats) - .addJobs(rs.getString("pk_job")) - .build(); - groups.put(groupId, group); - } - return group; - } - } - - private NestedJobWhiteboardMapper updateConnections(NestedJobWhiteboardMapper mapper) { - for (Map.Entry> entry : mapper.childrenMap.entrySet()) { - NestedGroup group = mapper.groups.get(entry.getKey()); - NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); - for (String childId : entry.getValue()) { - NestedGroup child = mapper.groups.get(childId); - child = child.toBuilder().setParent(group).build(); - childrenBuilder.addNestedGroups(child); - mapper.groups.put(childId, child); - } - group = group.toBuilder() - .setGroups(childrenBuilder.build()) - .build(); - mapper.groups.put(entry.getKey(), group); - } - return mapper; - } - - public NestedGroup getJobWhiteboard(ShowInterface show) { - - CachedJobWhiteboardMapper cachedMapper = jobCache.get(show.getShowId()); - if (cachedMapper != null) { - if (System.currentTimeMillis() - cachedMapper.time < CACHE_TIMEOUT) { - return cachedMapper.mapper.groups.get(cachedMapper.mapper.rootGroupID); - } - } - - NestedJobWhiteboardMapper mapper = new NestedJobWhiteboardMapper(); - getJdbcTemplate().query( - GET_NESTED_GROUPS + " AND show.pk_show=? 
ORDER BY folder_level.int_level ASC", - mapper, show.getShowId()); - - mapper = updateConnections(mapper); - jobCache.put(show.getShowId(), new CachedJobWhiteboardMapper(mapper)); - return mapper.groups.get(mapper.rootGroupID); - } - - private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLException { - NestedJob.Builder jobBuilder = NestedJob.newBuilder() - .setId(rs.getString("pk_job")) - .setLogDir(rs.getString("str_log_dir")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setName(rs.getString("str_name")) - .setPriority(rs.getInt("int_priority")) - .setShot(rs.getString("str_shot")) - .setShow(rs.getString("str_show")) - .setOs(rs.getString("str_os")) - .setFacility(rs.getString("facility_name")) - .setGroup(rs.getString("group_name")) - .setState(JobState.valueOf(rs.getString("str_state"))) - .setUser(rs.getString("str_user")) - .setIsPaused(rs.getBoolean("b_paused")) - .setHasComment(rs.getBoolean("b_comment")) - .setAutoEat(rs.getBoolean("b_autoeat")) - .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) - .setStats(WhiteboardDaoJdbc.mapJobStats(rs)); - - int uid = rs.getInt("int_uid"); - if (!rs.wasNull()) { - jobBuilder.setUid(uid); - } - - Timestamp ts = rs.getTimestamp("ts_stopped"); - if (ts != null) { - jobBuilder.setStopTime((int) (ts.getTime() / 1000)); - } - else { - jobBuilder.setStopTime(0); - } - return jobBuilder.build(); - } - - private static final String GET_HOSTS = - "SELECT " + - "alloc.str_name AS alloc_name, " + - "host.pk_host, " + - "host.str_name AS host_name, " + - "host_stat.str_state AS host_state, " + - "host.b_nimby, " + - "host_stat.ts_booted, " + - "host_stat.ts_ping, " + - "host.int_cores, " + - "host.int_cores_idle, " + - "host.int_gpu, " + - "host.int_gpu_idle, " + - "host.int_mem, " + - "host.int_mem_idle, " + - "host.str_lock_state, " + - "host.str_tags, " + - "host.b_comment, " + - 
"host.int_thread_mode, " + - "host_stat.str_os, " + - "host_stat.int_mem_total, " + - "host_stat.int_mem_free, " + - "host_stat.int_swap_total, " + - "host_stat.int_swap_free, " + - "host_stat.int_mcp_total, " + - "host_stat.int_mcp_free, " + - "host_stat.int_gpu_total, " + - "host_stat.int_gpu_free, " + - "host_stat.int_load, " + - "proc.pk_proc, " + - "proc.int_cores_reserved AS proc_cores, " + - "proc.int_mem_reserved AS proc_memory, " + - "proc.int_mem_used AS used_memory, " + - "proc.int_mem_max_used AS max_memory, " + - "proc.int_gpu_reserved AS proc_gpu, " + - "proc.ts_ping, " + - "proc.ts_booked, " + - "proc.ts_dispatched, " + - "proc.b_unbooked, " + - "redirect.str_name AS str_redirect, " + - "job.str_name AS job_name, " + - "job.str_log_dir, " + - "show.str_name AS show_name, " + - "frame.str_name AS frame_name " + - "FROM " + - "alloc, " + - "host_stat, " + - "host " + - "LEFT JOIN " + - "proc " + - "ON " + - "(proc.pk_host = host.pk_host) " + - "LEFT JOIN " + - "frame " + - "ON " + - "(proc.pk_frame = frame.pk_frame) " + - "LEFT JOIN " + - "job " + - "ON " + - "(proc.pk_job = job.pk_job) " + - "LEFT JOIN " + - "show " + - "ON " + - "(proc.pk_show = show.pk_show) " + - "LEFT JOIN " + - "redirect " + - "ON " + - "(proc.pk_proc = redirect.pk_proc) " + - "WHERE " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "host.pk_host = host_stat.pk_host "; - - /** - * Caches a the host whiteboard. This class is not - * thread safe so you have to synchronize calls to - * the "cache" method on your own. - */ - class CachedHostWhiteboard { - - /** - * Number of seconds till the cache expires - */ - private static final int CACHE_EXPIRE_TIME_MS = 10000; - - /** - * The host whiteboard we're caching - */ - private NestedHostSeq hostWhiteboard; - - /** - * The time in which the cache expires. 
- */ - private long expireTime = 0l; - - public void cache(List hostWhiteboard) { - this.hostWhiteboard = NestedHostSeq.newBuilder().addAllNestedHosts(hostWhiteboard).build(); - expireTime = System.currentTimeMillis() + CACHE_EXPIRE_TIME_MS; - } - - public NestedHostSeq get() { - return hostWhiteboard; - } - - public boolean isExpired() { - return System.currentTimeMillis() > expireTime; - } - } - - /** - * The CachedHostWhiteboard holds onto the result of the last - * host whiteboard query for about 10 seconds, returning the - * same result to all subsequent requests. - */ - private final CachedHostWhiteboard cachedHostWhiteboard = - new CachedHostWhiteboard(); - - public NestedHostSeq getHostWhiteboard() { - - if (!cachedHostWhiteboard.isExpired()) { - return cachedHostWhiteboard.get(); - } - - /* - * Ensures only 1 thread is doing the query, other threads will wait - * and then return the result of the thead that actually did - * the query. - */ - synchronized (cachedHostWhiteboard) { - - if (!cachedHostWhiteboard.isExpired()) { - return cachedHostWhiteboard.get(); - } - - final List result = new ArrayList(3000); - final Map hosts = new HashMap(3000); - final Map procs = new HashMap(8000); - - getJdbcTemplate().query( - GET_HOSTS, - new RowMapper() { - - public NestedHost mapRow(ResultSet rs, int row) throws SQLException { - NestedHost host; - String hid = rs.getString("pk_host"); - if (!hosts.containsKey(hid)) { - host = WhiteboardDaoJdbc.mapNestedHostBuilder(rs).build(); - hosts.put(hid, host); - result.add(host); - } - else { - host = hosts.get(hid); - } - - String pid = rs.getString("pk_proc"); - if (pid != null) { - NestedProc proc; - if (!procs.containsKey(pid)) { - proc = NestedProc.newBuilder() - .setId(pid) - .setName(CueUtil.buildProcName(host.getName(), - rs.getInt("proc_cores"))) - .setReservedCores(Convert.coreUnitsToCores( - rs.getInt("proc_cores"))) - .setReservedMemory(rs.getLong("proc_memory")) - .setUsedMemory(rs.getLong("used_memory")) - 
.setFrameName(rs.getString("frame_name")) - .setJobName(rs.getString("job_name")) - .setShowName(rs.getString("show_name")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) - .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) - .setUnbooked(rs.getBoolean("b_unbooked")) - .setLogPath(String.format("%s/%s.%s.rqlog", - rs.getString("str_log_dir"),rs.getString("job_name"), - rs.getString("frame_name"))) - .setRedirectTarget(rs.getString("str_redirect")) - .setParent(host) - .build(); - - host = host.toBuilder().setProcs( - host.getProcs().toBuilder().addNestedProcs(proc).build()) - .build(); - procs.put(pid, proc); - } - else { - proc = procs.get(pid); - } - } - - return null; - } - }); - - cachedHostWhiteboard.cache(result); - } - return cachedHostWhiteboard.get(); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/OwnerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/OwnerDaoJdbc.java deleted file mode 100644 index ee94ba8ec..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/OwnerDaoJdbc.java +++ /dev/null @@ -1,127 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.Entity; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.OwnerEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.OwnerDao; -import com.imageworks.spcue.util.SqlUtil; - -public class OwnerDaoJdbc extends JdbcDaoSupport implements OwnerDao { - - public static final RowMapper - OWNER_MAPPER = new RowMapper() { - public OwnerEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - OwnerEntity o = new OwnerEntity(); - o.id = rs.getString("pk_owner"); - o.name = rs.getString("str_username"); - return o; - } - }; - - @Override - public boolean deleteOwner(Entity owner) { - return getJdbcTemplate().update( - "DELETE FROM owner WHERE pk_owner = ?", - owner.getId()) > 0; - } - - private static final String QUERY_FOR_OWNER = - "SELECT " + - "owner.pk_owner," + - "owner.str_username " + - "FROM " + - "owner "; - - @Override - public OwnerEntity findOwner(String name) { - try { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " WHERE str_username = ?", - OWNER_MAPPER, name); - } catch (EmptyResultDataAccessException e) { - throw new EmptyResultDataAccessException( - "Failed to find owner: " + name, 1); - } - } - - @Override - public OwnerEntity getOwner(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " WHERE pk_owner = ?", - OWNER_MAPPER, id); - } - - @Override - public OwnerEntity getOwner(HostInterface host) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + - "WHERE " + - "pk_owner = (" + - "SELECT "+ - "pk_owner " + - "FROM " + - "deed " + - "WHERE " + - "pk_host = ?)", - OWNER_MAPPER, host.getHostId()); - } - - public boolean 
isOwner(OwnerEntity owner, HostInterface host) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host, deed" + - " WHERE host.pk_host = deed.pk_host AND deed.pk_owner=?", - Integer.class, owner.getId()) > 0; - } - - private static final String INSERT_OWNER = - "INSERT INTO " + - "owner " + - "(" + - "pk_owner," + - "pk_show," + - "str_username " + - ") " + - "VALUES (?,?,?)"; - - @Override - public void insertOwner(OwnerEntity owner, ShowInterface show) { - owner.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_OWNER, - owner.id, show.getShowId(), owner.name); - } - - @Override - public void updateShow(Entity owner, ShowInterface show) { - getJdbcTemplate().update( - "UPDATE owner SET pk_show = ? WHERE pk_owner = ?", - show.getShowId(), owner.getId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/PointDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/PointDaoJdbc.java deleted file mode 100644 index 6ce52d8aa..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/PointDaoJdbc.java +++ /dev/null @@ -1,222 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.PointDetail; -import com.imageworks.spcue.PointInterface; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.PointDao; -import com.imageworks.spcue.util.SqlUtil; - -public class PointDaoJdbc extends JdbcDaoSupport implements PointDao { - - @Override - public void insertPointConf(PointDetail t) { - t.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update( - "INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", - t.id, t.getShowId(), t.getDepartmentId()); - } - - @Override - public boolean isManaged(ShowInterface show, DepartmentInterface dept) { - try { - return getJdbcTemplate().queryForObject( - "SELECT b_managed FROM point WHERE pk_show=? and pk_dept=?", - Integer.class, show.getShowId(), dept.getDepartmentId()) == 1; - } catch (org.springframework.dao.DataRetrievalFailureException e) { - return false; - } - } - - @Override - public PointDetail insertPointConf(ShowInterface show, DepartmentInterface dept) { - PointDetail r = new PointDetail(); - r.deptId = dept.getId(); - r.showId = show.getShowId(); - r.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update( - "INSERT INTO point (pk_point, pk_show, pk_dept) VALUES (?,?,?)", - r.id, r.getShowId(), r.getDepartmentId()); - return r; - } - - @Override - public boolean pointConfExists(ShowInterface show, DepartmentInterface dept) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM point WHERE pk_show=? 
AND pk_dept=?", - Integer.class, show.getShowId(), dept.getDepartmentId()) > 0; - } - - private static final String UPDATE_TI_MANAGED = - "UPDATE " + - "point " + - "SET " + - "b_managed = 1,"+ - "str_ti_task=?, "+ - "int_min_cores=? " + - "WHERE " + - "pk_point=?"; - - @Override - public void updateEnableManaged(PointInterface p, String task, int coreUnits) { - getJdbcTemplate().update(UPDATE_TI_MANAGED, - task, coreUnits, p.getPointId()); - } - - private static final String UPDATE_DISABLE_TI_MANAGED = - "UPDATE " + - "point " + - "SET " + - "b_managed = 0,"+ - "str_ti_task=null, "+ - "int_min_cores=0 " + - "WHERE " + - "pk_point=?"; - - @Override - public void updateDisableManaged(PointInterface p) { - getJdbcTemplate().update(UPDATE_DISABLE_TI_MANAGED, p.getPointId()); - } - - private static final RowMapper DEPARTMENT_CONFIG_DETAIL_MAPPER = - new RowMapper() { - public PointDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - PointDetail rpd = new PointDetail(); - rpd.deptId = rs.getString("pk_dept"); - rpd.showId = rs.getString("pk_show"); - rpd.id = rs.getString("pk_point"); - rpd.cores = rs.getInt("int_min_cores"); - rpd.tiTask = rs.getString("str_ti_task"); - return rpd; - } - }; - - private static final String GET_DEPARTMENT_CONFIG_DETAIL = - "SELECT "+ - "pk_point,"+ - "pk_dept,"+ - "pk_show,"+ - "str_ti_task,"+ - "int_min_cores "+ - "FROM " + - "point " + - "WHERE " + - "pk_point = ?"; - - @Override - public PointDetail getPointConfDetail(String id) { - return getJdbcTemplate().queryForObject(GET_DEPARTMENT_CONFIG_DETAIL, - DEPARTMENT_CONFIG_DETAIL_MAPPER, id); - } - - private static final String GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT = - "SELECT "+ - "pk_point,"+ - "pk_dept,"+ - "pk_show,"+ - "str_ti_task,"+ - "int_min_cores, "+ - "b_managed " + - "FROM " + - "point " + - "WHERE " + - "pk_show = ? " + - "AND " + - "pk_dept = ? 
"; - - @Override - public PointDetail getPointConfigDetail(ShowInterface show, DepartmentInterface dept) { - return getJdbcTemplate().queryForObject(GET_POINT_CONFIG_DETAIL_BY_SHOW_DEPT, - DEPARTMENT_CONFIG_DETAIL_MAPPER, show.getShowId(), dept.getDepartmentId()); - } - - private static final String UPDATE_TI_MANAGED_CORES = - "UPDATE " + - "point " + - "SET " + - "int_min_cores=? " + - "WHERE " + - "pk_point=?"; - - @Override - public void updateManagedCores(PointInterface cdept, int cores) { - getJdbcTemplate().update(UPDATE_TI_MANAGED_CORES, cores, - cdept.getPointId()); - - } - - private static final String GET_MANAGED_POINT_CONFS = - "SELECT " + - "pk_point,"+ - "pk_dept,"+ - "pk_show,"+ - "str_ti_task,"+ - "int_min_cores, "+ - "b_managed " + - "FROM " + - "point " + - "WHERE " + - "b_managed = 1 "; - - @Override - public List getManagedPointConfs() { - return getJdbcTemplate().query(GET_MANAGED_POINT_CONFS, - DEPARTMENT_CONFIG_DETAIL_MAPPER); - } - - @Override - public void updatePointConfUpdateTime(PointInterface t) { - getJdbcTemplate().update( - "UPDATE point SET ts_updated=systimestamp WHERE pk_point=?", - t.getPointId()); - } - - private static final String IS_OVER_MIN_CORES = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job,"+ - "point p "+ - "WHERE " + - "job.pk_show = p.pk_show " + - "AND " + - "job.pk_dept = p.pk_dept " + - "AND " + - "p.int_cores > p.int_min_cores " + - "AND "+ - "job.pk_job = ?"; - - @Override - public boolean isOverMinCores(JobInterface job) { - return getJdbcTemplate().queryForObject(IS_OVER_MIN_CORES, - Integer.class, job.getJobId()) > 0; - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ProcDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ProcDaoJdbc.java deleted file mode 100644 index f363bbc0f..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ProcDaoJdbc.java +++ /dev/null @@ -1,904 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed 
under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.math.BigDecimal; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import org.springframework.dao.DataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.ProcInterface; -import com.imageworks.spcue.Redirect; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.dao.ProcDao; -import com.imageworks.spcue.dao.criteria.FrameSearchInterface; -import com.imageworks.spcue.dao.criteria.ProcSearchInterface; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.dispatcher.ResourceDuplicationFailureException; -import com.imageworks.spcue.dispatcher.ResourceReservationFailureException; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.util.SqlUtil; - -public class ProcDaoJdbc extends JdbcDaoSupport implements ProcDao { - - private static final String VERIFY_RUNNING_PROC = - "SELECT " + - "proc.pk_frame " + - "FROM " + - "proc, " + - "job " + - "WHERE " + - 
"proc.pk_job = job.pk_job " + - "AND " + - "job.str_state = 'PENDING' " + - "AND " + - "proc.pk_proc= ? "; - - public boolean verifyRunningProc(String procId, String frameId) { - try { - String pk_frame = getJdbcTemplate().queryForObject( - VERIFY_RUNNING_PROC, String.class, procId); - if (pk_frame != null) { - return pk_frame.equals(frameId); - } - else { - return false; - } - } catch (org.springframework.dao.EmptyResultDataAccessException e) { - // EAT - } - return false; - } - - private static final String DELETE_VIRTUAL_PROC = - "DELETE FROM " + - "proc " + - "WHERE " + - "pk_proc=?"; - - public boolean deleteVirtualProc(VirtualProc proc) { - if(getJdbcTemplate().update(DELETE_VIRTUAL_PROC, proc.getProcId()) == 0) { - logger.warn("failed to delete " + proc + " , proc does not exist."); - return false; - } - // update all of the resource counts. - procDestroyed(proc); - return true; - } - - private static final String INSERT_VIRTUAL_PROC = - "INSERT INTO " + - "proc " + - "( " + - "pk_proc, " + - "pk_host, " + - "pk_show, "+ - "pk_layer,"+ - "pk_job," + - "pk_frame, "+ - "int_cores_reserved, " + - "int_mem_reserved, " + - "int_mem_pre_reserved, " + - "int_mem_used, "+ - "int_gpu_reserved, " + - "b_local " + - ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?) "; - - public void insertVirtualProc(VirtualProc proc) { - proc.id = SqlUtil.genKeyRandom(); - int result = 0; - try { - result = getJdbcTemplate().update(INSERT_VIRTUAL_PROC, - proc.getProcId(), proc.getHostId(), proc.getShowId(), - proc.getLayerId(), proc.getJobId(), proc.getFrameId(), - proc.coresReserved, proc.memoryReserved, - proc.memoryReserved, Dispatcher.MEM_RESERVED_MIN, - proc.gpuReserved, proc.isLocalDispatch); - - // Update all of the resource counts - procCreated(proc); - } - catch (org.springframework.dao.DataIntegrityViolationException d) { - /* - * This means the frame is already running. If that is the - * case, don't delete it, just set pk_frame to null or - * the orphaned proc handler will catch it. 
- */ - throw new ResourceDuplicationFailureException("The frame " + - proc.getFrameId() + " is already assigned to a proc."); - } - catch (Exception e) { - String msg = "unable to book proc " + - proc.getName() + " on frame " + proc.getFrameId() + - " , " + e; - throw new ResourceReservationFailureException(msg,e); - } - - if (result == 0) { - String msg = "unable to book proc " + proc.id + - " the insert query succeeded but returned 0"; - throw new ResourceReservationFailureException(msg); - } - } - - private static final String UPDATE_VIRTUAL_PROC_ASSIGN = - "UPDATE " + - "proc " + - "SET " + - "pk_show=?," + - "pk_job=?,"+ - "pk_layer=?,"+ - "pk_frame=?, " + - "int_mem_used = 0,"+ - "int_mem_max_used = 0, "+ - "int_virt_used = 0,"+ - "int_virt_max_used = 0, "+ - "ts_dispatched=systimestamp " + - "WHERE " + - "pk_proc = ?"; - - public void updateVirtualProcAssignment(VirtualProc proc) { - - int result = 0; - try { - result = getJdbcTemplate().update( - UPDATE_VIRTUAL_PROC_ASSIGN, - proc.getShowId(), proc.getJobId(), proc.getLayerId(), - proc.getFrameId(), proc.getProcId()); - } - catch (org.springframework.dao.DataIntegrityViolationException d) { - throw new ResourceDuplicationFailureException("The frame " + - proc.getFrameId() + " is already assigned to " + - "the proc " + proc); - } - catch (Exception e) { - String msg = "unable to book proc " + - proc.id + ", " + e; - throw new ResourceReservationFailureException(msg, e); - } - - /* - * If the proc was not updated then it has disappeared. 
- */ - if (result == 0) { - String msg = "unable to book proc " + - proc.id + ", the proc no longer exists,"; - throw new ResourceReservationFailureException(msg); - } - } - - private static final String CLEAR_VIRTUAL_PROC_ASSIGN = - "UPDATE " + - "proc " + - "SET " + - "pk_frame = NULL " + - "WHERE " + - "pk_proc = ?"; - - public boolean clearVirtualProcAssignment(ProcInterface proc) { - return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN, - proc.getId()) == 1; - } - - private static final String CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME = - "UPDATE " + - "proc " + - "SET " + - "pk_frame = NULL " + - "WHERE " + - "pk_frame = ?"; - - public boolean clearVirtualProcAssignment(FrameInterface frame) { - return getJdbcTemplate().update(CLEAR_VIRTUAL_PROC_ASSIGN_BY_FRAME, - frame.getFrameId()) == 1; - } - - private static final String UPDATE_PROC_MEMORY_USAGE = - "UPDATE " + - "proc " + - "SET " + - "int_mem_used = ?, " + - "int_mem_max_used = ?," + - "int_virt_used = ?, " + - "int_virt_max_used = ?, "+ - "ts_ping = systimestamp " + - "WHERE " + - "pk_frame=?"; - - @Override - public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, - long vss, long maxVss) { - /* - * This method is going to repeat for a proc every 1 minute, so - * if the proc is being touched by another thread, then return - * quietly without updating memory usage. - * - * If another thread is accessing the proc record, that means - * the proc is probably being booked to another frame, which - * makes this update invalid anyway. - */ - try { - if (getJdbcTemplate().queryForObject( - "SELECT pk_frame FROM proc WHERE pk_frame=? 
FOR UPDATE", - String.class, f.getFrameId()).equals(f.getFrameId())) { - - getJdbcTemplate().update(UPDATE_PROC_MEMORY_USAGE, - rss, maxRss, vss, maxVss, f.getFrameId()); - } - } - catch (DataAccessException dae) { - logger.info("The proc for frame " + f + - " could not be updated with new memory stats: " + dae); - } - } - - /** - * Maps a row to a VirtualProc object. - */ - public static final RowMapper VIRTUAL_PROC_MAPPER = - new RowMapper() { - public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { - VirtualProc proc = new VirtualProc(); - proc.id = rs.getString("pk_proc"); - proc.hostId = rs.getString("pk_host"); - proc.showId = rs.getString("pk_show"); - proc.jobId= rs.getString("pk_job"); - proc.layerId = rs.getString("pk_layer"); - proc.frameId = rs.getString("pk_frame"); - proc.hostName = rs.getString("host_name"); - proc.allocationId = rs.getString("pk_alloc"); - proc.facilityId = rs.getString("pk_facility"); - proc.coresReserved =rs.getInt("int_cores_reserved"); - proc.memoryReserved = rs.getInt("int_mem_reserved"); - proc.memoryMax = rs.getInt("int_mem_max_used"); - proc.gpuReserved = rs.getInt("int_gpu_reserved"); - proc.virtualMemoryMax = rs.getLong("int_virt_max_used"); - proc.virtualMemoryUsed = rs.getLong("int_virt_used"); - proc.memoryUsed = rs.getInt("int_mem_used"); - proc.unbooked = rs.getBoolean("b_unbooked"); - proc.isLocalDispatch = rs.getBoolean("b_local"); - proc.os = rs.getString("str_os"); - return proc; - } - }; - - private static final String GET_VIRTUAL_PROC = - "SELECT " + - "proc.pk_proc," + - "proc.pk_host,"+ - "proc.pk_show,"+ - "proc.pk_job,"+ - "proc.pk_layer,"+ - "proc.pk_frame,"+ - "proc.b_unbooked,"+ - "proc.b_local,"+ - "host.pk_alloc, " + - "alloc.pk_facility,"+ - "proc.int_cores_reserved,"+ - "proc.int_mem_reserved,"+ - "proc.int_mem_max_used,"+ - "proc.int_mem_used,"+ - "proc.int_gpu_reserved,"+ - "proc.int_virt_max_used,"+ - "proc.int_virt_used,"+ - "host.str_name AS host_name, " + - "host_stat.str_os " 
+ - "FROM " + - "proc," + - "host, " + - "host_stat, " + - "alloc " + - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc "; - - public VirtualProc getVirtualProc(String id) { - return getJdbcTemplate().queryForObject( - GET_VIRTUAL_PROC + " AND proc.pk_proc=? ", - VIRTUAL_PROC_MAPPER, id); - } - - public VirtualProc findVirtualProc(FrameInterface frame) { - return getJdbcTemplate().queryForObject( - GET_VIRTUAL_PROC + " AND proc.pk_frame=? ", - VIRTUAL_PROC_MAPPER, frame.getFrameId()); - } - - private static final String GET_VIRTUAL_PROC_LIST = - "SELECT " + - "proc.*, " + - "host.str_name AS host_name, " + - "host.pk_alloc, " + - "host_stat.str_os, " + - "alloc.pk_facility "+ - "FROM " + - "proc, " + - "frame, " + - "host," + - "host_stat, "+ - "alloc, " + - "layer," + - "job, " + - "folder, " + - "show "+ - "WHERE " + - "proc.pk_show = show.pk_show " + - "AND " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "proc.pk_job = job.pk_job " + - "AND " + - "proc.pk_layer = layer.pk_layer " + - "AND " + - "proc.pk_frame = frame.pk_frame " + - "AND " + - "job.pk_folder = folder.pk_folder "; - - public List findVirtualProcs(ProcSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), - VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - @Override - public List findBookedVirtualProcs(ProcSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST + - "AND proc.b_unbooked = 0"), VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - public List findVirtualProcs(FrameSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_VIRTUAL_PROC_LIST), - VIRTUAL_PROC_MAPPER, r.getValuesArray()); - } - - public List findVirtualProcs(HostInterface host) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST 
+ " AND proc.pk_host=?", - VIRTUAL_PROC_MAPPER, host.getHostId()); - } - - public List findVirtualProcs(LayerInterface layer) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_layer=?", - VIRTUAL_PROC_MAPPER, layer.getLayerId()); - } - - public List findVirtualProcs(JobInterface job) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND proc.pk_job=?", - VIRTUAL_PROC_MAPPER, job.getJobId()); - } - - private static final String FIND_VIRTUAL_PROCS_LJA = - GET_VIRTUAL_PROC_LIST + - "AND proc.pk_job=( " + - "SELECT pk_job FROM host_local WHERE pk_host_local = ?) " + - "AND proc.pk_host=(" + - "SELECT pk_host FROM host_local WHERE pk_host_local = ?) "; - - @Override - public List findVirtualProcs(LocalHostAssignment l) { - return getJdbcTemplate().query( - FIND_VIRTUAL_PROCS_LJA, - VIRTUAL_PROC_MAPPER, - l.getId(), - l.getId()); - } - - public List findVirtualProcs(HardwareState state) { - return getJdbcTemplate().query(GET_VIRTUAL_PROC_LIST + " AND host_stat.str_state=?", - VIRTUAL_PROC_MAPPER, state.toString()); - } - - public void unbookVirtualProcs(List procs) { - List batchArgs = new ArrayList(procs.size()); - for (VirtualProc proc: procs) { - batchArgs.add(new Object[] { proc.id }); - } - - getJdbcTemplate().batchUpdate( - "UPDATE proc SET b_unbooked=1 WHERE pk_proc=?", batchArgs); - } - - @Override - public boolean setUnbookState(ProcInterface proc, boolean unbooked) { - return getJdbcTemplate().update( - "UPDATE proc SET b_unbooked=? WHERE pk_proc=?", - unbooked, proc.getProcId()) == 1; - } - - @Override - public boolean setRedirectTarget(ProcInterface p, Redirect r) { - String name = null; - boolean unbooked = false; - if (r != null) { - name = r.getDestinationName(); - unbooked = true; - } - return getJdbcTemplate().update( - "UPDATE proc SET str_redirect=?, b_unbooked=? 
WHERE pk_proc=?", - name, unbooked, p.getProcId()) == 1; - } - - public void unbookProc(ProcInterface proc) { - getJdbcTemplate().update("UPDATE proc SET b_unbooked=1 WHERE pk_proc=?", - proc.getProcId()); - } - - public String getCurrentShowId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_show FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - public String getCurrentJobId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_job FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - public String getCurrentLayerId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_layer FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - public String getCurrentFrameId(ProcInterface p) { - return getJdbcTemplate().queryForObject("SELECT pk_frame FROM proc WHERE pk_proc=?", - String.class, p.getProcId()); - } - - private static final String ORPHANED_PROC_INTERVAL = "interval '300' second"; - private static final String GET_ORPHANED_PROC_LIST = - "SELECT " + - "proc.*, " + - "host.str_name AS host_name, " + - "host_stat.str_os, " + - "host.pk_alloc, " + - "alloc.pk_facility " + - "FROM " + - "proc, " + - "host, " + - "host_stat,"+ - "alloc " + - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_host = host_stat.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "systimestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; - - public List findOrphanedVirtualProcs() { - return getJdbcTemplate().query(GET_ORPHANED_PROC_LIST, VIRTUAL_PROC_MAPPER); - } - - public List findOrphanedVirtualProcs(int limit) { - return getJdbcTemplate().query( - GET_ORPHANED_PROC_LIST + " AND rownum <= " + limit, - VIRTUAL_PROC_MAPPER); - } - - private static final String IS_ORPHAN = - "SELECT " + - "COUNT(1) " + - "FROM " + - "proc " + - "WHERE " + - "proc.pk_proc = ? 
" + - "AND " + - "systimestamp - proc.ts_ping > " + ORPHANED_PROC_INTERVAL; - - @Override - public boolean isOrphan(ProcInterface proc) { - return getJdbcTemplate().queryForObject(IS_ORPHAN, - Integer.class, proc.getProcId()) == 1; - } - - - public boolean increaseReservedMemory(ProcInterface p, long value) { - try { - return getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=? AND int_mem_reserved < ?", - value, p.getProcId(), value) == 1; - } catch (Exception e) { - // check by trigger erify_host_resources - throw new ResourceReservationFailureException("failed to increase memory reserveration for proc " - + p.getProcId() + " to " + value + ", proc does not have that much memory to spare."); - } - } - - private static final String FIND_WORST_MEMORY_OFFENDER = - "SELECT " + - "pk_proc, " + - "pk_host, " + - "pk_show, "+ - "pk_job, "+ - "pk_layer,"+ - "pk_frame,"+ - "b_unbooked,"+ - "b_local, "+ - "pk_alloc, "+ - "pk_facility, " + - "int_cores_reserved,"+ - "int_mem_reserved," + - "int_mem_max_used,"+ - "int_mem_used,"+ - "int_gpu_reserved," + - "int_virt_max_used,"+ - "int_virt_used,"+ - "host_name, " + - "str_os " + - "FROM (" - + GET_VIRTUAL_PROC + " " + - "AND " + - "host.pk_host =? 
" + - "AND " + - "proc.int_mem_reserved != 0 " + - "ORDER BY " + - "proc.int_virt_used / proc.int_mem_pre_reserved DESC ) " + - "WHERE " + - "ROWNUM = 1"; - - @Override - public VirtualProc getWorstMemoryOffender(HostInterface host) { - return getJdbcTemplate().queryForObject(FIND_WORST_MEMORY_OFFENDER, - VIRTUAL_PROC_MAPPER, host.getHostId()); - } - - public long getReservedMemory(ProcInterface proc) { - return getJdbcTemplate().queryForObject( - "SELECT int_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, proc.getProcId()); - } - - public long getReservedGpu(ProcInterface proc) { - return getJdbcTemplate().queryForObject( - "SELECT int_gpu_reserved FROM proc WHERE pk_proc=?", - Long.class, proc.getProcId()); - } - - private static final String FIND_UNDERUTILIZED_PROCS = - "SELECT " + - "proc.pk_proc," + - "proc.int_mem_reserved - layer_mem.int_max_rss AS free_mem " + - "FROM " + - "proc," + - "host, " + - "layer_mem " + - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "proc.pk_layer = layer_mem.pk_layer " + - "AND " + - "layer_mem.int_max_rss > 0 " + - "AND " + - "host.pk_host = ? " + - "AND " + - "proc.pk_proc != ? 
" + - "AND " + - "proc.int_mem_reserved - layer_mem.int_max_rss > 0"; - - public boolean balanceUnderUtilizedProcs(ProcInterface targetProc, long targetMem) { - - List> result = getJdbcTemplate().queryForList(FIND_UNDERUTILIZED_PROCS, - targetProc.getHostId(), targetProc.getProcId()); - - if (result.size() == 0) { - logger.info("unable to find under utilized procs on host " + targetProc.getName()); - return false; - } - - final Map borrowMap = new HashMap(result.size()); - for (Map map: result) { - logger.info("creating borrow map for: " + (String) map.get("pk_proc")); - borrowMap.put((String) map.get("pk_proc"), 0l); - } - - long memBorrowedTotal = 0l; - int pass = 0; - int maxPasses = 3; - - while(true) { - // the amount of memory we're going to borrow per frame/proc - long memPerFrame = ((targetMem - memBorrowedTotal) / result.size()) + 1; - - // loop through all of our other running frames and try to borrow - // a little bit of memory from each one. - for (Map map: result) { - String pk_proc = (String) map.get("pk_proc"); - BigDecimal free_mem = (BigDecimal) map.get("free_mem"); - long available = free_mem.longValueExact() - borrowMap.get(pk_proc) - Dispatcher.MEM_RESERVED_MIN; - if (available > memPerFrame) { - borrowMap.put(pk_proc, borrowMap.get(pk_proc) + memPerFrame); - memBorrowedTotal = memBorrowedTotal + memPerFrame; - } - } - pass++; - - // If we were unable to borrow anything, just break - if (memBorrowedTotal == 0) { break; } - // If we got the memory we needed, break - if (memBorrowedTotal >= targetMem) { break; } - // If we've exceeded the number of tries in this loop, break - if (pass >= maxPasses) { break; } - } - - logger.info("attempted to borrow " + targetMem + " for host " - + targetProc.getName() + ", obtained " + memBorrowedTotal); - - if (memBorrowedTotal < targetMem) { - logger.warn("mem borrowed " + memBorrowedTotal + - " was less than the target memory of " + targetMem); - return false; - } - - /* - * This might fail... 
I'm not really sure if we should - * fail the whole operation or what. Just gonna let it ride for now. - */ - for (Map.Entry set: borrowMap.entrySet()) { - int success = getJdbcTemplate().update( - "UPDATE proc SET int_mem_reserved = int_mem_reserved - ? WHERE pk_proc=?", - set.getValue(), set.getKey()); - logger.info("transfering " + (set.getValue() * success) + " from " + set.getKey()); - } - - return true; - } - - public void updateReservedMemory(ProcInterface p, long value) { - getJdbcTemplate().update("UPDATE proc SET int_mem_reserved=? WHERE pk_proc=?", - value, p.getProcId()); - } - - /** - * Updates proc counts for the host, subscription, - * layer, job, folder, and proc point when a proc - * is destroyed. - * - * @param proc - */ - private void procDestroyed(VirtualProc proc) { - - - getJdbcTemplate().update( - "UPDATE " + - "host " + - "SET " + - "int_cores_idle = int_cores_idle + ?," + - "int_mem_idle = int_mem_idle + ?, " + - "int_gpu_idle = int_gpu_idle + ? " + - "WHERE " + - "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, proc.getHostId()); - - if (!proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + - "subscription " + - "SET " + - "int_cores = int_cores - ? " + - "WHERE " + - "pk_show = ? " + - "AND " + - "pk_alloc = ?", - proc.coresReserved, proc.getShowId(), - proc.getAllocationId()); - } - - getJdbcTemplate().update( - "UPDATE " + - "layer_resource " + - "SET " + - "int_cores = int_cores - ? " + - "WHERE " + - "pk_layer = ?", - proc.coresReserved, proc.getLayerId()); - - if (!proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_cores = int_cores - ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "folder_resource " + - "SET " + - "int_cores = int_cores - ? 
" + - "WHERE " + - "pk_folder = " + - "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "point " + - "SET " + - "int_cores = int_cores - ? " + - "WHERE " + - "pk_dept = " + - "(SELECT pk_dept FROM job WHERE pk_job=?) " + - "AND " + - "pk_show = " + - "(SELECT pk_show FROM job WHERE pk_job=?) ", - proc.coresReserved, proc.getJobId(), proc.getJobId()); - } - - if (proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_local_cores = int_local_cores - ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "host_local " + - "SET " + - "int_cores_idle = int_cores_idle + ?, " + - "int_mem_idle = int_mem_idle + ?, " + - "int_gpu_idle = int_gpu_idle + ? " + - "WHERE " + - "pk_job = ? " + - "AND " + - "pk_host = ? ", - proc.coresReserved, - proc.memoryReserved, - proc.gpuReserved, - proc.getJobId(), - proc.getHostId()); - } - } - - /** - * Updates proc counts for the host, subscription, - * layer, job, folder, and proc point when a new - * proc is created. - * - * @param proc - */ - private void procCreated(VirtualProc proc) { - - getJdbcTemplate().update( - "UPDATE " + - "host " + - "SET " + - "int_cores_idle = int_cores_idle - ?," + - "int_mem_idle = int_mem_idle - ?, " + - "int_gpu_idle = int_gpu_idle - ? " + - "WHERE " + - "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, proc.getHostId()); - - - /** - * Not keeping track of local cores this way. - */ - - if (!proc.isLocalDispatch) { - getJdbcTemplate().update( - "UPDATE " + - "subscription " + - "SET " + - "int_cores = int_cores + ? " + - "WHERE " + - "pk_show = ? " + - "AND " + - "pk_alloc = ?", - proc.coresReserved, proc.getShowId(), - proc.getAllocationId()); - } - - getJdbcTemplate().update( - "UPDATE " + - "layer_resource " + - "SET " + - "int_cores = int_cores + ? 
" + - "WHERE " + - "pk_layer = ?", - proc.coresReserved, proc.getLayerId()); - - if (!proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_cores = int_cores + ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "folder_resource " + - "SET " + - "int_cores = int_cores + ? " + - "WHERE " + - "pk_folder = " + - "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "point " + - "SET " + - "int_cores = int_cores + ? " + - "WHERE " + - "pk_dept = " + - "(SELECT pk_dept FROM job WHERE pk_job=?) " + - "AND " + - "pk_show = " + - "(SELECT pk_show FROM job WHERE pk_job=?) ", - proc.coresReserved, proc.getJobId(), proc.getJobId()); - } - - if (proc.isLocalDispatch) { - - getJdbcTemplate().update( - "UPDATE " + - "job_resource " + - "SET " + - "int_local_cores = int_local_cores + ? " + - "WHERE " + - "pk_job = ?", - proc.coresReserved, proc.getJobId()); - - getJdbcTemplate().update( - "UPDATE " + - "host_local " + - "SET " + - "int_cores_idle = int_cores_idle - ?, " + - "int_mem_idle = int_mem_idle - ? " + - "WHERE " + - "pk_job = ? " + - "AND " + - "pk_host = ?", - proc.coresReserved, - proc.memoryReserved, - proc.getJobId(), - proc.getHostId()); - } - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/RedirectDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/RedirectDaoJdbc.java deleted file mode 100644 index d5b62b877..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/RedirectDaoJdbc.java +++ /dev/null @@ -1,119 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.Redirect; -import com.imageworks.spcue.dao.RedirectDao; -import com.imageworks.spcue.grpc.host.RedirectType; - -public class RedirectDaoJdbc extends JdbcDaoSupport implements RedirectDao { - @Override - public boolean containsKey(String key) { - return getJdbcTemplate().queryForObject( - "SELECT count(1) FROM redirect WHERE pk_proc = ?", - Integer.class, - key) > 0; - } - - @Override - public int countRedirectsWithGroup(String groupId) { - return getJdbcTemplate().queryForObject( - "SELECT count(1) FROM redirect WHERE str_group_id = ?", - Integer.class, - groupId); - } - - @Override - public int deleteExpired() { - long cutoff = System.currentTimeMillis() - Redirect.EXPIRE_TIME; - return getJdbcTemplate().update( - "DELETE FROM redirect WHERE lng_creation_time < ?", - cutoff); - } - - @Override - public void put(String key, Redirect r) { - getJdbcTemplate().update( - "MERGE INTO redirect D " - + "USING (SELECT 1 FROM dual) S " - + "ON (D.pk_proc = ?) " - + "WHEN MATCHED THEN UPDATE SET " - + " D.str_group_id = ?, " - + " D.int_type = ?, " - + " D.str_destination_id = ?, " - + " D.str_name = ?, " - + " D.lng_creation_time = ? 
" - + "WHEN NOT MATCHED THEN INSERT (D.pk_proc, D.str_group_id, D.int_type, D.str_destination_id, D.str_name, D.lng_creation_time) " - + " VALUES ( ?, ?, ?, ?, ?, ?)", - key, - - r.getGroupId(), - r.getType().getNumber(), - r.getDestinationId(), - r.getDestinationName(), - r.getCreationTime(), - - key, - r.getGroupId(), - r.getType().getNumber(), - r.getDestinationId(), - r.getDestinationName(), - r.getCreationTime()); - } - - @Override - public Redirect remove(String key) { - Redirect r = null; - try { - r = getJdbcTemplate().queryForObject( - "SELECT str_group_id, int_type, str_destination_id, str_name, lng_creation_time " - + "FROM redirect " - + "WHERE pk_proc = ? " - + "FOR UPDATE", - new RowMapper() { - @Override - public Redirect mapRow(ResultSet rs, int rowNum) throws SQLException { - return new Redirect( - rs.getString("str_group_id"), - RedirectType.forNumber(rs.getInt("int_type")), - rs.getString("str_destination_id"), - rs.getString("str_name"), - rs.getLong("lng_creation_time")); - } - }, - key); - } - catch (EmptyResultDataAccessException e) { - return null; - } - - getJdbcTemplate().update( - "DELETE FROM redirect WHERE pk_proc = ?", - key); - - return r; - } -} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ServiceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ServiceDaoJdbc.java deleted file mode 100644 index 8b9225636..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ServiceDaoJdbc.java +++ /dev/null @@ -1,256 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.LinkedHashSet; - -import com.google.common.collect.Sets; -import org.apache.commons.lang.StringUtils; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.ServiceEntity; -import com.imageworks.spcue.ServiceOverrideEntity; -import com.imageworks.spcue.dao.ServiceDao; -import com.imageworks.spcue.util.SqlUtil; - -public class ServiceDaoJdbc extends JdbcDaoSupport implements ServiceDao { - - private static final String SPLITTER = " \\| "; - - private static final String JOINER = " | "; - - public static LinkedHashSet splitTags(String tags) { - LinkedHashSet set = Sets.newLinkedHashSet(); - for(String s: tags.split(SPLITTER)) { - set.add(s.replaceAll(" ", "")); - } - return set; - } - - public static String joinTags(LinkedHashSet tags) { - return StringUtils.join(tags, JOINER); - } - - public static final RowMapper SERVICE_MAPPER = - new RowMapper() { - public ServiceEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ServiceEntity s = new ServiceEntity(); - s.id = rs.getString("pk_service"); - s.name = rs.getString("str_name"); - s.minCores = rs.getInt("int_cores_min"); - s.maxCores = rs.getInt("int_cores_max"); - s.minMemory = rs.getLong("int_mem_min"); - s.minGpu = rs.getLong("int_gpu_min"); - s.threadable = rs.getBoolean("b_threadable"); - s.tags = splitTags(rs.getString("str_tags")); - return s; - } - }; - - public 
static final RowMapper SERVICE_OVERRIDE_MAPPER = - new RowMapper() { - public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) - throws SQLException { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.id = rs.getString("pk_show_service"); - s.name = rs.getString("str_name"); - s.minCores = rs.getInt("int_cores_min"); - s.maxCores = rs.getInt("int_cores_max"); - s.minMemory = rs.getLong("int_mem_min"); - s.minGpu = rs.getLong("int_gpu_min"); - s.threadable = rs.getBoolean("b_threadable"); - s.tags = splitTags(rs.getString("str_tags")); - s.showId = rs.getString("pk_show"); - return s; - } - }; - - private static final String QUERY_FOR_SERVICE = - "SELECT " + - "service.pk_service," + - "service.str_name," + - "service.b_threadable," + - "service.int_cores_min," + - "service.int_cores_max," + - "service.int_mem_min," + - "service.int_gpu_min," + - "service.str_tags " + - "FROM " + - "service "; - - @Override - public ServiceEntity get(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_SERVICE + " WHERE (pk_service=? OR str_name=?)", - SERVICE_MAPPER, id, id); - } - - private static final String QUERY_FOR_SERVICE_OVER = - "SELECT " + - "show_service.pk_show_service," + - "show_service.str_name," + - "show_service.b_threadable," + - "show_service.int_cores_min," + - "show_service.int_cores_max, "+ - "show_service.int_mem_min," + - "show_service.int_gpu_min," + - "show_service.str_tags, " + - "show.pk_show " + - "FROM " + - "show_service," + - "show " + - "WHERE " + - "show_service.pk_show = show.pk_show "; - - @Override - public ServiceOverrideEntity getOverride(String id, String show) { - return getJdbcTemplate() - .queryForObject( - QUERY_FOR_SERVICE_OVER - + " AND (show_service.pk_show_service=? OR show_service.str_name=?)" - + " AND (show.str_name=? 
OR show.pk_show=?)", - SERVICE_OVERRIDE_MAPPER, id, id, show, show); - } - - @Override - public ServiceOverrideEntity getOverride(String id) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_SERVICE_OVER + " AND (show_service.pk_show_service=? " + - "OR show_service.str_name=?)", - SERVICE_OVERRIDE_MAPPER, id, id); - } - - @Override - public boolean isOverridden(String service, String show) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM show_service, show WHERE " - + "show_service.pk_show = show.pk_show = ? " - + "AND show_service.str_name=? and show.str_name=?", - Integer.class, service, show) > 0; - } - - private static final String INSERT_SERVICE = - "INSERT INTO " + - "service " + - "(" + - "pk_service," + - "str_name," + - "b_threadable," + - "int_cores_min," + - "int_cores_max, "+ - "int_mem_min," + - "int_gpu_min," + - "str_tags" + - ") VALUES (?,?,?,?,?,?,?,?)"; - - @Override - public void insert(ServiceEntity service) { - service.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SERVICE, service.id, - service.name, service.threadable, service.minCores, - service.maxCores, service.minMemory, service.minGpu, - StringUtils.join(service.tags.toArray(), " | ")); - } - - private static final String INSERT_SERVICE_WITH_SHOW = - "INSERT INTO " + - "show_service " + - "(" + - "pk_show_service," + - "pk_show, " + - "str_name," + - "b_threadable," + - "int_cores_min," + - "int_cores_max," + - "int_mem_min," + - "int_gpu_min," + - "str_tags " + - ") VALUES (?,?,?,?,?,?,?,?,?)"; - - @Override - public void insert(ServiceOverrideEntity service) { - service.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SERVICE_WITH_SHOW, service.id, - service.showId, service.name, service.threadable, - service.minCores, service.maxCores, service.minMemory, - service.minGpu, joinTags(service.tags)); - } - - private static final String UPDATE_SERVICE = - "UPDATE " + - "service " + - "SET " + - "str_name=?," + - "b_threadable=?," + 
- "int_cores_min=?," + - "int_cores_max=?,"+ - "int_mem_min=?," + - "int_gpu_min=?," + - "str_tags=? " + - "WHERE " + - "pk_service = ?"; - - @Override - public void update(ServiceEntity service) { - getJdbcTemplate().update(UPDATE_SERVICE, service.name, - service.threadable, service.minCores, service.maxCores, - service.minMemory, service.minGpu, joinTags(service.tags), - service.getId()); - } - - private static final String UPDATE_SERVICE_WITH_SHOW = - "UPDATE " + - "show_service " + - "SET " + - "str_name=?," + - "b_threadable=?," + - "int_cores_min=?," + - "int_cores_max=?," + - "int_mem_min=?," + - "int_gpu_min=?," + - "str_tags=? " + - "WHERE " + - "pk_show_service = ?"; - - @Override - public void update(ServiceOverrideEntity service) { - getJdbcTemplate().update(UPDATE_SERVICE_WITH_SHOW, service.name, - service.threadable, service.minCores, service.maxCores, - service.minMemory, service.minGpu, joinTags(service.tags), - service.getId()); - } - - @Override - public void delete(ServiceEntity service) { - getJdbcTemplate().update( - "DELETE FROM service WHERE pk_service=?", service.getId()); - } - - @Override - public void delete(ServiceOverrideEntity service) { - getJdbcTemplate().update( - "DELETE FROM show_service WHERE pk_show_service=?", - service.getId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ShowDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ShowDaoJdbc.java deleted file mode 100644 index 0d6765e51..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/ShowDaoJdbc.java +++ /dev/null @@ -1,221 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.apache.commons.lang.StringUtils; -import org.springframework.dao.DataAccessException; -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.util.SqlUtil; - -public class ShowDaoJdbc extends JdbcDaoSupport implements ShowDao { - - private static final RowMapper SHOW_MAPPER = - new RowMapper() { - public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - ShowEntity show = new ShowEntity(); - show.name = rs.getString("str_name"); - show.id = rs.getString("pk_show"); - show.defaultMaxCores = rs.getInt("int_default_max_cores"); - show.defaultMinCores = rs.getInt("int_default_min_cores"); - show.active = rs.getBoolean("b_active"); - - if (rs.getString("str_comment_email") != null) { - show.commentMail = rs.getString("str_comment_email").split(","); - } - else { - show.commentMail = new String[0]; - } - return show; - } - }; - - private static final String GET_SHOW = - "SELECT " + - "show.pk_show, " + - "show.int_default_max_cores, " + - "show.int_default_min_cores, " + - "show.str_name, " + - "show.b_active, " + - "show.str_comment_email " + - "FROM " + - "show "; - - private static final String 
GET_SHOW_BY_ALIAS = - "SELECT " + - "show.pk_show, " + - "show.int_default_max_cores, " + - "show.int_default_min_cores, " + - "show_alias.str_name, " + - "show.b_active, " + - "show.str_comment_email " + - "FROM " + - "show, " + - "show_alias " + - "WHERE " + - "show.pk_show = show_alias.pk_show " ; - - public ShowEntity findShowDetail(String name) { - try { - return getJdbcTemplate().queryForObject(GET_SHOW + "WHERE show.str_name=?", - SHOW_MAPPER, name); - } catch (EmptyResultDataAccessException e) { - return getJdbcTemplate().queryForObject(GET_SHOW_BY_ALIAS + "AND show_alias.str_name = ?", - SHOW_MAPPER, name); - } - } - - public ShowEntity getShowDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_SHOW + "WHERE show.pk_show=?", SHOW_MAPPER, id); - } - - private static final String GET_PREFERRED_SHOW = - "SELECT " + - "show.pk_show, " + - "show.int_default_max_cores, " + - "show.int_default_min_cores, " + - "show.str_name, " + - "show.b_active, " + - "show.str_comment_email " + - "FROM " + - "show, "+ - "owner,"+ - "deed " + - "WHERE " + - "show.pk_show = owner.pk_show " + - "AND " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "deed.pk_host = ?"; - - public ShowEntity getShowDetail(HostInterface host) { - return getJdbcTemplate().queryForObject( - GET_PREFERRED_SHOW, SHOW_MAPPER, host.getHostId()); - } - - private static final String INSERT_SHOW = - "INSERT INTO show (pk_show,str_name) VALUES (?,?)"; - - public void insertShow(ShowEntity show) { - show.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SHOW, show.id, show.name); - } - - private static final String SHOW_EXISTS = - "SELECT " + - "COUNT(show.pk_show) " + - "FROM " + - "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show )" + - "WHERE " + - "(show.str_name = ? OR show_alias.str_name = ?) 
"; - public boolean showExists(String name) { - try { - return getJdbcTemplate().queryForObject(SHOW_EXISTS, - Integer.class, name, name) >= 1; - } catch (DataAccessException e) { - return false; - } - } - - @Override - public void delete(ShowInterface s) { - getJdbcTemplate().update("DELETE FROM point WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM folder WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM show_alias WHERE pk_show=?", - s.getShowId()); - getJdbcTemplate().update("DELETE FROM show WHERE pk_show=?", - s.getShowId()); - } - - public void updateShowDefaultMinCores(ShowInterface s, int val) { - if (val < 0) { - String msg = "Invalid argument, default min cores " + val + - "must be greater tham 0"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE show SET int_default_min_cores=? WHERE pk_show=?", - val, s.getShowId()); - } - - public void updateShowDefaultMaxCores(ShowInterface s, int val) { - if (val < 0) { - String msg = "Invalid argument, default max cores " + val + - "must be greater tham 0"; - throw new IllegalArgumentException(msg); - } - getJdbcTemplate().update( - "UPDATE show SET int_default_max_cores=? WHERE pk_show=?", - val, s.getShowId()); - } - - @Override - public void updateBookingEnabled(ShowInterface s, boolean enabled) { - getJdbcTemplate().update( - "UPDATE show SET b_booking_enabled = ? WHERE pk_show=?", - enabled, s.getShowId()); - } - - @Override - public void updateDispatchingEnabled(ShowInterface s, boolean enabled) { - getJdbcTemplate().update( - "UPDATE show SET b_dispatch_enabled = ? WHERE pk_show=?", - enabled, s.getShowId()); - } - - @Override - public void updateActive(ShowInterface s, boolean enabled) { - getJdbcTemplate().update( - "UPDATE show SET b_active= ? 
WHERE pk_show=?", - enabled, s.getShowId()); - } - - @Override - public void updateShowCommentEmail(ShowInterface s, String[] email) { - getJdbcTemplate().update( - "UPDATE show SET str_comment_email = ? WHERE pk_show=?", - StringUtils.join(email, ","), s.getShowId()); - } - - @Override - public void updateFrameCounters(ShowInterface s, int exitStatus) { - String col = "int_frame_success_count = int_frame_success_count + 1"; - if (exitStatus > 0) { - col = "int_frame_fail_count = int_frame_fail_count + 1"; - } - getJdbcTemplate().update( - "UPDATE show SET " + col + " WHERE pk_show=?", s.getShowId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/SubscriptionDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/SubscriptionDaoJdbc.java deleted file mode 100644 index 55548ae04..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/SubscriptionDaoJdbc.java +++ /dev/null @@ -1,229 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.springframework.dao.DataAccessException; -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.EntityModificationError; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.SubscriptionEntity; -import com.imageworks.spcue.SubscriptionInterface; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.dao.SubscriptionDao; -import com.imageworks.spcue.util.SqlUtil; - -public class SubscriptionDaoJdbc extends JdbcDaoSupport implements SubscriptionDao { - - private static final String IS_SHOW_OVER_SIZE = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores > s.int_size "; - - public boolean isShowOverSize(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, - Integer.class, show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - public boolean isShowOverSize(VirtualProc proc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_SIZE, - Integer.class, proc.getShowId(), proc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - private static final String IS_SHOW_AT_OR_OVER_SIZE = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? 
" + - "AND " + - "s.int_cores >= s.int_size "; - - public boolean isShowAtOrOverSize(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_SIZE, - Integer.class, show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return false; - } - } - - private static final String IS_SHOW_OVER_BURST = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores + ? > s.int_burst"; - - @Override - public boolean isShowOverBurst(ShowInterface show, AllocationInterface alloc, int coreUnits) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_OVER_BURST, - Integer.class, show.getShowId(), alloc.getAllocationId(), - coreUnits) > 0; - } catch (EmptyResultDataAccessException e) { - return true; - } - } - - private static final String IS_SHOW_AT_OR_OVER_BURST = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_show = ? " + - "AND " + - "s.pk_alloc = ? " + - "AND " + - "s.int_cores >= s.int_burst"; - - @Override - public boolean isShowAtOrOverBurst(ShowInterface show, AllocationInterface alloc) { - try { - return getJdbcTemplate().queryForObject(IS_SHOW_AT_OR_OVER_BURST, - Integer.class, show.getShowId(), alloc.getAllocationId()) > 0; - } catch (EmptyResultDataAccessException e) { - return true; - } - } - - private static final String GET_SUB = - "SELECT " + - "subscription.pk_alloc," + - "subscription.pk_show,"+ - "subscription.int_size,"+ - "subscription.int_burst,"+ - "subscription.pk_subscription,"+ - "(alloc.str_name || '.' 
|| show.str_name) AS str_name " + - "FROM " + - "subscription," + - "alloc," + - "show," + - "facility " + - "WHERE " + - "subscription.pk_show = show.pk_show " + - "AND " + - "subscription.pk_alloc = alloc.pk_alloc " + - "AND " + - "alloc.pk_facility = facility.pk_facility "; - - public static RowMapper SUB_MAPPER = new RowMapper() { - public SubscriptionEntity mapRow(ResultSet rs, int rowNum) throws SQLException { - SubscriptionEntity s = new SubscriptionEntity(); - s.allocationId = rs.getString("pk_alloc"); - s.burst = rs.getInt("int_burst"); - s.size = rs.getInt("int_size"); - s.name = rs.getString("str_name"); - s.showId = rs.getString("pk_show"); - s.id = rs.getString("pk_subscription"); - return s; - } - }; - - public SubscriptionEntity getSubscriptionDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_SUB + " AND pk_subscription=?", - SUB_MAPPER, id); - } - - private static final String INSERT_SUBSCRIPTION = - "INSERT INTO " + - "subscription " + - "( " + - "pk_subscription, pk_alloc, pk_show, int_size, int_burst"+ - ") " + - "VALUES (?,?,?,?,?)"; - - public void insertSubscription(SubscriptionEntity detail) { - detail.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_SUBSCRIPTION, - detail.id, detail.allocationId, detail.showId, detail.size, detail.burst); - } - private static final String HAS_RUNNING_PROCS = - "SELECT " + - "COUNT(1) " + - "FROM " + - "subscription s " + - "WHERE " + - "s.pk_subscription=? 
" + - "AND " + - "s.int_cores > 0 "; - - public boolean hasRunningProcs(SubscriptionInterface sub) { - try { - return getJdbcTemplate().queryForObject(HAS_RUNNING_PROCS, - Integer.class, sub.getSubscriptionId()) > 0; - } catch (DataAccessException e) { - return false; - } - } - - public void deleteSubscription(SubscriptionInterface sub) { - if (hasRunningProcs(sub)) { - throw new EntityModificationError("You cannot delete a subscription with running procs"); - } - getJdbcTemplate().update( - "DELETE FROM subscription WHERE pk_subscription=?", - sub.getSubscriptionId()); - } - - public void updateSubscriptionSize(SubscriptionInterface sub, int size) { - getJdbcTemplate().update( - "UPDATE subscription SET int_size=? WHERE pk_subscription=?", - size, sub.getSubscriptionId()); - } - - public void updateSubscriptionBurst(SubscriptionInterface sub, int size) { - getJdbcTemplate().update( - "UPDATE subscription SET int_burst=? WHERE pk_subscription=?", - size, sub.getSubscriptionId()); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/TaskDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/TaskDaoJdbc.java deleted file mode 100644 index 059c67808..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/TaskDaoJdbc.java +++ /dev/null @@ -1,259 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.Map; - -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.PointInterface; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.TaskEntity; -import com.imageworks.spcue.TaskInterface; -import com.imageworks.spcue.dao.TaskDao; -import com.imageworks.spcue.util.SqlUtil; - -/** - * DAO for managing department tasks - */ -public class TaskDaoJdbc extends JdbcDaoSupport implements TaskDao { - - @Override - public void deleteTasks(PointInterface cdept) { - getJdbcTemplate().update( - "DELETE FROM task WHERE pk_point=?", - cdept.getPointId()); - } - - @Override - public void deleteTasks(ShowInterface show, DepartmentInterface dept) { - getJdbcTemplate().update( - "DELETE FROM task WHERE pk_show=? AND pk_dept=?", - show.getShowId(), dept.getDepartmentId()); - } - - @Override - public void deleteTask(TaskInterface task) { - getJdbcTemplate().update( - "DELETE FROM task WHERE pk_task=?", - task.getId()); - } - - @Override - public boolean isManaged(TaskInterface t) { - try { - return getJdbcTemplate().queryForObject("SELECT b_managed FROM point WHERE pk_show=? 
and pk_dept=?", - Integer.class, t.getShowId(), t.getDepartmentId()) == 1; - } catch (org.springframework.dao.DataRetrievalFailureException e) { - return false; - } - } - - private static final String INSERT_TASK = - "INSERT INTO " + - "task " + - "( " + - "pk_task,"+ - "pk_point,"+ - "str_shot," + - "int_min_cores" + - ") " + - "VALUES (?,?,?,?)"; - - @Override - public void insertTask(TaskEntity task) { - task.id = SqlUtil.genKeyRandom(); - getJdbcTemplate().update(INSERT_TASK, - task.id, task.getPointId(), task.shot, task.minCoreUnits); - } - - private static final String GET_TASK_DETAIL = - "SELECT " + - "point.pk_dept,"+ - "point.pk_show,"+ - "point.pk_point,"+ - "task.pk_task," + - "task.int_min_cores + task.int_adjust_cores AS int_min_cores,"+ - "task.str_shot,"+ - "(task.str_shot || '.' || dept.str_name) AS str_name " + - "FROM " + - "point,"+ - "task,"+ - "dept, "+ - "show "+ - "WHERE " + - "point.pk_dept = dept.pk_dept "+ - "AND " + - "point.pk_show = show.pk_show " + - "AND " + - "point.pk_point = task.pk_point "; - - public static final RowMapper TASK_DETAIL_MAPPER = - new RowMapper() { - public TaskEntity mapRow(ResultSet rs, int row) throws SQLException { - TaskEntity t = new TaskEntity(); - t.pointId = rs.getString("pk_point"); - t.deptId = rs.getString("pk_dept"); - t.showId = rs.getString("pk_show"); - t.id = rs.getString("pk_task"); - t.minCoreUnits = rs.getInt("int_min_cores"); - t.name = rs.getString("str_name"); - t.shot = rs.getString("str_shot"); - return t; - } - }; - - @Override - public TaskEntity getTaskDetail(String id) { - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND task.pk_task=?", - TASK_DETAIL_MAPPER, id); - } - - @Override - public TaskEntity getTaskDetail(DepartmentInterface d, String shot) { - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND point.pk_dept = ? 
AND task.str_shot = ?", - TASK_DETAIL_MAPPER, d.getDepartmentId(), shot); - } - - @Override - public TaskEntity getTaskDetail(JobInterface j) { - Map map = getJdbcTemplate().queryForMap( - "SELECT pk_dept, str_shot FROM job WHERE job.pk_job=?", j.getJobId()); - - return getJdbcTemplate().queryForObject( - GET_TASK_DETAIL + " AND task.str_shot = ? AND point.pk_dept = ?", - TASK_DETAIL_MAPPER, map.get("str_shot").toString(), map.get("pk_dept").toString()); - } - - public void updateTaskMinCores(TaskInterface t, int value) { - if (value < 0) { - throw new IllegalArgumentException("min cores must be greater than or equal to 0"); - } - getJdbcTemplate().update( - "UPDATE task SET int_min_cores=? WHERE pk_task=?", - value, t.getTaskId()); - } - - @Override - public void adjustTaskMinCores(TaskInterface t, int value) { - if (value < 0) { - throw new IllegalArgumentException("min cores must be greater than or equal to 0"); - } - getJdbcTemplate().update( - "UPDATE task SET int_adjust_cores = ? - int_min_cores WHERE pk_task=?", - value, t.getTaskId()); - } - - @Override - public void mergeTask(TaskEntity t) { - String pkTask = null; - try { - pkTask = getJdbcTemplate().queryForObject( - "SELECT task.pk_task FROM task, point WHERE task.pk_point = point.pk_point AND " + - "task.str_shot = ? AND point.pk_point=?", String.class, - t.shot, t.getPointId()); - - } catch (EmptyResultDataAccessException dae) { - // Eat this, its possible that no task exists - } - - // No need to do anything with this task. - if (pkTask == null && t.minCoreUnits == 0) { - return; - } - - if (t.minCoreUnits == 0) { - getJdbcTemplate().update("DELETE FROM task WHERE pk_point=? AND str_shot=? ", - t.getPointId(), t.shot); - } - else if (getJdbcTemplate().update( - "UPDATE task SET int_min_cores=? WHERE pk_point=? 
AND str_shot=?", - t.minCoreUnits, t.getPointId(), t.shot) == 0) { - try { - insertTask(t); - } - catch (org.springframework.dao.DataIntegrityViolationException e) { - logger.warn("error inserting task " + t.shot + "," + e); - } - } - } - - private static final String CLEAR_TASK_ADJUSTMENTS = - "UPDATE " + - "task " + - "SET " + - "int_adjust_cores = 0 " + - "WHERE " + - "pk_show=? " + - "AND " + - "pk_dept = ? "; - - @Override - public void clearTaskAdjustments(PointInterface cdept) { - getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENTS, - cdept.getShowId(), cdept.getDepartmentId()); - } - - private static final String CLEAR_TASK_ADJUSTMENT = - "UPDATE " + - "task " + - "SET " + - "int_adjust_cores = 0 " + - "WHERE " + - "pk_task=?"; - - @Override - public void clearTaskAdjustment(TaskInterface t) { - getJdbcTemplate().update(CLEAR_TASK_ADJUSTMENT, t.getTaskId()); - } - - private static final String IS_JOB_MANAGED = - "SELECT " + - "COUNT(1) " + - "FROM " + - "job,"+ - "task,"+ - "point " + - "WHERE " + - "job.pk_show = point.pk_show " + - "AND " + - "job.pk_dept = point.pk_dept " + - "AND " + - "task.pk_point = point.pk_point " + - "AND " + - "task.str_shot = job.str_shot " + - "AND " + - "job.pk_job = ?"; - - @Override - public boolean isManaged(JobInterface j) { - return getJdbcTemplate().queryForObject(IS_JOB_MANAGED, - Integer.class, j.getJobId()) > 0; - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/TrackitDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/TrackitDaoJdbc.java deleted file mode 100644 index 7ab2f0d6c..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/TrackitDaoJdbc.java +++ /dev/null @@ -1,108 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.util.List; - -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.TrackitTaskDetail; -import com.imageworks.spcue.dao.TrackitDao; - -public class TrackitDaoJdbc extends JdbcDaoSupport implements TrackitDao { - - public static final RowMapper TASK_DETAIL_MAPPER = - new RowMapper() { - public TrackitTaskDetail mapRow(ResultSet rs, int rowNum) throws SQLException { - TrackitTaskDetail t = new TrackitTaskDetail(); - t.show = rs.getString("str_show"); - t.shot = rs.getString("str_shot"); - t.status = rs.getString("str_status"); - t.startDate = rs.getDate("dt_start_date"); - t.endDate = rs.getDate("dt_est_end"); - t.frameCount = rs.getInt("int_frame_count"); - t.cgSup = rs.getString("str_cgsup"); - t.weeks = rs.getInt("int_weeks"); - return t; - } - }; - - private static final String GET_TASKS = - "SELECT DISTINCT "+ - "(CASE " + - "WHEN " + - "(asset_task.dt_est_end - next_day(sysdate,'monday')) / 7 < 1 THEN 1 " + - "ELSE " + - "CAST(((asset_task.dt_est_end - next_day(sysdate,'monday')) / 7) AS NUMERIC(6,0)) " + - "END) AS int_weeks,"+ - "show.str_show_id AS str_show, " + - "asset.str_name AS str_shot, "+ - "asset_task.str_prod_status AS str_status,"+ - "asset_task.dt_start_date, "+ - "asset_task.dt_est_end, "+ - "shot.lng_cut_length AS int_frame_count, "+ - "xmltype(asset.xml_asset_metadata).extract('/header/cgsup/text()').getstringval() AS 
str_cgsup " + - "FROM "+ - "element.asset,"+ - "pts.show,"+ - "pts.shot,"+ - "element.asset_task,"+ - "element.asset_pipeline,"+ - "element.x_asset_type,"+ - "element.x_show_task,"+ - "element.asset_task_entitys,"+ - "contact.entity entity, "+ - "element.x_asset_task_status "+ - "WHERE "+ - "asset.pk_asset = asset_task.pk_asset "+ - "AND " + - "asset.pk_show = show.pk_show "+ - "AND " + - "asset.str_remote_handle = shot.pk_shot " + - "AND " + - "asset.pk_x_asset_type = x_asset_type.pk_x_asset_type " + - "AND " + - "asset_task.pk_asset_pipeline = asset_pipeline.pk_asset_pipeline " + - "AND " + - "asset_task.pk_asset_task = asset_task_entitys.pk_asset_task (+) "+ - "AND " + - "asset_task_entitys.pk_entity = entity.pk_entity (+) " + - "AND " + - "asset_task.pk_x_asset_task_status = x_asset_task_status.pk_x_asset_task_status " + - "AND " + - "asset_pipeline.pk_x_show_task = x_show_task.pk_x_show_task " + - "AND " + - "str_group_type = 'Shot' " + - "AND " + - "trunc(asset_task.dt_est_end) != '31-DEC-69' " + - "AND " + - "show.str_show_id = ? " + - "AND " + - "x_show_task.str_value = ? "; - - @Override - public List getTasks(String show, String dept) { - return getJdbcTemplate().query(GET_TASKS, TASK_DETAIL_MAPPER, show, dept); - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/WhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/WhiteboardDaoJdbc.java deleted file mode 100644 index 97ddcb927..000000000 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/oracle/WhiteboardDaoJdbc.java +++ /dev/null @@ -1,2201 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.dao.oracle; - -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.Arrays; -import java.util.List; -import java.util.Locale; - -import com.google.common.collect.Lists; -import com.google.common.collect.Sets; -import org.apache.log4j.Logger; -import org.springframework.jdbc.core.RowMapper; -import org.springframework.jdbc.core.support.JdbcDaoSupport; - -import com.imageworks.spcue.ActionInterface; -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.DeedEntity; -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.DependInterface; -import com.imageworks.spcue.FilterInterface; -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.MatcherInterface; -import com.imageworks.spcue.OwnerEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.dao.WhiteboardDao; -import com.imageworks.spcue.dao.criteria.FrameSearchInterface; -import com.imageworks.spcue.dao.criteria.FrameSearchFactory; -import com.imageworks.spcue.dao.criteria.HostSearchInterface; -import com.imageworks.spcue.dao.criteria.JobSearchInterface; -import com.imageworks.spcue.dao.criteria.ProcSearchInterface; -import com.imageworks.spcue.dao.criteria.ProcSearchFactory; -import 
com.imageworks.spcue.grpc.comment.Comment; -import com.imageworks.spcue.grpc.comment.CommentSeq; -import com.imageworks.spcue.grpc.department.Department; -import com.imageworks.spcue.grpc.department.DepartmentSeq; -import com.imageworks.spcue.grpc.depend.Depend; -import com.imageworks.spcue.grpc.depend.DependSeq; -import com.imageworks.spcue.grpc.depend.DependTarget; -import com.imageworks.spcue.grpc.depend.DependType; -import com.imageworks.spcue.grpc.facility.Allocation; -import com.imageworks.spcue.grpc.facility.AllocationSeq; -import com.imageworks.spcue.grpc.facility.AllocationStats; -import com.imageworks.spcue.grpc.facility.Facility; -import com.imageworks.spcue.grpc.facility.FacilitySeq; -import com.imageworks.spcue.grpc.filter.Action; -import com.imageworks.spcue.grpc.filter.ActionSeq; -import com.imageworks.spcue.grpc.filter.ActionType; -import com.imageworks.spcue.grpc.filter.ActionValueType; -import com.imageworks.spcue.grpc.filter.Filter; -import com.imageworks.spcue.grpc.filter.FilterSeq; -import com.imageworks.spcue.grpc.filter.FilterType; -import com.imageworks.spcue.grpc.filter.MatchSubject; -import com.imageworks.spcue.grpc.filter.MatchType; -import com.imageworks.spcue.grpc.filter.Matcher; -import com.imageworks.spcue.grpc.filter.MatcherSeq; -import com.imageworks.spcue.grpc.host.Deed; -import com.imageworks.spcue.grpc.host.DeedSeq; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.host.Host; -import com.imageworks.spcue.grpc.host.HostSeq; -import com.imageworks.spcue.grpc.host.LockState; -import com.imageworks.spcue.grpc.host.NestedHost; -import com.imageworks.spcue.grpc.host.Owner; -import com.imageworks.spcue.grpc.host.Proc; -import com.imageworks.spcue.grpc.host.ProcSeq; -import com.imageworks.spcue.grpc.host.ThreadMode; -import com.imageworks.spcue.grpc.job.CheckpointState; -import com.imageworks.spcue.grpc.job.Frame; -import com.imageworks.spcue.grpc.job.FrameSeq; -import 
com.imageworks.spcue.grpc.job.FrameState; -import com.imageworks.spcue.grpc.job.Group; -import com.imageworks.spcue.grpc.job.GroupSeq; -import com.imageworks.spcue.grpc.job.GroupStats; -import com.imageworks.spcue.grpc.job.Job; -import com.imageworks.spcue.grpc.job.JobSeq; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.grpc.job.JobStats; -import com.imageworks.spcue.grpc.job.Layer; -import com.imageworks.spcue.grpc.job.LayerSeq; -import com.imageworks.spcue.grpc.job.LayerStats; -import com.imageworks.spcue.grpc.job.LayerType; -import com.imageworks.spcue.grpc.job.UpdatedFrame; -import com.imageworks.spcue.grpc.job.UpdatedFrameCheckResult; -import com.imageworks.spcue.grpc.job.UpdatedFrameSeq; -import com.imageworks.spcue.grpc.limit.Limit; -import com.imageworks.spcue.grpc.renderpartition.RenderPartition; -import com.imageworks.spcue.grpc.renderpartition.RenderPartitionSeq; -import com.imageworks.spcue.grpc.renderpartition.RenderPartitionType; -import com.imageworks.spcue.grpc.service.Service; -import com.imageworks.spcue.grpc.service.ServiceOverride; -import com.imageworks.spcue.grpc.service.ServiceOverrideSeq; -import com.imageworks.spcue.grpc.service.ServiceSeq; -import com.imageworks.spcue.grpc.show.Show; -import com.imageworks.spcue.grpc.show.ShowSeq; -import com.imageworks.spcue.grpc.show.ShowStats; -import com.imageworks.spcue.grpc.subscription.Subscription; -import com.imageworks.spcue.grpc.subscription.SubscriptionSeq; -import com.imageworks.spcue.grpc.task.Task; -import com.imageworks.spcue.grpc.task.TaskSeq; -import com.imageworks.spcue.util.Convert; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.SqlUtil; - -public class WhiteboardDaoJdbc extends JdbcDaoSupport implements WhiteboardDao { - @SuppressWarnings("unused") - private static final Logger logger = Logger.getLogger(WhiteboardDaoJdbc.class); - - private FrameSearchFactory frameSearchFactory; - private ProcSearchFactory procSearchFactory; - 
- @Override - public Service getService(String id) { - return getJdbcTemplate().queryForObject( - GET_SERVICE + " WHERE (pk_service=? or str_name=?)", - SERVICE_MAPPER, id, id); - } - - @Override - public Service findService(String name) { - return getJdbcTemplate().queryForObject( - GET_SERVICE + " WHERE service.str_name=?", - SERVICE_MAPPER, name); - } - @Override - public ServiceSeq getDefaultServices() { - List services = getJdbcTemplate().query(GET_SERVICE, SERVICE_MAPPER); - return ServiceSeq.newBuilder().addAllServices(services).build(); - } - - @Override - public ServiceOverrideSeq getServiceOverrides(ShowInterface show) { - return ServiceOverrideSeq.newBuilder().addAllServiceOverrides(getJdbcTemplate().query( - GET_SERVICE_OVERRIDE + " AND show_service.pk_show = ?", - SERVICE_OVERRIDE_MAPPER, show.getId())).build(); - } - - @Override - public ServiceOverride getServiceOverride( - ShowInterface show, String name) { - return getJdbcTemplate().queryForObject ( - GET_SERVICE_OVERRIDE + - " AND show_service.pk_show=? AND (show_service.str_name=? OR" + - " show_service.pk_show_service=?)", - SERVICE_OVERRIDE_MAPPER, show.getId(), name, name); - } - - @Override - public Filter getFilter(FilterInterface filter) { - return getJdbcTemplate().queryForObject(GET_FILTER + " AND pk_filter=?", - FILTER_MAPPER, filter.getFilterId()); - } - - @Override - public Filter findFilter(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " AND filter.pk_show=? AND filter.str_name=?", - FILTER_MAPPER, show.getShowId(), name); - } - - @Override - public Filter findFilter(String show, String name) { - return getJdbcTemplate().queryForObject( - GET_FILTER + " AND show.str_name=? AND filter.str_name=?", - FILTER_MAPPER, show, name); - } - - @Override - public FilterSeq getFilters(ShowInterface show) { - return FilterSeq.newBuilder().addAllFilters( - getJdbcTemplate().query( - GET_FILTER + " AND show.pk_show=? 
ORDER BY f_order ASC", - FILTER_MAPPER, show.getShowId())).build(); - } - - @Override - public ActionSeq getActions(FilterInterface filter) { - return ActionSeq.newBuilder().addAllActions(getJdbcTemplate().query( - GET_ACTION + " AND filter.pk_filter=? ORDER BY b_stop ASC, ts_created ASC ", - ACTION_MAPPER, filter.getFilterId())).build(); - } - - @Override - public MatcherSeq getMatchers(FilterInterface filter) { - return MatcherSeq.newBuilder().addAllMatchers(getJdbcTemplate().query( - GET_MATCHER + " AND filter.pk_filter=? ORDER BY ts_created ASC", - MATCHER_MAPPER, filter.getFilterId())).build(); - } - - @Override - public Action getAction(ActionInterface action) { - return getJdbcTemplate().queryForObject( - GET_ACTION + " AND action.pk_action=?", - ACTION_MAPPER, action.getActionId()); - } - - @Override - public Matcher getMatcher(MatcherInterface matcher) { - return getJdbcTemplate().queryForObject( - GET_MATCHER + " AND matcher.pk_matcher=?", - MATCHER_MAPPER, matcher.getMatcherId()); - } - - @Override - public Show getShow(String id) { - return getJdbcTemplate().queryForObject( - GET_SHOW + " AND show.pk_show=?", - SHOW_MAPPER, id); - } - - @Override - public ShowSeq getShows() { - List shows = getJdbcTemplate().query(GET_SHOW, SHOW_MAPPER); - return ShowSeq.newBuilder().addAllShows(shows).build(); - } - - @Override - public ShowSeq getActiveShows() { - List shows = getJdbcTemplate().query(GET_SHOW + " AND b_active=?", - SHOW_MAPPER, true); - return ShowSeq.newBuilder().addAllShows(shows).build(); - } - - @Override - public Show findShow(String name) { - return getJdbcTemplate().queryForObject(GET_SHOW + " AND show.str_name=?", - SHOW_MAPPER, name); - } - - @Override - public Subscription getSubscription(String id) { - return getJdbcTemplate().queryForObject( - GET_SUBSCRIPTION + " AND subscription.pk_subscription=?", - SUBSCRIPTION_MAPPER, id); - } - - @Override - public Subscription findSubscription(String show, String alloc) { - return 
getJdbcTemplate().queryForObject( - GET_SUBSCRIPTION + - " AND show.str_name=? AND alloc.str_name=?", - SUBSCRIPTION_MAPPER, show, alloc); - } - - @Override - public SubscriptionSeq getSubscriptions(ShowInterface show) { - List subscriptions = getJdbcTemplate().query( - GET_SUBSCRIPTION + " AND show.pk_show=?", - SUBSCRIPTION_MAPPER, show.getShowId()); - return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); - } - - @Override - public SubscriptionSeq getSubscriptions(AllocationInterface alloc) { - List subscriptions = getJdbcTemplate().query( - GET_SUBSCRIPTION + " AND subscription.pk_alloc=?", - SUBSCRIPTION_MAPPER, alloc.getAllocationId()); - return SubscriptionSeq.newBuilder().addAllSubscriptions(subscriptions).build(); - } - - @Override - public Allocation findAllocation(String name) { - return getJdbcTemplate().queryForObject( - GET_ALLOCATION + " AND alloc.str_name=?", - ALLOCATION_MAPPER, name); - } - - @Override - public Allocation getAllocation(String id) { - return getJdbcTemplate().queryForObject(GET_ALLOCATION + " AND alloc.pk_alloc=?", - ALLOCATION_MAPPER, id); - } - - @Override - public AllocationSeq getAllocations() { - return AllocationSeq.newBuilder().addAllAllocations(getJdbcTemplate().query( - GET_ALLOCATION + " ORDER BY alloc.str_name ", - ALLOCATION_MAPPER)).build(); - } - - @Override - public AllocationSeq getAllocations( - com.imageworks.spcue.FacilityInterface facility) { - return AllocationSeq.newBuilder().addAllAllocations(getJdbcTemplate().query( - GET_ALLOCATION + " AND alloc.pk_facility = ?", - ALLOCATION_MAPPER, facility.getFacilityId())).build(); - } - - @Override - public JobSeq getJobs(GroupInterface group) { - List jobs = getJdbcTemplate().query( - GET_PENDING_JOBS + " AND job.pk_folder=? 
ORDER BY job.str_name ASC", - JOB_MAPPER, group.getId()); - return JobSeq.newBuilder().addAllJobs(jobs).build(); - } - - @Override - public List getJobNames(JobSearchInterface r) { - return getJdbcTemplate().query(r.getFilteredQuery(GET_JOB_NAMES), - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return SqlUtil.getString(rs, 1); - } - }, r.getValuesArray()); - } - - @Override - public JobSeq getJobs(JobSearchInterface r) { - List jobs = getJdbcTemplate().query( - r.getFilteredQuery(GET_JOB) + "ORDER BY job.str_name ASC", - JOB_MAPPER, - r.getValuesArray()); - return JobSeq.newBuilder().addAllJobs(jobs).build(); - } - - @Override - public Job findJob(String name) { - return getJdbcTemplate().queryForObject( - GET_PENDING_JOBS + " AND job.str_name=?", - JOB_MAPPER, name.toLowerCase()); - } - - @Override - public Job getJob(String id) { - return getJdbcTemplate().queryForObject( - GET_JOB + " AND job.pk_job=?", - JOB_MAPPER, id); - } - - @Override - public Layer getLayer(String id) { - return getJdbcTemplate().queryForObject( - GET_LAYER_WITH_LIMITS + " WHERE layer.pk_layer=?", - LAYER_MAPPER, id); - } - - @Override - public Layer findLayer(String job, String layer) { - return getJdbcTemplate().queryForObject( - GET_LAYER_WITH_LIMITS + " WHERE job.str_state='PENDING' AND job.str_name=? AND layer.str_name=?", - LAYER_MAPPER, job, layer); - } - - @Override - public LayerSeq getLayers(JobInterface job) { - String query = GET_LAYER_WITH_LIMITS + " WHERE layer.pk_job=? 
ORDER BY layer.int_dispatch_order ASC"; - List layers = getJdbcTemplate().query( - query, LAYER_MAPPER, job.getJobId()); - return LayerSeq.newBuilder().addAllLayers(layers).build(); - } - - public List getLimitNames(String layerId) { - return getJdbcTemplate().query(GET_LIMIT_NAMES, - LIMIT_NAME_MAPPER, layerId); - } - - @Override - public List getLimits(LayerInterface layer) { - List limits = getJdbcTemplate().query( - GET_LIMIT_FROM_LAYER_ID, LIMIT_MAPPER, layer.getLayerId()); - return limits; - } - - @Override - public GroupSeq getGroups(ShowInterface show) { - List groups = getJdbcTemplate().query( - GET_GROUPS + " AND folder.pk_show=? ORDER BY folder_level.int_level ASC, folder.str_name ASC ", - GROUP_MAPPER, show.getShowId()); - return GroupSeq.newBuilder().addAllGroups(groups).build(); - } - - @Override - public GroupSeq getGroups(GroupInterface group) { - List groups = getJdbcTemplate().query( - GET_GROUPS + " AND folder.pk_parent_folder=? ORDER BY folder_level.int_level ASC, folder.f_order DESC, folder.str_name ASC ", - GROUP_MAPPER, group.getGroupId()); - return GroupSeq.newBuilder().addAllGroups(groups).build(); - } - - @Override - public Group getGroup(String id) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND folder.pk_folder=?", - GROUP_MAPPER, id); - } - - @Override - public Group getRootGroup(ShowInterface show) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND show.pk_show=? 
AND folder.b_default=?", - GROUP_MAPPER, show.getShowId(), true); - } - - @Override - public Frame findFrame(String job, String layer, int frame) { - return getJdbcTemplate().queryForObject(FIND_FRAME, FRAME_MAPPER, job, layer, frame); - } - - @Override - public Frame getFrame(String id) { - return getJdbcTemplate().queryForObject( - GET_FRAME + " AND frame.pk_frame=?", FRAME_MAPPER, id); - } - - @Override - public FrameSeq getFrames(FrameSearchInterface r) { - List frames = getJdbcTemplate().query( - r.getSortedQuery(GET_FRAMES_CRITERIA), FRAME_MAPPER, r.getValuesArray()); - return FrameSeq.newBuilder().addAllFrames(frames).build(); - } - - @Override - public Depend getDepend(DependInterface depend) { - return getJdbcTemplate().queryForObject( - GET_DEPEND + " WHERE pk_depend=?",DEPEND_MAPPER, depend.getId()); - } - - @Override - public Depend getDepend(com.imageworks.spcue.depend.AbstractDepend depend) { - return getJdbcTemplate().queryForObject( - GET_DEPEND + " WHERE pk_depend=?",DEPEND_MAPPER, depend.getId()); - } - - @Override - public DependSeq getWhatDependsOnThis(JobInterface job) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_job_depend_on=?", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatDependsOnThis(LayerInterface layer) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_on=?", - DEPEND_MAPPER, layer.getLayerId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatDependsOnThis(FrameInterface frame) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_frame_depend_on=?", - DEPEND_MAPPER, frame.getFrameId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(JobInterface job) { - List depends = 
getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er IS NULL AND " + - "pk_frame_depend_er IS NULL AND pk_job_depend_er=?", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(LayerInterface layer) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_parent IS NULL AND pk_layer_depend_er=?", - DEPEND_MAPPER, layer.getLayerId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getWhatThisDependsOn(FrameInterface frame) { - /* - * This should show anything that is making the frame dependent. - */ - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE " + - "(pk_job_depend_er=? AND str_type IN ('JOB_ON_JOB','JOB_ON_LAYER','JOB_ON_FRAME')) OR " + - "(pk_layer_depend_er=? AND str_type IN ('LAYER_ON_JOB','LAYER_ON_LAYER','LAYER_ON_FRAME')) " + - "OR (pk_frame_depend_er=?)", - DEPEND_MAPPER, frame.getJobId(), frame.getLayerId(), frame.getFrameId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public DependSeq getDepends(JobInterface job) { - List depends = getJdbcTemplate().query( - GET_DEPEND + " WHERE pk_job_depend_er=? AND str_type != 'FRAME_ON_FRAME'", - DEPEND_MAPPER, job.getJobId()); - return DependSeq.newBuilder().addAllDepends(depends).build(); - } - - @Override - public Depend getDepend(String id) { - return getJdbcTemplate().queryForObject( - GET_DEPEND + " WHERE pk_depend=?",DEPEND_MAPPER,id); - } - - @Override - public Group findGroup(String show, String group) { - return getJdbcTemplate().queryForObject( - GET_GROUPS + " AND show.str_name=? 
AND folder.str_name=?", - GROUP_MAPPER, show, group); - } - - @Override - public Host findHost(String name) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND host.str_name=?", HOST_MAPPER, name); - } - - @Override - public HostSeq getHosts(HostSearchInterface r) { - List hosts = getJdbcTemplate().query(r.getFilteredQuery(GET_HOST), HOST_MAPPER, - r.getValuesArray()); - return HostSeq.newBuilder().addAllHosts(hosts).build(); - } - - @Override - public Host getHost(String id) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND host.pk_host=?", HOST_MAPPER, id); - } - - @Override - public ProcSeq getProcs(HostInterface host) { - ProcSearchInterface r = procSearchFactory.create(); - r.filterByHost(host); - r.sortByHostName(); - r.sortByDispatchedTime(); - return ProcSeq.newBuilder().addAllProcs(getProcs(r).getProcsList()).build(); - } - - @Override - public ProcSeq getProcs(ProcSearchInterface p) { - p.sortByHostName(); - p.sortByDispatchedTime(); - List procs = getJdbcTemplate().query(p.getFilteredQuery(GET_PROC), - PROC_MAPPER, p.getValuesArray()); - return ProcSeq.newBuilder().addAllProcs(procs).build(); - } - - @Override - public CommentSeq getComments(HostInterface h) { - List comments = getJdbcTemplate().query( - GET_HOST_COMMENTS, COMMENT_MAPPER, h.getHostId()); - return CommentSeq.newBuilder().addAllComments(comments).build(); - } - - @Override - public CommentSeq getComments(JobInterface j) { - List comments = getJdbcTemplate().query( - GET_JOB_COMMENTS, COMMENT_MAPPER, j.getJobId()); - return CommentSeq.newBuilder().addAllComments(comments).build(); - } - - @Override - public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, - List layers, int epochSeconds) { - - if ((System.currentTimeMillis() / 1000) - epochSeconds > 60) { - long timeDiff = System.currentTimeMillis() - epochSeconds; - throw new IllegalArgumentException("the last update timestamp cannot be over " + - "a minute off the current time, difference was: " + 
timeDiff); - } - - UpdatedFrameCheckResult.Builder resultBuilder = UpdatedFrameCheckResult.newBuilder(); - resultBuilder.setState(JobState.valueOf(getJdbcTemplate().queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", String.class, job.getJobId()))); - - FrameSearchInterface r = frameSearchFactory.create(job); - r.filterByLayers(layers); - r.filterByChangeDate(epochSeconds); - r.setMaxResults(100); - - List updatedFrameList = getJdbcTemplate().query( - r.getFilteredQuery(GET_UPDATED_FRAME), UPDATED_FRAME_MAPPER, r.getValuesArray()); - resultBuilder.setUpdatedFrames(UpdatedFrameSeq.newBuilder().addAllUpdatedFrames(updatedFrameList).build()); - resultBuilder.setServerTime((int) (System.currentTimeMillis() / 1000) - 1); - - return resultBuilder.build(); - } - - @Override - public Department getDepartment(ShowInterface show, String name) { - return getJdbcTemplate().queryForObject( - GET_DEPARTMENT, DEPARTMENT_MAPPER, - show.getShowId(), name); - } - - @Override - public DepartmentSeq getDepartments ( - ShowInterface show) { - List departments = getJdbcTemplate().query( - GET_DEPARTMENTS, DEPARTMENT_MAPPER, - show.getShowId()); - return DepartmentSeq.newBuilder().addAllDepartments(departments).build(); - } - - @Override - public List getDepartmentNames() { - return getJdbcTemplate().query("SELECT str_name FROM dept ORDER BY str_name ASC", - new RowMapper() { - public String mapRow(ResultSet rs, int row) throws SQLException { - return SqlUtil.getString(rs, "str_name"); - } - }); - } - - @Override - public Task getTask(ShowInterface show, DepartmentInterface dept, String shot) { - return getJdbcTemplate().queryForObject( - GET_TASK + " AND point.pk_show=? AND point.pk_dept=? 
AND task.str_shot=?", - TASK_MAPPER, show.getShowId(), dept.getDepartmentId(), shot); - } - - @Override - public TaskSeq getTasks(ShowInterface show, DepartmentInterface dept) { - if (dept == null) { - return TaskSeq.newBuilder().addAllTasks( - getJdbcTemplate().query( - GET_TASK + " AND point.pk_show=? ORDER BY task.str_shot", - TASK_MAPPER, show.getShowId())).build(); - } else { - return TaskSeq.newBuilder().addAllTasks(getJdbcTemplate().query( - GET_TASK + " AND point.pk_show=? AND point.pk_dept=? ORDER BY task.str_shot", - TASK_MAPPER, show.getShowId(), dept.getDepartmentId())).build(); - } - } - - - @Override - public DeedSeq getDeeds(OwnerEntity owner) { - List deeds = getJdbcTemplate().query( - QUERY_FOR_DEED + " AND owner.pk_owner=?", - DEED_MAPPER, owner.getId()); - return DeedSeq.newBuilder().addAllDeeds(deeds).build(); - } - - @Override - public DeedSeq getDeeds(ShowInterface show) { - List deeds = getJdbcTemplate().query( - QUERY_FOR_DEED + " AND show.pk_show=?", - DEED_MAPPER, show.getId()); - return DeedSeq.newBuilder().addAllDeeds(deeds).build(); - } - - @Override - public Host getHost(DeedEntity deed) { - return getJdbcTemplate().queryForObject( - GET_HOST + " AND host.pk_host=?", - HOST_MAPPER, deed.id); - } - - @Override - public Deed getDeed(HostInterface host) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_DEED + " AND host.pk_host=?", - DEED_MAPPER, host.getHostId()); - } - - @Override - public HostSeq getHosts(OwnerEntity owner) { - StringBuilder sb = new StringBuilder(4096); - String query = GET_HOST; - query = query.replace("FROM " , "FROM owner, deed,"); - sb.append(query); - sb.append("AND deed.pk_host = host.pk_host "); - sb.append("AND deed.pk_owner = owner.pk_owner "); - sb.append("AND owner.pk_owner = ?"); - - List hosts = getJdbcTemplate().query( - sb.toString(), HOST_MAPPER, owner.getId()); - return HostSeq.newBuilder().addAllHosts(hosts).build(); - } - - @Override - public Owner getOwner(DeedEntity deed) { - return 
getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " AND " + - "pk_owner = (SELECT deed.pk_owner FROM deed " + - "WHERE pk_deed=?)", OWNER_MAPPER, deed.getId()); - } - - @Override - public Owner getOwner(HostInterface host) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " AND " + - "pk_owner = (SELECT deed.pk_owner FROM deed " + - "WHERE pk_host=?)", OWNER_MAPPER, host.getHostId()); - } - - @Override - public List getOwners(ShowInterface show) { - return getJdbcTemplate().query( - QUERY_FOR_OWNER + " AND owner.pk_show=?", OWNER_MAPPER, - show.getShowId()); - } - - - @Override - public RenderPartition getRenderPartition(LocalHostAssignment l) { - return getJdbcTemplate().queryForObject(QUERY_FOR_RENDER_PART + - "WHERE host_local.pk_host_local = ?", - RENDER_PARTION_MAPPER, l.getId()); - } - - - @Override - public RenderPartitionSeq getRenderPartitions(HostInterface host) { - List partitions = getJdbcTemplate().query(QUERY_FOR_RENDER_PART + - "WHERE host_local.pk_host = ?", - RENDER_PARTION_MAPPER, host.getHostId()); - return RenderPartitionSeq.newBuilder().addAllRenderPartitions(partitions).build(); - } - - - @Override - public Owner getOwner(String name) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_OWNER + " AND " + - "(" + - "owner.str_username = ? " + - "OR " + - "owner.pk_owner = ?" + - ")", OWNER_MAPPER, name, name); - } - - @Override - public Facility getFacility(String name) { - return getJdbcTemplate().queryForObject( - QUERY_FOR_FACILITY + - " WHERE facility.pk_facility = ? OR facility.str_name = ?", - FACILITY_MAPPER, name, name); - } - - @Override - public FacilitySeq getFacilities() { - return FacilitySeq.newBuilder().addAllFacilities(getJdbcTemplate().query( - QUERY_FOR_FACILITY, FACILITY_MAPPER)).build(); - } - - @Override - public Limit findLimit(String name) { - String findLimitQuery = QUERY_FOR_LIMIT + - " WHERE limit_record.str_name = ? 
" + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(findLimitQuery, LIMIT_MAPPER, name); - } - - @Override - public Limit getLimit(String id) { - String getLimitQuery = QUERY_FOR_LIMIT + - " WHERE limit_record.pk_limit_record = ? "+ - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().queryForObject(getLimitQuery, LIMIT_MAPPER, id); - } - - @Override - public List getLimits() { - String getLimitsQuery = QUERY_FOR_LIMIT + - " GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - return getJdbcTemplate().query(getLimitsQuery, LIMIT_MAPPER); - } - - /* - * Row Mappers - */ - - public static final RowMapper LIMIT_MAPPER = - new RowMapper() { - public Limit mapRow(ResultSet rs, int rowNum) throws SQLException { - return Limit.newBuilder() - .setId(SqlUtil.getString(rs, "pk_limit_record")) - .setName(SqlUtil.getString(rs, "str_name")) - .setMaxValue(rs.getInt("int_max_value")) - .setCurrentRunning(rs.getInt("int_current_running")) - .build(); - } - }; - - public static final RowMapper MATCHER_MAPPER = - new RowMapper() { - public Matcher mapRow(ResultSet rs, int rowNum) throws SQLException { - return Matcher.newBuilder() - .setId(SqlUtil.getString(rs, "pk_matcher")) - .setInput(SqlUtil.getString(rs, "str_value")) - .setSubject(MatchSubject.valueOf(SqlUtil.getString(rs, "str_subject"))) - .setType(MatchType.valueOf(SqlUtil.getString(rs, "str_match"))) - .build(); - } - }; - - public static final RowMapper FILTER_MAPPER = - new RowMapper() { - public Filter mapRow(ResultSet rs, int rowNum) throws SQLException { - return Filter.newBuilder() - .setId(SqlUtil.getString(rs, "pk_filter")) - .setType(FilterType.valueOf(SqlUtil.getString(rs, "str_type"))) - .setOrder(rs.getFloat("f_order")) - .setName(SqlUtil.getString(rs, 
"str_name")) - .setEnabled(rs.getBoolean("b_enabled")) - .build(); - } - }; - - public static final RowMapper ACTION_MAPPER = - new RowMapper() { - public Action mapRow(ResultSet rs, int rowNum) throws SQLException { - Action.Builder builder = Action.newBuilder() - .setId(SqlUtil.getString(rs, "pk_action")) - .setBooleanValue(false) - .setIntegerValue(0) - .setFloatValue(0f) - .setStringValue("") - .setType(ActionType.valueOf(SqlUtil.getString(rs, "str_action"))) - .setValueType(ActionValueType.valueOf(SqlUtil.getString(rs, "str_value_type"))); - - switch (builder.getValueType()) { - case GROUP_TYPE: - builder.setGroupValue(SqlUtil.getString(rs, "pk_folder")); - break; - case STRING_TYPE: - builder.setStringValue(SqlUtil.getString(rs, "str_value")); - break; - case INTEGER_TYPE: - builder.setIntegerValue(rs.getInt("int_value")); - break; - case FLOAT_TYPE: - builder.setFloatValue(rs.getFloat("float_value")); - break; - case BOOLEAN_TYPE: - builder.setBooleanValue(rs.getBoolean("b_value")); - break; - } - return builder.build(); - } - }; - - public static final RowMapper FACILITY_MAPPER = - new RowMapper() { - public Facility mapRow(ResultSet rs, int rowNum) throws SQLException { - return Facility.newBuilder() - .setName(rs.getString("str_name")) - .setId(rs.getString("pk_facility")) - .build(); - } - }; - - - public static final RowMapper DEED_MAPPER = - new RowMapper() { - public Deed mapRow(ResultSet rs, int rowNum) throws SQLException { - return Deed.newBuilder() - .setId(SqlUtil.getString(rs, "pk_deed")) - .setHost(SqlUtil.getString(rs, "str_host")) - .setOwner(SqlUtil.getString(rs, "str_username")) - .setBlackout(rs.getBoolean("b_blackout")) - .setBlackoutStartTime(rs.getInt("int_blackout_start")) - .setBlackoutStopTime(rs.getInt("int_blackout_stop")) - .build(); - } - }; - - public static final RowMapper - RENDER_PARTION_MAPPER = new RowMapper() { - public RenderPartition mapRow(ResultSet rs, int rowNum) throws SQLException { - - RenderPartition.Builder 
builder = RenderPartition.newBuilder() - .setId(SqlUtil.getString(rs, "pk_host_local")) - .setCores(rs.getInt("int_cores_max") - rs.getInt("int_cores_idle")) - .setMaxCores(rs.getInt("int_cores_max")) - .setThreads(rs.getInt("int_threads")) - .setMaxMemory(rs.getLong("int_mem_max")) - .setMemory( rs.getLong("int_mem_max") - rs.getLong("int_mem_idle")) - .setMaxGpu(rs.getLong("int_gpu_max")) - .setHost(SqlUtil.getString(rs, "str_host_name")) - .setJob(SqlUtil.getString(rs, "str_job_name")) - .setRenderPartType(RenderPartitionType.valueOf(SqlUtil.getString(rs, "str_type"))) - .setLayer("") - .setFrame(""); - - if (SqlUtil.getString(rs, "str_layer_name") != null) { - builder.setLayer(SqlUtil.getString(rs, "str_layer_name")); - } - - if (SqlUtil.getString(rs, "str_frame_name") != null) { - builder.setFrame(SqlUtil.getString(rs, "str_frame_name")); - } - - return builder.build(); - - } - }; - - public static final RowMapper - OWNER_MAPPER = new RowMapper() { - public Owner mapRow(ResultSet rs, int rowNum) throws SQLException { - return Owner.newBuilder() - .setName(SqlUtil.getString(rs, "str_username")) - .setId(SqlUtil.getString(rs, "pk_owner")) - .setShow(SqlUtil.getString(rs, "str_show")) - .setHostCount(rs.getInt("host_count")) - .build(); - } - }; - - public static final RowMapper DEPARTMENT_MAPPER = - new RowMapper() { - public Department mapRow(ResultSet rs, int row) throws SQLException { - return Department.newBuilder() - .setId(SqlUtil.getString(rs, "pk_point")) - .setName(SqlUtil.getString(rs, "str_name")) - .setDept(SqlUtil.getString(rs, "str_dept")) - .setTiManaged(rs.getBoolean("b_managed")) - .setTiTask(SqlUtil.getString(rs, "str_ti_task")) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .build(); - } - }; - - public static final RowMapper PROC_MAPPER = - new RowMapper() { - public Proc mapRow(ResultSet rs, int row) throws SQLException { - return Proc.newBuilder() - .setId(SqlUtil.getString(rs, "pk_proc")) - 
.setName(CueUtil.buildProcName(SqlUtil.getString(rs, "host_name"), - rs.getInt("int_cores_reserved"))) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores_reserved"))) - .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpu(rs.getLong("int_gpu_reserved")) - .setUsedMemory(rs.getLong("int_mem_used")) - .setFrameName(SqlUtil.getString(rs, "frame_name")) - .setJobName(SqlUtil.getString(rs, "job_name")) - .setGroupName(SqlUtil.getString(rs, "folder_name")) - .setShowName(SqlUtil.getString(rs, "show_name")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setBookedTime((int) (rs.getTimestamp("ts_booked").getTime() / 1000)) - .setDispatchTime((int) (rs.getTimestamp("ts_dispatched").getTime() / 1000)) - .setUnbooked(rs.getBoolean("b_unbooked")) - .setLogPath(String.format("%s/%s.%s.rqlog", - SqlUtil.getString(rs, "str_log_dir"), SqlUtil.getString(rs, "job_name"), - SqlUtil.getString(rs, "frame_name"))) - .setRedirectTarget(SqlUtil.getString(rs, "str_redirect")) - .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))) - .build(); - } - }; - - public static final RowMapper TASK_MAPPER = - new RowMapper() { - public Task mapRow(ResultSet rs, int row) throws SQLException { - return Task.newBuilder() - .setId(SqlUtil.getString(rs, "pk_task")) - .setDept(SqlUtil.getString(rs, "str_dept")) - .setShot(SqlUtil.getString(rs, "str_shot")) - .setMinCores(Convert.coreUnitsToWholeCores(rs.getInt("int_min_cores"))) - .setAdjustCores(Convert.coreUnitsToWholeCores(rs.getInt("int_adjust_cores"))) - .build(); - } - }; - - public static final RowMapper COMMENT_MAPPER = - new RowMapper() { - - public Comment mapRow(ResultSet rs, int row) throws SQLException { - return Comment.newBuilder() - .setId(SqlUtil.getString(rs, "pk_comment")) - .setMessage(SqlUtil.getString(rs, "str_message")) - .setSubject(SqlUtil.getString(rs, "str_subject")) - .setTimestamp((int)(rs.getTimestamp("ts_created").getTime() / 1000)) - 
.setUser(SqlUtil.getString(rs, "str_user")) - .build(); - } - }; - - public static NestedHost.Builder mapNestedHostBuilder(ResultSet rs) throws SQLException { - NestedHost.Builder builder = NestedHost.newBuilder() - .setId(SqlUtil.getString(rs, "pk_host")) - .setName(SqlUtil.getString(rs, "host_name")) - .setAllocName(SqlUtil.getString(rs, "alloc_name")) - .setBootTime((int) (rs.getTimestamp("ts_booted").getTime() / 1000)) - .setFreeMcp(rs.getLong("int_mcp_free")) - .setFreeMemory(rs.getLong("int_mem_free")) - .setFreeSwap(rs.getLong("int_swap_free")) - .setFreeGpu(rs.getLong("int_gpu_free")) - .setLoad(rs.getInt("int_load")) - .setNimbyEnabled(rs.getBoolean("b_nimby")) - .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))) - .setMemory(rs.getLong("int_mem")) - .setIdleMemory(rs.getLong("int_mem_idle")) - .setGpu(rs.getLong("int_gpu")) - .setIdleGpu(rs.getLong("int_gpu_idle")) - .setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))) - .setTotalMcp(rs.getLong("int_mcp_total")) - .setTotalMemory(rs.getLong("int_mem_total")) - .setTotalSwap(rs.getLong("int_swap_total")) - .setTotalGpu(rs.getLong("int_gpu_total")) - .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) - .setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))) - .setHasComment(rs.getBoolean("b_comment")) - .setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]) - .setOs(SqlUtil.getString(rs, "str_os")); - - String tags = SqlUtil.getString(rs, "str_tags"); - if (tags != null) - builder.addAllTags(Arrays.asList(tags.split(" "))); - return builder; - } - - public static Host.Builder mapHostBuilder(ResultSet rs) throws SQLException { - Host.Builder builder = Host.newBuilder(); - builder.setId(SqlUtil.getString(rs, "pk_host")); - builder.setName(SqlUtil.getString(rs, "host_name")); - builder.setAllocName(SqlUtil.getString(rs, "alloc_name")); - builder.setBootTime((int) 
(rs.getTimestamp("ts_booted").getTime() / 1000)); - builder.setFreeMcp(rs.getLong("int_mcp_free")); - builder.setFreeMemory(rs.getLong("int_mem_free")); - builder.setFreeSwap(rs.getLong("int_swap_free")); - builder.setFreeGpu(rs.getLong("int_gpu_free")); - builder.setLoad(rs.getInt("int_load")); - builder.setNimbyEnabled(rs.getBoolean("b_nimby")); - builder.setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))); - builder.setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))); - builder.setMemory(rs.getLong("int_mem")); - builder.setIdleMemory(rs.getLong("int_mem_idle")); - builder.setGpu(rs.getLong("int_gpu")); - builder.setIdleGpu(rs.getLong("int_gpu_idle")); - builder.setState(HardwareState.valueOf(SqlUtil.getString(rs, "host_state"))); - builder.setTotalMcp(rs.getLong("int_mcp_total")); - builder.setTotalMemory(rs.getLong("int_mem_total")); - builder.setTotalSwap(rs.getLong("int_swap_total")); - builder.setTotalGpu(rs.getLong("int_gpu_total")); - builder.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)); - builder.setLockState(LockState.valueOf(SqlUtil.getString(rs, "str_lock_state"))); - builder.setHasComment(rs.getBoolean("b_comment")); - builder.setThreadMode(ThreadMode.values()[rs.getInt("int_thread_mode")]); - builder.setOs(SqlUtil.getString(rs, "str_os")); - - String tags = SqlUtil.getString(rs, "str_tags"); - if (tags != null) - builder.addAllTags(Arrays.asList(tags.split(" "))); - return builder; - } - - public static final RowMapper HOST_MAPPER = - new RowMapper() { - public Host mapRow(ResultSet rs, int row) throws SQLException { - Host.Builder builder = mapHostBuilder(rs); - return builder.build(); - } - }; - - public static final RowMapper DEPEND_MAPPER = - new RowMapper() { - public Depend mapRow(ResultSet rs, int rowNum) throws SQLException { - return Depend.newBuilder() - .setId(SqlUtil.getString(rs, "pk_depend")) - .setActive(rs.getBoolean("b_active")) - .setAnyFrame(rs.getBoolean("b_any")) - 
.setDependErFrame(SqlUtil.getString(rs, "depend_er_frame")) - .setDependErLayer(SqlUtil.getString(rs, "depend_er_layer")) - .setDependErJob(SqlUtil.getString(rs, "depend_er_job")) - .setDependOnFrame(SqlUtil.getString(rs, "depend_on_frame")) - .setDependOnLayer(SqlUtil.getString(rs, "depend_on_layer")) - .setDependOnJob(SqlUtil.getString(rs, "depend_on_job")) - .setType(DependType.valueOf(SqlUtil.getString(rs, "str_type"))) - .setTarget(DependTarget.valueOf(SqlUtil.getString(rs, "str_target"))) - .build(); - } - }; - - public static final RowMapper ALLOCATION_MAPPER = - new RowMapper() { - public Allocation mapRow(ResultSet rs, int rowNum) throws SQLException { - return Allocation.newBuilder() - .setId(rs.getString("pk_alloc")) - .setName(SqlUtil.getString(rs, "str_name")) - .setFacility(SqlUtil.getString(rs, "facility_name")) - .setTag(SqlUtil.getString(rs, "str_tag")) - .setBillable(rs.getBoolean("b_billable")) - .setStats(AllocationStats.newBuilder() - .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setAvailableCores(Convert.coreUnitsToCores(rs.getInt("int_available_cores"))) - .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_idle_cores"))) - .setRunningCores(Convert.coreUnitsToCores(rs.getInt("int_running_cores"))) - .setLockedCores(Convert.coreUnitsToCores(rs.getInt("int_locked_cores"))) - .setHosts(rs.getInt("int_hosts")) - .setDownHosts(rs.getInt("int_down_hosts")) - .setLockedHosts(rs.getInt("int_locked_hosts")) - .build()) - .build(); - } - }; - - private static final RowMapper GROUP_MAPPER = - new RowMapper() { - - public Group mapRow(ResultSet rs, int rowNum) throws SQLException { - GroupStats stats = GroupStats.newBuilder() - .setDeadFrames(rs.getInt("int_dead_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingJobs(rs.getInt("int_job_count")) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) 
- .build(); - return Group.newBuilder() - .setId(SqlUtil.getString(rs, "pk_folder")) - .setName(SqlUtil.getString(rs, "group_name")) - .setDepartment(SqlUtil.getString(rs, "str_dept")) - .setDefaultJobPriority(rs.getInt("int_job_priority")) - .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_job_min_cores"))) - .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_job_max_cores"))) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setLevel(rs.getInt("int_level")) - .setParentId(SqlUtil.getString(rs, "pk_parent_folder")) - .setGroupStats(stats) - .build(); - } - }; - - public static final RowMapper JOB_MAPPER = - new RowMapper() { - public Job mapRow(ResultSet rs, int rowNum) throws SQLException { - Job.Builder jobBuilder = Job.newBuilder() - .setLogDir(SqlUtil.getString(rs, "str_log_dir")) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) - .setName(SqlUtil.getString(rs, "str_name")) - .setPriority(rs.getInt("int_priority")) - .setShot(SqlUtil.getString(rs, "str_shot")) - .setShow(SqlUtil.getString(rs, "str_show")) - .setFacility(SqlUtil.getString(rs, "facility_name")) - .setGroup(SqlUtil.getString(rs, "group_name")) - .setState(JobState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setUser(SqlUtil.getString(rs, "str_user")) - .setIsPaused(rs.getBoolean("b_paused")) - .setHasComment(rs.getBoolean("b_comment")) - .setAutoEat(rs.getBoolean("b_autoeat")) - .setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)) - .setOs(SqlUtil.getString(rs, "str_os")); - - int uid = rs.getInt("int_uid"); - if (!rs.wasNull()) { - jobBuilder.setUid(uid); - } - - Timestamp ts = rs.getTimestamp("ts_stopped"); - if (ts != null) { - jobBuilder.setStopTime((int) (ts.getTime() / 1000)); - } - else { - jobBuilder.setStopTime(0); - } - - jobBuilder.setJobStats(mapJobStats(rs)); - 
return jobBuilder.build(); - } - }; - - public static JobStats mapJobStats(ResultSet rs) throws SQLException { - - JobStats.Builder statsBuilder = JobStats.newBuilder() - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setMaxRss(rs.getLong("int_max_rss")) - .setTotalFrames(rs.getInt("int_frame_count")) - .setTotalLayers(rs.getInt("int_layer_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setSucceededFrames(rs.getInt("int_succeeded_count")) - .setEatenFrames(rs.getInt("int_eaten_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingFrames(rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) - .setFailedCoreSec(rs.getLong("int_core_time_fail")) - .setRenderedCoreSec(rs.getLong("int_core_time_success")) - .setTotalCoreSec( rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) - .setRenderedFrameCount( rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setHighFrameSec(rs.getInt("int_clock_time_high")); - - if (statsBuilder.getRenderedFrameCount() > 0) { - statsBuilder.setAvgCoreSec( - (int) (rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); - statsBuilder.setAvgCoreSec( - (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); - statsBuilder.setRemainingCoreSec( - (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); - } - else { - statsBuilder.setAvgFrameSec(0); - statsBuilder.setAvgCoreSec(0); - statsBuilder.setRemainingCoreSec(0); - } - return statsBuilder.build(); - } - - public static final RowMapper LAYER_MAPPER = - new RowMapper() { - public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { - Layer.Builder builder = Layer.newBuilder() - .setId(SqlUtil.getString(rs, "pk_layer")) - .setParentId(SqlUtil.getString(rs, "pk_job")) - 
.setChunkSize(rs.getInt("int_chunk_size")) - .setDispatchOrder(rs.getInt("int_dispatch_order")) - .setName(SqlUtil.getString(rs, "str_name")) - .setRange(SqlUtil.getString(rs, "str_range")) - .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_cores_min"))) - .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_cores_max"))) - .setIsThreadable(rs.getBoolean("b_threadable")) - .setMinMemory(rs.getLong("int_mem_min")) - .setMinGpu(rs.getLong("int_gpu_min")) - .setType(LayerType.valueOf(SqlUtil.getString(rs, "str_type"))) - .addAllTags(Sets.newHashSet( - SqlUtil.getString(rs, "str_tags"). - replaceAll(" ","").split("\\|"))) - .addAllServices(Arrays.asList(SqlUtil.getString(rs, "str_services").split(","))) - .addAllLimits(Arrays.asList(SqlUtil.getString(rs,"str_limit_names").split(","))) - .setMemoryOptimizerEnabled(rs.getBoolean("b_optimize")); - - LayerStats.Builder statsBuilder = LayerStats.newBuilder() - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setMaxRss(rs.getLong("int_max_rss")) - .setTotalFrames(rs.getInt("int_total_count")) - .setWaitingFrames(rs.getInt("int_waiting_count")) - .setRunningFrames(rs.getInt("int_running_count")) - .setDeadFrames(rs.getInt("int_dead_count")) - .setSucceededFrames(rs.getInt("int_succeeded_count")) - .setEatenFrames(rs.getInt("int_eaten_count")) - .setDependFrames(rs.getInt("int_depend_count")) - .setPendingFrames( - rs.getInt("int_waiting_count") + rs.getInt("int_depend_count")) - .setFailedCoreSec(rs.getLong("int_core_time_fail")) - .setRenderedCoreSec(rs.getLong("int_core_time_success")) - .setTotalCoreSec( - rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) - .setRenderedFrameCount( rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setHighFrameSec(rs.getInt("int_clock_time_high")) - .setLowFrameSec(rs.getInt("int_clock_time_low")); - - if (statsBuilder.getRenderedFrameCount() > 0) { - statsBuilder.setAvgFrameSec( - (int) 
(rs.getLong("int_clock_time_success") / statsBuilder.getRenderedFrameCount())); - statsBuilder.setAvgCoreSec( - (int) (statsBuilder.getRenderedCoreSec() / statsBuilder.getRenderedFrameCount())); - statsBuilder.setRemainingCoreSec( - (long) statsBuilder.getPendingFrames() * statsBuilder.getAvgCoreSec()); - } - else { - statsBuilder.setAvgFrameSec(0); - statsBuilder.setAvgCoreSec(0); - statsBuilder.setRemainingCoreSec(0); - } - builder.setLayerStats(statsBuilder.build()); - return builder.build(); - } - }; - - private static final RowMapper LIMIT_NAME_MAPPER = - new RowMapper() { - public String mapRow(ResultSet rs, int rowNum) throws SQLException { - return rs.getString("str_name"); - } - }; - - public static final RowMapper SUBSCRIPTION_MAPPER = - new RowMapper() { - public Subscription mapRow(ResultSet rs, int rowNum) throws SQLException { - return Subscription.newBuilder() - .setId(SqlUtil.getString(rs, "pk_subscription")) - .setBurst(rs.getInt("int_burst")) - .setName(SqlUtil.getString(rs, "name")) - .setReservedCores(rs.getInt("int_cores")) - .setSize(rs.getInt("int_size")) - .setAllocationName(SqlUtil.getString(rs, "alloc_name")) - .setShowName(SqlUtil.getString(rs, "show_name")) - .setFacility(SqlUtil.getString(rs, "facility_name")) - .build(); - } - }; - - public static final RowMapper UPDATED_FRAME_MAPPER = - new RowMapper() { - public UpdatedFrame mapRow(ResultSet rs, int rowNum) throws SQLException { - UpdatedFrame.Builder builder = UpdatedFrame.newBuilder() - .setId(SqlUtil.getString(rs, "pk_frame")) - .setExitStatus(rs.getInt("int_exit_status")) - .setMaxRss(rs.getInt("int_mem_max_used")) - .setRetryCount(rs.getInt("int_retries")) - .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setUsedMemory(rs.getInt("int_mem_used")); - - if (SqlUtil.getString(rs, "str_host") != null) { - builder.setLastResource(String.format(Locale.ROOT, "%s/%2.2f", - SqlUtil.getString(rs, "str_host"), - Convert.coreUnitsToCores(rs.getInt("int_cores")))); - 
}else { - builder.setLastResource(""); - } - - java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); - if (ts_started != null) { - builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); - } - else { - builder.setStartTime(0); - } - java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); - if (ts_stopped!= null) { - builder.setStopTime((int) (ts_stopped.getTime() / 1000)); - } - else { - builder.setStopTime(0); - } - - return builder.build(); - } - }; - - public static final RowMapper FRAME_MAPPER = - new RowMapper() { - public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { - Frame.Builder builder = Frame.newBuilder() - .setId(SqlUtil.getString(rs, "pk_frame")) - .setName(SqlUtil.getString(rs, "str_name")) - .setExitStatus(rs.getInt("int_exit_status")) - .setMaxRss(rs.getLong("int_mem_max_used")) - .setNumber(rs.getInt("int_number")) - .setDispatchOrder(rs.getInt("int_dispatch_order")) - .setRetryCount(rs.getInt("int_retries")) - .setState(FrameState.valueOf(SqlUtil.getString(rs, "str_state"))) - .setLayerName(SqlUtil.getString(rs, "layer_name")) - .setUsedMemory(rs.getLong("int_mem_used")) - .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpu(rs.getLong("int_gpu_reserved")) - .setCheckpointState(CheckpointState.valueOf( - SqlUtil.getString(rs, "str_checkpoint_state"))) - .setCheckpointCount(rs.getInt("int_checkpoint_count")); - - if (SqlUtil.getString(rs, "str_host") != null) { - builder.setLastResource(CueUtil.buildProcName(SqlUtil.getString(rs, "str_host"), - rs.getInt("int_cores"))); - } else { - builder.setLastResource(""); - } - - java.sql.Timestamp ts_started = rs.getTimestamp("ts_started"); - if (ts_started != null) { - builder.setStartTime((int) (rs.getTimestamp("ts_started").getTime() / 1000)); - } - else { - builder.setStartTime(0); - } - java.sql.Timestamp ts_stopped = rs.getTimestamp("ts_stopped"); - if (ts_stopped!= null) { - builder.setStopTime((int) (ts_stopped.getTime() / 1000)); 
- } - else { - builder.setStopTime(0); - } - - builder.setTotalCoreTime(rs.getInt("int_total_past_core_time")); - if (builder.getState() == FrameState.RUNNING) { - builder.setTotalCoreTime(builder.getTotalCoreTime() + - (int)(System.currentTimeMillis() / 1000 - builder.getStartTime()) * rs.getInt("int_cores") / 100); - } - return builder.build(); - } - }; - - private static final RowMapper SERVICE_MAPPER = - new RowMapper() { - public Service mapRow(ResultSet rs, int rowNum) throws SQLException { - return Service.newBuilder() - .setId(SqlUtil.getString(rs, "pk_service")) - .setName(SqlUtil.getString(rs, "str_name")) - .setThreadable(rs.getBoolean("b_threadable")) - .setMinCores(rs.getInt("int_cores_min")) - .setMaxCores(rs.getInt("int_cores_max")) - .setMinMemory(rs.getInt("int_mem_min")) - .setMinGpu(rs.getInt("int_gpu_min")) - .addAllTags(Lists.newArrayList(ServiceDaoJdbc.splitTags( - SqlUtil.getString(rs, "str_tags")))) - .build(); - } - }; - - private static final RowMapper SERVICE_OVERRIDE_MAPPER = - new RowMapper() { - public ServiceOverride mapRow(ResultSet rs, int rowNum) throws SQLException { - Service data = Service.newBuilder() - .setId(SqlUtil.getString(rs, "pk_show_service")) - .setName(SqlUtil.getString(rs, "str_name")) - .setThreadable(rs.getBoolean("b_threadable")) - .setMinCores(rs.getInt("int_cores_min")) - .setMaxCores(rs.getInt("int_cores_max")) - .setMinMemory(rs.getInt("int_mem_min")) - .setMinGpu(rs.getInt("int_gpu_min")) - .addAllTags(Lists.newArrayList(ServiceDaoJdbc.splitTags( - SqlUtil.getString(rs, "str_tags")))) - .build(); - return ServiceOverride.newBuilder() - .setId(SqlUtil.getString(rs, "pk_show_service")) - .setData(data) - .build(); - } - }; - - public static final RowMapper SHOW_MAPPER = - new RowMapper() { - public Show mapRow(ResultSet rs, int rowNum) throws SQLException { - ShowStats stats = ShowStats.newBuilder() - .setPendingFrames(rs.getInt("int_pending_count")) - .setRunningFrames(rs.getInt("int_running_count")) - 
.setDeadFrames(rs.getInt("int_dead_count")) - .setCreatedFrameCount(rs.getLong("int_frame_insert_count")) - .setCreatedJobCount(rs.getLong("int_job_insert_count")) - .setRenderedFrameCount(rs.getLong("int_frame_success_count")) - .setFailedFrameCount(rs.getLong("int_frame_fail_count")) - .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) - .setPendingJobs(rs.getInt("int_job_count")) - .build(); - return Show.newBuilder() - .setId(SqlUtil.getString(rs, "pk_show")) - .setName(SqlUtil.getString(rs, "str_name")) - .setActive(rs.getBoolean("b_active")) - .setDefaultMaxCores(Convert.coreUnitsToCores(rs.getInt("int_default_max_cores"))) - .setDefaultMinCores(Convert.coreUnitsToCores(rs.getInt("int_default_min_cores"))) - .setBookingEnabled(rs.getBoolean("b_booking_enabled")) - .setDispatchEnabled(rs.getBoolean("b_dispatch_enabled")) - .setCommentEmail(SqlUtil.getString(rs, "str_comment_email")) - .setShowStats(stats) - .build(); - } - }; - /* - * Queries - */ - - private static final String GET_JOB_NAMES = - "SELECT " + - "job.str_name "+ - "FROM " + - "job," + - "show " + - "WHERE " + - "job.pk_show = show.pk_show " + - "AND " + - "job.str_state = 'PENDING' "; - - private static final String GET_HOST_COMMENTS = - "SELECT " + - "* " + - "FROM " + - "comments " + - "WHERE " + - "pk_host=? 
" + - "ORDER BY " + - "ts_created ASC"; - - private static final String GET_FILTER = - "SELECT " + - "filter.* " + - "FROM " + - "filter," + - "show " + - "WHERE " + - "filter.pk_show = show.pk_show"; - - private static final String GET_FRAME = - "SELECT " + - "frame.pk_frame, " + - "frame.int_exit_status,"+ - "frame.str_name,"+ - "frame.int_number,"+ - "frame.int_dispatch_order,"+ - "frame.ts_started,"+ - "frame.ts_stopped,"+ - "frame.int_retries,"+ - "frame.str_state,"+ - "frame.str_host,"+ - "frame.int_cores,"+ - "frame.int_mem_max_used," + - "frame.int_mem_used, " + - "frame.int_mem_reserved, " + - "frame.int_gpu_reserved, " + - "frame.str_checkpoint_state,"+ - "frame.int_checkpoint_count,"+ - "frame.int_total_past_core_time,"+ - "layer.str_name AS layer_name," + - "job.str_name AS job_name "+ - "FROM "+ - "job, " + - "layer, "+ - "frame " + - "WHERE " + - "frame.pk_layer = layer.pk_layer "+ - "AND "+ - "frame.pk_job= job.pk_job"; - - private static final String FIND_FRAME = GET_FRAME + " " + - "AND " + - "job.str_state='PENDING' " + - "AND " + - "job.str_name=? " + - "AND " + - "layer.str_name=? 
" + - "AND " + - "frame.int_number=?"; - - private static final String GET_PROC = - "SELECT " + - "host.str_name AS host_name,"+ - "job.str_name AS job_name,"+ - "job.str_log_dir,"+ - "folder.str_name as folder_name, "+ - "show.str_name AS show_name,"+ - "frame.str_name AS frame_name,"+ - "layer.str_services,"+ - "proc.pk_proc,"+ - "proc.pk_host,"+ - "proc.int_cores_reserved,"+ - "proc.int_mem_reserved, "+ - "proc.int_mem_used,"+ - "proc.int_mem_max_used,"+ - "proc.int_gpu_reserved, "+ - "proc.ts_ping,"+ - "proc.ts_booked,"+ - "proc.ts_dispatched,"+ - "proc.b_unbooked,"+ - "redirect.str_name AS str_redirect "+ - "FROM " + - "proc,"+ - "host, "+ - "alloc,"+ - "frame, "+ - "layer,"+ - "job,"+ - "folder, "+ - "show, " + - "redirect "+ - "WHERE " + - "proc.pk_host = host.pk_host " + - "AND " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "proc.pk_frame = frame.pk_frame " + - "AND " + - "proc.pk_layer = layer.pk_layer "+ - "AND " + - "proc.pk_job = job.pk_job " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "proc.pk_show = show.pk_show " + - "AND " + - "proc.pk_proc = redirect.pk_proc (+) "; - - - private static final String GET_JOB_COMMENTS = - "SELECT " + - "* " + - "FROM " + - "comments " + - "WHERE " + - "pk_job=? 
" + - "ORDER BY " + - "ts_created ASC"; - - private static final String GET_UPDATED_FRAME = - "SELECT " + - "frame.pk_frame, " + - "frame.int_exit_status,"+ - "frame.ts_started,"+ - "frame.ts_stopped,"+ - "frame.int_retries,"+ - "frame.str_state,"+ - "frame.str_host,"+ - "frame.int_cores,"+ - "NVL(proc.int_mem_max_used, frame.int_mem_max_used) AS int_mem_max_used," + - "NVL(proc.int_mem_used, frame.int_mem_used) AS int_mem_used " + - "FROM "+ - "job, " + - "layer,"+ - "frame LEFT JOIN proc ON (proc.pk_frame = frame.pk_frame) " + - "WHERE " + - "frame.pk_layer = layer.pk_layer "+ - "AND "+ - "frame.pk_job= job.pk_job"; - - private static final String GET_ALLOCATION = - "SELECT " + - "alloc.pk_alloc, " + - "alloc.str_name, " + - "alloc.str_tag, " + - "alloc.b_billable,"+ - "facility.str_name AS facility_name,"+ - "vs_alloc_usage.int_cores,"+ - "vs_alloc_usage.int_idle_cores,"+ - "vs_alloc_usage.int_running_cores,"+ - "vs_alloc_usage.int_available_cores,"+ - "vs_alloc_usage.int_locked_cores,"+ - "vs_alloc_usage.int_hosts,"+ - "vs_alloc_usage.int_locked_hosts,"+ - "vs_alloc_usage.int_down_hosts "+ - "FROM " + - "alloc, " + - "facility, " + - "vs_alloc_usage " + - "WHERE " + - "alloc.pk_alloc = vs_alloc_usage.pk_alloc " + - "AND " + - "alloc.pk_facility = facility.pk_facility " + - "AND " + - "alloc.b_enabled = 1"; - - - private static final String GET_MATCHER = - "SELECT " + - "filter.pk_show," + - "matcher.* " + - "FROM " + - "filter,"+ - "matcher " + - "WHERE " + - "filter.pk_filter = matcher.pk_filter"; - - private static final String GET_DEPARTMENT = - "SELECT " + - "dept.str_name AS str_dept," + - "show.str_name || '.' || dept.str_name AS str_name, " + - "pk_point,"+ - "str_ti_task,"+ - "int_cores,"+ - "int_min_cores,"+ - "b_managed " + - "FROM " + - "point," + - "dept,"+ - "show " + - "WHERE " + - "point.pk_show = show.pk_show " + - "AND " + - "point.pk_dept = dept.pk_dept " + - "AND " + - "point.pk_show = ? 
" + - "AND " + - "dept.str_name = ?"; - - private static final String GET_DEPARTMENTS = - "SELECT " + - "dept.str_name AS str_dept," + - "show.str_name || '.' || dept.str_name AS str_name, " + - "pk_point,"+ - "str_ti_task,"+ - "int_cores,"+ - "int_min_cores,"+ - "b_managed " + - "FROM " + - "point," + - "dept,"+ - "show " + - "WHERE " + - "point.pk_show = show.pk_show " + - "AND " + - "point.pk_dept = dept.pk_dept " + - "AND " + - "point.pk_show = ? "; - - private static final String QUERY_FOR_OWNER = - "SELECT " + - "owner.pk_owner," + - "owner.str_username,"+ - "show.str_name AS str_show, " + - "(SELECT COUNT(1) FROM deed WHERE deed.pk_owner = owner.pk_owner) " + - " AS host_count " + - "FROM " + - "owner, " + - "show " + - "WHERE " + - "owner.pk_show = show.pk_show"; - - private static final String QUERY_FOR_RENDER_PART = - "SELECT " + - "host_local.pk_host_local,"+ - "host_local.int_cores_idle,"+ - "host_local.int_cores_max,"+ - "host_local.int_threads,"+ - "host_local.int_mem_idle,"+ - "host_local.int_mem_max,"+ - "host_local.int_gpu_idle,"+ - "host_local.int_gpu_max,"+ - "host_local.str_type,"+ - "(SELECT str_name FROM host WHERE host.pk_host = host_local.pk_host) " + - "AS str_host_name,"+ - "(SELECT str_name FROM job WHERE job.pk_job = host_local.pk_job) " + - "AS str_job_name,"+ - "(SELECT str_name FROM layer WHERE layer.pk_layer = host_local.pk_layer) " + - "AS str_layer_name,"+ - "(SELECT str_name FROM frame WHERE frame.pk_frame = host_local.pk_frame) " + - "AS str_frame_name " + - "FROM " + - "host_local "; - - private static final String QUERY_FOR_FACILITY = - "SELECT " + - "facility.pk_facility," + - "facility.str_name " + - "FROM " + - "facility "; - - private static final String QUERY_FOR_LIMIT = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value, " + - "SUM(layer_stat.int_running_count) AS int_current_running " + - "FROM " + - "limit_record " + - "LEFT JOIN " + - "layer_limit ON 
layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN " + - "layer ON layer.pk_layer = layer_limit.pk_layer " + - "LEFT JOIN " + - "layer_stat ON layer_stat.pk_layer = layer.pk_layer "; - - private static final String GET_LIMIT_FROM_LAYER_ID = - "SELECT " + - "limit_record.pk_limit_record, " + - "limit_record.str_name, " + - "limit_record.int_max_value, " + - "SUM(layer_stat.int_running_count) AS int_current_running " + - "FROM " + - "limit_record " + - "LEFT JOIN " + - "layer_limit ON layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "LEFT JOIN " + - "layer ON layer.pk_layer = layer_limit.pk_layer " + - "LEFT JOIN " + - "layer_stat ON layer_stat.pk_layer = layer.pk_layer " + - "WHERE " + - "layer_limit.pk_layer = ? " + - "GROUP BY " + - "limit_record.str_name, " + - "limit_record.pk_limit_record, " + - "limit_record.int_max_value"; - - public static final String GET_GROUPS = - "SELECT " + - "show.pk_show, " + - "show.str_name AS str_show," + - "dept.str_name AS str_dept," + - "folder.pk_folder," + - "folder.pk_parent_folder," + - "folder.str_name AS group_name," + - "folder.int_job_priority,"+ - "folder.int_job_min_cores," + - "folder.int_job_max_cores," + - "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores,"+ - "folder.b_default, " + - "folder_level.int_level, " + - "c.int_waiting_count, " + - "c.int_depend_count, " + - "c.int_running_count,"+ - "c.int_dead_count,"+ - "c.int_job_count,"+ - "c.int_cores " + - "FROM " + - "folder, " + - "folder_level," + - "folder_resource, "+ - "vs_folder_counts c, " + - "show," + - "dept " + - "WHERE " + - "show.pk_show = folder.pk_show "+ - "AND " + - "folder.pk_folder = folder_level.pk_folder " + - "AND " + - "folder.pk_folder = folder_resource.pk_folder " + - "AND " + - "folder.pk_folder = c.pk_folder " + - "AND " + - "folder.pk_dept = dept.pk_dept "; - - private static final String GET_ACTION = - "SELECT " + - "filter.pk_show," + - "action.* " + - "FROM " + - "filter,"+ - 
"action " + - "WHERE " + - "filter.pk_filter = action.pk_filter "; - - private static final String GET_JOB = - "SELECT " + - "job.pk_job,"+ - "job.str_log_dir," + - "job_resource.int_max_cores," + - "job_resource.int_min_cores," + - "job.str_name," + - "job.str_shot,"+ - "job.str_state,"+ - "job.int_uid,"+ - "job.str_user,"+ - "job.b_paused,"+ - "job.ts_started,"+ - "job.ts_stopped,"+ - "job.b_comment,"+ - "job.b_autoeat,"+ - "job.str_os,"+ - "job_resource.int_priority,"+ - "job.int_frame_count, " + - "job.int_layer_count, " + - "show.str_name as str_show," + - "show.pk_show as id_show,"+ - "facility.str_name AS facility_name,"+ - "folder.str_name AS group_name,"+ - "job_stat.int_waiting_count, "+ - "job_stat.int_running_count, "+ - "job_stat.int_dead_count, " + - "job_stat.int_eaten_count," + - "job_stat.int_depend_count, "+ - "job_stat.int_succeeded_count, "+ - "job_usage.int_core_time_success, "+ - "job_usage.int_core_time_fail, " + - "job_usage.int_frame_success_count, "+ - "job_usage.int_frame_fail_count, "+ - "job_usage.int_clock_time_high,"+ - "job_usage.int_clock_time_success,"+ - "job_mem.int_max_rss,"+ - "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores " + - "FROM " + - "job,"+ - "folder,"+ - "show," + - "facility,"+ - "job_stat," + - "job_resource, " + - "job_mem, " + - "job_usage " + - "WHERE " + - "job.pk_show = show.pk_show " + - "AND " + - "job.pk_folder = folder.pk_folder " + - "AND " + - "job.pk_facility = facility.pk_facility " + - "AND " + - "job.pk_job = job_stat.pk_job " + - "AND " + - "job.pk_job = job_resource.pk_job " + - "AND " + - "job.pk_job = job_mem.pk_job " + - "AND " + - "job.pk_job = job_usage.pk_job "; - - private static final String GET_LAYER = - "SELECT " + - "layer.*," + - "layer_stat.int_total_count," + - "layer_stat.int_waiting_count," + - "layer_stat.int_running_count," + - "layer_stat.int_dead_count," + - "layer_stat.int_depend_count," + - "layer_stat.int_eaten_count," + - 
"layer_stat.int_succeeded_count," + - "layer_usage.int_core_time_success," + - "layer_usage.int_core_time_fail, "+ - "layer_usage.int_frame_success_count, "+ - "layer_usage.int_frame_fail_count, "+ - "layer_usage.int_clock_time_low, "+ - "layer_usage.int_clock_time_high," + - "layer_usage.int_clock_time_success," + - "layer_usage.int_clock_time_fail," + - "layer_mem.int_max_rss,"+ - "layer_resource.int_cores " + - "FROM " + - "layer, " + - "job," + - "layer_stat, " + - "layer_resource, " + - "layer_usage, " + - "layer_mem " + - "WHERE " + - "layer.pk_job = job.pk_job " + - "AND " + - "layer.pk_layer = layer_stat.pk_layer "+ - "AND " + - "layer.pk_layer = layer_resource.pk_layer " + - "AND " + - "layer.pk_layer = layer_usage.pk_layer " + - "AND " + - "layer.pk_layer = layer_mem.pk_layer "; - - private static final String GET_LAYER_WITH_LIMITS = - "SELECT " + - "layer.*, " + - "layer_stat.int_total_count, " + - "layer_stat.int_waiting_count, " + - "layer_stat.int_running_count, " + - "layer_stat.int_dead_count, " + - "layer_stat.int_depend_count, " + - "layer_stat.int_eaten_count, " + - "layer_stat.int_succeeded_count, " + - "layer_usage.int_core_time_success, " + - "layer_usage.int_core_time_fail, " + - "layer_usage.int_frame_success_count, " + - "layer_usage.int_frame_fail_count, " + - "layer_usage.int_clock_time_low, " + - "layer_usage.int_clock_time_high, " + - "layer_usage.int_clock_time_success, " + - "layer_usage.int_clock_time_fail, " + - "layer_mem.int_max_rss, " + - "layer_resource.int_cores, " + - "limit_names.str_limit_names " + - "FROM " + - "layer " + - "JOIN " + - "job ON layer.pk_job = job.pk_job " + - "JOIN " + - "layer_stat ON layer.pk_layer = layer_stat.pk_layer " + - "JOIN " + - "layer_resource ON layer.pk_layer = layer_resource.pk_layer " + - "JOIN " + - "layer_usage ON layer.pk_layer = layer_usage.pk_layer " + - "JOIN " + - "layer_mem ON layer.pk_layer = layer_mem.pk_layer " + - "LEFT JOIN " + - "(" + - "SELECT " + - "layer_limit.pk_layer, " + - 
"LISTAGG(limit_record.str_name, ',') AS str_limit_names " + - "FROM " + - "limit_record, " + - "layer_limit " + - "WHERE " + - "layer_limit.pk_limit_record = limit_record.pk_limit_record " + - "GROUP BY " + - "layer_limit.pk_layer) AS limit_names " + - "ON layer.pk_layer = limit_names.pk_layer "; - - private static final String GET_LIMIT_NAMES = - "SELECT " + - "limit_record.str_name " + - "FROM " + - "layer_limit, " + - "limit_record " + - "WHERE " + - "layer_limit.pk_layer = ? " + - "AND " + - "limit_record.pk_limit_record = layer_limit.pk_limit_record "; - - private static final String GET_SHOW = - "SELECT " + - "show.*," + - "NVL(vs_show_stat.int_pending_count,0) AS int_pending_count," + - "NVL(vs_show_stat.int_running_count,0) AS int_running_count," + - "NVL(vs_show_stat.int_dead_count,0) AS int_dead_count," + - "NVL(vs_show_resource.int_cores,0) AS int_cores, " + - "NVL(vs_show_stat.int_job_count,0) AS int_job_count " + - "FROM " + - "show " + - "LEFT JOIN vs_show_stat ON (vs_show_stat.pk_show = show.pk_show) "+ - "LEFT JOIN vs_show_resource ON (vs_show_resource.pk_show=show.pk_show) " + - "WHERE " + - "1 = 1 "; - - private static final String GET_SERVICE = - "SELECT " + - "service.pk_service,"+ - "service.str_name," + - "service.b_threadable," + - "service.int_cores_min," + - "service.int_cores_max," + - "service.int_mem_min," + - "service.int_gpu_min," + - "service.str_tags " + - "FROM "+ - "service "; - - private static final String GET_SERVICE_OVERRIDE = - "SELECT " + - "show_service.pk_show_service,"+ - "show_service.str_name," + - "show_service.b_threadable," + - "show_service.int_cores_min," + - "show_service.int_cores_max," + - "show_service.int_mem_min," + - "show_service.int_gpu_min," + - "show_service.str_tags " + - "FROM "+ - "show_service, " + - "show " + - "WHERE " + - "show_service.pk_show = show.pk_show "; - - private static final String GET_TASK = - "SELECT " + - "task.pk_task," + - "task.str_shot,"+ - "task.int_min_cores + 
task.int_adjust_cores AS int_min_cores, "+ - "task.int_adjust_cores, " + - "dept.str_name AS str_dept "+ - "FROM " + - "task,"+ - "dept, " + - "point "+ - "WHERE " + - "task.pk_point = point.pk_point " + - "AND " + - "point.pk_dept = dept.pk_dept "; - - private static final String GET_HOST = - "SELECT " + - "host.pk_host, "+ - "host.str_name AS host_name," + - "host_stat.str_state AS host_state,"+ - "host.b_nimby,"+ - "host_stat.ts_booted,"+ - "host_stat.ts_ping,"+ - "host.int_cores,"+ - "host.int_cores_idle,"+ - "host.int_mem,"+ - "host.int_mem_idle,"+ - "host.int_gpu,"+ - "host.int_gpu_idle,"+ - "host.str_tags,"+ - "host.str_lock_state,"+ - "host.b_comment,"+ - "host.int_thread_mode,"+ - "host_stat.str_os,"+ - "host_stat.int_mem_total,"+ - "host_stat.int_mem_free,"+ - "host_stat.int_swap_total,"+ - "host_stat.int_swap_free,"+ - "host_stat.int_mcp_total,"+ - "host_stat.int_mcp_free,"+ - "host_stat.int_gpu_total,"+ - "host_stat.int_gpu_free,"+ - "host_stat.int_load, " + - "alloc.str_name AS alloc_name " + - "FROM " + - "alloc," + - "facility, "+ - "host_stat,"+ - "host "+ - "WHERE " + - "host.pk_alloc = alloc.pk_alloc " + - "AND " + - "facility.pk_facility = alloc.pk_facility " + - "AND "+ - "host.pk_host = host_stat.pk_host "; - - private static final String GET_DEPEND = - "SELECT " + - "depend.pk_depend, "+ - "depend.str_type, "+ - "depend.b_active, "+ - "depend.b_any, "+ - "depend.str_target, "+ - "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_on) AS depend_on_job, "+ - "(SELECT str_name FROM job j WHERE j.pk_job = depend.pk_job_depend_er) AS depend_er_job, "+ - "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_on) AS depend_on_layer, "+ - "(SELECT str_name FROM layer l WHERE l.pk_layer = depend.pk_layer_depend_er) AS depend_er_layer, "+ - "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_on) AS depend_on_frame, "+ - "(SELECT str_name FROM frame f WHERE f.pk_frame = depend.pk_frame_depend_er) AS 
depend_er_frame "+ - "FROM " + - "depend "; - - private static final String GET_SUBSCRIPTION = - "SELECT " + - "subscription.pk_subscription, " + - "(alloc.str_name || '.' || show.str_name) AS name, "+ - "subscription.int_burst, " + - "subscription.int_size, " + - "subscription.int_cores, " + - "show.str_name AS show_name, " + - "alloc.str_name AS alloc_name, " + - "facility.str_name AS facility_name " + - "FROM "+ - "show, " + - "alloc, " + - "facility,"+ - "subscription " + - "WHERE " + - "subscription.pk_show = show.pk_show " + - "AND " + - "subscription.pk_alloc = alloc.pk_alloc " + - "AND " + - "alloc.pk_facility = facility.pk_facility "; - - private static final String GET_PENDING_JOBS = - GET_JOB + - "AND " + - "job.str_state = 'PENDING' "; - - private static final String GET_FRAMES_CRITERIA = - - "SELECT " + - "frame.pk_frame, " + - "frame.int_exit_status,"+ - "frame.str_name,"+ - "frame.int_number,"+ - "frame.int_dispatch_order,"+ - "frame.ts_started,"+ - "frame.ts_stopped,"+ - "frame.int_retries,"+ - "frame.str_state,"+ - "frame.str_host,"+ - "frame.int_cores,"+ - "frame.int_mem_max_used," + - "frame.int_mem_used, " + - "frame.int_mem_reserved, " + - "frame.int_gpu_reserved, " + - "frame.str_checkpoint_state,"+ - "frame.int_checkpoint_count,"+ - "frame.int_total_past_core_time,"+ - "layer.str_name AS layer_name," + - "job.str_name AS job_name, "+ - "ROW_NUMBER() OVER " + - "(ORDER BY frame.int_dispatch_order ASC, layer.int_dispatch_order ASC) AS row_number " + - "FROM "+ - "job, " + - "layer,"+ - "frame " + - "WHERE " + - "frame.pk_layer = layer.pk_layer "+ - "AND "+ - "frame.pk_job= job.pk_job "; - - private static final String QUERY_FOR_DEED = - "SELECT " + - "host.str_name AS str_host,"+ - "show.str_name AS str_show,"+ - "owner.str_username," + - "deed.b_blackout,"+ - "deed.int_blackout_start,"+ - "deed.int_blackout_stop,"+ - "deed.pk_deed " + - "FROM " + - "deed,"+ - "owner,"+ - "host,"+ - "show "+ - "WHERE " + - "deed.pk_host = host.pk_host " + - 
"AND " + - "deed.pk_owner = owner.pk_owner " + - "AND " + - "owner.pk_show = show.pk_show "; - - public FrameSearchFactory getFrameSearchFactory() { - return frameSearchFactory; - } - - public void setFrameSearchFactory(FrameSearchFactory frameSearchFactory) { - this.frameSearchFactory = frameSearchFactory; - } - - public ProcSearchFactory getProcSearchFactory() { - return procSearchFactory; - } - - public void setProcSearchFactory(ProcSearchFactory procSearchFactory) { - this.procSearchFactory = procSearchFactory; - } -} - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java index c57e1d8e8..1ceda9b5c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/AllocationDaoJdbc.java @@ -109,7 +109,7 @@ public void insertAllocation(FacilityInterface facility, AllocationEntity detail Integer.class, new_alloc_name) > 0) { getJdbcTemplate().update( - "UPDATE alloc SET b_enabled=1 WHERE str_name=?", + "UPDATE alloc SET b_enabled = true WHERE str_name=?", new_alloc_name); } else { @@ -196,8 +196,8 @@ public CallableStatement createCallableStatement(Connection con) throws SQLExcep } public void setDefaultAllocation(AllocationInterface a) { - getJdbcTemplate().update("UPDATE alloc SET b_default = 0 WHERE b_default = 1"); - getJdbcTemplate().update("UPDATE alloc SET b_default = 1 WHERe pk_alloc=?", + getJdbcTemplate().update("UPDATE alloc SET b_default = false WHERE b_default = true"); + getJdbcTemplate().update("UPDATE alloc SET b_default = true WHERE pk_alloc=?", a.getAllocationId()); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java index b6ec245f1..08e1634aa 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/BookingDaoJdbc.java @@ -55,12 +55,14 @@ public class BookingDaoJdbc extends "int_mem_idle,"+ "int_cores_max,"+ "int_cores_idle,"+ - "int_gpu_idle,"+ - "int_gpu_max,"+ + "int_gpu_mem_idle,"+ + "int_gpu_mem_max,"+ + "int_gpus_max,"+ + "int_gpus_idle,"+ "int_threads "+ ") " + "VALUES " + - "(?,?,?,?,?,?,?,?,?,?,?,?,?)"; + "(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; @Override public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHostAssignment l) { @@ -71,7 +73,8 @@ public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHo l.setType(RenderPartitionType.JOB_PARTITION); l.setIdleCoreUnits(l.getMaxCoreUnits()); l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpu(l.getMaxGpu()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); getJdbcTemplate().update( INSERT_LOCAL_JOB_ASSIGNMENT, @@ -85,8 +88,10 @@ public void insertLocalHostAssignment(HostInterface h, JobInterface job, LocalHo l.getMaxMemory(), l.getMaxCoreUnits(), l.getMaxCoreUnits(), - l.getMaxGpu(), - l.getMaxGpu(), + l.getMaxGpuMemory(), + l.getMaxGpuMemory(), + l.getMaxGpuUnits(), + l.getMaxGpuUnits(), l.getThreads()); } @@ -100,7 +105,8 @@ public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, Loc l.setType(RenderPartitionType.LAYER_PARTITION); l.setIdleCoreUnits(l.getMaxCoreUnits()); l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpu(l.getMaxGpu()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); getJdbcTemplate().update( INSERT_LOCAL_JOB_ASSIGNMENT, @@ -114,8 +120,10 @@ public void insertLocalHostAssignment(HostInterface h, LayerInterface layer, Loc l.getMaxMemory(), l.getMaxCoreUnits(), l.getMaxCoreUnits(), - l.getMaxGpu(), - l.getMaxGpu(), + l.getMaxGpuMemory(), + l.getMaxGpuMemory(), + l.getMaxGpuUnits(), + l.getMaxGpuUnits(), l.getThreads()); } @@ -130,7 +138,8 @@ public void insertLocalHostAssignment(HostInterface h, 
FrameInterface frame, Loc l.setType(RenderPartitionType.FRAME_PARTITION); l.setIdleCoreUnits(l.getMaxCoreUnits()); l.setIdleMemory(l.getMaxMemory()); - l.setIdleGpu(l.getMaxGpu()); + l.setIdleGpuUnits(l.getMaxGpuUnits()); + l.setIdleGpuMemory(l.getMaxGpuMemory()); getJdbcTemplate().update( INSERT_LOCAL_JOB_ASSIGNMENT, @@ -144,8 +153,10 @@ public void insertLocalHostAssignment(HostInterface h, FrameInterface frame, Loc l.getMaxMemory(), l.getMaxCoreUnits(), l.getMaxCoreUnits(), - l.getMaxGpu(), - l.getMaxGpu(), + l.getMaxGpuMemory(), + l.getMaxGpuMemory(), + l.getMaxGpuUnits(), + l.getMaxGpuUnits(), l.getThreads()); } public static final RowMapper LJA_MAPPER = @@ -155,11 +166,13 @@ public LocalHostAssignment mapRow(final ResultSet rs, int rowNum) throws SQLExce l.id = rs.getString("pk_host_local"); l.setMaxCoreUnits(rs.getInt("int_cores_max")); l.setMaxMemory(rs.getLong("int_mem_max")); - l.setMaxGpu(rs.getLong("int_gpu_max")); + l.setMaxGpuUnits(rs.getInt("int_gpus_max")); + l.setMaxGpuMemory(rs.getLong("int_gpu_mem_max")); l.setThreads(rs.getInt("int_threads")); l.setIdleCoreUnits(rs.getInt("int_cores_idle")); l.setIdleMemory(rs.getLong("int_mem_idle")); - l.setIdleGpu(rs.getLong("int_gpu_idle")); + l.setIdleGpuUnits(rs.getInt("int_gpus_idle")); + l.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); l.setJobId(rs.getString("pk_job")); l.setLayerId(rs.getString("pk_layer")); l.setFrameId(rs.getString("pk_frame")); @@ -180,8 +193,10 @@ public LocalHostAssignment mapRow(final ResultSet rs, int rowNum) throws SQLExce "int_mem_max,"+ "int_cores_idle,"+ "int_cores_max,"+ - "int_gpu_idle,"+ - "int_gpu_max,"+ + "int_gpu_mem_idle,"+ + "int_gpu_mem_max,"+ + "int_gpus_idle,"+ + "int_gpus_max,"+ "int_threads, "+ "str_type " + "FROM " + @@ -250,61 +265,6 @@ public boolean hasActiveLocalJob(HostInterface host) { Integer.class, host.getHostId()) > 0; } - private static final String IS_BLACKOUT_TIME = - "SELECT " + - "int_blackout_start,"+ - "int_blackout_duration " + - "FROM " + 
- "host,"+ - "deed "+ - "WHERE " + - "host.pk_host = deed.pk_host " + - "AND " + - "deed.b_blackout = true " + - "AND " + - "host.pk_host = ? "; - - public static final RowMapper BLACKOUT_MAPPER = - new RowMapper() { - public Boolean mapRow(final ResultSet rs, int rowNum) throws SQLException { - - int startTimeSeconds = rs.getInt("int_backout_start"); - int stopTimeSeconds = rs.getInt("int_blackout_stop"); - if (stopTimeSeconds <= startTimeSeconds) { - stopTimeSeconds = stopTimeSeconds + 86400; - } - - Calendar startTime = Calendar.getInstance(); - startTime.set(Calendar.HOUR_OF_DAY, 0); - startTime.set(Calendar.MINUTE, 0); - startTime.set(Calendar.SECOND, 0); - startTime.add(Calendar.SECOND, startTimeSeconds); - - Calendar stopTime = Calendar.getInstance(); - stopTime.set(Calendar.HOUR_OF_DAY, 0); - stopTime.set(Calendar.MINUTE, 0); - stopTime.set(Calendar.SECOND, 0); - stopTime.add(Calendar.SECOND, stopTimeSeconds); - - Calendar now = Calendar.getInstance(); - if (now.compareTo(startTime) >= 0 && now.compareTo(stopTime) <= 0) { - return true; - } - - return false; - } - }; - - @Override - public boolean isBlackoutTime(HostInterface h) { - try { - return getJdbcTemplate().queryForObject(IS_BLACKOUT_TIME, - BLACKOUT_MAPPER, h.getHostId()); - } catch (Exception e) { - return false; - } - } - @Override public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { return getJdbcTemplate().queryForObject( @@ -312,6 +272,13 @@ public int getCoreUsageDifference(LocalHostAssignment l, int coreUnits) { Integer.class, coreUnits, l.getId()); } + @Override + public int getGpuUsageDifference(LocalHostAssignment l, int gpuUnits) { + return getJdbcTemplate().queryForObject( + "SELECT ? 
- int_gpus_max FROM host_local WHERE pk_host_local=?", + Integer.class, gpuUnits, l.getId()); + } + private static final String UPDATE_MAX_CORES = "UPDATE " + "host_local " + @@ -327,6 +294,21 @@ public boolean updateMaxCores(LocalHostAssignment l, int coreUnits) { coreUnits, coreUnits, l.getId()) > 0; } + private static final String UPDATE_MAX_GPUS = + "UPDATE " + + "host_local " + + "SET " + + "int_gpus_idle = int_gpus_idle + (? - int_gpus_max), " + + "int_gpus_max = ? "+ + "WHERE " + + "pk_host_local = ? "; + + @Override + public boolean updateMaxGpus(LocalHostAssignment l, int gpuUnits) { + return getJdbcTemplate().update(UPDATE_MAX_GPUS, + gpuUnits, gpuUnits, l.getId()) > 0; + } + private static final String UPDATE_MAX_MEMORY = "UPDATE " + "host_local " + @@ -342,19 +324,19 @@ public boolean updateMaxMemory(LocalHostAssignment l, long maxMemory) { UPDATE_MAX_MEMORY, maxMemory, maxMemory, l.getId()) > 0; } - private static final String UPDATE_MAX_GPU = + private static final String UPDATE_MAX_GPU_MEMORY = "UPDATE " + "host_local " + "SET " + - "int_gpu_idle = int_gpu_idle + (? - int_gpu_max), " + - "int_gpu_max = ? "+ + "int_gpu_mem_idle = int_gpu_mem_idle + (? - int_gpu_mem_max), " + + "int_gpu_mem_max = ? "+ "WHERE " + "pk_host_local = ? "; @Override - public boolean updateMaxGpu(LocalHostAssignment l, long maxGpu) { + public boolean updateMaxGpuMemory(LocalHostAssignment l, long maxGpuMemory) { return getJdbcTemplate().update( - UPDATE_MAX_GPU, maxGpu, maxGpu, l.getId()) > 0; + UPDATE_MAX_GPU_MEMORY, maxGpuMemory, maxGpuMemory, l.getId()) > 0; } @Override @@ -386,6 +368,26 @@ public boolean allocateCoresFromHost(HostInterface h, int cores) { } + /** + * + * @param h HostInterface + * @param gpus int + * @return boolean + */ + @Override + public boolean allocateGpusFromHost(HostInterface h, int gpus) { + + try { + return getJdbcTemplate().update( + "UPDATE host SET int_gpus_idle = int_gpus_idle - ? 
" + + "WHERE pk_host = ?", + gpus, h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException("Failed to allocate " + + gpus + " GPU from host, " + e); + } + } + /** * * @param h HostInterface @@ -404,12 +406,31 @@ public boolean deallocateCoresFromHost(HostInterface h, int cores) { } } + /** + * + * @param h HostInterface + * @param gpus int + * @return boolean + */ + @Override + public boolean deallocateGpusFromHost(HostInterface h, int gpus) { + try { + return getJdbcTemplate().update( + "UPDATE host SET int_gpus_idle = int_gpus_idle + ? WHERE pk_host = ?", + gpus, h.getHostId()) > 0; + } catch (DataAccessException e) { + throw new ResourceReservationFailureException("Failed to de-allocate " + + gpus + " GPU from host, " + e); + } + } + @Override public boolean hasResourceDeficit(HostInterface host) { return getJdbcTemplate().queryForObject( "SELECT COUNT(1) FROM host_local WHERE " + "(int_cores_max < int_cores_max - int_cores_idle OR " + - "int_gpu_max < int_gpu_max - int_gpu_idle OR " + + "int_gpus_max < int_gpus_max - int_gpus_idle OR " + + "int_gpu_mem_max < int_gpu_mem_max - int_gpu_mem_idle OR " + "int_mem_max < int_mem_max - int_mem_idle) AND " + "host_local.pk_host= ?", Integer.class, host.getHostId()) > 0; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java index 9587e41db..ea61f07bb 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/CommentDaoJdbc.java @@ -21,6 +21,7 @@ import java.sql.ResultSet; import java.sql.SQLException; +import java.util.List; import java.util.Map; import org.springframework.jdbc.core.RowMapper; @@ -71,6 +72,18 @@ public CommentDetail mapRow(ResultSet rs, int row) throws SQLException { } }; + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, 
String subject) { + return getJdbcTemplate().update( + "DELETE FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", + host.getHostId(), user, subject) > 0; + } + + public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject) { + return getJdbcTemplate().query( + "SELECT * FROM comments WHERE pk_host=? AND str_user=? AND str_subject=?", + COMMENT_DETAIL_MAPPER, host.getHostId(), user, subject); + } + public CommentDetail getCommentDetail(String id) { return getJdbcTemplate().queryForObject( "SELECT * FROM comments WHERE pk_comment=?", diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java index b002d3033..6d461423f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DeedDaoJdbc.java @@ -41,9 +41,6 @@ public DeedEntity mapRow(ResultSet rs, int rowNum) throws SQLException { o.id = rs.getString("pk_deed"); o.owner = rs.getString("str_username"); o.host = rs.getString("str_hostname"); - o.isBlackoutEnabled = rs.getBoolean("b_blackout"); - o.blackoutStart = rs.getInt("int_blackout_start"); - o.blackoutStop = rs.getInt("int_blackout_stop"); return o; } }; @@ -94,9 +91,6 @@ public DeedEntity insertDeed(OwnerEntity owner, HostInterface host) { private static final String QUERY_FOR_DEED = "SELECT " + "deed.pk_deed, "+ - "deed.b_blackout,"+ - "deed.int_blackout_start,"+ - "deed.int_blackout_stop, " + "host.str_name as str_hostname, " + "owner.str_username " + "FROM " + @@ -121,20 +115,5 @@ public List getDeeds(OwnerEntity owner) { QUERY_FOR_DEED + " AND owner.pk_owner = ?", DEED_MAPPER, owner.getId()); } - - @Override - public void setBlackoutTime(DeedEntity deed, int startSeconds, int stopSeconds) { - getJdbcTemplate().update( - "UPDATE deed SET int_blackout_start = ?, " + - "int_blackout_stop = ? 
WHERE deed.pk_deed = ?", - startSeconds, stopSeconds, deed.getId()); - } - - @Override - public void updateBlackoutTimeEnabled(DeedEntity deed, boolean bool) { - getJdbcTemplate().update( - "UPDATE deed SET b_blackout = ? WHERE deed.pk_deed = ?", - bool, deed.getId()); - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java index 0443f691a..a789307af 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatchQuery.java @@ -21,8 +21,8 @@ public class DispatchQuery { - public static final String FIND_JOBS_BY_SHOW = - "/* FIND_JOBS_BY_SHOW */ " + + public static final String FIND_JOBS_BY_SHOW_PRIORITY_MODE = + "/* FIND_JOBS_BY_SHOW_PRIORITY_MODE */ " + "SELECT pk_job, int_priority, rank FROM ( " + "SELECT " + "ROW_NUMBER() OVER (ORDER BY job_resource.int_priority DESC) AS rank, " + @@ -44,7 +44,6 @@ public class DispatchQuery { "AND folder.pk_dept = point.pk_dept " + "AND folder.pk_show = point.pk_show " + "AND job.pk_job = layer.pk_job " + - "AND job_resource.pk_job = job.pk_job " + "AND (CASE WHEN layer_stat.int_waiting_count > 0 THEN layer_stat.pk_layer ELSE NULL END) = layer.pk_layer " + "AND " + "(" + @@ -52,6 +51,12 @@ public class DispatchQuery { "OR " + "folder_resource.int_cores < folder_resource.int_max_cores " + ") " + + "AND " + + "(" + + "folder_resource.int_max_gpus = -1 " + + "OR " + + "folder_resource.int_gpus < folder_resource.int_max_gpus " + + ") " + "AND job.str_state = 'PENDING' " + "AND job.b_paused = false " + "AND job.pk_show = ? " + @@ -66,9 +71,11 @@ public class DispatchQuery { "AND layer.int_cores_min <= ? " + "AND layer.int_mem_min <= ? " + "AND (CASE WHEN layer.b_threadable = true THEN 1 ELSE 0 END) >= ? " + - "AND layer.int_gpu_min BETWEEN ? AND ? " + + "AND layer.int_gpus_min <= ? " + + "AND layer.int_gpu_mem_min BETWEEN ? AND ? 
" + "AND job_resource.int_cores + layer.int_cores_min < job_resource.int_max_cores " + - "AND host.str_tags ~* ('(?x)' || layer.str_tags) " + + "AND job_resource.int_gpus + layer.int_gpus_min < job_resource.int_max_gpus " + + "AND host.str_tags ~* ('(?x)' || layer.str_tags || '\\y') " + "AND host.str_name = ? " + "AND layer.pk_layer IN (" + "SELECT " + @@ -93,9 +100,75 @@ public class DispatchQuery { ") " + ") AS t1 WHERE rank < ?"; + // sort = priority + (100 * (1 - (job.cores/job.int_min_cores))) + (age in days) */ + public static final String FIND_JOBS_BY_SHOW_BALANCED_MODE = + "/* FIND_JOBS_BY_SHOW_BALANCED_MODE */ " + + "SELECT pk_job, int_priority, rank FROM ( " + + "SELECT " + + "ROW_NUMBER() OVER (ORDER BY int_priority DESC) AS rank, " + + "pk_job, " + + "int_priority " + + "FROM ( " + + "SELECT DISTINCT " + + "job.pk_job as pk_job, " + + "CAST( " + + "job_resource.int_priority + ( " + + "100 * (CASE WHEN job_resource.int_min_cores <= 0 THEN 0 " + + "ELSE " + + "CASE WHEN job_resource.int_cores > job_resource.int_min_cores THEN 0 " + + "ELSE 1 - job_resource.int_cores/job_resource.int_min_cores " + + "END " + + "END) " + + ") + ( " + + "(DATE_PART('days', NOW()) - DATE_PART('days', job.ts_updated)) " + + ") as INT) as int_priority " + + "FROM " + + "job , " + + "job_resource , " + + "folder , " + + "folder_resource, " + + "point , " + + "layer , " + + "layer_stat , " + + "host " + + "WHERE " + + "job.pk_job = job_resource.pk_job " + + "AND job.pk_folder = folder.pk_folder " + + "AND folder.pk_folder = folder_resource.pk_folder " + + "AND folder.pk_dept = point.pk_dept " + + "AND folder.pk_show = point.pk_show " + + "AND job.pk_job = layer.pk_job " + + "AND (CASE WHEN layer_stat.int_waiting_count > 0 THEN layer_stat.pk_layer ELSE NULL END) = layer.pk_layer " + + "AND " + + "(" + + "folder_resource.int_max_cores = -1 " + + "OR " + + "folder_resource.int_cores + layer.int_cores_min < folder_resource.int_max_cores " + + ") " + + "AND job.str_state = 'PENDING' " 
+ + "AND job.b_paused = false " + + "AND job.pk_show = ? " + + "AND job.pk_facility = ? " + + "AND " + + "(" + + "job.str_os IS NULL OR job.str_os = '' " + + "OR " + + "job.str_os = ? " + + ") " + + "AND (CASE WHEN layer_stat.int_waiting_count > 0 THEN 1 ELSE NULL END) = 1 " + + "AND layer.int_cores_min <= ? " + + "AND layer.int_mem_min <= ? " + + "AND (CASE WHEN layer.b_threadable = true THEN 1 ELSE 0 END) >= ? " + + "AND layer.int_gpus_min <= ? " + + "AND layer.int_gpu_mem_min BETWEEN ? AND ? " + + "AND job_resource.int_cores + layer.int_cores_min <= job_resource.int_max_cores " + + "AND host.str_tags ~* ('(?x)' || layer.str_tags || '\\y') " + + "AND host.str_name = ? " + + ") AS t1 ) AS t2 WHERE rank < ?"; + - public static final String FIND_JOBS_BY_GROUP = - FIND_JOBS_BY_SHOW + public static final String FIND_JOBS_BY_GROUP_PRIORITY_MODE = + FIND_JOBS_BY_SHOW_PRIORITY_MODE .replace( "FIND_JOBS_BY_SHOW", "FIND_JOBS_BY_GROUP") @@ -103,6 +176,30 @@ public class DispatchQuery { "AND job.pk_show = ? ", "AND job.pk_folder = ? "); + public static final String FIND_JOBS_BY_GROUP_BALANCED_MODE = + FIND_JOBS_BY_SHOW_BALANCED_MODE + .replace( + "FIND_JOBS_BY_SHOW", + "FIND_JOBS_BY_GROUP") + .replace( + "AND job.pk_show = ? ", + "AND job.pk_folder = ? "); + + private static final String replaceQueryForFifo(String query) { + return query + .replace( + "JOBS_BY", + "JOBS_FIFO_BY") + .replace( + "ORDER BY job_resource.int_priority DESC", + "ORDER BY job_resource.int_priority DESC, job.ts_started ASC") + .replace( + "WHERE rank < ?", + "WHERE rank < ? ORDER BY rank"); + } + + public static final String FIND_JOBS_BY_SHOW_FIFO_MODE = replaceQueryForFifo(FIND_JOBS_BY_SHOW_PRIORITY_MODE); + public static final String FIND_JOBS_BY_GROUP_FIFO_MODE = replaceQueryForFifo(FIND_JOBS_BY_GROUP_PRIORITY_MODE); /** * Dispatch a host in local booking mode. @@ -131,7 +228,7 @@ public class DispatchQuery { "AND " + "job.pk_facility = ? " + "AND " + - "(job.str_os = ? 
OR job.str_os IS NULL)" + + "(job.str_os = ? OR job.str_os IS NULL) " + "AND " + "job.pk_job IN ( " + "SELECT " + @@ -157,7 +254,7 @@ public class DispatchQuery { "AND " + "j.pk_facility = ? " + "AND " + - "(j.str_os = ? OR j.str_os IS NULL)" + + "(j.str_os = ? OR j.str_os IS NULL) " + "AND " + "(CASE WHEN lst.int_waiting_count > 0 THEN lst.pk_layer ELSE NULL END) = l.pk_layer " + "AND " + @@ -165,7 +262,7 @@ public class DispatchQuery { "AND " + "l.int_mem_min <= host_local.int_mem_idle " + "AND " + - "l.int_gpu_min <= host_local.int_gpu_idle " + + "l.int_gpu_mem_min <= host_local.int_gpu_mem_idle " + "AND " + "l.pk_layer IN (" + "SELECT " + @@ -219,6 +316,8 @@ public class DispatchQuery { "folder.pk_folder = folder_resource.pk_folder " + "AND " + "(folder_resource.int_max_cores = -1 OR folder_resource.int_cores < folder_resource.int_max_cores) " + + "AND " + + "(folder_resource.int_max_gpus = -1 OR folder_resource.int_gpus < folder_resource.int_max_gpus) " + "AND " + "job_resource.float_tier < 1.00 " + "AND " + @@ -232,7 +331,7 @@ public class DispatchQuery { "AND " + "job.pk_facility = ? " + "AND " + - "(job.str_os = ? OR job.str_os IS NULL)" + + "(job.str_os = ? OR job.str_os IS NULL) " + "AND " + "job.pk_job IN ( " + "SELECT /* index (h i_str_host_tag) */ " + @@ -253,7 +352,7 @@ public class DispatchQuery { "AND " + "j.pk_facility = ? " + "AND " + - "(j.str_os = ? OR j.str_os IS NULL)" + + "(j.str_os = ? OR j.str_os IS NULL) " + "AND " + "(CASE WHEN lst.int_waiting_count > 0 THEN lst.pk_layer ELSE NULL END) = l.pk_layer " + "AND " + @@ -263,9 +362,11 @@ public class DispatchQuery { "AND " + "l.int_mem_min <= ? " + "AND " + - "l.int_gpu_min = ? " + + "l.int_gpus_min <= ? " + + "AND " + + "l.int_gpu_mem_min = ? " + "AND " + - "h.str_tags ~* ('(?x)' || l.str_tags) " + + "h.str_tags ~* ('(?x)' || l.str_tags || '\\y') " + "AND " + "h.str_name = ? 
" + "AND " + @@ -321,9 +422,13 @@ public class DispatchQuery { "AND " + "(folder_resource.int_max_cores = -1 OR folder_resource.int_cores < folder_resource.int_max_cores) " + "AND " + - "job_resource.int_priority > ?" + + "(folder_resource.int_max_gpus = -1 OR folder_resource.int_gpus < folder_resource.int_max_gpus) " + + "AND " + + "job_resource.int_priority > ? " + "AND " + "job_resource.int_cores < job_resource.int_max_cores " + + "AND " + + "job_resource.int_gpus < job_resource.int_max_gpus " + "AND " + "job.str_state = 'PENDING' " + "AND " + @@ -331,7 +436,7 @@ public class DispatchQuery { "AND " + "job.pk_facility = ? " + "AND " + - "(job.str_os = ? OR job.str_os IS NULL)" + + "(job.str_os = ? OR job.str_os IS NULL) " + "AND " + "job.pk_job IN ( " + "SELECT /* index (h i_str_host_tag) */ " + @@ -350,7 +455,7 @@ public class DispatchQuery { "AND " + "j.pk_facility = ? " + "AND " + - "(j.str_os = ? OR j.str_os IS NULL)" + + "(j.str_os = ? OR j.str_os IS NULL) " + "AND " + "(CASE WHEN lst.int_waiting_count > 0 THEN lst.pk_layer ELSE NULL END) = l.pk_layer " + "AND " + @@ -360,9 +465,11 @@ public class DispatchQuery { "AND " + "l.int_mem_min <= ? " + "AND " + - "l.int_gpu_min = ? " + + "l.int_gpus_min <= ? " + "AND " + - "h.str_tags ~* ('(?x)' || l.str_tags) " + + "l.int_gpu_mem_min = ? " + + "AND " + + "h.str_tags ~* ('(?x)' || l.str_tags || '\\y') " + "AND " + "h.str_name = ? 
" + "AND " + @@ -417,7 +524,9 @@ public class DispatchQuery { "int_cores_min, " + "int_cores_max, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, " + + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -450,7 +559,9 @@ public class DispatchQuery { "layer.int_cores_min, " + "layer.int_cores_max, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -468,7 +579,9 @@ public class DispatchQuery { "AND " + "layer.int_mem_min <= ? " + "AND " + - "layer.int_gpu_min BETWEEN ? AND ? " + + "layer.int_gpus_min <= ? " + + "AND " + + "layer.int_gpu_mem_min BETWEEN ? AND ? " + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -478,7 +591,7 @@ public class DispatchQuery { "l.pk_layer " + "FROM " + "layer l " + - "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags) AND h.str_name = ?) " + + "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags || '\\y') AND h.str_name = ?) 
" + "LEFT JOIN layer_limit ON layer_limit.pk_layer = l.pk_layer " + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + "LEFT JOIN (" + @@ -524,9 +637,11 @@ public class DispatchQuery { "layer_type, " + "int_cores_min, " + "int_cores_max, " + + "int_gpus_min, " + + "int_gpus_max, " + "b_threadable, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -557,9 +672,11 @@ public class DispatchQuery { "layer.str_type AS layer_type, " + "layer.int_cores_min, " + "layer.int_cores_max, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + "layer.b_threadable, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -579,7 +696,9 @@ public class DispatchQuery { "AND " + "(CASE WHEN layer.b_threadable = true THEN 1 ELSE 0 END) >= ? " + "AND " + - "layer.int_gpu_min BETWEEN ? AND ? " + + "layer.int_gpus_min <= ? " + + "AND " + + "layer.int_gpu_mem_min BETWEEN ? AND ? " + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -590,7 +709,7 @@ public class DispatchQuery { "l.pk_layer " + "FROM " + "layer l " + - "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags) AND h.str_name = ?) " + + "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags || '\\y') AND h.str_name = ?) 
" + "LEFT JOIN layer_limit ON layer_limit.pk_layer = l.pk_layer " + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + "LEFT JOIN (" + @@ -636,7 +755,9 @@ public class DispatchQuery { "int_cores_min, " + "int_cores_max, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, " + + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -669,7 +790,9 @@ public class DispatchQuery { "layer.int_cores_min, " + "layer.int_cores_max, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -685,7 +808,7 @@ public class DispatchQuery { "AND " + "layer.int_mem_min <= ? " + "AND " + - "layer.int_gpu_min <= ? " + + "layer.int_gpu_mem_min <= ? " + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -739,9 +862,11 @@ public class DispatchQuery { "layer_type, " + "int_cores_min, " + "int_cores_max, " + + "int_gpus_min, " + + "int_gpus_max, " + "b_threadable, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -774,7 +899,9 @@ public class DispatchQuery { "layer.int_cores_max, " + "layer.b_threadable, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -790,7 +917,7 @@ public class DispatchQuery { "AND " + "layer.int_mem_min <= ? " + "AND " + - "layer.int_gpu_min <= ? " + + "layer.int_gpu_mem_min <= ? 
" + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -849,7 +976,9 @@ public class DispatchQuery { "int_cores_min, " + "int_cores_max, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, " + + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -882,7 +1011,9 @@ public class DispatchQuery { "layer.int_cores_min, " + "layer.int_cores_max, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -900,7 +1031,9 @@ public class DispatchQuery { "AND " + "layer.int_mem_min <= ? " + "AND " + - "layer.int_gpu_min = ? " + + "layer.int_gpus_min <= ? " + + "AND " + + "layer.int_gpu_mem_min <= ? " + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -910,7 +1043,7 @@ public class DispatchQuery { "l.pk_layer " + "FROM " + "layer l " + - "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags) AND h.str_name = ?) " + + "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags || '\\y') AND h.str_name = ?) " + "LEFT JOIN layer_limit ON layer_limit.pk_layer = l.pk_layer " + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + "LEFT JOIN (" + @@ -958,7 +1091,9 @@ public class DispatchQuery { "int_cores_max, " + "b_threadable, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, " + + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -991,7 +1126,9 @@ public class DispatchQuery { "layer.int_cores_max, " + "layer.b_threadable, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -1011,7 +1148,9 @@ public class DispatchQuery { "AND " + "(CASE WHEN layer.b_threadable = true THEN 1 ELSE 0 END) >= ? " + "AND " + - "layer.int_gpu_min <= ? 
" + + "layer.int_gpus_min <= ? " + + "AND " + + "layer.int_gpu_mem_min <= ? " + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -1022,7 +1161,7 @@ public class DispatchQuery { "l.pk_layer " + "FROM " + "layer l " + - "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags) AND h.str_name = ?) " + + "JOIN host h ON (h.str_tags ~* ('(?x)' || l.str_tags || '\\y') AND h.str_name = ?) " + "LEFT JOIN layer_limit ON layer_limit.pk_layer = l.pk_layer " + "LEFT JOIN limit_record ON limit_record.pk_limit_record = layer_limit.pk_limit_record " + "LEFT JOIN (" + @@ -1068,7 +1207,9 @@ public class DispatchQuery { "int_cores_min, " + "int_cores_max, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, " + + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -1100,7 +1241,9 @@ public class DispatchQuery { "layer.b_threadable, " + "layer.int_cores_min, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.int_cores_max, " + "layer.str_cmd, " + "layer.str_range, " + @@ -1117,7 +1260,7 @@ public class DispatchQuery { "AND " + "layer.int_mem_min <= ? " + "AND " + - "layer.int_gpu_min <= ? " + + "layer.int_gpu_mem_min <= ? " + "AND " + "frame.str_state='WAITING' " + "AND " + @@ -1173,7 +1316,9 @@ public class DispatchQuery { "int_cores_max, " + "b_threadable, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, " + + "int_gpus_max, " + + "int_gpu_mem_min, " + "str_cmd, " + "str_range, " + "int_chunk_size, " + @@ -1206,7 +1351,9 @@ public class DispatchQuery { "layer.int_cores_max, " + "layer.b_threadable, " + "layer.int_mem_min, " + - "layer.int_gpu_min, " + + "layer.int_gpus_min, " + + "layer.int_gpus_max, " + + "layer.int_gpu_mem_min, " + "layer.str_cmd, " + "layer.str_range, " + "layer.int_chunk_size, " + @@ -1222,7 +1369,7 @@ public class DispatchQuery { "AND " + "layer.int_mem_min <= ? " + "AND " + - "layer.int_gpu_min <= ? 
" + + "layer.int_gpu_mem_min <= ? " + "AND " + "frame.str_state='WAITING' " + "AND " + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java index d3e50525d..032c90dac 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/DispatcherDaoJdbc.java @@ -20,6 +20,7 @@ import java.sql.ResultSet; import java.sql.SQLException; +import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashSet; import java.util.LinkedList; @@ -27,7 +28,10 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.jdbc.core.RowMapper; import org.springframework.jdbc.core.support.JdbcDaoSupport; @@ -45,20 +49,7 @@ import com.imageworks.spcue.grpc.host.ThreadMode; import com.imageworks.spcue.util.CueUtil; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_DISPATCH_FRAME_BY_JOB_AND_HOST; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_DISPATCH_FRAME_BY_JOB_AND_PROC; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_DISPATCH_FRAME_BY_LAYER_AND_HOST; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_JOBS_BY_GROUP; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_JOBS_BY_LOCAL; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_JOBS_BY_SHOW; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST; -import static 
com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_SHOWS; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.FIND_UNDER_PROCED_JOB_BY_FACILITY; -import static com.imageworks.spcue.dao.postgres.DispatchQuery.HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS; +import static com.imageworks.spcue.dao.postgres.DispatchQuery.*; /** @@ -68,7 +59,7 @@ */ public class DispatcherDaoJdbc extends JdbcDaoSupport implements DispatcherDao { - private static final Logger logger = Logger.getLogger(DispatcherDaoJdbc.class); + private static final Logger logger = LogManager.getLogger(DispatcherDaoJdbc.class); public static final RowMapper PKJOB_MAPPER = new RowMapper() { @@ -124,6 +115,29 @@ public List getShows() { private final ConcurrentHashMap bookableShows = new ConcurrentHashMap(); + public boolean testMode = false; + + /** + * Choose between different scheduling strategies + */ + private SchedulingMode schedulingMode; + + @Autowired + public DispatcherDaoJdbc(Environment env) { + this.schedulingMode = SchedulingMode.valueOf(env.getProperty( + "dispatcher.scheduling_mode", String.class, "PRIORITY_ONLY")); + } + + @Override + public SchedulingMode getSchedulingMode() { + return schedulingMode; + } + + @Override + public void setSchedulingMode(SchedulingMode schedulingMode) { + this.schedulingMode = schedulingMode; + } + /** * Returns a sorted list of shows that have pending jobs * which could benefit from the specified allocation. 
@@ -149,8 +163,8 @@ else if (cached.isExpired()) { return bookableShows.get(key).shows; } - private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { - LinkedHashSet result = new LinkedHashSet(); + private List findDispatchJobs(DispatchHost host, int numJobs, boolean shuffleShows) { + ArrayList result = new ArrayList(); List shows = new LinkedList(getBookableShows(host)); // shows were sorted. If we want it in random sequence, we need to shuffle it. if (shuffleShows) { @@ -185,16 +199,17 @@ private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shu } result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_SHOW, + findByShowQuery(), PKJOB_MAPPER, s.getShowId(), host.getFacilityId(), host.os, host.idleCores, host.idleMemory, threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, + host.idleGpus, + (host.idleGpuMemory > 0) ? 1 : 0, host.idleGpuMemory, host.getName(), numJobs * 10)); if (result.size() < 1) { - if (host.gpu == 0) { + if (host.gpuMemory == 0) { s.skip(host.tags, host.idleCores, host.idleMemory); } } @@ -206,27 +221,45 @@ private Set findDispatchJobs(DispatchHost host, int numJobs, boolean shu } + private String findByShowQuery() { + switch (schedulingMode) { + case PRIORITY_ONLY: return FIND_JOBS_BY_SHOW_PRIORITY_MODE; + case FIFO: return FIND_JOBS_BY_SHOW_FIFO_MODE; + case BALANCED: return FIND_JOBS_BY_SHOW_BALANCED_MODE; + default: return FIND_JOBS_BY_SHOW_PRIORITY_MODE; + } + } + + private String findByGroupQuery() { + switch (schedulingMode) { + case PRIORITY_ONLY: return FIND_JOBS_BY_GROUP_PRIORITY_MODE; + case FIFO: return FIND_JOBS_BY_GROUP_FIFO_MODE; + case BALANCED: return FIND_JOBS_BY_GROUP_BALANCED_MODE; + default: return FIND_JOBS_BY_GROUP_PRIORITY_MODE; + } + } + @Override - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { + public List findDispatchJobsForAllShows(DispatchHost host, int numJobs) { return findDispatchJobs(host, numJobs, true); } @Override - public 
Set findDispatchJobs(DispatchHost host, int numJobs) { + public List findDispatchJobs(DispatchHost host, int numJobs) { return findDispatchJobs(host, numJobs, false); } @Override - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { - LinkedHashSet result = new LinkedHashSet(5); - result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_GROUP, + public List findDispatchJobs(DispatchHost host, GroupInterface g) { + List result = getJdbcTemplate().query( + findByGroupQuery(), PKJOB_MAPPER, g.getGroupId(),host.getFacilityId(), host.os, host.idleCores, host.idleMemory, threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, - host.getName(), 50)); + host.idleGpus, + (host.idleGpuMemory > 0) ? 1 : 0, host.idleGpuMemory, + host.getName(), 50); return result; } @@ -240,7 +273,7 @@ public List findNextDispatchFrames(JobInterface job, FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_PROC, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.memoryReserved, - proc.gpuReserved, + proc.gpuMemoryReserved, job.getJobId(), limit); } @@ -250,7 +283,8 @@ public List findNextDispatchFrames(JobInterface job, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, - (proc.gpuReserved > 0) ? 1: 0, proc.gpuReserved, + proc.gpusReserved, + (proc.gpuMemoryReserved > 0) ? 1 : 0, proc.gpuMemoryReserved, job.getJobId(), proc.hostName, job.getJobId(), limit); } @@ -264,7 +298,7 @@ public List findNextDispatchFrames(JobInterface job, return getJdbcTemplate().query( FIND_LOCAL_DISPATCH_FRAME_BY_JOB_AND_HOST, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleMemory, host.idleGpu, job.getJobId(), + host.idleMemory, host.idleGpuMemory, job.getJobId(), limit); } else { @@ -273,7 +307,8 @@ public List findNextDispatchFrames(JobInterface job, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, + host.idleGpus, + (host.idleGpuMemory > 0) ? 
1 : 0, host.idleGpuMemory, job.getJobId(), host.getName(), job.getJobId(), limit); } @@ -288,7 +323,7 @@ public List findNextDispatchFrames(LayerInterface layer, return getJdbcTemplate().query( FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_PROC, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - proc.memoryReserved, proc.gpuReserved, + proc.memoryReserved, proc.gpuMemoryReserved, layer.getLayerId(), limit); } @@ -297,7 +332,7 @@ public List findNextDispatchFrames(LayerInterface layer, FIND_DISPATCH_FRAME_BY_LAYER_AND_PROC, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, proc.coresReserved, proc.memoryReserved, - proc.gpuReserved, + proc.gpusReserved, proc.gpuMemoryReserved, layer.getLayerId(), layer.getLayerId(), proc.hostName, limit); } @@ -311,7 +346,7 @@ public List findNextDispatchFrames(LayerInterface layer, return getJdbcTemplate().query( FIND_LOCAL_DISPATCH_FRAME_BY_LAYER_AND_HOST, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, - host.idleMemory, host.idleGpu, layer.getLayerId(), + host.idleMemory, host.idleGpuMemory, layer.getLayerId(), limit); } else { @@ -320,7 +355,7 @@ public List findNextDispatchFrames(LayerInterface layer, FrameDaoJdbc.DISPATCH_FRAME_MAPPER, host.idleCores, host.idleMemory, threadMode(host.threadMode), - host.idleGpu, layer.getLayerId(), layer.getLayerId(), + host.idleGpus, host.idleGpuMemory, layer.getLayerId(), layer.getLayerId(), host.getName(), limit); } } @@ -345,7 +380,7 @@ public boolean findUnderProcedJob(JobInterface excludeJob, VirtualProc proc) { Integer.class, excludeJob.getShowId(), proc.getFacilityId(), proc.os, excludeJob.getShowId(), proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, proc.hostName) > 0; } catch (org.springframework.dao.EmptyResultDataAccessException e) { return false; @@ -363,7 +398,7 @@ public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { HIGHER_PRIORITY_JOB_BY_FACILITY_EXISTS, Boolean.class, 
baseJob.priority, proc.getFacilityId(), proc.os, proc.getFacilityId(), proc.os, - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, proc.hostName); } catch (org.springframework.dao.EmptyResultDataAccessException e) { return false; @@ -374,18 +409,17 @@ public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { } @Override - public Set findDispatchJobs(DispatchHost host, + public List findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { - LinkedHashSet result = new LinkedHashSet(numJobs); - - result.addAll(getJdbcTemplate().query( - FIND_JOBS_BY_SHOW, + List result = getJdbcTemplate().query( + findByShowQuery(), PKJOB_MAPPER, show.getShowId(), host.getFacilityId(), host.os, host.idleCores, host.idleMemory, threadMode(host.threadMode), - (host.idleGpu > 0) ? 1: 0, host.idleGpu, - host.getName(), numJobs * 10)); + host.idleGpus, + (host.idleGpuMemory > 0) ? 1 : 0, host.idleGpuMemory, + host.getName(), numJobs * 10); return result; } @@ -401,5 +435,10 @@ public Set findLocalDispatchJobs(DispatchHost host) { return result; } + + @Override + public void clearCache() { + bookableShows.clear(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java index a1cb7d2cf..ee30d5f8e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FilterDaoJdbc.java @@ -54,7 +54,7 @@ public class FilterDaoJdbc extends JdbcDaoSupport implements FilterDao { private static final String GET_ACTIVE_FILTERS = "SELECT " + - "filter.*" + + "filter.* " + "FROM " + "filter " + "WHERE " + @@ -66,7 +66,7 @@ public class FilterDaoJdbc extends JdbcDaoSupport implements FilterDao { private static final String GET_FILTERS = "SELECT " + - "filter.*" + + "filter.* " + "FROM " 
+ "filter " + "WHERE " + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java index 5c043e995..21c197a3b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/FrameDaoJdbc.java @@ -27,6 +27,7 @@ import java.sql.Timestamp; import java.util.Optional; +import org.springframework.dao.DataAccessException; import org.springframework.jdbc.core.RowMapper; import org.springframework.jdbc.core.support.JdbcDaoSupport; @@ -66,7 +67,9 @@ public class FrameDaoJdbc extends JdbcDaoSupport implements FrameDao { "ts_updated = current_timestamp, " + "int_version = int_version + 1, " + "int_total_past_core_time = int_total_past_core_time + " + - "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_cores / 100) " + + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_cores / 100)," + + "int_total_past_gpu_time = int_total_past_gpu_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp - ts_started) * int_gpus) " + "WHERE " + "frame.pk_frame = ? " + "AND " + @@ -93,7 +96,9 @@ public boolean updateFrameStopped(FrameInterface frame, FrameState state, "int_mem_max_used = ?, " + "int_version = int_version + 1, " + "int_total_past_core_time = int_total_past_core_time + " + - "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_cores / 100) " + + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_cores / 100), " + + "int_total_past_gpu_time = int_total_past_gpu_time + " + + "round(INTERVAL_TO_SECONDS(current_timestamp + interval '1' second - ts_started) * int_gpus) " + "WHERE " + "frame.pk_frame = ? 
" + "AND " + @@ -112,7 +117,7 @@ public boolean updateFrameStopped(FrameInterface frame, FrameState state, frame.getVersion()) == 1; } - private static final String UPDATE_FRAME_CLEARED = + private static final String UPDATE_FRAME_REASON = "UPDATE "+ "frame "+ "SET " + @@ -128,15 +133,42 @@ public boolean updateFrameStopped(FrameInterface frame, FrameState state, "(SELECT proc.pk_frame FROM " + "proc WHERE proc.pk_frame=?)"; + private int updateFrame(FrameInterface frame, int exitStatus) { + + int result = getJdbcTemplate().update( + UPDATE_FRAME_REASON, + FrameState.WAITING.toString(), + exitStatus, + frame.getFrameId(), + frame.getFrameId()); + + return result; + } + + @Override + public boolean updateFrameHostDown(FrameInterface frame) { + return updateFrame(frame, Dispatcher.EXIT_STATUS_DOWN_HOST) > 0; + } + @Override public boolean updateFrameCleared(FrameInterface frame) { + return updateFrame(frame, Dispatcher.EXIT_STATUS_FRAME_CLEARED) > 0; + } + private static final String UPDATE_FRAME_MEMORY_ERROR = + "UPDATE "+ + "frame "+ + "SET " + + "int_exit_status = ?, " + + "int_version = int_version + 1 " + + "WHERE " + + "frame.pk_frame = ? "; + @Override + public boolean updateFrameMemoryError(FrameInterface frame) { int result = getJdbcTemplate().update( - UPDATE_FRAME_CLEARED, - FrameState.WAITING.toString(), - Dispatcher.EXIT_STATUS_FRAME_CLEARED, - frame.getFrameId(), - frame.getFrameId()); + UPDATE_FRAME_MEMORY_ERROR, + Dispatcher.EXIT_STATUS_MEMORY_FAILURE, + frame.getFrameId()); return result > 0; } @@ -149,7 +181,8 @@ public boolean updateFrameCleared(FrameInterface frame) { "str_host = ?, " + "int_cores = ?, " + "int_mem_reserved = ?, " + - "int_gpu_reserved = ?, " + + "int_gpus = ?, " + + "int_gpu_mem_reserved = ?, " + "ts_updated = current_timestamp, " + "ts_started = current_timestamp, " + "ts_stopped = null, " + @@ -191,31 +224,45 @@ public boolean updateFrameCleared(FrameInterface frame) { "WHERE " + "pk_frame = ? 
" + "AND " + - "int_exit_status NOT IN (?,?,?) "; + "int_exit_status NOT IN (?,?,?,?,?,?,?) "; @Override public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { lockFrameForUpdate(frame, FrameState.WAITING); - int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, - FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, - proc.memoryReserved, proc.gpuReserved, frame.getFrameId(), - FrameState.WAITING.toString(), frame.getVersion()); - - if (result == 0) { - String error_msg = "the frame " + - frame + " was updated by another thread."; - throw new FrameReservationException(error_msg); + try { + int result = getJdbcTemplate().update(UPDATE_FRAME_STARTED, + FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, + proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, frame.getFrameId(), + FrameState.WAITING.toString(), frame.getVersion()); + if (result == 0) { + String error_msg = "the frame " + + frame + " was updated by another thread."; + throw new FrameReservationException(error_msg); + } + } catch (DataAccessException e) { + /* + * This usually happens when the folder's max cores + * limit has exceeded + */ + throw new FrameReservationException(e.getCause()); } /* * Frames that were killed via nimby or hardware errors not attributed to - * the software do not increment the retry counter. + * the software do not increment the retry counter. Like failed launch, + * orphaned frame, failed kill or down host. 
*/ - getJdbcTemplate().update(UPDATE_FRAME_RETRIES, - frame.getFrameId(), -1, FrameExitStatus.SKIP_RETRY_VALUE, - Dispatcher.EXIT_STATUS_FRAME_CLEARED); + try { + getJdbcTemplate().update(UPDATE_FRAME_RETRIES, + frame.getFrameId(), -1, FrameExitStatus.SKIP_RETRY_VALUE, + FrameExitStatus.FAILED_LAUNCH_VALUE, Dispatcher.EXIT_STATUS_FRAME_CLEARED, + Dispatcher.EXIT_STATUS_FRAME_ORPHAN, Dispatcher.EXIT_STATUS_FAILED_KILL, + Dispatcher.EXIT_STATUS_DOWN_HOST); + } catch (DataAccessException e) { + throw new FrameReservationException(e.getCause()); + } } private static final String UPDATE_FRAME_FIXED = @@ -226,7 +273,8 @@ public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { "str_host=?, " + "int_cores=?, "+ "int_mem_reserved = ?, " + - "int_gpu_reserved = ?, " + + "int_gpus = ?, " + + "int_gpu_mem_reserved = ?, " + "ts_updated = current_timestamp, " + "ts_started = current_timestamp, " + "ts_stopped = null, "+ @@ -240,7 +288,7 @@ public void updateFrameStarted(VirtualProc proc, FrameInterface frame) { public boolean updateFrameFixed(VirtualProc proc, FrameInterface frame) { return getJdbcTemplate().update(UPDATE_FRAME_FIXED, FrameState.RUNNING.toString(), proc.hostName, proc.coresReserved, - proc.memoryReserved, proc.gpuReserved, frame.getFrameId()) == 1; + proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, frame.getFrameId()) == 1; } @Override @@ -276,7 +324,9 @@ public DispatchFrame mapRow(ResultSet rs, int rowNum) throws SQLException { frame.maxCores = rs.getInt("int_cores_max"); frame.threadable = rs.getBoolean("b_threadable"); frame.minMemory = rs.getLong("int_mem_min"); - frame.minGpu = rs.getLong("int_gpu_min"); + frame.minGpus = rs.getInt("int_gpus_min"); + frame.maxGpus = rs.getInt("int_gpus_max"); + frame.minGpuMemory = rs.getLong("int_gpu_mem_min"); frame.version = rs.getInt("int_version"); frame.services = rs.getString("str_services"); return frame; @@ -308,7 +358,9 @@ public DispatchFrame mapRow(ResultSet rs, int rowNum) 
throws SQLException { "layer.int_cores_max,"+ "layer.b_threadable,"+ "layer.int_mem_min, "+ - "layer.int_gpu_min, "+ + "layer.int_gpus_min,"+ + "layer.int_gpus_max,"+ + "layer.int_gpu_mem_min, "+ "layer.str_range, "+ "layer.int_chunk_size, " + "layer.str_services " + @@ -390,7 +442,7 @@ public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { frame.jobId = rs.getString("pk_job"); frame.layerId = rs.getString("pk_layer"); frame.showId = rs.getString("pk_show"); - frame.maxRss = rs.getInt("int_mem_max_used"); + frame.maxRss = rs.getLong("int_mem_max_used"); frame.name = rs.getString("str_name"); frame.number = rs.getInt("int_number"); frame.dispatchOrder = rs.getInt("int_dispatch_order"); @@ -402,7 +454,7 @@ public FrameDetail mapRow(ResultSet rs, int rowNum) throws SQLException { frame.version = rs.getInt("int_version"); if (rs.getString("str_host") != null) { - frame.lastResource = String.format("%s/%d",rs.getString("str_host"),rs.getInt("int_cores")); + frame.lastResource = String.format("%s/%d/%d",rs.getString("str_host"),rs.getInt("int_cores"),rs.getInt("int_gpus")); } else { frame.lastResource = ""; @@ -946,7 +998,8 @@ public ResourceUsage mapRow(ResultSet rs, int rowNum) throws SQLException { return new ResourceUsage( rs.getLong("int_clock_time"), - rs.getInt("int_cores")); + rs.getInt("int_cores"), + rs.getInt("int_gpus")); } }; @@ -962,42 +1015,30 @@ public ResourceUsage getResourceUsage(FrameInterface f) { "SELECT " + "COALESCE(interval_to_seconds(current_timestamp - ts_started), 1) " + "AS int_clock_time, " + - "COALESCE(int_cores, 100) AS int_cores " + + "COALESCE(int_cores, 100) AS int_cores," + + "int_gpus " + "FROM " + "frame " + "WHERE " + "pk_frame = ?", RESOURCE_USAGE_MAPPER, f.getFrameId()); } - private static final String UPDATE_FRAME_IO_USAGE = - "UPDATE " + - "frame " + - "SET " + - "ts_updated = current_timestamp," + - "ts_llu = ? " + - "WHERE " + - "pk_frame = ? 
"; - - @Override - public void updateFrameUsage(FrameInterface f, long lluTime) { - getJdbcTemplate().update(UPDATE_FRAME_IO_USAGE, - new Timestamp(lluTime * 1000l), f.getFrameId()); - } - - private static final String UPDATE_FRAME_MEMORY_USAGE = + private static final String UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME = "UPDATE " + "frame " + "SET " + "ts_updated = current_timestamp," + "int_mem_max_used = ?," + - "int_mem_used = ? " + + "int_mem_used = ?," + + "ts_llu = ? " + "WHERE " + "pk_frame = ? "; @Override - public void updateFrameMemoryUsage(FrameInterface f, long maxRss, long rss) { - getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE, - maxRss, rss, f.getFrameId()); + public void updateFrameMemoryUsageAndLluTime(FrameInterface f, long maxRss, long rss, + long lluTime) { + getJdbcTemplate().update(UPDATE_FRAME_MEMORY_USAGE_AND_LLU_TIME, + maxRss, rss, new Timestamp(lluTime * 1000l), f.getFrameId()); } /** diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java index 9051131ea..948020f9f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/GroupDaoJdbc.java @@ -232,6 +232,39 @@ public boolean isOverMinCores(JobInterface job) { Integer.class, job.getJobId()) > 0; } + @Override + public void updateDefaultJobMaxGpus(GroupInterface group, int value) { + if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } + getJdbcTemplate().update( + "UPDATE folder SET int_job_max_gpus=? WHERE pk_folder=?", + value, group.getId()); + } + + @Override + public void updateDefaultJobMinGpus(GroupInterface group, int value) { + if (value <= 0) { value = CueUtil.FEATURE_DISABLED; } + getJdbcTemplate().update( + "UPDATE folder SET int_job_min_gpus=? 
WHERE pk_folder=?", + value, group.getId()); + } + + @Override + public void updateMaxGpus(GroupInterface group, int value) { + if (value < 0) { value = CueUtil.FEATURE_DISABLED; } + + getJdbcTemplate().update( + "UPDATE folder_resource SET int_max_gpus=? WHERE pk_folder=?", + value, group.getId()); + } + + @Override + public void updateMinGpus(GroupInterface group, int value) { + if (value < 0) { value = 0; } + getJdbcTemplate().update( + "UPDATE folder_resource SET int_min_gpus=? WHERE pk_folder=?", + value, group.getId()); + } + @Override public void updateDefaultJobPriority(GroupInterface group, int value) { if (value < 0) { value = CueUtil.FEATURE_DISABLED; } @@ -251,6 +284,8 @@ public void updateDefaultJobPriority(GroupInterface group, int value) { "folder.pk_folder, " + "folder.int_job_max_cores,"+ "folder.int_job_min_cores,"+ + "folder.int_job_max_gpus,"+ + "folder.int_job_min_gpus,"+ "folder.int_job_priority,"+ "folder.str_name,"+ "folder.pk_parent_folder,"+ @@ -258,7 +293,9 @@ public void updateDefaultJobPriority(GroupInterface group, int value) { "folder.pk_dept,"+ "folder_level.int_level, " + "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores " + + "folder_resource.int_max_cores," + + "folder_resource.int_min_gpus,"+ + "folder_resource.int_max_gpus " + "FROM " + "folder, "+ "folder_level, " + @@ -273,6 +310,8 @@ public void updateDefaultJobPriority(GroupInterface group, int value) { "folder.pk_folder, " + "folder.int_job_max_cores,"+ "folder.int_job_min_cores,"+ + "folder.int_job_max_gpus,"+ + "folder.int_job_min_gpus,"+ "folder.int_job_priority,"+ "folder.str_name,"+ "folder.pk_parent_folder,"+ @@ -280,7 +319,9 @@ public void updateDefaultJobPriority(GroupInterface group, int value) { "folder.pk_dept,"+ "folder_level.int_level, " + "folder_resource.int_min_cores,"+ - "folder_resource.int_max_cores " + + "folder_resource.int_max_cores," + + "folder_resource.int_min_gpus,"+ + "folder_resource.int_max_gpus " + "FROM " + "folder, "+ 
"folder_level, " + @@ -393,7 +434,13 @@ public GroupDetail mapRow(ResultSet rs, int rowNum) throws SQLException { group.id = rs.getString("pk_folder"); group.jobMaxCores = rs.getInt("int_job_max_cores"); group.jobMinCores = rs.getInt("int_job_min_cores"); + group.jobMaxGpus = rs.getInt("int_job_max_gpus"); + group.jobMinGpus = rs.getInt("int_job_min_gpus"); group.jobPriority = rs.getInt("int_job_priority"); + group.minCores = rs.getInt("int_min_cores"); + group.maxCores = rs.getInt("int_max_cores"); + group.minGpus = rs.getInt("int_min_gpus"); + group.maxGpus = rs.getInt("int_max_gpus"); group.name = rs.getString("str_name"); group.parentId = rs.getString("pk_parent_folder"); group.showId = rs.getString("pk_show"); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java index bf42f0ecb..f703416b2 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/HostDaoJdbc.java @@ -71,10 +71,12 @@ public HostEntity mapRow(ResultSet rs, int rowNum) throws SQLException { host.unlockAtBoot = rs.getBoolean("b_unlock_boot"); host.cores = rs.getInt("int_cores"); host.idleCores = rs.getInt("int_cores_idle"); - host.memory = rs.getInt("int_mem"); - host.idleMemory = rs.getInt("int_mem_idle"); - host.gpu = rs.getInt("int_gpu"); - host.idleGpu = rs.getInt("int_gpu_idle"); + host.memory = rs.getLong("int_mem"); + host.idleMemory = rs.getLong("int_mem_idle"); + host.gpus = rs.getInt("int_gpus"); + host.idleGpus = rs.getInt("int_gpus_idle"); + host.gpuMemory = rs.getLong("int_gpu_mem"); + host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); host.dateBooted = rs.getDate("ts_booted"); host.dateCreated = rs.getDate("ts_created"); host.datePinged = rs.getDate("ts_ping"); @@ -110,8 +112,10 @@ public HostInterface mapRow(final ResultSet rs, int rowNum) throws SQLException "host.int_cores_idle,"+ 
"host.int_mem,"+ "host.int_mem_idle,"+ - "host.int_gpu,"+ - "host.int_gpu_idle,"+ + "host.int_gpus,"+ + "host.int_gpus_idle,"+ + "host.int_gpu_mem,"+ + "host.int_gpu_mem_idle,"+ "host.ts_created,"+ "host.str_name, " + "host_stat.str_state,"+ @@ -199,12 +203,14 @@ public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { host.facilityId = rs.getString("pk_facility"); host.name = rs.getString("str_name"); host.lockState = LockState.valueOf(rs.getString("str_lock_state")); - host.memory = rs.getInt("int_mem"); + host.memory = rs.getLong("int_mem"); host.cores = rs.getInt("int_cores"); - host.gpu= rs.getInt("int_gpu"); - host.idleMemory= rs.getInt("int_mem_idle"); + host.gpus = rs.getInt("int_gpus"); + host.gpuMemory = rs.getLong("int_gpu_mem"); + host.idleMemory= rs.getLong("int_mem_idle"); host.idleCores = rs.getInt("int_cores_idle"); - host.idleGpu= rs.getInt("int_gpu_idle"); + host.idleGpuMemory = rs.getLong("int_gpu_mem_idle"); + host.idleGpus = rs.getInt("int_gpus_idle"); host.isNimby = rs.getBoolean("b_nimby"); host.threadMode = rs.getInt("int_thread_mode"); host.tags = rs.getString("str_tags"); @@ -225,8 +231,10 @@ public DispatchHost mapRow(ResultSet rs, int rowNum) throws SQLException { "host.int_cores_idle, " + "host.int_mem,"+ "host.int_mem_idle, "+ - "host.int_gpu,"+ - "host.int_gpu_idle, "+ + "host.int_gpus, "+ + "host.int_gpus_idle, " + + "host.int_gpu_mem,"+ + "host.int_gpu_mem_idle, "+ "host.b_nimby, "+ "host.int_thread_mode, "+ "host.str_tags, " + @@ -276,12 +284,14 @@ public DispatchHost getDispatchHost(String id) { "int_cores_idle, " + "int_mem,"+ "int_mem_idle,"+ - "int_gpu,"+ - "int_gpu_idle,"+ + "int_gpus, " + + "int_gpus_idle, " + + "int_gpu_mem,"+ + "int_gpu_mem_idle,"+ "str_fqdn, " + "int_thread_mode "+ ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)", "INSERT INTO " + "host_stat " + @@ -290,8 +300,8 @@ public DispatchHost getDispatchHost(String id) { "pk_host,"+ "int_mem_total, " + 
"int_mem_free,"+ - "int_gpu_total, " + - "int_gpu_free,"+ + "int_gpu_mem_total, " + + "int_gpu_mem_free,"+ "int_swap_total, " + "int_swap_free,"+ "int_mcp_total, " + @@ -347,28 +357,17 @@ public void insertRenderHost(RenderHost host, AllocationInterface a, boolean use os = Dispatcher.OS_DEFAULT; } - long totalGpu; - if (host.getAttributesMap().containsKey("totalGpu")) - totalGpu = Integer.parseInt(host.getAttributesMap().get("totalGpu")); - else - totalGpu = 0; - - long freeGpu; - if (host.getAttributesMap().containsKey("freeGpu")) - freeGpu = Integer.parseInt(host.getAttributesMap().get("freeGpu")); - else - freeGpu = 0; - - getJdbcTemplate().update(INSERT_HOST_DETAIL[0], hid, a.getAllocationId(), name, host.getNimbyEnabled(), LockState.OPEN.toString(), host.getNumProcs(), coreUnits, coreUnits, - memUnits, memUnits, totalGpu, totalGpu, + memUnits, memUnits, + host.getNumGpus(), host.getNumGpus(), + host.getTotalGpuMem(), host.getTotalGpuMem(), fqdn, threadMode.getNumber()); getJdbcTemplate().update(INSERT_HOST_DETAIL[1], hid, hid, host.getTotalMem(), host.getFreeMem(), - totalGpu, freeGpu, + host.getTotalGpuMem(), host.getFreeGpuMem(), host.getTotalSwap(), host.getFreeSwap(), host.getTotalMcp(), host.getFreeMcp(), host.getLoad(), new Timestamp(host.getBootTime() * 1000l), @@ -396,8 +395,8 @@ public CallableStatement createCallableStatement(Connection con) throws SQLExcep "int_swap_free = ?, "+ "int_mcp_total = ?, " + "int_mcp_free = ?, " + - "int_gpu_total = ?, " + - "int_gpu_free = ?, " + + "int_gpu_mem_total = ?, " + + "int_gpu_mem_free = ?, " + "int_load = ?," + "ts_booted = ?, " + "ts_ping = current_timestamp, "+ @@ -410,7 +409,7 @@ public void updateHostStats(HostInterface host, long totalMemory, long freeMemory, long totalSwap, long freeSwap, long totalMcp, long freeMcp, - long totalGpu, long freeGpu, + long totalGpuMemory, long freeGpuMemory, int load, Timestamp bootTime, String os) { @@ -420,7 +419,7 @@ public void updateHostStats(HostInterface host, 
getJdbcTemplate().update(UPDATE_RENDER_HOST, totalMemory, freeMemory, totalSwap, - freeSwap, totalMcp, freeMcp, totalGpu, freeGpu, load, + freeSwap, totalMcp, freeMcp, totalGpuMemory, freeGpuMemory, load, bootTime, os, host.getHostId()); } @@ -440,12 +439,8 @@ public void updateHostResources(HostInterface host, HostReport report) { long memory = convertMemoryUnits(report.getHost()); int cores = report.getHost().getNumProcs() * report.getHost().getCoresPerProc(); - - long totalGpu; - if (report.getHost().getAttributesMap().containsKey("totalGpu")) - totalGpu = Integer.parseInt(report.getHost().getAttributesMap().get("totalGpu")); - else - totalGpu = 0; + long gpu_memory = report.getHost().getTotalGpuMem(); + int gpus = report.getHost().getNumGpus(); getJdbcTemplate().update( "UPDATE " + @@ -456,16 +451,20 @@ public void updateHostResources(HostInterface host, HostReport report) { "int_cores_idle=?," + "int_mem=?," + "int_mem_idle=?, " + - "int_gpu=?," + - "int_gpu_idle=? " + + "int_gpus=?," + + "int_gpus_idle=?," + + "int_gpu_mem=?," + + "int_gpu_mem_idle=? " + "WHERE " + "pk_host=? 
"+ "AND " + "int_cores = int_cores_idle " + "AND " + - "int_mem = int_mem_idle", + "int_mem = int_mem_idle " + + "AND " + + "int_gpus = int_gpus_idle", report.getHost().getNimbyEnabled(), cores, cores, - memory, memory, totalGpu, totalGpu, host.getId()); + memory, memory, gpus, gpus, gpu_memory, gpu_memory, host.getId()); } @Override @@ -489,6 +488,34 @@ public void deleteHost(HostInterface host) { "DELETE FROM host WHERE pk_host=?",host.getHostId()); } + private static final String DELETE_DOWN_HOST_COMMENTS = + "DELETE " + + "FROM " + + "comments " + + "USING " + + "host_stat " + + "WHERE " + + "comments.pk_host = host_stat.pk_host " + + "AND " + + "host_stat.str_state = ?"; + + private static final String DELETE_DOWN_HOSTS = + "DELETE " + + "FROM " + + "host " + + "USING " + + "host_stat " + + "WHERE " + + "host.pk_host = host_stat.pk_host " + + "AND " + + "host_stat.str_state=?"; + + @Override + public void deleteDownHosts() { + getJdbcTemplate().update(DELETE_DOWN_HOST_COMMENTS, HardwareState.DOWN.toString()); + getJdbcTemplate().update(DELETE_DOWN_HOSTS, HardwareState.DOWN.toString()); + } + @Override public void updateHostState(HostInterface host, HardwareState state) { getJdbcTemplate().update( @@ -496,6 +523,13 @@ public void updateHostState(HostInterface host, HardwareState state) { state.toString(), host.getHostId()); } + @Override + public void updateHostFreeTempDir(HostInterface host, Long freeTempDir) { + getJdbcTemplate().update( + "UPDATE host_stat SET int_mcp_free=? WHERE pk_host=?", + freeTempDir, host.getHostId()); + } + @Override public void updateHostSetAllocation(HostInterface host, AllocationInterface alloc) { @@ -578,15 +612,6 @@ public void updateHostOs(HostInterface host, String os) { os, host.getHostId()); } - @Override - public boolean isKillMode(HostInterface h) { - return getJdbcTemplate().queryForObject( - "SELECT COUNT(1) FROM host_stat WHERE pk_host = ? " + - "AND int_swap_total - int_swap_free > ? 
AND int_mem_free < ?", - Integer.class, h.getHostId(), Dispatcher.KILL_MODE_SWAP_THRESHOLD, - Dispatcher.KILL_MODE_MEM_THRESHOLD) > 0; - } - @Override public int getStrandedCoreUnits(HostInterface h) { try { @@ -600,6 +625,18 @@ public int getStrandedCoreUnits(HostInterface h) { } } + @Override + public int getStrandedGpus(HostInterface h) { + try { + int idle_gpus = getJdbcTemplate().queryForObject( + "SELECT int_gpus_idle FROM host WHERE pk_host = ?", + Integer.class, h.getHostId()); + return idle_gpus; + } catch (EmptyResultDataAccessException e) { + return 0; + } + } + private static final String IS_HOST_UP = "SELECT " + "COUNT(1) " + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java index a3dab8f4e..a5f595f4e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/JobDaoJdbc.java @@ -110,12 +110,14 @@ public JobDetail mapRow(ResultSet rs, int rowNum) throws SQLException { JobDetail job = new JobDetail(); job.id = rs.getString("pk_job"); job.showId = rs.getString("pk_show"); - job.facilityId = rs.getString("pk_show"); + job.facilityId = rs.getString("pk_facility"); job.deptId = rs.getString("pk_dept"); job.groupId = rs.getString("pk_folder"); job.logDir = rs.getString("str_log_dir"); job.maxCoreUnits = rs.getInt("int_max_cores"); job.minCoreUnits = rs.getInt("int_min_cores"); + job.maxGpuUnits = rs.getInt("int_max_gpus"); + job.minGpuUnits = rs.getInt("int_min_gpus"); job.name = rs.getString("str_name"); job.priority = rs.getInt("int_priority"); job.shot = rs.getString("str_shot"); @@ -218,6 +220,8 @@ public boolean isJobComplete(JobInterface job) { "job.int_max_retries,"+ "job_resource.int_max_cores,"+ "job_resource.int_min_cores,"+ + "job_resource.int_max_gpus,"+ + "job_resource.int_min_gpus,"+ "job_resource.int_priority,"+ "show.str_name AS show_name, " + 
"dept.str_name AS dept_name, "+ @@ -364,6 +368,32 @@ public void updateMaxCores(JobInterface j, int v) { v, j.getJobId()); } + @Override + public void updateMinGpus(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMaxGpus(GroupInterface g, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE " + + "pk_job IN (SELECT pk_job FROM job WHERE pk_folder=?)", + v, g.getGroupId()); + } + + @Override + public void updateMinGpus(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_min_gpus=? WHERE pk_job=?", + v, j.getJobId()); + } + + @Override + public void updateMaxGpus(JobInterface j, int v) { + getJdbcTemplate().update("UPDATE job_resource SET int_max_gpus=? WHERE pk_job=?", + v, j.getJobId()); + } + @Override public void updatePaused(JobInterface j, boolean b) { getJdbcTemplate().update("UPDATE job SET b_paused=? 
WHERE pk_job=?", @@ -407,7 +437,7 @@ public void updateMaxRSS(JobInterface job, long value) { "str_visible_name = NULL, " + "ts_stopped = current_timestamp "+ "WHERE " + - "str_state = 'PENDING'" + + "str_state = 'PENDING' " + "AND " + "pk_job = ?"; @@ -544,7 +574,7 @@ public void activateJob(JobInterface job, JobState jobState) { jobTotals[0] + jobTotals[1], layers.size(), job.getJobId()); getJdbcTemplate().update( - "UPDATE show SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", + "UPDATE show_stats SET int_frame_insert_count=int_frame_insert_count+?, int_job_insert_count=int_job_insert_count+1 WHERE pk_show=?", jobTotals[0] + jobTotals[1], job.getShowId()); updateState(job, jobState); @@ -632,6 +662,44 @@ public boolean isAtMaxCores(JobInterface job) { Integer.class, job.getJobId()) > 0; } + private static final String IS_JOB_OVER_MAX_GPUS = + "SELECT " + + "COUNT(1) " + + "FROM " + + "job_resource " + + "WHERE " + + "job_resource.pk_job = ? " + + "AND " + + "job_resource.int_gpus + ? > job_resource.int_max_gpus"; + + @Override + public boolean isOverMaxGpus(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, + Integer.class, job.getJobId(), 0) > 0; + } + + @Override + public boolean isOverMaxGpus(JobInterface job, int gpu) { + return getJdbcTemplate().queryForObject(IS_JOB_OVER_MAX_GPUS, + Integer.class, job.getJobId(), gpu) > 0; + } + + private static final String IS_JOB_AT_MAX_GPUS = + "SELECT " + + "COUNT(1) " + + "FROM " + + "job_resource " + + "WHERE " + + "job_resource.pk_job = ? 
" + + "AND " + + "job_resource.int_gpus >= job_resource.int_max_gpus "; + + @Override + public boolean isAtMaxGpus(JobInterface job) { + return getJdbcTemplate().queryForObject(IS_JOB_AT_MAX_GPUS, + Integer.class, job.getJobId()) > 0; + } + @Override public void updateMaxFrameRetries(JobInterface j, int max_retries) { if (max_retries < 0) { @@ -685,8 +753,10 @@ public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { private static final String GET_EXECUTION_SUMMARY = "SELECT " + "job_usage.int_core_time_success,"+ - "job_usage.int_core_time_fail," + - "job_mem.int_max_rss " + + "job_usage.int_core_time_fail,"+ + "job_usage.int_gpu_time_success,"+ + "job_usage.int_gpu_time_fail,"+ + "job_mem.int_max_rss " + "FROM " + "job," + "job_usage, "+ @@ -707,6 +777,9 @@ public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { e.coreTimeSuccess = rs.getLong("int_core_time_success"); e.coreTimeFail = rs.getLong("int_core_time_fail"); e.coreTime = e.coreTimeSuccess + e.coreTimeFail; + e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); + e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); + e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; e.highMemoryKb = rs.getLong("int_max_rss"); return e; @@ -795,6 +868,20 @@ public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) } break; + case MinGpus: + if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_min_gpus=?,"); + values.add(dest.jobMinGpus); + } + break; + + case MaxGpus: + if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_max_gpus=?,"); + values.add(dest.jobMaxGpus); + } + break; + case All: if (dest.jobPriority != CueUtil.FEATURE_DISABLED) { query.append("int_priority=?,"); @@ -810,6 +897,16 @@ public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) query.append("int_max_cores=?,"); values.add(dest.jobMaxCores); } + + if (dest.jobMinGpus != CueUtil.FEATURE_DISABLED) { + 
query.append("int_min_gpus=?,"); + values.add(dest.jobMinGpus); + } + + if (dest.jobMaxGpus != CueUtil.FEATURE_DISABLED) { + query.append("int_max_gpus=?,"); + values.add(dest.jobMaxGpus); + } break; } } @@ -848,9 +945,11 @@ public void updateParent(JobInterface job, GroupDetail dest, Inherit[] inherits) "AND " + "job.b_auto_book = true " + "AND " + - "job_stat.int_waiting_count != 0" + + "job_stat.int_waiting_count != 0 " + "AND " + "job_resource.int_cores < job_resource.int_max_cores " + + "AND " + + "job_resource.int_gpus < job_resource.int_max_gpus " + "AND " + "job.pk_facility = ? " + "LIMIT 1"; @@ -922,11 +1021,13 @@ public void updateUsage(JobInterface job, ResourceUsage usage, int exitStatus) { "job_usage " + "SET " + "int_core_time_success = int_core_time_success + ?," + + "int_gpu_time_success = int_gpu_time_success + ?," + "int_clock_time_success = int_clock_time_success + ?,"+ "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + "pk_job = ? ", usage.getCoreTimeSeconds(), + usage.getGpuTimeSeconds(), usage.getClockTimeSeconds(), job.getJobId()); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java index 26654f392..15941a196 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/LayerDaoJdbc.java @@ -205,7 +205,8 @@ public LayerDetail mapRow(ResultSet rs, int rowNum) throws SQLException { layer.range = rs.getString("str_range"); layer.minimumCores = rs.getInt("int_cores_min"); layer.minimumMemory = rs.getLong("int_mem_min"); - layer.minimumGpu = rs.getLong("int_gpu_min"); + layer.minimumGpus = rs.getInt("int_gpus_min"); + layer.minimumGpuMemory = rs.getLong("int_gpu_mem_min"); layer.type = LayerType.valueOf(rs.getString("str_type")); layer.tags = Sets.newHashSet( rs.getString("str_tags").replaceAll(" ", "").split("\\|")); @@ -311,12 +312,14 
@@ public LayerInterface getLayer(String id) { "int_cores_max, "+ "b_threadable, " + "int_mem_min, " + - "int_gpu_min, " + + "int_gpus_min, "+ + "int_gpus_max, "+ + "int_gpu_mem_min, " + "str_services, " + "int_timeout," + "int_timeout_llu " + ") " + - "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; + "VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; @Override public void insertLayerDetail(LayerDetail l) { @@ -326,7 +329,7 @@ public void insertLayerDetail(LayerDetail l) { l.range, l.chunkSize, l.dispatchOrder, StringUtils.join(l.tags," | "), l.type.toString(), l.minimumCores, l.maximumCores, l.isThreadable, - l.minimumMemory, l.minimumGpu, StringUtils.join(l.services,","), + l.minimumMemory, l.minimumGpus, l.maximumGpus, l.minimumGpuMemory, StringUtils.join(l.services,","), l.timeout, l.timeout_llu); } @@ -340,9 +343,9 @@ public void updateLayerMinMemory(LayerInterface layer, long val) { } @Override - public void updateLayerMinGpu(LayerInterface layer, long gpu) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_min=? WHERE pk_layer=?", - gpu, layer.getLayerId()); + public void updateLayerMinGpuMemory(LayerInterface layer, long kb) { + getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=?", + kb, layer.getLayerId()); } private static final String BALANCE_MEM = @@ -392,9 +395,9 @@ public void increaseLayerMinMemory(LayerInterface layer, long val) { } @Override - public void increaseLayerMinGpu(LayerInterface layer, long gpu) { - getJdbcTemplate().update("UPDATE layer SET int_gpu_min=? WHERE pk_layer=? AND int_gpu_min < ?", - gpu, layer.getLayerId(), gpu); + public void increaseLayerMinGpuMemory(LayerInterface layer, long kb) { + getJdbcTemplate().update("UPDATE layer SET int_gpu_mem_min=? WHERE pk_layer=? 
AND int_gpu_mem_min < ?", + kb, layer.getLayerId(), kb); } @Override @@ -412,6 +415,18 @@ public void updateLayerMaxCores(LayerInterface layer, int val) { val, layer.getLayerId()); } + @Override + public void updateLayerMinGpus(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_min=? WHERE pk_layer=?", + val, layer.getLayerId()); + } + + @Override + public void updateLayerMaxGpus(LayerInterface layer, int val) { + getJdbcTemplate().update("UPDATE layer SET int_gpus_max=? WHERE pk_layer=?", + val, layer.getLayerId()); + } + private static final String UPDATE_LAYER_MAX_RSS = "UPDATE " + "layer_mem " + @@ -489,6 +504,8 @@ public FrameStateTotals mapRow(ResultSet rs, int rowNum) throws SQLException { "SELECT " + "layer_usage.int_core_time_success,"+ "layer_usage.int_core_time_fail," + + "layer_usage.int_gpu_time_success,"+ + "layer_usage.int_gpu_time_fail," + "layer_usage.int_clock_time_success," + "layer_mem.int_max_rss " + "FROM " + @@ -512,6 +529,9 @@ public ExecutionSummary mapRow(ResultSet rs, int rowNum) throws SQLException { e.coreTimeSuccess = rs.getLong("int_core_time_success"); e.coreTimeFail = rs.getLong("int_core_time_fail"); e.coreTime = e.coreTimeSuccess + e.coreTimeFail; + e.gpuTimeSuccess = rs.getLong("int_gpu_time_success"); + e.gpuTimeFail = rs.getLong("int_gpu_time_fail"); + e.gpuTime = e.gpuTimeSuccess + e.gpuTimeFail; e.highMemoryKb = rs.getLong("int_max_rss"); return e; } @@ -608,10 +628,10 @@ public void updateMinMemory(JobInterface job, long mem, LayerType type) { } @Override - public void updateMinGpu(JobInterface job, long gpu, LayerType type) { + public void updateMinGpuMemory(JobInterface job, long kb, LayerType type) { getJdbcTemplate().update( - "UPDATE layer SET int_gpu_min=? WHERE pk_job=? AND str_type=?", - gpu, job.getJobId(), type.toString()); + "UPDATE layer SET int_gpu_mem_min=? WHERE pk_job=? 
AND str_type=?", + kb, job.getJobId(), type.toString()); } @Override @@ -621,6 +641,20 @@ public void updateMinCores(JobInterface job, int cores, LayerType type) { cores, job.getJobId(), type.toString()); } + @Override + public void updateMaxCores(JobInterface job, int cores, LayerType type) { + getJdbcTemplate().update( + "UPDATE layer SET int_cores_max=? WHERE pk_job=? AND str_type=?", + cores, job.getJobId(), type.toString()); + } + + @Override + public void updateMinGpus(JobInterface job, int gpus, LayerType type) { + getJdbcTemplate().update( + "UPDATE layer SET int_gpus_min=? WHERE pk_job=? AND str_type=?", + gpus, job.getJobId(), type.toString()); + } + @Override public void updateThreadable(LayerInterface layer, boolean threadable) { getJdbcTemplate().update( @@ -664,6 +698,8 @@ public void enableMemoryOptimizer(LayerInterface layer, boolean value) { "layer.pk_layer = ? " + "AND " + "layer.int_cores_min = 100 " + + "AND " + + "layer.int_gpus_min = 0 " + "AND " + "str_tags LIKE '%general%' " + "AND " + @@ -686,7 +722,8 @@ public boolean isOptimizable(LayerInterface l, int succeeded, float avg) { private static final String THREAD_STATS = "SELECT " + "avg(interval_to_seconds(ts_stopped - ts_started)) AS avg, " + - "int_cores " + + "int_cores, " + + "int_gpus " + "FROM " + "frame " + "WHERE " + @@ -695,8 +732,11 @@ public boolean isOptimizable(LayerInterface l, int succeeded, float avg) { "frame.int_checkpoint_count = 0 " + "AND " + "int_cores > 0 " + + "AND " + + "int_gpus > 0 " + "GROUP BY " + - "int_cores " + + "int_cores, " + + "int_gpus " + "ORDER BY " + "int_cores DESC "; @@ -724,11 +764,13 @@ public void updateUsage(LayerInterface layer, ResourceUsage usage, int exitStatu "layer_usage " + "SET " + "int_core_time_success = int_core_time_success + ?," + + "int_gpu_time_success = int_gpu_time_success + ?," + "int_clock_time_success = int_clock_time_success + ?,"+ "int_frame_success_count = int_frame_success_count + 1 " + "WHERE " + "pk_layer = ? 
", usage.getCoreTimeSeconds(), + usage.getGpuTimeSeconds(), usage.getClockTimeSeconds(), layer.getLayerId()); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java index f2cff28a5..8c920c5a4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/NestedWhiteboardDaoJdbc.java @@ -23,10 +23,10 @@ import java.sql.SQLException; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.concurrent.ConcurrentHashMap; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; import org.springframework.jdbc.core.RowMapper; import org.springframework.jdbc.core.support.JdbcDaoSupport; @@ -73,8 +73,12 @@ public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { "folder.int_job_priority as int_def_job_priority, " + "folder.int_job_min_cores as int_def_job_min_cores, " + "folder.int_job_max_cores as int_def_job_max_cores, " + + "folder.int_job_min_gpus as int_def_job_min_gpus, " + + "folder.int_job_max_gpus as int_def_job_max_gpus, " + "folder_resource.int_min_cores AS folder_min_cores, " + "folder_resource.int_max_cores AS folder_max_cores, " + + "folder_resource.int_min_gpus AS folder_min_gpus, " + + "folder_resource.int_max_gpus AS folder_max_gpus, " + "folder_level.int_level, " + "job.pk_job, " + "job.str_name, " + @@ -101,13 +105,18 @@ public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { "job_stat.int_succeeded_count, " + "job_usage.int_core_time_success, " + "job_usage.int_core_time_fail, " + + "job_usage.int_gpu_time_success, " + + "job_usage.int_gpu_time_fail, " + "job_usage.int_frame_success_count, " + "job_usage.int_frame_fail_count, " + "job_usage.int_clock_time_high, " + "job_usage.int_clock_time_success, " + "(job_resource.int_cores + 
job_resource.int_local_cores) AS int_cores, " + + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus, " + "job_resource.int_min_cores, " + + "job_resource.int_min_gpus, " + "job_resource.int_max_cores, " + + "job_resource.int_max_gpus, " + "job_mem.int_max_rss " + "FROM " + "show, " + @@ -148,10 +157,56 @@ public CachedJobWhiteboardMapper(NestedJobWhiteboardMapper result) { "AND " + "folder.pk_dept = dept.pk_dept "; - class NestedJobWhiteboardMapper implements RowMapper { + private class ChildrenEntry { + String key; + int level; + List children; + String name; + + public ChildrenEntry(String key, int level, String name) { + this.key = key; + this.level = level; + this.children = new ArrayList<>(); + this.name = name; + } + + public List getChildren() { + return children; + } + + public void addChild(String child) { + children.add(child); + } + + public String getKey() { + return key; + } + + public String getName() { + return name; + } - public Map groups = new HashMap(50); - public Map> childrenMap = new HashMap>(); + public int compareTo(ChildrenEntry o) { + // Invert order + return Integer.compare(o.level, this.level); + } + + @Override + public String toString() { + StringBuilder out = new StringBuilder(); + String spacing = " ".repeat(Math.max(0, this.level + 1)); + out.append(spacing); + out.append(key + "(c " + name + ")"); + for (String id : children) { + out.append("\n " + spacing + id.substring(0, 4)); + } + return out.toString(); + } + } + + class NestedJobWhiteboardMapper implements RowMapper { + public Map groups = new HashMap(50); + public Map childrenMap = new HashMap(); public String rootGroupID; @Override @@ -165,20 +220,28 @@ public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { .setDefaultJobPriority(rs.getInt("int_def_job_priority")) .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_min_cores"))) .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_def_job_max_cores"))) + 
.setDefaultJobMinGpus(rs.getInt("int_def_job_min_gpus")) + .setDefaultJobMaxGpus(rs.getInt("int_def_job_max_gpus")) .setMaxCores(Convert.coreUnitsToCores(rs.getInt("folder_max_cores"))) .setMinCores(Convert.coreUnitsToCores(rs.getInt("folder_min_cores"))) + .setMaxGpus(rs.getInt("folder_max_gpus")) + .setMinGpus(rs.getInt("folder_min_gpus")) .setLevel(rs.getInt("int_level")) .setDepartment(rs.getString("dept_name")) .build(); String parentGroupId = rs.getString("pk_parent_folder"); if (parentGroupId != null) { - List children = childrenMap.get(parentGroupId); - if (children == null) { - children = new ArrayList<>(); - childrenMap.put(parentGroupId, children); + ChildrenEntry childrenEntry = childrenMap.get(parentGroupId); + if (childrenEntry == null) { + childrenEntry = new ChildrenEntry( + parentGroupId, group.getLevel() - 1, rs.getString("group_name")); + childrenEntry.addChild(groupId); + childrenMap.put(parentGroupId, childrenEntry); + } + else { + childrenEntry.addChild(groupId); } - children.add(groupId); } else { rootGroupID = rs.getString("pk_folder"); @@ -210,19 +273,22 @@ public NestedGroup mapRow(ResultSet rs, int rowNum) throws SQLException { } private NestedJobWhiteboardMapper updateConnections(NestedJobWhiteboardMapper mapper) { - for (Map.Entry> entry : mapper.childrenMap.entrySet()) { - NestedGroup group = mapper.groups.get(entry.getKey()); - NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); - for (String childId : entry.getValue()) { - NestedGroup child = mapper.groups.get(childId); - child = child.toBuilder().setParent(group).build(); - childrenBuilder.addNestedGroups(child); - mapper.groups.put(childId, child); - } - group = group.toBuilder() - .setGroups(childrenBuilder.build()) - .build(); - mapper.groups.put(entry.getKey(), group); + ArrayList orderedChildren = new ArrayList<>(mapper.childrenMap.values()); + orderedChildren.sort(ChildrenEntry::compareTo); + + for (ChildrenEntry entry : orderedChildren) { + NestedGroup group 
= mapper.groups.get(entry.getKey()); + NestedGroupSeq.Builder childrenBuilder = NestedGroupSeq.newBuilder(); + for (String childId : entry.getChildren()) { + NestedGroup child = mapper.groups.get(childId); + child = child.toBuilder().setParent(group).build(); + childrenBuilder.addNestedGroups(child); + mapper.groups.put(childId, child); + } + group = group.toBuilder() + .setGroups(childrenBuilder.build()) + .build(); + mapper.groups.put(entry.getKey(), group); } return mapper; } @@ -254,6 +320,8 @@ private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLExcepti .setLogDir(rs.getString("str_log_dir")) .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_cores")) + .setMinGpus(rs.getInt("int_min_cores")) .setName(rs.getString("str_name")) .setPriority(rs.getInt("int_priority")) .setShot(rs.getString("str_shot")) @@ -295,8 +363,10 @@ private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLExcepti "host_stat.ts_ping, " + "host.int_cores, " + "host.int_cores_idle, " + - "host.int_gpu, " + - "host.int_gpu_idle, " + + "host.int_gpus, " + + "host.int_gpus_idle, " + + "host.int_gpu_mem, " + + "host.int_gpu_mem_idle, " + "host.int_mem, " + "host.int_mem_idle, " + "host.str_lock_state, " + @@ -310,15 +380,16 @@ private static final NestedJob mapResultSetToJob(ResultSet rs) throws SQLExcepti "host_stat.int_swap_free, " + "host_stat.int_mcp_total, " + "host_stat.int_mcp_free, " + - "host_stat.int_gpu_total, " + - "host_stat.int_gpu_free, " + + "host_stat.int_gpu_mem_total, " + + "host_stat.int_gpu_mem_free, " + "host_stat.int_load, " + "proc.pk_proc, " + "proc.int_cores_reserved AS proc_cores, " + + "proc.int_gpus_reserved AS proc_gpus, " + "proc.int_mem_reserved AS proc_memory, " + "proc.int_mem_used AS used_memory, " + "proc.int_mem_max_used AS max_memory, " + - "proc.int_gpu_reserved AS proc_gpu, " + + "proc.int_gpu_mem_reserved 
AS proc_gpu_memory, " + "proc.ts_ping, " + "proc.ts_booked, " + "proc.ts_dispatched, " + @@ -445,10 +516,13 @@ public NestedHost mapRow(ResultSet rs, int row) throws SQLException { proc = NestedProc.newBuilder() .setId(pid) .setName(CueUtil.buildProcName(host.getName(), - rs.getInt("proc_cores"))) + rs.getInt("proc_cores"), + rs.getInt("proc_gpus"))) .setReservedCores(Convert.coreUnitsToCores( rs.getInt("proc_cores"))) + .setReservedGpus(rs.getInt("proc_gpus")) .setReservedMemory(rs.getLong("proc_memory")) + .setReservedGpuMemory(rs.getLong("proc_gpu_memory")) .setUsedMemory(rs.getLong("used_memory")) .setFrameName(rs.getString("frame_name")) .setJobName(rs.getString("job_name")) diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java index be8643b7e..8f6322690 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ProcDaoJdbc.java @@ -19,6 +19,9 @@ package com.imageworks.spcue.dao.postgres; + +import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.util.ArrayList; @@ -29,6 +32,7 @@ import org.springframework.dao.DataAccessException; import org.springframework.jdbc.core.RowMapper; import org.springframework.jdbc.core.support.JdbcDaoSupport; +import org.springframework.jdbc.core.PreparedStatementCreator; import com.imageworks.spcue.FrameInterface; import com.imageworks.spcue.HostInterface; @@ -108,9 +112,12 @@ public boolean deleteVirtualProc(VirtualProc proc) { "int_mem_reserved, " + "int_mem_pre_reserved, " + "int_mem_used, "+ - "int_gpu_reserved, " + + "int_gpus_reserved, " + + "int_gpu_mem_reserved, " + + "int_gpu_mem_pre_reserved, " + + "int_gpu_mem_used, " + "b_local " + - ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?) "; + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?) 
"; public void insertVirtualProc(VirtualProc proc) { proc.id = SqlUtil.genKeyRandom(); @@ -121,7 +128,9 @@ public void insertVirtualProc(VirtualProc proc) { proc.getLayerId(), proc.getJobId(), proc.getFrameId(), proc.coresReserved, proc.memoryReserved, proc.memoryReserved, Dispatcher.MEM_RESERVED_MIN, - proc.gpuReserved, proc.isLocalDispatch); + proc.gpusReserved, proc.gpuMemoryReserved, + proc.gpuMemoryReserved, Dispatcher.MEM_GPU_RESERVED_MIN, + proc.isLocalDispatch); // Update all of the resource counts procCreated(proc); @@ -229,13 +238,16 @@ public boolean clearVirtualProcAssignment(FrameInterface frame) { "int_mem_max_used = ?," + "int_virt_used = ?, " + "int_virt_max_used = ?, " + + "int_gpu_mem_used = ?, " + + "int_gpu_mem_max_used = ?, " + + "bytea_children = ?, " + "ts_ping = current_timestamp " + "WHERE " + "pk_frame = ?"; @Override public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, - long vss, long maxVss) { + long vss, long maxVss, long usedGpuMemory, long maxUsedGpuMemory, byte[] children) { /* * This method is going to repeat for a proc every 1 minute, so * if the proc is being touched by another thread, then return @@ -250,8 +262,23 @@ public void updateProcMemoryUsage(FrameInterface f, long rss, long maxRss, "SELECT pk_frame FROM proc WHERE pk_frame=? 
FOR UPDATE", String.class, f.getFrameId()).equals(f.getFrameId())) { - getJdbcTemplate().update(UPDATE_PROC_MEMORY_USAGE, - rss, maxRss, vss, maxVss, f.getFrameId()); + getJdbcTemplate().update(new PreparedStatementCreator() { + @Override + public PreparedStatement createPreparedStatement(Connection conn) + throws SQLException { + PreparedStatement updateProc = conn.prepareStatement( + UPDATE_PROC_MEMORY_USAGE); + updateProc.setLong(1, rss); + updateProc.setLong(2, maxRss); + updateProc.setLong(3, vss); + updateProc.setLong(4, maxVss); + updateProc.setLong(5, usedGpuMemory); + updateProc.setLong(6, maxUsedGpuMemory); + updateProc.setBytes(7, children); + updateProc.setString(8, f.getFrameId()); + return updateProc; + } + }); } } catch (DataAccessException dae) { logger.info("The proc for frame " + f + @@ -278,13 +305,16 @@ public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { proc.coresReserved =rs.getInt("int_cores_reserved"); proc.memoryReserved = rs.getLong("int_mem_reserved"); proc.memoryMax = rs.getLong("int_mem_max_used"); - proc.gpuReserved = rs.getLong("int_gpu_reserved"); + proc.gpusReserved = rs.getInt("int_gpus_reserved"); + proc.gpuMemoryReserved = rs.getLong("int_gpu_mem_reserved"); + proc.gpuMemoryMax = rs.getLong("int_gpu_mem_max_used"); proc.virtualMemoryMax = rs.getLong("int_virt_max_used"); proc.virtualMemoryUsed = rs.getLong("int_virt_used"); proc.memoryUsed = rs.getLong("int_mem_used"); proc.unbooked = rs.getBoolean("b_unbooked"); proc.isLocalDispatch = rs.getBoolean("b_local"); proc.os = rs.getString("str_os"); + proc.childProcesses = rs.getBytes("bytea_children"); return proc; } }; @@ -305,7 +335,11 @@ public VirtualProc mapRow(ResultSet rs, int rowNum) throws SQLException { "proc.int_mem_reserved,"+ "proc.int_mem_max_used,"+ "proc.int_mem_used,"+ - "proc.int_gpu_reserved,"+ + "proc.int_gpus_reserved,"+ + "proc.int_gpu_mem_reserved,"+ + "proc.int_gpu_mem_max_used,"+ + "proc.int_gpu_mem_used,"+ + "proc.bytea_children,"+ 
"proc.int_virt_max_used,"+ "proc.int_virt_used,"+ "host.str_name AS host_name, " + @@ -530,7 +564,7 @@ public boolean increaseReservedMemory(ProcInterface p, long value) { value, p.getProcId(), value) == 1; } catch (Exception e) { // check by trigger erify_host_resources - throw new ResourceReservationFailureException("failed to increase memory reserveration for proc " + throw new ResourceReservationFailureException("failed to increase memory reservation for proc " + p.getProcId() + " to " + value + ", proc does not have that much memory to spare."); } } @@ -551,11 +585,15 @@ public boolean increaseReservedMemory(ProcInterface p, long value) { "int_mem_reserved," + "int_mem_max_used,"+ "int_mem_used,"+ - "int_gpu_reserved," + + "int_gpus_reserved," + + "int_gpu_mem_reserved," + + "int_gpu_mem_max_used," + + "int_gpu_mem_used," + "int_virt_max_used,"+ "int_virt_used,"+ "host_name, " + - "str_os " + + "str_os, " + + "bytea_children " + "FROM (" + GET_VIRTUAL_PROC + " " + "AND " + @@ -578,9 +616,9 @@ public long getReservedMemory(ProcInterface proc) { Long.class, proc.getProcId()); } - public long getReservedGpu(ProcInterface proc) { + public long getReservedGpuMemory(ProcInterface proc) { return getJdbcTemplate().queryForObject( - "SELECT int_gpu_reserved FROM proc WHERE pk_proc=?", + "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", Long.class, proc.getProcId()); } @@ -694,22 +732,24 @@ private void procDestroyed(VirtualProc proc) { "SET " + "int_cores_idle = int_cores_idle + ?," + "int_mem_idle = int_mem_idle + ?, " + - "int_gpu_idle = int_gpu_idle + ? " + + "int_gpus_idle = int_gpus_idle + ?," + + "int_gpu_mem_idle = int_gpu_mem_idle + ? 
" + "WHERE " + "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, proc.getHostId()); + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, proc.getHostId()); if (!proc.isLocalDispatch) { getJdbcTemplate().update( "UPDATE " + "subscription " + "SET " + - "int_cores = int_cores - ? " + + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_show = ? " + "AND " + "pk_alloc = ?", - proc.coresReserved, proc.getShowId(), + proc.coresReserved, proc.gpusReserved, proc.getShowId(), proc.getAllocationId()); } @@ -717,10 +757,11 @@ private void procDestroyed(VirtualProc proc) { "UPDATE " + "layer_resource " + "SET " + - "int_cores = int_cores - ? " + + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_layer = ?", - proc.coresReserved, proc.getLayerId()); + proc.coresReserved, proc.gpusReserved, proc.getLayerId()); if (!proc.isLocalDispatch) { @@ -728,33 +769,36 @@ private void procDestroyed(VirtualProc proc) { "UPDATE " + "job_resource " + "SET " + - "int_cores = int_cores - ? " + + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId()); getJdbcTemplate().update( "UPDATE " + "folder_resource " + "SET " + - "int_cores = int_cores - ? " + + "int_cores = int_cores - ?," + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_folder = " + "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId()); getJdbcTemplate().update( "UPDATE " + "point " + "SET " + - "int_cores = int_cores - ? " + + "int_cores = int_cores - ?, " + + "int_gpus = int_gpus - ? " + "WHERE " + "pk_dept = " + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " + "pk_show = " + "(SELECT pk_show FROM job WHERE pk_job=?) 
", - proc.coresReserved, proc.getJobId(), proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); } if (proc.isLocalDispatch) { @@ -763,10 +807,11 @@ private void procDestroyed(VirtualProc proc) { "UPDATE " + "job_resource " + "SET " + - "int_local_cores = int_local_cores - ? " + + "int_local_cores = int_local_cores - ?, " + + "int_local_gpus = int_local_gpus - ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId()); getJdbcTemplate().update( "UPDATE " + @@ -774,14 +819,16 @@ private void procDestroyed(VirtualProc proc) { "SET " + "int_cores_idle = int_cores_idle + ?, " + "int_mem_idle = int_mem_idle + ?, " + - "int_gpu_idle = int_gpu_idle + ? " + + "int_gpus_idle = int_gpus_idle + ?, " + + "int_gpu_mem_idle = int_gpu_mem_idle + ? " + "WHERE " + "pk_job = ? " + "AND " + "pk_host = ? ", proc.coresReserved, proc.memoryReserved, - proc.gpuReserved, + proc.gpusReserved, + proc.gpuMemoryReserved, proc.getJobId(), proc.getHostId()); } @@ -802,10 +849,11 @@ private void procCreated(VirtualProc proc) { "SET " + "int_cores_idle = int_cores_idle - ?," + "int_mem_idle = int_mem_idle - ?, " + - "int_gpu_idle = int_gpu_idle - ? " + + "int_gpus_idle = int_gpus_idle - ?," + + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_host = ?", - proc.coresReserved, proc.memoryReserved, proc.gpuReserved, proc.getHostId()); + proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved, proc.getHostId()); /** @@ -817,12 +865,13 @@ private void procCreated(VirtualProc proc) { "UPDATE " + "subscription " + "SET " + - "int_cores = int_cores + ? " + + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_show = ? 
" + "AND " + "pk_alloc = ?", - proc.coresReserved, proc.getShowId(), + proc.coresReserved, proc.gpusReserved, proc.getShowId(), proc.getAllocationId()); } @@ -830,10 +879,11 @@ private void procCreated(VirtualProc proc) { "UPDATE " + "layer_resource " + "SET " + - "int_cores = int_cores + ? " + + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_layer = ?", - proc.coresReserved, proc.getLayerId()); + proc.coresReserved, proc.gpusReserved, proc.getLayerId()); if (!proc.isLocalDispatch) { @@ -841,33 +891,36 @@ private void procCreated(VirtualProc proc) { "UPDATE " + "job_resource " + "SET " + - "int_cores = int_cores + ? " + + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId()); getJdbcTemplate().update( "UPDATE " + "folder_resource " + "SET " + - "int_cores = int_cores + ? " + + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_folder = " + "(SELECT pk_folder FROM job WHERE pk_job=?)", - proc.coresReserved, proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId()); getJdbcTemplate().update( "UPDATE " + "point " + "SET " + - "int_cores = int_cores + ? " + + "int_cores = int_cores + ?," + + "int_gpus = int_gpus + ? " + "WHERE " + "pk_dept = " + "(SELECT pk_dept FROM job WHERE pk_job=?) " + "AND " + "pk_show = " + "(SELECT pk_show FROM job WHERE pk_job=?) ", - proc.coresReserved, proc.getJobId(), proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId(), proc.getJobId()); } if (proc.isLocalDispatch) { @@ -876,23 +929,28 @@ private void procCreated(VirtualProc proc) { "UPDATE " + "job_resource " + "SET " + - "int_local_cores = int_local_cores + ? " + + "int_local_cores = int_local_cores + ?," + + "int_local_gpus = int_local_gpus + ? 
" + "WHERE " + "pk_job = ?", - proc.coresReserved, proc.getJobId()); + proc.coresReserved, proc.gpusReserved, proc.getJobId()); getJdbcTemplate().update( "UPDATE " + "host_local " + "SET " + "int_cores_idle = int_cores_idle - ?, " + - "int_mem_idle = int_mem_idle - ? " + + "int_mem_idle = int_mem_idle - ?," + + "int_gpus_idle = int_gpus_idle - ?, " + + "int_gpu_mem_idle = int_gpu_mem_idle - ? " + "WHERE " + "pk_job = ? " + "AND " + "pk_host = ?", proc.coresReserved, proc.memoryReserved, + proc.gpusReserved, + proc.gpuMemoryReserved, proc.getJobId(), proc.getHostId()); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java index b31d9ade0..a637b34d6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ServiceDaoJdbc.java @@ -60,11 +60,14 @@ public ServiceEntity mapRow(ResultSet rs, int rowNum) throws SQLException { s.minCores = rs.getInt("int_cores_min"); s.maxCores = rs.getInt("int_cores_max"); s.minMemory = rs.getLong("int_mem_min"); - s.minGpu = rs.getLong("int_gpu_min"); + s.minGpus = rs.getInt("int_gpus_min"); + s.maxGpus = rs.getInt("int_gpus_max"); + s.minGpuMemory = rs.getLong("int_gpu_mem_min"); s.threadable = rs.getBoolean("b_threadable"); s.tags = splitTags(rs.getString("str_tags")); s.timeout = rs.getInt("int_timeout"); s.timeout_llu = rs.getInt("int_timeout_llu"); + s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); return s; } }; @@ -79,12 +82,15 @@ public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) s.minCores = rs.getInt("int_cores_min"); s.maxCores = rs.getInt("int_cores_max"); s.minMemory = rs.getLong("int_mem_min"); - s.minGpu = rs.getLong("int_gpu_min"); + s.minGpus = rs.getInt("int_gpus_min"); + s.maxGpus = rs.getInt("int_gpus_max"); + s.minGpuMemory = rs.getLong("int_gpu_mem_min"); s.threadable = 
rs.getBoolean("b_threadable"); s.tags = splitTags(rs.getString("str_tags")); s.showId = rs.getString("pk_show"); s.timeout = rs.getInt("int_timeout"); s.timeout_llu = rs.getInt("int_timeout_llu"); + s.minMemoryIncrease = rs.getLong("int_min_memory_increase"); return s; } }; @@ -97,10 +103,13 @@ public ServiceOverrideEntity mapRow(ResultSet rs, int rowNum) "service.int_cores_min," + "service.int_cores_max," + "service.int_mem_min," + - "service.int_gpu_min," + + "service.int_gpus_min," + + "service.int_gpus_max," + + "service.int_gpu_mem_min," + "service.str_tags, " + "service.int_timeout, " + - "service.int_timeout_llu " + + "service.int_timeout_llu, " + + "service.int_min_memory_increase " + "FROM " + "service "; @@ -119,10 +128,13 @@ public ServiceEntity get(String id) { "show_service.int_cores_min," + "show_service.int_cores_max, "+ "show_service.int_mem_min," + - "show_service.int_gpu_min," + + "show_service.int_gpus_min," + + "show_service.int_gpus_max, "+ + "show_service.int_gpu_mem_min," + "show_service.str_tags," + "show_service.int_timeout," + "show_service.int_timeout_llu," + + "show_service.int_min_memory_increase," + "show.pk_show " + "FROM " + "show_service," + @@ -167,20 +179,25 @@ public boolean isOverridden(String service, String show) { "int_cores_min," + "int_cores_max, "+ "int_mem_min," + - "int_gpu_min," + + "int_gpus_min," + + "int_gpus_max, "+ + "int_gpu_mem_min," + "str_tags," + "int_timeout," + - "int_timeout_llu " + - ") VALUES (?,?,?,?,?,?,?,?,?,?)"; + "int_timeout_llu, " + + "int_min_memory_increase " + + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)"; @Override public void insert(ServiceEntity service) { service.id = SqlUtil.genKeyRandom(); getJdbcTemplate().update(INSERT_SERVICE, service.id, service.name, service.threadable, service.minCores, - service.maxCores, service.minMemory, service.minGpu, + service.maxCores, service.minMemory, + service.minGpus, service.maxGpus, service.minGpuMemory, StringUtils.join(service.tags.toArray(), " | "), - 
service.timeout, service.timeout_llu); + service.timeout, service.timeout_llu, + service.minMemoryIncrease); } private static final String INSERT_SERVICE_WITH_SHOW = @@ -194,11 +211,14 @@ public void insert(ServiceEntity service) { "int_cores_min," + "int_cores_max," + "int_mem_min," + - "int_gpu_min," + + "int_gpus_min," + + "int_gpus_max," + + "int_gpu_mem_min," + "str_tags," + "int_timeout," + - "int_timeout_llu " + - ") VALUES (?,?,?,?,?,?,?,?,?,?,?)"; + "int_timeout_llu, " + + "int_min_memory_increase " + + ") VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"; @Override public void insert(ServiceOverrideEntity service) { @@ -206,8 +226,8 @@ public void insert(ServiceOverrideEntity service) { getJdbcTemplate().update(INSERT_SERVICE_WITH_SHOW, service.id, service.showId, service.name, service.threadable, service.minCores, service.maxCores, service.minMemory, - service.minGpu, joinTags(service.tags), - service.timeout, service.timeout_llu); + service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), + service.timeout, service.timeout_llu, service.minMemoryIncrease); } private static final String UPDATE_SERVICE = @@ -219,10 +239,13 @@ service.minGpu, joinTags(service.tags), "int_cores_min=?," + "int_cores_max=?,"+ "int_mem_min=?," + - "int_gpu_min=?," + + "int_gpus_min=?," + + "int_gpus_max=?," + + "int_gpu_mem_min=?," + "str_tags=?," + "int_timeout=?," + - "int_timeout_llu=? " + + "int_timeout_llu=?, " + + "int_min_memory_increase=? 
" + "WHERE " + "pk_service = ?"; @@ -230,8 +253,9 @@ service.minGpu, joinTags(service.tags), public void update(ServiceEntity service) { getJdbcTemplate().update(UPDATE_SERVICE, service.name, service.threadable, service.minCores, service.maxCores, - service.minMemory, service.minGpu, joinTags(service.tags), - service.timeout, service.timeout_llu, service.getId()); + service.minMemory, service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), + service.timeout, service.timeout_llu, service.minMemoryIncrease, + service.getId()); } private static final String UPDATE_SERVICE_WITH_SHOW = @@ -243,10 +267,13 @@ service.minMemory, service.minGpu, joinTags(service.tags), "int_cores_min=?," + "int_cores_max=?," + "int_mem_min=?," + - "int_gpu_min=?," + + "int_gpus_min=?," + + "int_gpus_max=?," + + "int_gpu_mem_min=?," + "str_tags=?," + "int_timeout=?," + - "int_timeout_llu=? " + + "int_timeout_llu=?, " + + "int_min_memory_increase=? " + "WHERE " + "pk_show_service = ?"; @@ -254,8 +281,9 @@ service.minMemory, service.minGpu, joinTags(service.tags), public void update(ServiceOverrideEntity service) { getJdbcTemplate().update(UPDATE_SERVICE_WITH_SHOW, service.name, service.threadable, service.minCores, service.maxCores, - service.minMemory, service.minGpu, joinTags(service.tags), - service.timeout, service.timeout_llu, service.getId()); + service.minMemory, service.minGpus, service.maxGpus, service.minGpuMemory, joinTags(service.tags), + service.timeout, service.timeout_llu, service.minMemoryIncrease, + service.getId()); } @Override diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java index 893455be3..86e126559 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/ShowDaoJdbc.java @@ -21,8 +21,13 @@ import java.sql.ResultSet; import java.sql.SQLException; 
+import java.util.Arrays; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.commons.lang.StringUtils; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.dao.DataAccessException; import org.springframework.dao.EmptyResultDataAccessException; import org.springframework.jdbc.core.RowMapper; @@ -35,6 +40,8 @@ import com.imageworks.spcue.util.SqlUtil; public class ShowDaoJdbc extends JdbcDaoSupport implements ShowDao { + @Autowired + private Environment env; private static final RowMapper SHOW_MAPPER = new RowMapper() { @@ -44,6 +51,8 @@ public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { show.id = rs.getString("pk_show"); show.defaultMaxCores = rs.getInt("int_default_max_cores"); show.defaultMinCores = rs.getInt("int_default_min_cores"); + show.defaultMaxGpus = rs.getInt("int_default_max_gpus"); + show.defaultMinGpus = rs.getInt("int_default_min_gpus"); show.active = rs.getBoolean("b_active"); if (rs.getString("str_comment_email") != null) { @@ -61,6 +70,8 @@ public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + + "show.int_default_min_gpus, " + "show.str_name, " + "show.b_active, " + "show.str_comment_email " + @@ -72,6 +83,8 @@ public ShowEntity mapRow(ResultSet rs, int rowNum) throws SQLException { "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + + "show.int_default_min_gpus, " + "show_alias.str_name, " + "show.b_active, " + "show.str_comment_email " + @@ -101,6 +114,8 @@ public ShowEntity getShowDetail(String id) { "show.pk_show, " + "show.int_default_max_cores, " + "show.int_default_min_cores, " + + "show.int_default_max_gpus, " + + "show.int_default_min_gpus, " + "show.str_name, " + "show.b_active, " + 
"show.str_comment_email " + @@ -123,16 +138,22 @@ public ShowEntity getShowDetail(HostInterface host) { private static final String INSERT_SHOW = "INSERT INTO show (pk_show,str_name) VALUES (?,?)"; + private static final String INSERT_SHOW_STATS = + "INSERT INTO show_stats " + + "(pk_show, int_frame_insert_count, int_job_insert_count, int_frame_success_count, int_frame_fail_count) " + + "VALUES (?, 0, 0, 0, 0)"; + public void insertShow(ShowEntity show) { show.id = SqlUtil.genKeyRandom(); getJdbcTemplate().update(INSERT_SHOW, show.id, show.name); + getJdbcTemplate().update(INSERT_SHOW_STATS, show.id); } private static final String SHOW_EXISTS = "SELECT " + "COUNT(show.pk_show) " + "FROM " + - "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show )" + + "show LEFT JOIN show_alias ON (show.pk_show = show_alias.pk_show) " + "WHERE " + "(show.str_name = ? OR show_alias.str_name = ?) "; public boolean showExists(String name) { @@ -154,6 +175,8 @@ public void delete(ShowInterface s) { s.getShowId()); getJdbcTemplate().update("DELETE FROM show_alias WHERE pk_show=?", s.getShowId()); + getJdbcTemplate().update("DELETE FROM show_stats WHERE pk_show=?", + s.getShowId()); getJdbcTemplate().update("DELETE FROM show WHERE pk_show=?", s.getShowId()); } @@ -180,6 +203,18 @@ public void updateShowDefaultMaxCores(ShowInterface s, int val) { val, s.getShowId()); } + public void updateShowDefaultMinGpus(ShowInterface s, int val) { + getJdbcTemplate().update( + "UPDATE show SET int_default_min_gpus=? WHERE pk_show=?", + val, s.getShowId()); + } + + public void updateShowDefaultMaxGpus(ShowInterface s, int val) { + getJdbcTemplate().update( + "UPDATE show SET int_default_max_gpus=? 
WHERE pk_show=?", + val, s.getShowId()); + } + @Override public void updateBookingEnabled(ShowInterface s, boolean enabled) { getJdbcTemplate().update( @@ -208,6 +243,26 @@ public void updateShowCommentEmail(ShowInterface s, String[] email) { StringUtils.join(email, ","), s.getShowId()); } + @Override + public void updateShowsStatus() { + Stream protectedShowsRaw = Arrays + .stream(env.getProperty("protected_shows", String.class, "").split(",")); + String protectedShows = protectedShowsRaw.map(show -> "'" + show + "'") + .collect(Collectors.joining(",")); + int maxShowStaleDays = env.getProperty("max_show_stale_days", Integer.class, -1); + + if (maxShowStaleDays > 0) { + getJdbcTemplate().update("UPDATE show SET b_active=false " + + "WHERE pk_show NOT IN (SELECT pk_show " + + " FROM (SELECT pk_show, count(pk_job) FROM job_history " + + " WHERE " + + " (DATE_PART('days', NOW()) - DATE_PART('days', dt_last_modified)) < ? " + + "GROUP BY pk_show HAVING COUNT(pk_job) > 0) pk_show) " + + " AND str_name NOT IN (?)", + maxShowStaleDays, protectedShows); + } + } + @Override public void updateFrameCounters(ShowInterface s, int exitStatus) { String col = "int_frame_success_count = int_frame_success_count + 1"; @@ -215,7 +270,7 @@ public void updateFrameCounters(ShowInterface s, int exitStatus) { col = "int_frame_fail_count = int_frame_fail_count + 1"; } getJdbcTemplate().update( - "UPDATE show SET " + col + " WHERE pk_show=?", s.getShowId()); + "UPDATE show_stats SET " + col + " WHERE pk_show=?", s.getShowId()); } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java index 687e91ecd..fe3217f96 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dao/postgres/WhiteboardDaoJdbc.java @@ -28,7 +28,9 @@ import com.google.common.collect.Lists; import 
com.google.common.collect.Sets; -import org.apache.log4j.Logger; +import com.google.protobuf.ByteString; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.jdbc.core.RowMapper; import org.springframework.jdbc.core.support.JdbcDaoSupport; @@ -128,7 +130,7 @@ public class WhiteboardDaoJdbc extends JdbcDaoSupport implements WhiteboardDao { @SuppressWarnings("unused") - private static final Logger logger = Logger.getLogger(WhiteboardDaoJdbc.class); + private static final Logger logger = LogManager.getLogger(WhiteboardDaoJdbc.class); private FrameSearchFactory frameSearchFactory; private ProcSearchFactory procSearchFactory; @@ -577,8 +579,8 @@ public CommentSeq getComments(JobInterface j) { public UpdatedFrameCheckResult getUpdatedFrames(JobInterface job, List layers, int epochSeconds) { - if ((System.currentTimeMillis() / 1000) - epochSeconds > 60) { - long timeDiff = System.currentTimeMillis() - epochSeconds; + long timeDiff = (System.currentTimeMillis() / 1000) - epochSeconds; + if (timeDiff > 60) { throw new IllegalArgumentException("the last update timestamp cannot be over " + "a minute off the current time, difference was: " + timeDiff); } @@ -880,9 +882,6 @@ public Deed mapRow(ResultSet rs, int rowNum) throws SQLException { .setId(SqlUtil.getString(rs,"pk_deed")) .setHost(SqlUtil.getString(rs,"str_host")) .setOwner(SqlUtil.getString(rs,"str_username")) - .setBlackout(rs.getBoolean("b_blackout")) - .setBlackoutStartTime(rs.getInt("int_blackout_start")) - .setBlackoutStopTime(rs.getInt("int_blackout_stop")) .build(); } }; @@ -898,7 +897,10 @@ public RenderPartition mapRow(ResultSet rs, int rowNum) throws SQLException { .setThreads(rs.getInt("int_threads")) .setMaxMemory(rs.getLong("int_mem_max")) .setMemory( rs.getLong("int_mem_max") - rs.getLong("int_mem_idle")) - .setMaxGpu(rs.getLong("int_gpu_max")) + .setGpus(rs.getInt("int_gpus_max") - rs.getInt("int_gpus_idle")) + 
.setMaxGpus(rs.getInt("int_gpus_max")) + .setGpuMemory(rs.getLong("int_gpu_mem_max") - rs.getLong("int_gpu_mem_idle")) + .setMaxGpuMemory(rs.getLong("int_gpu_mem_max")) .setHost(SqlUtil.getString(rs,"str_host_name")) .setJob(SqlUtil.getString(rs,"str_job_name")) .setRenderPartType(RenderPartitionType.valueOf(SqlUtil.getString(rs,"str_type"))) @@ -950,11 +952,13 @@ public Proc mapRow(ResultSet rs, int row) throws SQLException { return Proc.newBuilder() .setId(SqlUtil.getString(rs,"pk_proc")) .setName(CueUtil.buildProcName(SqlUtil.getString(rs,"host_name"), - rs.getInt("int_cores_reserved"))) + rs.getInt("int_cores_reserved"), rs.getInt("int_gpus_reserved"))) .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores_reserved"))) .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpu(rs.getLong("int_gpu_reserved")) + .setReservedGpus(rs.getInt("int_gpus_reserved")) + .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) .setUsedMemory(rs.getLong("int_mem_used")) + .setUsedGpuMemory(rs.getLong("int_gpu_mem_used")) .setFrameName(SqlUtil.getString(rs, "frame_name")) .setJobName(SqlUtil.getString(rs,"job_name")) .setGroupName(SqlUtil.getString(rs,"folder_name")) @@ -967,6 +971,7 @@ public Proc mapRow(ResultSet rs, int row) throws SQLException { SqlUtil.getString(rs,"str_log_dir"), SqlUtil.getString(rs,"job_name"), SqlUtil.getString(rs,"frame_name"))) .setRedirectTarget(SqlUtil.getString(rs, "str_redirect")) + .setChildProcesses(SqlUtil.getByteString(rs, "bytea_children")) .addAllServices(Arrays.asList(SqlUtil.getString(rs,"str_services").split(","))) .build(); } @@ -1009,20 +1014,22 @@ public static NestedHost.Builder mapNestedHostBuilder(ResultSet rs) throws SQLEx .setFreeMcp(rs.getLong("int_mcp_free")) .setFreeMemory(rs.getLong("int_mem_free")) .setFreeSwap(rs.getLong("int_swap_free")) - .setFreeGpu(rs.getLong("int_gpu_free")) + .setFreeGpuMemory(rs.getLong("int_gpu_mem_free")) .setLoad(rs.getInt("int_load")) 
.setNimbyEnabled(rs.getBoolean("b_nimby")) .setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))) .setMemory(rs.getLong("int_mem")) .setIdleMemory(rs.getLong("int_mem_idle")) - .setGpu(rs.getLong("int_gpu")) - .setIdleGpu(rs.getLong("int_gpu_idle")) + .setGpus(rs.getInt("int_gpus")) + .setIdleGpus(rs.getInt("int_gpus_idle")) + .setGpuMemory(rs.getLong("int_gpu_mem")) + .setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")) .setState(HardwareState.valueOf(SqlUtil.getString(rs,"host_state"))) .setTotalMcp(rs.getLong("int_mcp_total")) .setTotalMemory(rs.getLong("int_mem_total")) .setTotalSwap(rs.getLong("int_swap_total")) - .setTotalGpu(rs.getLong("int_gpu_total")) + .setTotalGpuMemory(rs.getLong("int_gpu_mem_total")) .setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)) .setLockState(LockState.valueOf(SqlUtil.getString(rs,"str_lock_state"))) .setHasComment(rs.getBoolean("b_comment")) @@ -1044,20 +1051,22 @@ public static Host.Builder mapHostBuilder(ResultSet rs) throws SQLException { builder.setFreeMcp(rs.getLong("int_mcp_free")); builder.setFreeMemory(rs.getLong("int_mem_free")); builder.setFreeSwap(rs.getLong("int_swap_free")); - builder.setFreeGpu(rs.getLong("int_gpu_free")); + builder.setFreeGpuMemory(rs.getLong("int_gpu_mem_free")); builder.setLoad(rs.getInt("int_load")); builder.setNimbyEnabled(rs.getBoolean("b_nimby")); builder.setCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))); builder.setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_cores_idle"))); builder.setMemory(rs.getLong("int_mem")); builder.setIdleMemory(rs.getLong("int_mem_idle")); - builder.setGpu(rs.getLong("int_gpu")); - builder.setIdleGpu(rs.getLong("int_gpu_idle")); + builder.setGpus(rs.getInt("int_gpus")); + builder.setIdleGpus(rs.getInt("int_gpus_idle")); + builder.setGpuMemory(rs.getLong("int_gpu_mem")); + builder.setIdleGpuMemory(rs.getLong("int_gpu_mem_idle")); 
builder.setState(HardwareState.valueOf(SqlUtil.getString(rs,"host_state"))); builder.setTotalMcp(rs.getLong("int_mcp_total")); builder.setTotalMemory(rs.getLong("int_mem_total")); builder.setTotalSwap(rs.getLong("int_swap_total")); - builder.setTotalGpu(rs.getLong("int_gpu_total")); + builder.setTotalGpuMemory(rs.getLong("int_gpu_mem_total")); builder.setPingTime((int) (rs.getTimestamp("ts_ping").getTime() / 1000)); builder.setLockState(LockState.valueOf(SqlUtil.getString(rs,"str_lock_state"))); builder.setHasComment(rs.getBoolean("b_comment")); @@ -1112,6 +1121,11 @@ public Allocation mapRow(ResultSet rs, int rowNum) throws SQLException { .setIdleCores(Convert.coreUnitsToCores(rs.getInt("int_idle_cores"))) .setRunningCores(Convert.coreUnitsToCores(rs.getInt("int_running_cores"))) .setLockedCores(Convert.coreUnitsToCores(rs.getInt("int_locked_cores"))) + .setGpus(rs.getInt("int_gpus")) + .setAvailableGpus(rs.getInt("int_available_gpus")) + .setIdleGpus(rs.getInt("int_idle_gpus")) + .setRunningGpus(rs.getInt("int_running_gpus")) + .setLockedGpus(rs.getInt("int_locked_gpus")) .setHosts(rs.getInt("int_hosts")) .setDownHosts(rs.getInt("int_down_hosts")) .setLockedHosts(rs.getInt("int_locked_hosts")) @@ -1131,6 +1145,7 @@ public Group mapRow(ResultSet rs, int rowNum) throws SQLException { .setDependFrames(rs.getInt("int_depend_count")) .setPendingJobs(rs.getInt("int_job_count")) .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")) .build(); return Group.newBuilder() .setId(SqlUtil.getString(rs,"pk_folder")) @@ -1139,8 +1154,12 @@ public Group mapRow(ResultSet rs, int rowNum) throws SQLException { .setDefaultJobPriority(rs.getInt("int_job_priority")) .setDefaultJobMinCores(Convert.coreUnitsToCores(rs.getInt("int_job_min_cores"))) .setDefaultJobMaxCores(Convert.coreUnitsToCores(rs.getInt("int_job_max_cores"))) + .setDefaultJobMinGpus(rs.getInt("int_job_min_gpus")) + 
.setDefaultJobMaxGpus(rs.getInt("int_job_max_gpus")) .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_gpus")) + .setMinGpus(rs.getInt("int_min_gpus")) .setLevel(rs.getInt("int_level")) .setParentId(SqlUtil.getString(rs, "pk_parent_folder")) .setGroupStats(stats) @@ -1156,6 +1175,8 @@ public Job mapRow(ResultSet rs, int rowNum) throws SQLException { .setLogDir(SqlUtil.getString(rs, "str_log_dir")) .setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_max_cores"))) .setMinCores(Convert.coreUnitsToCores(rs.getInt("int_min_cores"))) + .setMaxGpus(rs.getInt("int_max_gpus")) + .setMinGpus(rs.getInt("int_min_gpus")) .setName(SqlUtil.getString(rs,"str_name")) .setPriority(rs.getInt("int_priority")) .setShot(SqlUtil.getString(rs,"str_shot")) @@ -1192,6 +1213,7 @@ public static JobStats mapJobStats(ResultSet rs) throws SQLException { JobStats.Builder statsBuilder = JobStats.newBuilder() .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")) .setMaxRss(rs.getLong("int_max_rss")) .setTotalFrames(rs.getInt("int_frame_count")) .setTotalLayers(rs.getInt("int_layer_count")) @@ -1205,6 +1227,9 @@ public static JobStats mapJobStats(ResultSet rs) throws SQLException { .setFailedCoreSec(rs.getLong("int_core_time_fail")) .setRenderedCoreSec(rs.getLong("int_core_time_success")) .setTotalCoreSec( rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) + .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) + .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) + .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) .setRenderedFrameCount( rs.getLong("int_frame_success_count")) .setFailedFrameCount(rs.getLong("int_frame_fail_count")) .setHighFrameSec(rs.getInt("int_clock_time_high")); @@ -1239,7 +1264,9 @@ public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { 
.setMaxCores(Convert.coreUnitsToCores(rs.getInt("int_cores_max"))) .setIsThreadable(rs.getBoolean("b_threadable")) .setMinMemory(rs.getLong("int_mem_min")) - .setMinGpu(rs.getLong("int_gpu_min")) + .setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getLong("int_gpu_mem_min")) .setType(LayerType.valueOf(SqlUtil.getString(rs,"str_type"))) .addAllTags(Sets.newHashSet( SqlUtil.getString(rs,"str_tags"). @@ -1252,6 +1279,7 @@ public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { LayerStats.Builder statsBuilder = LayerStats.newBuilder() .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")) .setMaxRss(rs.getLong("int_max_rss")) .setTotalFrames(rs.getInt("int_total_count")) .setWaitingFrames(rs.getInt("int_waiting_count")) @@ -1266,6 +1294,9 @@ public Layer mapRow(ResultSet rs, int rowNum) throws SQLException { .setRenderedCoreSec(rs.getLong("int_core_time_success")) .setTotalCoreSec( rs.getLong("int_core_time_fail") + rs.getLong("int_core_time_success")) + .setFailedGpuSec(rs.getLong("int_gpu_time_fail")) + .setRenderedGpuSec(rs.getLong("int_gpu_time_success")) + .setTotalGpuSec(rs.getLong("int_gpu_time_fail") + rs.getLong("int_gpu_time_success")) .setRenderedFrameCount( rs.getLong("int_frame_success_count")) .setFailedFrameCount(rs.getLong("int_frame_fail_count")) .setHighFrameSec(rs.getInt("int_clock_time_high")) @@ -1304,6 +1335,7 @@ public Subscription mapRow(ResultSet rs, int rowNum) throws SQLException { .setBurst(rs.getInt("int_burst")) .setName(rs.getString("name")) .setReservedCores(rs.getInt("int_cores")) + .setReservedGpus(rs.getInt("int_gpus")) .setSize(rs.getInt("int_size")) .setAllocationName(rs.getString("alloc_name")) .setShowName(rs.getString("show_name")) @@ -1324,9 +1356,10 @@ public UpdatedFrame mapRow(ResultSet rs, int rowNum) throws SQLException { .setUsedMemory(rs.getInt("int_mem_used")); if (SqlUtil.getString(rs, "str_host") != 
null) { - builder.setLastResource(String.format(Locale.ROOT, "%s/%2.2f", + builder.setLastResource(String.format(Locale.ROOT, "%s/%2.2f/%d", SqlUtil.getString(rs, "str_host"), - Convert.coreUnitsToCores(rs.getInt("int_cores")))); + Convert.coreUnitsToCores(rs.getInt("int_cores")), + rs.getInt("int_gpus"))); } else { builder.setLastResource(""); } @@ -1363,14 +1396,14 @@ public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { .setLayerName(SqlUtil.getString(rs,"layer_name")) .setUsedMemory(rs.getLong("int_mem_used")) .setReservedMemory(rs.getLong("int_mem_reserved")) - .setReservedGpu(rs.getLong("int_gpu_reserved")) + .setReservedGpuMemory(rs.getLong("int_gpu_mem_reserved")) .setCheckpointState(CheckpointState.valueOf( SqlUtil.getString(rs,"str_checkpoint_state"))) .setCheckpointCount(rs.getInt("int_checkpoint_count")); if (SqlUtil.getString(rs,"str_host") != null) { builder.setLastResource(CueUtil.buildProcName(SqlUtil.getString(rs,"str_host"), - rs.getInt("int_cores"))); + rs.getInt("int_cores"), rs.getInt("int_gpus"))); } else { builder.setLastResource(""); } @@ -1389,11 +1422,21 @@ public Frame mapRow(ResultSet rs, int rowNum) throws SQLException { else { builder.setStopTime(0); } + java.sql.Timestamp ts_llu = rs.getTimestamp("ts_llu"); + if (ts_llu!= null) { + builder.setLluTime((int) (ts_llu.getTime() / 1000)); + } + else { + builder.setLluTime(0); + } builder.setTotalCoreTime(rs.getInt("int_total_past_core_time")); + builder.setTotalGpuTime(rs.getInt("int_total_past_gpu_time")); if (builder.getState() == FrameState.RUNNING) { builder.setTotalCoreTime(builder.getTotalCoreTime() + (int)(System.currentTimeMillis() / 1000 - builder.getStartTime()) * rs.getInt("int_cores") / 100); + builder.setTotalGpuTime(builder.getTotalGpuTime() + + (int)(System.currentTimeMillis() / 1000 - builder.getStartTime()) * rs.getInt("int_gpus")); } return builder.build(); } @@ -1409,11 +1452,14 @@ public Service mapRow(ResultSet rs, int rowNum) throws SQLException { 
.setMinCores(rs.getInt("int_cores_min")) .setMaxCores(rs.getInt("int_cores_max")) .setMinMemory(rs.getInt("int_mem_min")) - .setMinGpu(rs.getInt("int_gpu_min")) + .setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) .addAllTags(Lists.newArrayList(ServiceDaoJdbc.splitTags( SqlUtil.getString(rs,"str_tags")))) .setTimeout(rs.getInt("int_timeout")) .setTimeoutLlu(rs.getInt("int_timeout_llu")) + .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")) .build(); } }; @@ -1428,11 +1474,14 @@ public ServiceOverride mapRow(ResultSet rs, int rowNum) throws SQLException { .setMinCores(rs.getInt("int_cores_min")) .setMaxCores(rs.getInt("int_cores_max")) .setMinMemory(rs.getInt("int_mem_min")) - .setMinGpu(rs.getInt("int_gpu_min")) + .setMinGpus(rs.getInt("int_gpus_min")) + .setMaxGpus(rs.getInt("int_gpus_max")) + .setMinGpuMemory(rs.getInt("int_gpu_mem_min")) .addAllTags(Lists.newArrayList(ServiceDaoJdbc.splitTags( SqlUtil.getString(rs,"str_tags")))) .setTimeout(rs.getInt("int_timeout")) .setTimeoutLlu(rs.getInt("int_timeout_llu")) + .setMinMemoryIncrease(rs.getInt("int_min_memory_increase")) .build(); return ServiceOverride.newBuilder() .setId(SqlUtil.getString(rs,"pk_show_service")) @@ -1453,6 +1502,7 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { .setRenderedFrameCount(rs.getLong("int_frame_success_count")) .setFailedFrameCount(rs.getLong("int_frame_fail_count")) .setReservedCores(Convert.coreUnitsToCores(rs.getInt("int_cores"))) + .setReservedGpus(rs.getInt("int_gpus")) .setPendingJobs(rs.getInt("int_job_count")) .build(); return Show.newBuilder() @@ -1461,6 +1511,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { .setActive(rs.getBoolean("b_active")) .setDefaultMaxCores(Convert.coreUnitsToCores(rs.getInt("int_default_max_cores"))) .setDefaultMinCores(Convert.coreUnitsToCores(rs.getInt("int_default_min_cores"))) + 
.setDefaultMaxGpus(rs.getInt("int_default_max_gpus")) + .setDefaultMinGpus(rs.getInt("int_default_min_gpus")) .setBookingEnabled(rs.getBoolean("b_booking_enabled")) .setDispatchEnabled(rs.getBoolean("b_dispatch_enabled")) .setCommentEmail(SqlUtil.getString(rs,"str_comment_email")) @@ -1516,13 +1568,15 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "frame.str_state,"+ "frame.str_host,"+ "frame.int_cores,"+ + "frame.int_gpus,"+ "frame.int_mem_max_used," + "frame.int_mem_used, " + "frame.int_mem_reserved, " + - "frame.int_gpu_reserved, " + + "frame.int_gpu_mem_reserved, " + "frame.str_checkpoint_state,"+ "frame.int_checkpoint_count,"+ "frame.int_total_past_core_time,"+ + "frame.int_total_past_gpu_time,"+ "layer.str_name AS layer_name," + "job.str_name AS job_name "+ "FROM "+ @@ -1559,11 +1613,15 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "proc.int_mem_reserved, " + "proc.int_mem_used, " + "proc.int_mem_max_used, " + - "proc.int_gpu_reserved, " + + "proc.int_gpus_reserved, " + + "proc.int_gpu_mem_reserved, " + + "proc.int_gpu_mem_used, " + + "proc.int_gpu_mem_max_used, " + "proc.ts_ping, " + "proc.ts_booked, " + "proc.ts_dispatched, " + "proc.b_unbooked, " + + "proc.bytea_children, " + "redirect.str_name AS str_redirect " + "FROM proc " + "JOIN host ON proc.pk_host = host.pk_host " + @@ -1596,6 +1654,7 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "frame.str_state,"+ "frame.str_host,"+ "frame.int_cores,"+ + "frame.int_gpus,"+ "frame.ts_llu,"+ "COALESCE(proc.int_mem_max_used, frame.int_mem_max_used) AS int_mem_max_used," + "COALESCE(proc.int_mem_used, frame.int_mem_used) AS int_mem_used " + @@ -1620,6 +1679,11 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "vs_alloc_usage.int_running_cores,"+ "vs_alloc_usage.int_available_cores,"+ "vs_alloc_usage.int_locked_cores,"+ + "vs_alloc_usage.int_gpus,"+ + "vs_alloc_usage.int_idle_gpus,"+ + "vs_alloc_usage.int_running_gpus,"+ + 
"vs_alloc_usage.int_available_gpus,"+ + "vs_alloc_usage.int_locked_gpus,"+ "vs_alloc_usage.int_hosts,"+ "vs_alloc_usage.int_locked_hosts,"+ "vs_alloc_usage.int_down_hosts "+ @@ -1653,6 +1717,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "str_ti_task,"+ "int_cores,"+ "int_min_cores,"+ + "int_gpus,"+ + "int_min_gpus,"+ "b_managed " + "FROM " + "point," + @@ -1675,6 +1741,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "str_ti_task,"+ "int_cores,"+ "int_min_cores,"+ + "int_gpus,"+ + "int_min_gpus,"+ "b_managed " + "FROM " + "point," + @@ -1705,11 +1773,13 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "host_local.pk_host_local,"+ "host_local.int_cores_idle,"+ "host_local.int_cores_max,"+ + "host_local.int_gpus_idle,"+ + "host_local.int_gpus_max,"+ "host_local.int_threads,"+ "host_local.int_mem_idle,"+ "host_local.int_mem_max,"+ - "host_local.int_gpu_idle,"+ - "host_local.int_gpu_max,"+ + "host_local.int_gpu_mem_idle,"+ + "host_local.int_gpu_mem_max,"+ "host_local.str_type,"+ "(SELECT str_name FROM host WHERE host.pk_host = host_local.pk_host) " + "AS str_host_name,"+ @@ -1778,6 +1848,10 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "folder.int_job_max_cores," + "folder_resource.int_min_cores,"+ "folder_resource.int_max_cores,"+ + "folder.int_job_min_gpus," + + "folder.int_job_max_gpus," + + "folder_resource.int_min_gpus,"+ + "folder_resource.int_max_gpus,"+ "folder.b_default, " + "folder_level.int_level, " + "c.int_waiting_count, " + @@ -1785,7 +1859,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "c.int_running_count,"+ "c.int_dead_count,"+ "c.int_job_count,"+ - "c.int_cores " + + "c.int_cores," + + "c.int_gpus " + "FROM " + "folder, " + "folder_level," + @@ -1820,6 +1895,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "job.str_log_dir," + "job_resource.int_max_cores," + "job_resource.int_min_cores," + + 
"job_resource.int_max_gpus," + + "job_resource.int_min_gpus," + "job.str_name," + "job.str_shot,"+ "job.str_state,"+ @@ -1846,12 +1923,15 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "job_stat.int_succeeded_count, "+ "job_usage.int_core_time_success, "+ "job_usage.int_core_time_fail, " + + "job_usage.int_gpu_time_success, "+ + "job_usage.int_gpu_time_fail, " + "job_usage.int_frame_success_count, "+ "job_usage.int_frame_fail_count, "+ "job_usage.int_clock_time_high,"+ "job_usage.int_clock_time_success,"+ "job_mem.int_max_rss,"+ - "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores " + + "(job_resource.int_cores + job_resource.int_local_cores) AS int_cores," + + "(job_resource.int_gpus + job_resource.int_local_gpus) AS int_gpus " + "FROM " + "job,"+ "folder,"+ @@ -1888,6 +1968,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "layer_stat.int_succeeded_count," + "layer_usage.int_core_time_success," + "layer_usage.int_core_time_fail, "+ + "layer_usage.int_gpu_time_success," + + "layer_usage.int_gpu_time_fail, "+ "layer_usage.int_frame_success_count, "+ "layer_usage.int_frame_fail_count, "+ "layer_usage.int_clock_time_low, "+ @@ -1895,7 +1977,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "layer_usage.int_clock_time_success," + "layer_usage.int_clock_time_fail," + "layer_mem.int_max_rss,"+ - "layer_resource.int_cores " + + "layer_resource.int_cores," + + "layer_resource.int_gpus " + "FROM " + "layer, " + "job," + @@ -1926,6 +2009,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "layer_stat.int_succeeded_count, " + "layer_usage.int_core_time_success, " + "layer_usage.int_core_time_fail, " + + "layer_usage.int_gpu_time_success, " + + "layer_usage.int_gpu_time_fail, " + "layer_usage.int_frame_success_count, " + "layer_usage.int_frame_fail_count, " + "layer_usage.int_clock_time_low, " + @@ -1934,6 +2019,7 @@ public Show mapRow(ResultSet rs, int rowNum) throws 
SQLException { "layer_usage.int_clock_time_fail, " + "layer_mem.int_max_rss, " + "layer_resource.int_cores, " + + "layer_resource.int_gpus, " + "limit_names.str_limit_names " + "FROM " + "layer " + @@ -1974,14 +2060,30 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { private static final String GET_SHOW = "SELECT " + - "show.*," + + "show.pk_show," + + "show.str_name," + + "show.b_paused," + + "show.int_default_min_cores," + + "show.int_default_max_cores," + + "show.int_default_min_gpus," + + "show.int_default_max_gpus," + + "show.b_booking_enabled," + + "show.b_dispatch_enabled," + + "show.b_active," + + "show.str_comment_email," + + "show_stats.int_frame_insert_count," + + "show_stats.int_job_insert_count," + + "show_stats.int_frame_success_count," + + "show_stats.int_frame_fail_count," + "COALESCE(vs_show_stat.int_pending_count,0) AS int_pending_count," + "COALESCE(vs_show_stat.int_running_count,0) AS int_running_count," + "COALESCE(vs_show_stat.int_dead_count,0) AS int_dead_count," + "COALESCE(vs_show_resource.int_cores,0) AS int_cores, " + + "COALESCE(vs_show_resource.int_gpus,0) AS int_gpus, " + "COALESCE(vs_show_stat.int_job_count,0) AS int_job_count " + "FROM " + "show " + + "JOIN show_stats ON (show.pk_show = show_stats.pk_show) " + "LEFT JOIN vs_show_stat ON (vs_show_stat.pk_show = show.pk_show) " + "LEFT JOIN vs_show_resource ON (vs_show_resource.pk_show=show.pk_show) " + "WHERE " + @@ -1995,10 +2097,13 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "service.int_cores_min," + "service.int_cores_max," + "service.int_mem_min," + - "service.int_gpu_min," + + "service.int_gpus_min," + + "service.int_gpus_max," + + "service.int_gpu_mem_min," + "service.str_tags," + "service.int_timeout," + - "service.int_timeout_llu " + + "service.int_timeout_llu," + + "service.int_min_memory_increase " + "FROM "+ "service "; @@ -2010,10 +2115,13 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { 
"show_service.int_cores_min," + "show_service.int_cores_max," + "show_service.int_mem_min," + - "show_service.int_gpu_min," + + "show_service.int_gpus_min," + + "show_service.int_gpus_max," + + "show_service.int_gpu_mem_min," + "show_service.str_tags," + "show_service.int_timeout," + - "show_service.int_timeout_llu " + + "show_service.int_timeout_llu," + + "show_service.int_min_memory_increase " + "FROM "+ "show_service, " + "show " + @@ -2026,6 +2134,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "task.str_shot,"+ "task.int_min_cores + task.int_adjust_cores AS int_min_cores, "+ "task.int_adjust_cores, " + + "task.int_min_gpus + task.int_adjust_gpus AS int_min_gpus, "+ + "task.int_adjust_gpus, " + "dept.str_name AS str_dept "+ "FROM " + "task,"+ @@ -2048,8 +2158,10 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "host.int_cores_idle,"+ "host.int_mem,"+ "host.int_mem_idle,"+ - "host.int_gpu,"+ - "host.int_gpu_idle,"+ + "host.int_gpus,"+ + "host.int_gpus_idle,"+ + "host.int_gpu_mem,"+ + "host.int_gpu_mem_idle,"+ "host.str_tags,"+ "host.str_lock_state,"+ "host.b_comment,"+ @@ -2061,8 +2173,8 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "host_stat.int_swap_free,"+ "host_stat.int_mcp_total,"+ "host_stat.int_mcp_free,"+ - "host_stat.int_gpu_total,"+ - "host_stat.int_gpu_free,"+ + "host_stat.int_gpu_mem_total,"+ + "host_stat.int_gpu_mem_free,"+ "host_stat.int_load, " + "alloc.str_name AS alloc_name " + "FROM " + @@ -2100,6 +2212,7 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "subscription.int_burst, " + "subscription.int_size, " + "subscription.int_cores, " + + "subscription.int_gpus, " + "show.str_name AS show_name, " + "alloc.str_name AS alloc_name, " + "facility.str_name AS facility_name " + @@ -2138,10 +2251,14 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "frame.int_mem_max_used," + "frame.int_mem_used, " + "frame.int_mem_reserved, " + - 
"frame.int_gpu_reserved, " + + "frame.int_gpus,"+ + "frame.int_gpu_mem_max_used, " + + "frame.int_gpu_mem_used, " + + "frame.int_gpu_mem_reserved, " + "frame.str_checkpoint_state,"+ "frame.int_checkpoint_count,"+ "frame.int_total_past_core_time,"+ + "frame.int_total_past_gpu_time,"+ "layer.str_name AS layer_name," + "job.str_name AS job_name, "+ "ROW_NUMBER() OVER " + @@ -2160,9 +2277,6 @@ public Show mapRow(ResultSet rs, int rowNum) throws SQLException { "host.str_name AS str_host,"+ "show.str_name AS str_show,"+ "owner.str_username," + - "deed.b_blackout,"+ - "deed.int_blackout_start,"+ - "deed.int_blackout_stop,"+ "deed.pk_deed " + "FROM " + "deed,"+ diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java index ddf3b2a2b..355c64175 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/AbstractDispatcher.java @@ -19,7 +19,8 @@ package com.imageworks.spcue.dispatcher; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.imageworks.spcue.DispatchFrame; import com.imageworks.spcue.VirtualProc; @@ -33,7 +34,7 @@ */ public abstract class AbstractDispatcher { - private static final Logger logger = Logger.getLogger(AbstractDispatcher.class); + private static final Logger logger = LogManager.getLogger(AbstractDispatcher.class); public DispatchSupport dispatchSupport; public RqdClient rqdClient; @@ -129,6 +130,7 @@ public boolean dispatchHost(DispatchFrame frame, VirtualProc proc) { dispatchSummary(proc, frame, "Booking"); DispatchSupport.bookedProcs.getAndIncrement(); DispatchSupport.bookedCores.addAndGet(proc.coresReserved); + DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); return true; } catch (FrameReservationException fre) { /* @@ -222,8 +224,10 @@ private static void 
dispatchSummary(VirtualProc p, DispatchFrame f, String type) " cores / " + CueUtil.KbToMb(p.memoryReserved) + " memory / " + - p.gpuReserved + - " gpu on " + + p.gpusReserved + + " gpus / " + + CueUtil.KbToMb(p.gpuMemoryReserved) + + " gpu memory " + p.getName() + " to " + f.show + "/" + f.shot; logger.info(msg); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java index ace595ea2..69a961977 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/BookingQueue.java @@ -19,90 +19,103 @@ package com.imageworks.spcue.dispatcher; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.apache.log4j.Logger; - -public class BookingQueue extends ThreadPoolExecutor { +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; + +public class BookingQueue { + + private final int healthThreshold; + private final int minUnhealthyPeriodMin; + private final int queueCapacity; + private final int corePoolSize; + private final int maxPoolSize; + private static final int BASE_SLEEP_TIME_MILLIS = 300; + + private static final Logger logger = LogManager.getLogger("HEALTH"); + private HealthyThreadPool healthyThreadPool; + + public BookingQueue(int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, + int corePoolSize, int maxPoolSize) { + this.healthThreshold = healthThreshold; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.queueCapacity = 
queueCapacity; + this.corePoolSize = corePoolSize; + this.maxPoolSize = maxPoolSize; + initThreadPool(); + } - private static final Logger logger = Logger.getLogger(BookingQueue.class); + public void initThreadPool() { + healthyThreadPool = new HealthyThreadPool( + "BookingQueue", + healthThreshold, + minUnhealthyPeriodMin, + queueCapacity, + corePoolSize, + maxPoolSize, + BASE_SLEEP_TIME_MILLIS); + } - private static final int INITIAL_QUEUE_SIZE = 1000; + public boolean isHealthy() { + try { + if (!healthyThreadPool.isHealthyOrShutdown()) { + logger.warn("BookingQueue: Unhealthy queue terminated, starting a new one"); + initThreadPool(); + } + } catch (InterruptedException e) { + // TODO: evaluate crashing the whole springbook context here + // to force a container restart cycle + logger.error("Failed to restart BookingThreadPool", e); + return false; + } - private static final int THREADS_MINIMUM = 6; - private static final int THREADS_MAXIMUM = 6; - private static final int THREADS_KEEP_ALIVE_SECONDS = 10; + return true; + } - private int baseSleepTimeMillis = 400; - private AtomicBoolean isShutdown = new AtomicBoolean(false); + public void execute(KeyRunnable r) { + healthyThreadPool.execute(r); + } - private QueueRejectCounter rejectCounter = new QueueRejectCounter(); + public long getRejectedTaskCount() { + return healthyThreadPool.getRejectedTaskCount(); + } - public BookingQueue(int sleepTimeMs) { - super(THREADS_MINIMUM, THREADS_MAXIMUM, THREADS_KEEP_ALIVE_SECONDS, - TimeUnit.SECONDS, new LinkedBlockingQueue(INITIAL_QUEUE_SIZE)); - this.baseSleepTimeMillis = sleepTimeMs; - this.setRejectedExecutionHandler(rejectCounter); + public int getQueueCapacity() { + return queueCapacity; } - public void execute(Runnable r) { - if (!isShutdown.get()) { - super.execute(r); - } + public void shutdown() { + healthyThreadPool.shutdown(); } - public long getRejectedTaskCount() { - return rejectCounter.getRejectCount(); + public int getSize() { + return 
healthyThreadPool.getQueue().size(); } - public void shutdown() { - if (!isShutdown.getAndSet(true)) { - logger.info("clearing out booking queue: " + this.getQueue().size()); - this.getQueue().clear(); - } + public int getRemainingCapacity() { + return healthyThreadPool.getQueue().remainingCapacity(); + } + public int getActiveCount() { + return healthyThreadPool.getActiveCount(); } - /** - * Lowers the sleep time as the queue grows. - * - * @return - */ - public int sleepTime() { - if (!isShutdown.get()) { - int sleep = (int) (baseSleepTimeMillis - (((this.getQueue().size () / - (float) INITIAL_QUEUE_SIZE) * baseSleepTimeMillis)) * 2); - if (sleep < 0) { - sleep = 0; - } - return sleep; - } else { - return 0; - } + public long getCompletedTaskCount() { + return healthyThreadPool.getCompletedTaskCount(); } - protected void beforeExecute(Thread t, Runnable r) { - super.beforeExecute(t, r); - if (isShutdown()) { - this.remove(r); - } else { - try { - Thread.sleep(sleepTime()); - } catch (InterruptedException e) { - logger.info("booking queue was interrupted."); - Thread.currentThread().interrupt(); - } - } + public long getCorePoolSize() { + return corePoolSize; } - protected void afterExecute(Runnable r, Throwable t) { - super.afterExecute(r, t); - if (sleepTime() < 100) { - getQueue().clear(); - } + public long getMaximumPoolSize() { + return maxPoolSize; } + } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java index d57caf3e9..a9a8b918a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/CoreUnitDispatcher.java @@ -21,12 +21,12 @@ import java.util.ArrayList; import java.util.List; -import java.util.Set; import java.util.concurrent.TimeUnit; import com.google.common.cache.Cache; import com.google.common.cache.CacheBuilder; -import 
org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.core.env.Environment; @@ -86,7 +86,7 @@ */ public class CoreUnitDispatcher implements Dispatcher { private static final Logger logger = - Logger.getLogger(CoreUnitDispatcher.class); + LogManager.getLogger(CoreUnitDispatcher.class); private DispatchSupport dispatchSupport; @@ -126,7 +126,7 @@ private Cache getOrCreateJobLock() { } - private List dispatchJobs(DispatchHost host, Set jobs) { + private List dispatchJobs(DispatchHost host, List jobs) { List procs = new ArrayList(); try { @@ -135,7 +135,8 @@ private List dispatchJobs(DispatchHost host, Set jobs) { if (!host.hasAdditionalResources( Dispatcher.CORE_POINTS_RESERVED_MIN, Dispatcher.MEM_RESERVED_MIN, - Dispatcher.GPU_RESERVED_MIN)) { + Dispatcher.GPU_UNITS_RESERVED_MIN, + Dispatcher.MEM_GPU_RESERVED_MIN)) { return procs; } @@ -169,8 +170,8 @@ private List dispatchJobs(DispatchHost host, Set jobs) { return procs; } - private Set getGpuJobs(DispatchHost host, ShowInterface show) { - Set jobs = null; + private List getGpuJobs(DispatchHost host, ShowInterface show) { + List jobs = null; // TODO: GPU: make index with the 4 components instead of just 3, replace the just 3 @@ -179,7 +180,8 @@ private Set getGpuJobs(DispatchHost host, ShowInterface show) { if (host.hasAdditionalResources( Dispatcher.CORE_POINTS_RESERVED_DEFAULT, Dispatcher.MEM_RESERVED_MIN, - 1)) { + Dispatcher.GPU_UNITS_RESERVED_DEFAULT, + Dispatcher.MEM_GPU_RESERVED_DEFAULT)) { if (show == null) jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); @@ -198,7 +200,7 @@ private Set getGpuJobs(DispatchHost host, ShowInterface show) { @Override public List dispatchHostToAllShows(DispatchHost host) { - Set jobs = dispatchSupport.findDispatchJobsForAllShows( + List jobs = dispatchSupport.findDispatchJobsForAllShows( host, 
getIntProperty("dispatcher.job_query_max")); @@ -208,7 +210,7 @@ public List dispatchHostToAllShows(DispatchHost host) { @Override public List dispatchHost(DispatchHost host) { - Set jobs = getGpuJobs(host, null); + List jobs = getGpuJobs(host, null); if (jobs == null) jobs = dispatchSupport.findDispatchJobs(host, getIntProperty("dispatcher.job_query_max")); @@ -219,7 +221,7 @@ public List dispatchHost(DispatchHost host) { @Override public List dispatchHost(DispatchHost host, ShowInterface show) { - Set jobs = getGpuJobs(host, show); + List jobs = getGpuJobs(host, show); if (jobs == null) jobs = dispatchSupport.findDispatchJobs(host, show, @@ -231,7 +233,7 @@ public List dispatchHost(DispatchHost host, ShowInterface show) { @Override public List dispatchHost(DispatchHost host, GroupInterface group) { - Set jobs = getGpuJobs(host, null); + List jobs = getGpuJobs(host, null); if (jobs == null) jobs = dispatchSupport.findDispatchJobs(host, group); @@ -262,11 +264,12 @@ public List dispatchHost(DispatchHost host, JobInterface job) { if (host.idleCores < frame.minCores || host.idleMemory < frame.minMemory || - host.idleGpu < frame.minGpu) { + host.idleGpus < frame.minGpus || + host.idleGpuMemory < frame.minGpuMemory) { break; } - if (!dispatchSupport.isJobBookable(job, proc.coresReserved)) { + if (!dispatchSupport.isJobBookable(job, proc.coresReserved, proc.gpusReserved)) { break; } @@ -289,17 +292,19 @@ public void wrapDispatchFrame() { DispatchSupport.bookedProcs.getAndIncrement(); DispatchSupport.bookedCores.addAndGet(proc.coresReserved); + DispatchSupport.bookedGpus.addAndGet(proc.gpusReserved); if (host.strandedCores > 0) { dispatchSupport.pickupStrandedCores(host); break; } - host.useResources(proc.coresReserved, proc.memoryReserved, proc.gpuReserved); + host.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved); if (!host.hasAdditionalResources( Dispatcher.CORE_POINTS_RESERVED_MIN, Dispatcher.MEM_RESERVED_MIN, - 
Dispatcher.GPU_RESERVED_MIN)) { + Dispatcher.GPU_UNITS_RESERVED_MIN, + Dispatcher.MEM_GPU_RESERVED_MIN)) { break; } else if (procs.size() >= getIntProperty("dispatcher.job_frame_dispatch_max")) { @@ -383,6 +388,7 @@ public boolean isTestMode() { @Override public void setTestMode(boolean enabled) { testMode = enabled; + dispatchSupport.clearCache(); } /** @@ -398,8 +404,10 @@ private void dispatchSummary(VirtualProc p, DispatchFrame f, String type) { " cores / " + CueUtil.KbToMb(p.memoryReserved) + " memory / " + - p.gpuReserved + - " gpu on " + + p.gpusReserved + + " gpus / " + + CueUtil.KbToMb(p.gpuMemoryReserved) + + " gpu memory " + p.getName() + " to " + f.show + "/" + f.shot; logger.trace(msg); @@ -538,4 +546,3 @@ public boolean execute() { } } } - diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java index 81366dedc..00e552a05 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchQueue.java @@ -22,94 +22,86 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; -import org.apache.log4j.Logger; -import org.springframework.core.task.TaskExecutor; -import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; public class DispatchQueue { - private TaskExecutor dispatchPool; - private ThreadPoolTaskExecutor _dispatchPool; - private String name = "Default"; - private AtomicBoolean isShutdown = new AtomicBoolean(false); - - private final AtomicLong tasksRun = new AtomicLong(0); - private final AtomicLong tasksRejected = new AtomicLong(0); + private int healthThreshold; + private int minUnhealthyPeriodMin; + private int queueCapacity; + private int corePoolSize; + 
private int maxPoolSize; - private static final Logger logger = Logger.getLogger(DispatchQueue.class); - - public DispatchQueue() {} + private static final Logger logger = LogManager.getLogger("HEALTH"); + private String name = "Default"; + private HealthyThreadPool healthyDispatchPool; - public DispatchQueue(String name) { + public DispatchQueue(String name, int healthThreshold, int minUnhealthyPeriodMin, int queueCapacity, + int corePoolSize, int maxPoolSize) { this.name = name; + this.healthThreshold = healthThreshold; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.queueCapacity = queueCapacity; + this.corePoolSize = corePoolSize; + this.maxPoolSize = maxPoolSize; + initThreadPool(); } - public void execute(Runnable r) { + public void initThreadPool() { + healthyDispatchPool = new HealthyThreadPool( + name, + healthThreshold, + minUnhealthyPeriodMin, + queueCapacity, + corePoolSize, + maxPoolSize); + } + + public boolean isHealthy() { try { - if (!isShutdown.get()) { - this.dispatchPool.execute(r); - tasksRun.addAndGet(1); + if (!healthyDispatchPool.isHealthyOrShutdown()) { + logger.warn("DispatchQueue_" + name + ": Unhealthy queue terminated, starting a new one"); + initThreadPool(); } - } catch (Exception e) { - long rejection = tasksRejected.addAndGet(1); - logger.warn("Warning, dispatch queue - [" + name + "] rejected, " + e); - throw new DispatchQueueTaskRejectionException( - "Warning, dispatch queue [" + name + " rejected task #" - + rejection); + } catch (InterruptedException e) { + // TODO: evaluate crashing the whole springbook context here + // to force a container restart cycle + logger.error("DispatchQueue_" + name + ":Failed to restart DispatchThreadPool", e); + return false; } - } - public int getMaxPoolSize() { - return _dispatchPool.getMaxPoolSize(); + return true; } - public int getActiveThreadCount() { - return _dispatchPool.getActiveCount(); + public void execute(KeyRunnable r) { + healthyDispatchPool.execute(r); } - public int 
getWaitingCount() { - return _dispatchPool.getThreadPoolExecutor().getQueue().size(); + public long getRejectedTaskCount() { + return healthyDispatchPool.getRejectedTaskCount(); } - public int getRemainingCapacity() { - return _dispatchPool.getThreadPoolExecutor().getQueue().remainingCapacity(); + public void shutdown() { + healthyDispatchPool.shutdown(); } - public long getTotalDispatched() { - return tasksRun.get(); + public int getSize() { + return healthyDispatchPool.getQueue().size(); } - public long getTotalRejected() { - return tasksRejected.get(); + public int getRemainingCapacity() { + return healthyDispatchPool.getQueue().remainingCapacity(); } - public TaskExecutor getDispatchPool() { - return dispatchPool; + public int getActiveCount() { + return healthyDispatchPool.getActiveCount(); } - public void setDispatchPool(TaskExecutor dispatchPool) { - this.dispatchPool = dispatchPool; - this._dispatchPool = (ThreadPoolTaskExecutor) dispatchPool; + public long getCompletedTaskCount() { + return healthyDispatchPool.getCompletedTaskCount(); } - public void shutdown() { - if (!isShutdown.getAndSet(true)) { - logger.info("Shutting down thread pool " + name + ", currently " - + getActiveThreadCount() + " active threads."); - final long startTime = System.currentTimeMillis(); - while (getWaitingCount() != 0 && getActiveThreadCount() != 0) { - try { - if (System.currentTimeMillis() - startTime > 10000) { - throw new InterruptedException(name - + " thread pool failed to shutdown properly"); - } - Thread.sleep(250); - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - break; - } - } - } - } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java index ebdd5082d..aa20e6266 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupport.java @@ 
-77,6 +77,11 @@ public interface DispatchSupport { */ static final AtomicLong bookedCores = new AtomicLong(0); + /** + * Long for counting how many gpus have been booked + */ + static final AtomicLong bookedGpus = new AtomicLong(0); + /** * Long for counting how many procs have been booked */ @@ -122,6 +127,16 @@ public interface DispatchSupport { */ static final AtomicLong strandedCoresCount = new AtomicLong(0); + /** + * Count number of picked up gpus. + */ + static final AtomicLong pickedUpGpusCount = new AtomicLong(0); + + /** + * Count number of stranded gpus. + */ + static final AtomicLong strandedGpusCount = new AtomicLong(0); + /** * Set the proc's frame assignment to null; * @@ -291,7 +306,7 @@ List findNextDispatchFrames(LayerInterface layer, VirtualProc pro * @param host * @return */ - Set findDispatchJobsForAllShows(DispatchHost host, int numJobs); + List findDispatchJobsForAllShows(DispatchHost host, int numJobs); /** * Returns the highest priority job that can utilize @@ -300,7 +315,7 @@ List findNextDispatchFrames(LayerInterface layer, VirtualProc pro * @param host * @return */ - Set findDispatchJobs(DispatchHost host, int numJobs); + List findDispatchJobs(DispatchHost host, int numJobs); /** * Returns the highest priority jobs that can utilize @@ -309,7 +324,7 @@ List findNextDispatchFrames(LayerInterface layer, VirtualProc pro * @param host * @return A set of unique job ids. */ - Set findDispatchJobs(DispatchHost host, GroupInterface p); + List findDispatchJobs(DispatchHost host, GroupInterface p); /** * @@ -401,21 +416,23 @@ List findNextDispatchFrames(LayerInterface layer, VirtualProc pro void clearFrame(DispatchFrame frame); /** - * Update usage data for the given frame. 
+ * Sets the frame state exitStatus to EXIT_STATUS_MEMORY_FAILURE * * @param frame - * @param lluTime + * @return whether the frame has been updated */ - void updateFrameUsage(FrameInterface frame, long lluTime); + boolean updateFrameMemoryError(FrameInterface frame); /** - * Update memory usage data for the given frame. + * Update Memory usage data and LLU time for the given frame. * * @param frame * @param rss * @param maxRss + * @param lluTime */ - void updateFrameMemoryUsage(FrameInterface frame, long rss, long maxRss); + void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, + long lluTime); /** * Update memory usage data for a given frame's proc record. The @@ -427,9 +444,12 @@ List findNextDispatchFrames(LayerInterface layer, VirtualProc pro * @param maxRss * @param vsize * @param maxVsize + * @param usedGpuMemory + * @param maxUsedGpuMemory */ - void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsize, - long maxVsize); + void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, + long vsize, long maxVsize, long usedGpuMemory, + long maxUsedGpuMemory, byte[] children); /** * Return true if adding the given core units would put the show @@ -456,7 +476,7 @@ void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsi * @param job * @return */ - boolean isJobBookable(JobInterface job, int coreUnits); + boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits); /** * Return true if the specified show is at or over its @@ -512,14 +532,14 @@ void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsi void determineIdleCores(DispatchHost host, int load); /** - * Return a set of job IDs that can take the given host. + * Return a list of job IDs that can take the given host. 
* * @param host * @param show * @param numJobs * @return */ - Set findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); + List findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs); /** * Return true of the job has pending frames. @@ -537,5 +557,12 @@ void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, long vsi */ boolean hasPendingFrames(LayerInterface layer); + /** + * Clear bookableShows cache + * + * @return + */ + void clearCache(); + } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java index 3e3d82b2f..713f6c86c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java @@ -23,15 +23,11 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import org.apache.log4j.Logger; -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.transaction.annotation.Propagation; -import org.springframework.transaction.annotation.Transactional; - import com.imageworks.spcue.AllocationInterface; import com.imageworks.spcue.DispatchFrame; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.FacilityInterface; +import com.imageworks.spcue.FrameDetail; import com.imageworks.spcue.FrameInterface; import com.imageworks.spcue.GroupInterface; import com.imageworks.spcue.HostInterface; @@ -43,6 +39,13 @@ import com.imageworks.spcue.ShowInterface; import com.imageworks.spcue.StrandedCores; import com.imageworks.spcue.VirtualProc; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.dao.EmptyResultDataAccessException; +import org.springframework.dao.DataAccessException; +import org.springframework.transaction.annotation.Propagation; +import 
org.springframework.transaction.annotation.Transactional; + import com.imageworks.spcue.dao.BookingDao; import com.imageworks.spcue.dao.DispatcherDao; import com.imageworks.spcue.dao.FrameDao; @@ -63,7 +66,7 @@ @Transactional(propagation = Propagation.REQUIRED) public class DispatchSupportService implements DispatchSupport { - private static final Logger logger = Logger.getLogger(DispatchSupportService.class); + private static final Logger logger = LogManager.getLogger(DispatchSupportService.class); private JobDao jobDao; private FrameDao frameDao; @@ -148,17 +151,17 @@ public boolean higherPriorityJobExists(JobDetail baseJob, VirtualProc proc) { } @Transactional(readOnly = true) - public Set findDispatchJobsForAllShows(DispatchHost host, int numJobs) { + public List findDispatchJobsForAllShows(DispatchHost host, int numJobs) { return dispatcherDao.findDispatchJobsForAllShows(host, numJobs); } @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, int numJobs) { + public List findDispatchJobs(DispatchHost host, int numJobs) { return dispatcherDao.findDispatchJobs(host, numJobs); } @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, GroupInterface g) { + public List findDispatchJobs(DispatchHost host, GroupInterface g) { return dispatcherDao.findDispatchJobs(host, g); } @@ -170,7 +173,7 @@ public Set findLocalDispatchJobs(DispatchHost host) { @Override @Transactional(readOnly = true) - public Set findDispatchJobs(DispatchHost host, ShowInterface show, + public List findDispatchJobs(DispatchHost host, ShowInterface show, int numJobs) { return dispatcherDao.findDispatchJobs(host, show, numJobs); } @@ -182,7 +185,11 @@ public boolean increaseReservedMemory(ProcInterface p, long value) { @Override public boolean clearVirtualProcAssignement(ProcInterface proc) { - return procDao.clearVirtualProcAssignment(proc); + try { + return procDao.clearVirtualProcAssignment(proc); + } catch (DataAccessException e) { + return 
false; + } } @Transactional(propagation = Propagation.REQUIRED) @@ -245,7 +252,7 @@ public boolean isJobBookable(JobInterface job) { @Override @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isJobBookable(JobInterface job, int coreUnits) { + public boolean isJobBookable(JobInterface job, int coreUnits, int gpuUnits) { if (!jobDao.hasPendingFrames(job)) { return false; @@ -255,6 +262,10 @@ public boolean isJobBookable(JobInterface job, int coreUnits) { return false; } + if (jobDao.isOverMaxGpus(job, gpuUnits)) { + return false; + } + return true; } @@ -337,6 +348,12 @@ public void clearFrame(DispatchFrame frame) { frameDao.updateFrameCleared(frame); } + @Override + @Transactional(propagation = Propagation.REQUIRED) + public boolean updateFrameMemoryError(FrameInterface frame) { + return frameDao.updateFrameMemoryError(frame); + } + @Transactional(propagation = Propagation.SUPPORTS) public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { int threads = proc.coresReserved / 100; @@ -363,6 +380,7 @@ public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { .setLayerId(frame.getLayerId()) .setResourceId(proc.getProcId()) .setNumCores(proc.coresReserved) + .setNumGpus(proc.gpusReserved) .setStartTime(System.currentTimeMillis()) .setIgnoreNimby(proc.isLocalDispatch) .putAllEnvironment(jobDao.getEnvironment(frame)) @@ -370,6 +388,8 @@ public RunFrame prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { .putEnvironment("CUE3", "1") .putEnvironment("CUE_THREADS", String.valueOf(threads)) .putEnvironment("CUE_MEMORY", String.valueOf(proc.memoryReserved)) + .putEnvironment("CUE_GPUS", String.valueOf(proc.gpusReserved)) + .putEnvironment("CUE_GPU_MEMORY", String.valueOf(proc.gpuMemoryReserved)) .putEnvironment("CUE_LOG_PATH", frame.logDir) .putEnvironment("CUE_RANGE", frame.range) .putEnvironment("CUE_CHUNK", String.valueOf(frame.chunkSize)) @@ -395,6 +415,10 @@ public RunFrame 
prepareRqdRunFrame(VirtualProc proc, DispatchFrame frame) { .replaceAll("#JOB#", frame.jobName) .replaceAll("#FRAMESPEC#", frameSpec) .replaceAll("#FRAME#", frame.name)); + /* The special command tokens above (#ZFRAME# and others) are provided to the user in cuesubmit. + * see: cuesubmit/cuesubmit/Constants.py + * Update the Constant.py file when updating tokens here, they will appear in the cuesubmit tooltip popup. + */ frame.uid.ifPresent(builder::setUid); @@ -515,52 +539,55 @@ public void lostProc(VirtualProc proc, String reason, int exitStatus) { frameDao.updateFrameCheckpointState(f, CheckpointState.DISABLED); /* * If the proc has a frame, stop the frame. Frames - * can only be stopped that are running, so if the frame - * is not running it will remain untouched. + * can only be stopped that are running. */ if (frameDao.updateFrameStopped(f, FrameState.WAITING, exitStatus)) { updateUsageCounters(proc, exitStatus); } + /* + * If the frame is not running, check if frame is in dead state, + * frames that died due to host going down should be put back + * into WAITING status. 
+ */ + else { + FrameDetail frameDetail = frameDao.getFrameDetail(f); + if ((frameDetail.state == FrameState.DEAD) && + (Dispatcher.EXIT_STATUS_DOWN_HOST == exitStatus)) { + if (frameDao.updateFrameHostDown(f)) { + logger.info("update frame " + f.getFrameId() + + "to WAITING status for down host"); + } + } + } + } else { + logger.info("Frame ID is NULL, not updating Frame state"); } } @Override @Transactional(propagation = Propagation.REQUIRED) public void updateProcMemoryUsage(FrameInterface frame, long rss, long maxRss, - long vsize, long maxVsize) { - procDao.updateProcMemoryUsage(frame, rss, maxRss, vsize, maxVsize); - } - - @Override - @Transactional(propagation = Propagation.REQUIRED) - public void updateFrameUsage(FrameInterface frame, long lluTime) { - - try { - frameDao.updateFrameUsage(frame, lluTime); - } - catch (FrameReservationException ex) { - // Eat this, the frame was not in the correct state or - // was locked by another thread. The only reason it would - // be locked by another thread would be if the state is - // changing. - logger.warn("failed to update io stats for frame: " + frame); - } + long vsize, long maxVsize, long usedGpuMemory, + long maxUsedGpuMemory, byte[] children) { + procDao.updateProcMemoryUsage(frame, rss, maxRss, vsize, maxVsize, + usedGpuMemory, maxUsedGpuMemory, children); } @Override @Transactional(propagation = Propagation.REQUIRED) - public void updateFrameMemoryUsage(FrameInterface frame, long rss, long maxRss) { + public void updateFrameMemoryUsageAndLluTime(FrameInterface frame, long rss, long maxRss, + long lluTime) { try { - frameDao.updateFrameMemoryUsage(frame, maxRss, rss); + frameDao.updateFrameMemoryUsageAndLluTime(frame, maxRss, rss, lluTime); } catch (FrameReservationException ex) { // Eat this, the frame was not in the correct state or // was locked by another thread. The only reason it would // be locked by another thread would be if the state is // changing. 
- logger.warn("failed to update memory stats for frame: " + frame); + logger.warn("failed to update memory usage and LLU time for frame: " + frame); } } @@ -678,5 +705,10 @@ public BookingDao getBookingDao() { public void setBookingDao(BookingDao bookingDao) { this.bookingDao = bookingDao; } + + @Override + public void clearCache() { + dispatcherDao.clearCache(); + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java index d29c51f9c..072b04113 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/Dispatcher.java @@ -1,4 +1,3 @@ - /* * Copyright Contributors to the OpenCue Project * @@ -18,7 +17,6 @@ package com.imageworks.spcue.dispatcher; - import java.util.List; import com.imageworks.spcue.DispatchFrame; @@ -44,6 +42,10 @@ public interface Dispatcher { // The minimum amount of core points you can assign to a frame. public static final int CORE_POINTS_RESERVED_MIN = 10; + // The minimum amount of gpu points you can assign to a frame. + public static final int GPU_UNITS_RESERVED_DEFAULT = 0; + public static final int GPU_UNITS_RESERVED_MIN = 0; + // Amount of load per core a host can have before the perceived // number of idle cores is modified to reflect load conditions // on the host. @@ -69,13 +71,13 @@ public interface Dispatcher { // The default amount of gpu memory reserved for a frame if no gpu memory // reservation settings are specified - public static final long GPU_RESERVED_DEFAULT = 0; + public static final long MEM_GPU_RESERVED_DEFAULT = 0; // The minimum amount of gpu memory that can be assigned to a frame. - public static final long GPU_RESERVED_MIN = 0; + public static final long MEM_GPU_RESERVED_MIN = 0; // The maximum amount of gpu memory that can be assigned to a frame. 
- public static final long GPU_RESERVED_MAX = CueUtil.GB4; + public static final long MEM_GPU_RESERVED_MAX = CueUtil.GB * 1024; // Return value for cleared frame public static final int EXIT_STATUS_FRAME_CLEARED = 299; @@ -105,13 +107,8 @@ public interface Dispatcher { // without being penalized for it. public static final long VIRTUAL_MEM_THRESHHOLD = CueUtil.GB2; - // The amount of swap that must be used before a host can go - // into kill mode. - public static final long KILL_MODE_SWAP_THRESHOLD = CueUtil.MB128; - - // When the amount of free memory drops below this point, the - // host can go into kill mode. - public static final long KILL_MODE_MEM_THRESHOLD = CueUtil.MB512; + // How long to keep track of a frame kill request + public static final int FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES = 3; // A higher number gets more deep booking but less spread on the cue. public static final int DEFAULT_MAX_FRAMES_PER_PASS = 4; @@ -129,6 +126,10 @@ public interface Dispatcher { // The default operating system assigned to host that don't report one. public static final String OS_DEFAULT = "rhel40"; + // The default minimum memory increase for when jobs fail due to not enough + // memory + public static final long MINIMUM_MEMORY_INCREASE = CueUtil.GB2; + /** * Dispatch a host to the facility. 
* diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java index fe2482720..c405a9e31 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/FrameCompleteHandler.java @@ -24,12 +24,16 @@ import java.util.Random; import java.util.concurrent.atomic.AtomicLong; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.dao.EmptyResultDataAccessException; import com.imageworks.spcue.DispatchFrame; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.DispatchJob; +import com.imageworks.spcue.FrameDetail; import com.imageworks.spcue.JobDetail; import com.imageworks.spcue.LayerDetail; import com.imageworks.spcue.LayerInterface; @@ -37,6 +41,7 @@ import com.imageworks.spcue.VirtualProc; import com.imageworks.spcue.dispatcher.commands.DispatchBookHost; import com.imageworks.spcue.dispatcher.commands.DispatchNextFrame; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; import com.imageworks.spcue.grpc.host.LockState; import com.imageworks.spcue.grpc.job.FrameExitStatus; import com.imageworks.spcue.grpc.job.FrameState; @@ -50,13 +55,19 @@ import com.imageworks.spcue.util.CueExceptionUtil; import com.imageworks.spcue.util.CueUtil; +import com.imageworks.spcue.dao.WhiteboardDao; +import com.imageworks.spcue.dao.ShowDao; +import com.imageworks.spcue.dao.ServiceDao; +import com.imageworks.spcue.grpc.service.Service; +import com.imageworks.spcue.grpc.service.ServiceOverride; + /** * The FrameCompleteHandler encapsulates all logic necessary for processing * FrameComplete reports from RQD. 
*/ public class FrameCompleteHandler { - private static final Logger logger = Logger.getLogger(FrameCompleteHandler.class); + private static final Logger logger = LogManager.getLogger(FrameCompleteHandler.class); private static final Random randomNumber = new Random(); @@ -72,6 +83,10 @@ public class FrameCompleteHandler { private DispatchSupport dispatchSupport; private JmsMover jsmMover; + private WhiteboardDao whiteboardDao; + private ServiceDao serviceDao; + private ShowDao showDao; + /* * The last time a proc was unbooked for subscription or job balancing. * Since there are so many more dispatch threads than booking threads, the @@ -92,6 +107,25 @@ public class FrameCompleteHandler { */ private boolean shutdown = false; + /** + * Whether or not to satisfy dependents (*_ON_FRAME and *_ON_LAYER) only on Frame success + */ + private boolean satisfyDependOnlyOnFrameSuccess; + + public boolean getSatisfyDependOnlyOnFrameSuccess() { + return satisfyDependOnlyOnFrameSuccess; + } + + public void setSatisfyDependOnlyOnFrameSuccess(boolean satisfyDependOnlyOnFrameSuccess) { + this.satisfyDependOnlyOnFrameSuccess = satisfyDependOnlyOnFrameSuccess; + } + + @Autowired + public FrameCompleteHandler(Environment env) { + satisfyDependOnlyOnFrameSuccess = env.getProperty( + "depend.satisfy_only_on_frame_success", Boolean.class, true); + } + /** * Handle the given FrameCompleteReport from RQD. * @@ -110,48 +144,35 @@ public void handleFrameCompleteReport(final FrameCompleteReport report) { } try { - - final VirtualProc proc; - - try { - - proc = hostManager.getVirtualProc( - report.getFrame().getResourceId()); - } - catch (EmptyResultDataAccessException e) { - /* - * Do not propagate this exception to RQD. This - * usually means the cue lost connectivity to - * the host and cleared out the record of the proc. - * If this is propagated back to RQD, RQD will - * keep retrying the operation forever. 
- */ - logger.info("failed to acquire data needed to " + - "process completed frame: " + - report.getFrame().getFrameName() + " in job " + - report.getFrame().getJobName() + "," + e); - return; - } - + final VirtualProc proc = hostManager.getVirtualProc(report.getFrame().getResourceId()); final DispatchJob job = jobManager.getDispatchJob(proc.getJobId()); final LayerDetail layer = jobManager.getLayerDetail(report.getFrame().getLayerId()); + final FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); final DispatchFrame frame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); final FrameState newFrameState = determineFrameState(job, layer, frame, report); + final String key = proc.getJobId() + "_" + report.getFrame().getLayerId() + + "_" + report.getFrame().getFrameId(); if (dispatchSupport.stopFrame(frame, newFrameState, report.getExitStatus(), report.getFrame().getMaxRss())) { - dispatchQueue.execute(new Runnable() { - @Override - public void run() { - try { - handlePostFrameCompleteOperations(proc, report, job, frame, - newFrameState); - } catch (Exception e) { - logger.warn("Exception during handlePostFrameCompleteOperations " + - "in handleFrameCompleteReport" + CueExceptionUtil.getStackTrace(e)); + if (dispatcher.isTestMode()) { + // Database modifications on a threadpool cannot be captured by the test thread + handlePostFrameCompleteOperations(proc, report, job, frame, + newFrameState, frameDetail); + } else { + dispatchQueue.execute(new KeyRunnable(key) { + @Override + public void run() { + try { + handlePostFrameCompleteOperations(proc, report, job, frame, + newFrameState, frameDetail); + } catch (Exception e) { + logger.warn("Exception during handlePostFrameCompleteOperations " + + "in handleFrameCompleteReport" + CueExceptionUtil.getStackTrace(e)); + } } - } - }); + }); + } } else { /* @@ -161,7 +182,7 @@ public void run() { * properties. 
*/ if (redirectManager.hasRedirect(proc)) { - dispatchQueue.execute(new Runnable() { + dispatchQueue.execute(new KeyRunnable(key) { @Override public void run() { try { @@ -174,7 +195,7 @@ public void run() { }); } else { - dispatchQueue.execute(new Runnable() { + dispatchQueue.execute(new KeyRunnable(key) { @Override public void run() { try { @@ -188,6 +209,19 @@ public void run() { } } } + catch (EmptyResultDataAccessException e) { + /* + * Do not propagate this exception to RQD. This + * usually means the cue lost connectivity to + * the host and cleared out the record of the proc. + * If this is propagated back to RQD, RQD will + * keep retrying the operation forever. + */ + logger.info("failed to acquire data needed to " + + "process completed frame: " + + report.getFrame().getFrameName() + " in job " + + report.getFrame().getJobName() + "," + e); + } catch (Exception e) { /* @@ -225,7 +259,7 @@ public void run() { */ public void handlePostFrameCompleteOperations(VirtualProc proc, FrameCompleteReport report, DispatchJob job, DispatchFrame frame, - FrameState newFrameState) { + FrameState newFrameState, FrameDetail frameDetail) { try { /* @@ -235,21 +269,28 @@ public void handlePostFrameCompleteOperations(VirtualProc proc, dispatchSupport.updateUsageCounters(frame, report.getExitStatus()); - if (newFrameState.equals(FrameState.SUCCEEDED)) { + boolean isLayerComplete = false; + + if (newFrameState.equals(FrameState.SUCCEEDED) + || (!satisfyDependOnlyOnFrameSuccess + && newFrameState.equals(FrameState.EATEN))) { jobManagerSupport.satisfyWhatDependsOn(frame); - if (jobManager.isLayerComplete(frame)) { + isLayerComplete = jobManager.isLayerComplete(frame); + if (isLayerComplete) { jobManagerSupport.satisfyWhatDependsOn((LayerInterface) frame); - } else { - /* - * If the layer meets some specific criteria then try to - * update the minimum memory and tags so it can run on a - * wider variety of cores, namely older hardware. 
- */ - jobManager.optimizeLayer(frame, report.getFrame().getNumCores(), - report.getFrame().getMaxRss(), report.getRunTime()); } } + if (newFrameState.equals(FrameState.SUCCEEDED) && !isLayerComplete) { + /* + * If the layer meets some specific criteria then try to + * update the minimum memory and tags so it can run on a + * wider variety of cores, namely older hardware. + */ + jobManager.optimizeLayer(frame, report.getFrame().getNumCores(), + report.getFrame().getMaxRss(), report.getRunTime()); + } + /* * The final frame can either be Succeeded or Eaten. If you only * check if the frame is Succeeded before doing an isJobComplete @@ -268,14 +309,46 @@ public void handlePostFrameCompleteOperations(VirtualProc proc, /* * An exit status of 33 indicates that the frame was killed by the * application due to a memory issue and should be retried. In this - * case, disable the optimizer and raise the memory by 2GB. + * case, disable the optimizer and raise the memory by what is + * specified in the show's service override, service or 2GB. */ if (report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE - || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE) { + || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || frameDetail.exitStatus == Dispatcher.EXIT_STATUS_MEMORY_FAILURE) { + long increase = CueUtil.GB2; + + // since there can be multiple services, just going for the + // first service (primary) + String serviceName = ""; + try { + serviceName = frame.services.split(",")[0]; + ServiceOverride showService = whiteboardDao.getServiceOverride( + showDao.findShowDetail(frame.show), serviceName); + // increase override is stored in Kb format so convert to Mb + // for easier reading. 
Note: Kb->Mb conversion uses 1024 blocks + increase = showService.getData().getMinMemoryIncrease(); + logger.info("Using " + serviceName + " service show " + + "override for memory increase: " + + Math.floor(increase / 1024) + "Mb."); + } + catch (NullPointerException e) { + logger.info("Frame has no associated services"); + } + catch (EmptyResultDataAccessException e) { + logger.info(frame.show + " has no service override for " + + serviceName + "."); + Service service = whiteboardDao.findService(serviceName); + increase = service.getMinMemoryIncrease(); + logger.info("Using service default for mem increase: " + + Math.floor(increase / 1024) + "Mb."); + } + unbookProc = true; jobManager.enableMemoryOptimizer(frame, false); jobManager.increaseLayerMemoryRequirement(frame, - proc.memoryReserved + CueUtil.GB2); + proc.memoryReserved + increase); + logger.info("Increased mem usage to: " + + (proc.memoryReserved + increase)); } /* @@ -291,17 +364,17 @@ public void handlePostFrameCompleteOperations(VirtualProc proc, } /* - * An exit status of NO_RETRY (256) indicates that the frame could + * An exit status of FAILED_LAUNCH (256) indicates that the frame could * not be launched due to some unforeseen unrecoverable error that * is not checked when the launch command is given. The most common * cause of this is when the job log directory is removed before the * job is complete. * - * Frames that return a 256 are not automatically retried. 
+ * Frames that return a 256 are put Frame back into WAITING status */ - else if (report.getExitStatus() == FrameExitStatus.NO_RETRY_VALUE) { - logger.info("unbooking " + proc + " frame status was no-retry."); + else if (report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE) { + logger.info("unbooking " + proc + " frame status was failed frame launch."); unbookProc = true; } @@ -471,7 +544,6 @@ else if (report.getHost().getNimbyLocked()) { dispatchSupport.unbookProc(proc, "frame state was " + newFrameState.toString()); } - } catch (Exception e) { /* * At this point, the proc has no place to go. Since we've run into @@ -514,7 +586,8 @@ else if (report.getHost().getNimbyLocked()) { * @param report * @return */ - public static final FrameState determineFrameState(DispatchJob job, LayerDetail layer, DispatchFrame frame, FrameCompleteReport report) { + public static final FrameState determineFrameState(DispatchJob job, LayerDetail layer, + DispatchFrame frame, FrameCompleteReport report) { if (EnumSet.of(FrameState.WAITING, FrameState.EATEN).contains( frame.state)) { @@ -537,17 +610,25 @@ else if (frame.state.equals(FrameState.DEAD)) { || (job.maxRetries != 0 && report.getExitSignal() == 119)) { report = FrameCompleteReport.newBuilder(report).setExitStatus(FrameExitStatus.SKIP_RETRY_VALUE).build(); newState = FrameState.WAITING; + // exemption code 256 + } else if ((report.getExitStatus() == FrameExitStatus.FAILED_LAUNCH_VALUE || + report.getExitSignal() == FrameExitStatus.FAILED_LAUNCH_VALUE) && + (frame.retries < job.maxRetries)) { + report = FrameCompleteReport.newBuilder(report).setExitStatus(report.getExitStatus()).build(); + newState = FrameState.WAITING; } else if (job.autoEat) { newState = FrameState.EATEN; // ETC Time out and LLU timeout - } else if (layer.timeout_llu != 0 && report.getFrame().getLluTime() != 0 && lastUpdate > (layer.timeout_llu -1)) { + } else if (layer.timeout_llu != 0 && report.getFrame().getLluTime() != 0 + && lastUpdate > 
(layer.timeout_llu -1)) { newState = FrameState.DEAD; } else if (layer.timeout != 0 && report.getRunTime() > layer.timeout * 60) { newState = FrameState.DEAD; } else if (report.getRunTime() > Dispatcher.FRAME_TIME_NO_RETRY) { newState = FrameState.DEAD; } else if (frame.retries >= job.maxRetries) { - if (!(report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE)) + if (!(report.getExitStatus() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE + || report.getExitSignal() == Dispatcher.EXIT_STATUS_MEMORY_FAILURE)) newState = FrameState.DEAD; } @@ -653,5 +734,22 @@ public JmsMover getJmsMover() { public void setJmsMover(JmsMover jsmMover) { this.jsmMover = jsmMover; } + + public WhiteboardDao getWhiteboardDao() { return whiteboardDao; } + + public void setWhiteboardDao(WhiteboardDao whiteboardDao) { + this.whiteboardDao = whiteboardDao; } + + public ServiceDao getServiceDao() { return serviceDao; } + + public void setServiceDao(ServiceDao serviceDao) { + this.serviceDao = serviceDao; } + + public ShowDao getShowDao() { return showDao; } + + public void setShowDao(ShowDao showDao) { + this.showDao = showDao; } + } + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java new file mode 100644 index 000000000..5c2efabc4 --- /dev/null +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HealthyThreadPool.java @@ -0,0 +1,236 @@ +package com.imageworks.spcue.dispatcher; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; +import java.util.Date; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.LinkedBlockingQueue; + +import com.google.common.cache.Cache; +import 
com.google.common.cache.CacheBuilder; +import com.imageworks.spcue.dispatcher.commands.DispatchBookHost; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + + +/*** + * A ThreadPoolExecutor with two additional features: + * - Handles repeated tasks by always keeping the latest version + * - With isHealthyOrShutdown, the threadpool will drain and clear resources when unhealthy + * + */ +public class HealthyThreadPool extends ThreadPoolExecutor { + // The service need s to be unhealthy for this period of time to report + private static final Logger logger = LogManager.getLogger("HEALTH"); + // Threshold to consider healthy or unhealthy + private final int healthThreshold; + private final int poolSize; + private final int minUnhealthyPeriodMin; + private final QueueRejectCounter rejectCounter = new QueueRejectCounter(); + private final Cache taskCache; + private final String name; + private Date lastCheck = new Date(); + private boolean wasHealthy = true; + protected final AtomicBoolean isShutdown = new AtomicBoolean(false); + private final int baseSleepTimeMillis; + + /** + * Start a thread pool + * @param name For logging purposes + * @param healthThreshold Percentage that should be available to consider healthy + * @param minUnhealthyPeriodMin Period in min to consider a queue unhealthy + * @param poolSize how many jobs can be queued + * @param threadsMinimum Minimum number of threads + * @param threadsMaximum Maximum number of threads to grow to + */ + public HealthyThreadPool(String name, + int healthThreshold, + int minUnhealthyPeriodMin, + int poolSize, + int threadsMinimum, + int threadsMaximum) { + this(name, healthThreshold, minUnhealthyPeriodMin, poolSize, + threadsMinimum, threadsMaximum, 0); + } + + /** + * Start a thread pool + * + * @param name For logging purposes + * @param healthThreshold Percentage that should be available to consider healthy + * @param 
minUnhealthyPeriodMin Period in min to consider a queue unhealthy + * @param poolSize how many jobs can be queued + * @param threadsMinimum Minimum number of threads + * @param threadsMaximum Maximum number of threads to grow to + * @param baseSleepTimeMillis Time a thread should sleep when the service is not under pressure + */ + public HealthyThreadPool(String name, + int healthThreshold, + int minUnhealthyPeriodMin, + int poolSize, + int threadsMinimum, + int threadsMaximum, + int baseSleepTimeMillis) { + super(threadsMinimum, threadsMaximum, 10, + TimeUnit.SECONDS, new LinkedBlockingQueue(poolSize)); + + logger.debug(name + ": Starting a new HealthyThreadPool"); + this.name = name; + this.healthThreshold = healthThreshold; + this.poolSize = poolSize; + this.minUnhealthyPeriodMin = minUnhealthyPeriodMin; + this.baseSleepTimeMillis = baseSleepTimeMillis; + this.setRejectedExecutionHandler(rejectCounter); + + this.taskCache = CacheBuilder.newBuilder() + .expireAfterWrite(3, TimeUnit.MINUTES) + // Invalidate entries that got executed by the threadPool and lost their reference + .weakValues() + .concurrencyLevel(threadsMaximum) + .build(); + } + + public void execute(KeyRunnable r) { + if (isShutdown.get()) { + logger.info(name + ": Task ignored, queue on hold or shutdown"); + return; + } + if (taskCache.getIfPresent(r.getKey()) == null){ + taskCache.put(r.getKey(), r); + super.execute(r); + } + } + + public long getRejectedTaskCount() { + return rejectCounter.getRejectCount(); + } + + /** + * Monitor if the queue is unhealthy for MIN_UNHEALTHY_PERIOD_MIN + * + * If unhealthy, the service will start the shutdown process and the + * caller is responsible for starting a new instance after the lock on + * awaitTermination is released. 
+ */ + protected boolean isHealthyOrShutdown() throws InterruptedException { + Date now = new Date(); + if (diffInMinutes(lastCheck, now) > minUnhealthyPeriodMin){ + this.wasHealthy = healthCheck(); + this.lastCheck = now; + } + + if(healthCheck() || wasHealthy) { + logger.debug(name + ": healthy (" + + "Remaining Capacity: " + this.getQueue().remainingCapacity() + + ", Running: " + this.getActiveCount() + + ", Total Executed: " + this.getCompletedTaskCount() + + ")"); + return true; + } + else if (isShutdown.get()) { + logger.warn("Queue shutting down"); + return false; + } + else { + logger.warn(name + ": unhealthy, starting shutdown)"); + threadDump(); + + isShutdown.set(true); + super.shutdownNow(); + logger.warn(name + ": Awaiting unhealthy queue termination"); + if (super.awaitTermination(1, TimeUnit.MINUTES)){ + logger.info(name + ": Terminated successfully"); + } + else { + logger.warn(name + ": Failed to terminate"); + } + // Threads will eventually terminate, proceed + taskCache.invalidateAll(); + return false; + } + } + + private void threadDump() { + ThreadMXBean mx = ManagementFactory.getThreadMXBean(); + for(ThreadInfo info : mx.dumpAllThreads(true, true)){ + logger.debug(info.toString()); + } + } + + private static long diffInMinutes(Date dateStart, Date dateEnd) { + return TimeUnit.MINUTES.convert( + dateEnd.getTime() - dateStart.getTime(), + TimeUnit.MILLISECONDS + ); + } + + /** + * Lowers the sleep time as the queue grows. 
+ * + * @return + */ + public int sleepTime() { + if (!isShutdown.get()) { + int sleep = (int) (baseSleepTimeMillis - (((this.getQueue().size () / + (float) this.poolSize) * baseSleepTimeMillis)) * 2); + if (sleep < 0) { + sleep = 0; + } + return sleep; + } else { + return 0; + } + } + + protected void beforeExecute(Thread t, Runnable r) { + super.beforeExecute(t, r); + if (isShutdown()) { + this.remove(r); + } else { + if (baseSleepTimeMillis > 0) { + try { + Thread.sleep(sleepTime()); + } catch (InterruptedException e) { + logger.info(name + ": booking queue was interrupted."); + } + } + } + } + + protected void afterExecute(Runnable r, Throwable t) { + super.afterExecute(r, t); + + // Invalidate cache to avoid having to wait for GC to mark processed entries collectible + KeyRunnable h = (KeyRunnable)r; + taskCache.invalidate(h.getKey()); + } + + protected boolean healthCheck() { + return (this.getQueue().remainingCapacity() > 0) || + (getRejectedTaskCount() < this.poolSize / healthThreshold); + } + + public void shutdown() { + if (!isShutdown.getAndSet(true)) { + logger.info("Shutting down thread pool " + name + ", currently " + + getActiveCount() + " active threads."); + final long startTime = System.currentTimeMillis(); + while (this.getQueue().size() != 0 && this.getActiveCount() != 0) { + try { + if (System.currentTimeMillis() - startTime > 10000) { + throw new InterruptedException(name + + " thread pool failed to shutdown properly"); + } + Thread.sleep(250); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + break; + } + } + } + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java index 815689bb4..997e32fd4 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportHandler.java @@ -21,17 +21,25 @@ import 
java.sql.Timestamp; import java.util.ArrayList; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.ExecutionException; import java.util.concurrent.ThreadPoolExecutor; - -import org.apache.log4j.Logger; +import java.util.concurrent.TimeUnit; + +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.imageworks.spcue.JobInterface; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.core.task.TaskRejectedException; import org.springframework.dao.DataAccessException; import org.springframework.dao.EmptyResultDataAccessException; +import com.imageworks.spcue.CommentDetail; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.FrameInterface; import com.imageworks.spcue.JobEntity; @@ -46,6 +54,7 @@ import com.imageworks.spcue.dispatcher.commands.DispatchBookHostLocal; import com.imageworks.spcue.dispatcher.commands.DispatchHandleHostReport; import com.imageworks.spcue.dispatcher.commands.DispatchRqdKillFrame; +import com.imageworks.spcue.dispatcher.commands.DispatchRqdKillFrameMemory; import com.imageworks.spcue.grpc.host.HardwareState; import com.imageworks.spcue.grpc.host.LockState; import com.imageworks.spcue.grpc.report.BootReport; @@ -56,15 +65,17 @@ import com.imageworks.spcue.rqd.RqdClient; import com.imageworks.spcue.rqd.RqdClientException; import com.imageworks.spcue.service.BookingManager; +import com.imageworks.spcue.service.CommentManager; import com.imageworks.spcue.service.HostManager; import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.service.JobManagerSupport; import com.imageworks.spcue.util.CueExceptionUtil; import com.imageworks.spcue.util.CueUtil; +import static com.imageworks.spcue.dispatcher.Dispatcher.*; + public class 
HostReportHandler { - private static final Logger logger = Logger.getLogger(HostReportHandler.class); + private static final Logger logger = LogManager.getLogger(HostReportHandler.class); private BookingManager bookingManager; private HostManager hostManager; @@ -76,9 +87,23 @@ public class HostReportHandler { private Dispatcher localDispatcher; private RqdClient rqdClient; private JobManager jobManager; - private JobManagerSupport jobManagerSupport; private JobDao jobDao; private LayerDao layerDao; + @Autowired + private Environment env; + @Autowired + private CommentManager commentManager; + // Comment constants + private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = "Host set to REPAIR for not having enough storage " + + "space on the temporary directory (mcp)"; + private static final String CUEBOT_COMMENT_USER = "cuebot"; + + // A cache to store kill requests and count the number of occurrences. + // The cache expires after write to avoid growing unbounded. If a request for a host-frame doesn't appear + // for a period of time, the entry will be removed. + Cache killRequestCounterCache = CacheBuilder.newBuilder() + .expireAfterWrite(FRAME_KILL_CACHE_EXPIRE_AFTER_WRITE_MINUTES, TimeUnit.MINUTES) + .build(); /** * Boolean to toggle if this class is accepting data or not. 
@@ -130,23 +155,10 @@ public void queueHostReport(HostReport report) { reportQueue.execute(new DispatchHandleHostReport(report, this)); } - public void handleHostReport(HostReport report, boolean isBoot) { long startTime = System.currentTimeMillis(); try { - long totalGpu; - if (report.getHost().getAttributesMap().containsKey("totalGpu")) - totalGpu = Integer.parseInt(report.getHost().getAttributesMap().get("totalGpu")); - else - totalGpu = 0; - - long freeGpu; - if (report.getHost().getAttributesMap().containsKey("freeGpu")) - freeGpu = Integer.parseInt(report.getHost().getAttributesMap().get("freeGpu")); - else - freeGpu = 0; - long swapOut = 0; if (report.getHost().getAttributesMap().containsKey("swapout")) { swapOut = Integer.parseInt(report.getHost().getAttributesMap().get("swapout")); @@ -163,11 +175,11 @@ public void handleHostReport(HostReport report, boolean isBoot) { rhost.getTotalMem(), rhost.getFreeMem(), rhost.getTotalSwap(), rhost.getFreeSwap(), rhost.getTotalMcp(), rhost.getFreeMcp(), - totalGpu, freeGpu, + rhost.getTotalGpuMem(), rhost.getFreeGpuMem(), rhost.getLoad(), new Timestamp(rhost.getBootTime() * 1000l), rhost.getAttributesMap().get("SP_OS")); - changeHardwareState(host, report.getHost().getState()); + changeHardwareState(host, report.getHost().getState(), isBoot, report.getHost().getFreeMcp()); changeNimbyState(host, report.getHost()); /** @@ -200,15 +212,9 @@ public void handleHostReport(HostReport report, boolean isBoot) { /* * Updates memory usage for the proc, frames, - * jobs, and layers. + * jobs, and layers. And LLU time for the frames. */ - updateMemoryUsage(report.getFramesList()); - - /* - * Updates usage for the proc, frames, - * jobs, and layers. - */ - updateFrameUsage(report.getFramesList()); + updateMemoryUsageAndLluTime(report.getFramesList()); /* * kill frames that have over run. 
@@ -216,9 +222,9 @@ public void handleHostReport(HostReport report, boolean isBoot) { killTimedOutFrames(report); /* - * Increase/decreased reserved memory. + * Prevent OOM (Out-Of-Memory) issues on the host and manage frame reserved memory */ - handleMemoryReservations(host, report); + handleMemoryUsage(host, report); /* * The checks are done in order of least CPU intensive to @@ -238,7 +244,14 @@ public void handleHostReport(HostReport report, boolean isBoot) { } } - if (host.idleCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { + // The minimum amount of free space in the temporary directory to book a host + Long minBookableFreeTempDir = env.getRequiredProperty("dispatcher.min_bookable_free_temp_dir_kb", Long.class); + + if (minBookableFreeTempDir != -1 && report.getHost().getFreeMcp() < minBookableFreeTempDir) { + msg = String.format("%s doens't have enough free space in the temporary directory (mcp), %dMB needs %dMB", + host.name, (report.getHost().getFreeMcp()/1024), (minBookableFreeTempDir/1024)); + } + else if (host.idleCores < Dispatcher.CORE_POINTS_RESERVED_MIN) { msg = String.format("%s doesn't have enough idle cores, %d needs %d", host.name, host.idleCores, Dispatcher.CORE_POINTS_RESERVED_MIN); } @@ -248,7 +261,7 @@ else if (host.idleMemory < Dispatcher.MEM_RESERVED_MIN) { } else if (report.getHost().getFreeMem() < CueUtil.MB512) { msg = String.format("%s doens't have enough free system mem, %d needs %d", - host.name, report.getHost().getFreeMem(), Dispatcher.MEM_RESERVED_MIN); + host.name, report.getHost().getFreeMem(), Dispatcher.MEM_RESERVED_MIN); } else if(!host.hardwareState.equals(HardwareState.UP)) { msg = host + " is not in the Up state."; @@ -289,16 +302,6 @@ else if (!dispatchSupport.isCueBookable(host)) { return; } - /* - * Check for NIMBY blackout time. - */ - /* - if (bookingManager.isBlackOutTime(host)) { - logger.trace(host + " is blacked out."); - return ; - } - */ - /* * Check if the host prefers a show. 
If it does , dispatch * to that show first. @@ -332,46 +335,93 @@ else if (!dispatchSupport.isCueBookable(host)) { * * If a host pings in with a different hardware state than what * is currently in the DB, the state is updated. If the hardware - * state is Rebooting RebootWhenIdle, then state can only be + * state is Rebooting or RebootWhenIdle, then state can only be * updated with a boot report. If the state is Repair, then state is * never updated via RQD. * + * + * Prevent cue frames from booking on hosts with full temporary directories. + * + * Change host state to REPAIR or UP according the amount of free space + * in the temporary directory: + * - Set the host state to REPAIR, when the amount of free space in the + * temporary directory is less than the minimum required. Add a comment with + * subject: SUBJECT_COMMENT_FULL_TEMP_DIR + * - Set the host state to UP, when the amount of free space in the temporary directory + * is greater or equals to the minimum required and the host has a comment with + * subject: SUBJECT_COMMENT_FULL_TEMP_DIR + * * @param host * @param reportState + * @param isBoot + * @param freeTempDir */ - private void changeHardwareState(DispatchHost host, - HardwareState reportState) { + private void changeHardwareState(DispatchHost host, HardwareState reportState, boolean isBoot, long freeTempDir) { - /* - * If the states are the same there is no reason - * to do this update. 
- */ - if (host.hardwareState.equals(reportState)) { - return; + // The minimum amount of free space in the temporary directory to book a host + Long minBookableFreeTempDir = env.getRequiredProperty("dispatcher.min_bookable_free_temp_dir_kb", Long.class); + + // Prevent cue frames from booking on hosts with full temporary directories + if (minBookableFreeTempDir != -1) { + if (host.hardwareState == HardwareState.UP && freeTempDir < minBookableFreeTempDir) { + + // Insert a comment indicating that the Host status = Repair with reason = Full temporary directory + CommentDetail c = new CommentDetail(); + c.subject = SUBJECT_COMMENT_FULL_TEMP_DIR; + c.user = CUEBOT_COMMENT_USER; + c.timestamp = null; + c.message = "Host " + host.getName() + " marked as REPAIR. The current amount of free space in the " + + "temporary directory (mcp) is " + (freeTempDir/1024) + "MB. It must have at least " + + (minBookableFreeTempDir/1024) + "MB of free space in temporary directory"; + commentManager.addComment(host, c); + + // Set the host state to REPAIR + hostManager.setHostState(host, HardwareState.REPAIR); + host.hardwareState = HardwareState.REPAIR; + + return; + } else if (host.hardwareState == HardwareState.REPAIR && freeTempDir >= minBookableFreeTempDir) { + // Check if the host with REPAIR status has comments with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and + // user=CUEBOT_COMMENT_USER and delete the comments, if they exists + boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + + if (commentsDeleted) { + // Set the host state to UP + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + return; + } + } } - /* - * Do not change the state of the host if its in a - * repair state. Removing the repair state must - * be done manually. 
- */ - if (host.hardwareState.equals(HardwareState.REPAIR)) { + // If the states are the same there is no reason to do this update. + if (host.hardwareState.equals(reportState)) { return; } - /* - * Hosts in these states always change to Up. - */ - if (reportState.equals(HardwareState.UP) && EnumSet.of(HardwareState.DOWN, - HardwareState.REBOOTING, - HardwareState.REBOOT_WHEN_IDLE).contains(host.hardwareState)) { - hostManager.setHostState(host, HardwareState.UP); - } - else { - hostManager.setHostState(host, reportState); - } + switch (host.hardwareState) { + case DOWN: + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + break; + case REBOOTING: + case REBOOT_WHEN_IDLE: + // Rebooting hosts only change to UP when processing a boot report + if (isBoot) { + hostManager.setHostState(host, HardwareState.UP); + host.hardwareState = HardwareState.UP; + } + break; + case REPAIR: + // Do not change the state of the host if its in a repair state. + break; + default: + hostManager.setHostState(host, reportState); + host.hardwareState = reportState; + break; - host.hardwareState = reportState; + } } /** @@ -402,7 +452,7 @@ private void changeNimbyState(DispatchHost host, RenderHost rh) { * locked if all cores are locked. * * @param host DispatchHost - * @param renderHost RenderHost + * @param coreInfo CoreDetail */ private void changeLockState(DispatchHost host, CoreDetail coreInfo) { if (host.lockState == LockState.LOCKED) { @@ -417,103 +467,279 @@ private void changeLockState(DispatchHost host, CoreDetail coreInfo) { } /** - * Handle memory reservations for the given host. This will re-balance memory - * reservations on the machine and kill and frames that are out of control. - * + * Prevent host from entering an OOM state where oom-killer might start killing important OS processes. 
+ * The kill logic will kick in one of the following conditions is met: + * - Host has less than OOM_MEMORY_LEFT_THRESHOLD_PERCENT memory available + * - A frame is taking more than OOM_FRAME_OVERBOARD_PERCENT of what it had reserved + * For frames that are using more than they had reserved but not above the threshold, negotiate expanding + * the reservations with other frames on the same host * @param host * @param report */ - private void handleMemoryReservations(final DispatchHost host, final HostReport report) { + private void handleMemoryUsage(final DispatchHost host, final HostReport report) { + // Don't keep memory balances on nimby hosts + if (host.isNimby) { + return; + } + + final double OOM_MAX_SAFE_USED_MEMORY_THRESHOLD = + env.getRequiredProperty("dispatcher.oom_max_safe_used_memory_threshold", Double.class); + final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = + env.getRequiredProperty("dispatcher.oom_frame_overboard_allowed_threshold", Double.class); + RenderHost renderHost = report.getHost(); + List runningFrames = report.getFramesList(); + + boolean memoryWarning = renderHost.getTotalMem() > 0 && + ((double)renderHost.getFreeMem()/renderHost.getTotalMem() < + (1.0 - OOM_MAX_SAFE_USED_MEMORY_THRESHOLD)); + + if (memoryWarning) { + long memoryAvailable = renderHost.getFreeMem(); + long minSafeMemoryAvailable = (long)(renderHost.getTotalMem() * (1.0 - OOM_MAX_SAFE_USED_MEMORY_THRESHOLD)); + // Only allow killing up to 10 frames at a time + int killAttemptsRemaining = 10; + VirtualProc killedProc = null; + do { + killedProc = killWorstMemoryOffender(host); + killAttemptsRemaining -= 1; + if (killedProc != null) { + memoryAvailable = memoryAvailable + killedProc.memoryUsed; + } + } while (killAttemptsRemaining > 0 && + memoryAvailable < minSafeMemoryAvailable && + killedProc != null); + } else { + // When no mass cleaning was required, check for frames going overboard + // if frames didn't go overboard, manage its reservations trying to increase + // 
them accordingly + for (final RunningFrameInfo frame : runningFrames) { + if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD > 0 && isFrameOverboard(frame)) { + if (!killFrameOverusingMemory(frame, host.getName())) { + logger.warn("Frame " + frame.getJobName() + "." + frame.getFrameName() + + " is overboard but could not be killed"); + } + } else { + handleMemoryReservations(frame); + } + } + } + } - // TODO: GPU: Need to keep frames from growing into space reserved for GPU frames - // However all this is done in the database without a chance to edit the values here + public enum KillCause { + FrameOverboard("This frame is using more memory than it had reserved."), + HostUnderOom("Frame killed by host under OOM pressure"), + FrameTimedOut("Frame timed out"), + FrameLluTimedOut("Frame LLU timed out"), + FrameVerificationFailure("Frame failed to be verified on the database"); + private final String message; - /* - * Check to see if we enable kill mode to free up memory. - */ - boolean killMode = hostManager.isSwapping(host); + private KillCause(String message) { + this.message = message; + } + @Override + public String toString() { + return message; + } + } - for (final RunningFrameInfo f: report.getFramesList()) { + private boolean killFrameOverusingMemory(RunningFrameInfo frame, String hostname) { + try { + VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); - VirtualProc proc = null; + // Don't mess with localDispatch procs + if (proc.isLocalDispatch) { + return false; + } + + logger.info("Killing frame on " + frame.getJobName() + "." 
+ frame.getFrameName() + + ", using too much memory."); + return killProcForMemory(proc, hostname, KillCause.FrameOverboard); + } catch (EmptyResultDataAccessException e) { + return false; + } + } + + private boolean getKillClearance(String hostname, String frameId) { + String cacheKey = hostname + "-" + frameId; + final int FRAME_KILL_RETRY_LIMIT = + env.getRequiredProperty("dispatcher.frame_kill_retry_limit", Integer.class); + + // Cache frame+host receiving a killRequest and count how many times the request is being retried + // meaning rqd is probably failing at attempting to kill the related proc + long cachedCount; + try { + cachedCount = 1 + killRequestCounterCache.get(cacheKey, () -> 0L); + } catch (ExecutionException e) { + return false; + } + killRequestCounterCache.put(cacheKey, cachedCount); + if (cachedCount > FRAME_KILL_RETRY_LIMIT) { + FrameInterface frame = jobManager.getFrame(frameId); + JobInterface job = jobManager.getJob(frame.getJobId()); + + logger.warn("KillRequest blocked for " + job.getName() + "." + frame.getName() + + " blocked for host " + hostname + ". 
The kill retry limit has been reached."); + return false; + } + return true; + } + + private boolean killProcForMemory(VirtualProc proc, String hostname, KillCause killCause) { + if (!getKillClearance(hostname, proc.frameId)) { + return false; + } + + FrameInterface frame = jobManager.getFrame(proc.frameId); + if (dispatcher.isTestMode()) { + // Different threads don't share the same database state on the test environment + (new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), rqdClient, + dispatchSupport, dispatcher.isTestMode())).run(); + } else { try { - proc = hostManager.getVirtualProc(f.getResourceId()); + killQueue.execute(new DispatchRqdKillFrameMemory(hostname, frame, killCause.toString(), rqdClient, + dispatchSupport, dispatcher.isTestMode())); + } catch (TaskRejectedException e) { + logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); + return false; + } + } + DispatchSupport.killedOffenderProcs.incrementAndGet(); + return true; + } - // TODO: handle memory management for local dispatches - // Skip local dispatches for now. 
- if (proc.isLocalDispatch) { - continue; - } + private boolean killFrame(String frameId, String hostname, KillCause killCause) { + if (!getKillClearance(hostname, frameId)) { + return false; + } + if (dispatcher.isTestMode()) { + // Different threads don't share the same database state on the test environment + (new DispatchRqdKillFrame(hostname, frameId, killCause.toString(), rqdClient)).run(); + } else { + try { + killQueue.execute(new DispatchRqdKillFrame(hostname, + frameId, + killCause.toString(), + rqdClient)); + } catch (TaskRejectedException e) { + logger.warn("Unable to add a DispatchRqdKillFrame request, task rejected, " + e); + } + } + DispatchSupport.killedOffenderProcs.incrementAndGet(); + return true; + } - if (f.getRss() > host.memory) { - try{ - logger.info("Killing frame " + f.getJobName() + "/" + f.getFrameName() + ", " - + proc.getName() + " was OOM"); - try { - killQueue.execute(new DispatchRqdKillFrame(proc, "The frame required " + - CueUtil.KbToMb(f.getRss()) + "MB but the machine only has " + - CueUtil.KbToMb(host.memory), rqdClient)); - } catch (TaskRejectedException e) { - logger.warn("Unable to queue RQD kill, task rejected, " + e); - } - DispatchSupport.killedOomProcs.incrementAndGet(); - } catch (Exception e) { - logger.info("failed to kill frame on " + proc.getName() + - "," + e); - } - } + /** + * Kill proc with the worst user/reserved memory ratio. 
+ * + * @param host + * @return killed proc, or null if none could be found or failed to be killed + */ + private VirtualProc killWorstMemoryOffender(final DispatchHost host) { + try { + VirtualProc proc = hostManager.getWorstMemoryOffender(host); + logger.info("Killing frame on " + proc.getName() + ", host is under stress."); - if (dispatchSupport.increaseReservedMemory(proc, f.getRss())) { - proc.memoryReserved = f.getRss(); - logger.info("frame " + f.getFrameName() + " on job " + f.getJobName() - + " increased its reserved memory to " + - CueUtil.KbToMb(f.getRss())); - } + if (!killProcForMemory(proc, host.getName(), KillCause.HostUnderOom)) { + proc = null; + } + return proc; + } + catch (EmptyResultDataAccessException e) { + logger.error(host.name + " is under OOM and no proc is running on it."); + return null; + } + } + + /** + * Check frame memory usage comparing the amount used with the amount it had reserved + * @param frame + * @return + */ + private boolean isFrameOverboard(final RunningFrameInfo frame) { + final double OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD = + env.getRequiredProperty("dispatcher.oom_frame_overboard_allowed_threshold", Double.class); - } catch (ResourceReservationFailureException e) { + if (OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD < 0) { + return false; + } + + double rss = (double)frame.getRss(); + double maxRss = (double)frame.getMaxRss(); + final double MAX_RSS_OVERBOARD_THRESHOLD = OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD * 2; + final double RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER = 0.1; + + try { + VirtualProc proc = hostManager.getVirtualProc(frame.getResourceId()); + double reserved = (double)proc.memoryReserved; + + // Last memory report is higher than the threshold + if (isOverboard(rss, reserved, OOM_FRAME_OVERBOARD_ALLOWED_THRESHOLD)) { + return true; + } + // If rss is not overboard, handle the situation where the frame might be going overboard from + // time to time but the last report wasn't during a spike. 
For this case, consider a combination + // of rss and maxRss. maxRss > 2 * threshold and rss > 0.9 + else { + return isOverboard(maxRss, reserved, MAX_RSS_OVERBOARD_THRESHOLD) && + isOverboard(rss, reserved, -RSS_AVAILABLE_FOR_MAX_RSS_TRIGGER); + } + } catch (EmptyResultDataAccessException e) { + logger.info("HostReportHandler(isFrameOverboard): Virtual proc for frame " + + frame.getFrameName() + " on job " + frame.getJobName() + " doesn't exist on the database"); + // Not able to mark the frame overboard is it couldn't be found on the db. + // Proc accounting (verifyRunningProc) should take care of it + return false; + } + } + + private boolean isOverboard(double value, double total, double threshold) { + return value/total >= (1 + threshold); + } - long memNeeded = f.getRss() - proc.memoryReserved; + /** + * Handle memory reservations for the given frame + * + * @param frame + */ + private void handleMemoryReservations(final RunningFrameInfo frame) { + VirtualProc proc = null; + try { + proc = hostManager.getVirtualProc(frame.getResourceId()); - logger.info("frame " + f.getFrameName() + " on job " + f.getJobName() + if (proc.isLocalDispatch) { + return; + } + + if (dispatchSupport.increaseReservedMemory(proc, frame.getRss())) { + proc.memoryReserved = frame.getRss(); + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + " increased its reserved memory to " + + CueUtil.KbToMb(frame.getRss())); + } + } catch (ResourceReservationFailureException e) { + if (proc != null) { + long memNeeded = frame.getRss() - proc.memoryReserved; + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + "was unable to reserve an additional " + CueUtil.KbToMb(memNeeded) + "on proc " + proc.getName() + ", " + e); - try { if (dispatchSupport.balanceReservedMemory(proc, memNeeded)) { - proc.memoryReserved = f.getRss(); + proc.memoryReserved = frame.getRss(); logger.info("was able to balance host: " + proc.getName()); - } - else { + } 
else { logger.info("failed to balance host: " + proc.getName()); } } catch (Exception ex) { logger.warn("failed to balance host: " + proc.getName() + ", " + e); } - } catch (EmptyResultDataAccessException e) { - logger.info("HostReportHandler: frame " + f.getFrameName() + - " on job " + f.getJobName() + - " was unable be processed" + - " because the proc could not be found"); + } else { + logger.info("frame " + frame.getFrameName() + " on job " + frame.getJobName() + + "was unable to reserve an additional memory. Proc could not be found"); } - } - - if (killMode) { - VirtualProc proc; - try { - proc = hostManager.getWorstMemoryOffender(host); - } - catch (EmptyResultDataAccessException e) { - logger.info(host.name + " is swapping and no proc is running on it."); - return; - } - - logger.info("Killing frame on " + - proc.getName() + ", host is distressed."); - - DispatchSupport.killedOffenderProcs.incrementAndGet(); - jobManagerSupport.kill(proc, new Source( - "The host was dangerously low on memory and swapping.")); + } catch (EmptyResultDataAccessException e) { + logger.info("HostReportHandler: Memory reservations for frame " + frame.getFrameName() + + " on job " + frame.getJobName() + " proc could not be found"); } } @@ -523,7 +749,6 @@ private void handleMemoryReservations(final DispatchHost host, final HostReport * @param rFrames */ private void killTimedOutFrames(HostReport report) { - final Map layers = new HashMap(5); for (RunningFrameInfo frame: report.getFramesList()) { @@ -531,70 +756,38 @@ private void killTimedOutFrames(HostReport report) { LayerDetail layer = layerDao.getLayerDetail(layerId); long runtimeMinutes = ((System.currentTimeMillis() - frame.getStartTime()) / 1000l) / 60; - if (layer.timeout != 0 && runtimeMinutes > layer.timeout){ - try { - killQueue.execute(new DispatchRqdKillFrame(report.getHost().getName(), - frame.getFrameId(), - "This frame has reached it timeout.", - rqdClient)); - } catch (TaskRejectedException e) { - 
logger.warn("Unable to queue RQD kill, task rejected, " + e); - } - } - - if (layer.timeout_llu == 0){ - continue; - } + String hostname = report.getHost().getName(); - if (frame.getLluTime() == 0){ - continue; - } - - long r = System.currentTimeMillis() / 1000; - long lastUpdate = (r - frame.getLluTime()) / 60; + if (layer.timeout != 0 && runtimeMinutes > layer.timeout){ + killFrame(frame.getFrameId(), hostname, KillCause.FrameTimedOut); + } else if (layer.timeout_llu != 0 && frame.getLluTime() != 0) { + long r = System.currentTimeMillis() / 1000; + long lastUpdate = (r - frame.getLluTime()) / 60; - if (layer.timeout_llu != 0 && lastUpdate > (layer.timeout_llu -1)){ - try { - killQueue.execute(new DispatchRqdKillFrame(report.getHost().getName(), - frame.getFrameId(), - "This frame has reached it LLU timeout.", - rqdClient)); - } catch (TaskRejectedException e) { - logger.warn("Unable to queue RQD kill, task rejected, " + e); + if (layer.timeout_llu != 0 && lastUpdate > (layer.timeout_llu - 1)){ + killFrame(frame.getFrameId(), hostname, KillCause.FrameLluTimedOut); } } } } /** - * Update IO usage for the given list of frames. - * - * @param rFrames - */ - private void updateFrameUsage(List rFrames) { - - for (RunningFrameInfo rf: rFrames) { - FrameInterface frame = jobManager.getFrame(rf.getFrameId()); - dispatchSupport.updateFrameUsage(frame, rf.getLluTime()); - } - } - - /** - * Update memory usage for the given list of frames. + * Update memory usage and LLU time for the given list of frames. 
* * @param rFrames */ - private void updateMemoryUsage(List rFrames) { + private void updateMemoryUsageAndLluTime(List rFrames) { for (RunningFrameInfo rf: rFrames) { FrameInterface frame = jobManager.getFrame(rf.getFrameId()); - dispatchSupport.updateFrameMemoryUsage(frame, - rf.getRss(), rf.getMaxRss()); + dispatchSupport.updateFrameMemoryUsageAndLluTime(frame, + rf.getRss(), rf.getMaxRss(), rf.getLluTime()); - dispatchSupport.updateProcMemoryUsage(frame, - rf.getRss(), rf.getMaxRss(), rf.getVsize(), rf.getMaxVsize()); + dispatchSupport.updateProcMemoryUsage(frame, rf.getRss(), rf.getMaxRss(), + rf.getVsize(), rf.getMaxVsize(), rf.getUsedGpuMemory(), + rf.getMaxUsedGpuMemory(), rf.getChildren().toByteArray()); } updateJobMemoryUsage(rFrames); @@ -698,98 +891,59 @@ public void verifyRunningFrameInfo(HostReport report) { continue; } + if (hostManager.verifyRunningProc(runningFrame.getResourceId(), runningFrame.getFrameId())) { + runningFrames.add(runningFrame); + continue; + } - if (!hostManager.verifyRunningProc(runningFrame.getResourceId(), - runningFrame.getFrameId())) { - - /* - * The frame this proc is running is no longer - * assigned to this proc. Don't ever touch - * the frame record. If we make it here that means - * the proc has been running for over 2 min. - */ - - String msg; - VirtualProc proc = null; + /* + * The frame this proc is running is no longer + * assigned to this proc. Don't ever touch + * the frame record. If we make it here that means + * the proc has been running for over 2 min. + */ + String msg; + VirtualProc proc = null; - try { - proc = hostManager.getVirtualProc(runningFrame.getResourceId()); - msg = "Virutal proc " + proc.getProcId() + + try { + proc = hostManager.getVirtualProc(runningFrame.getResourceId()); + msg = "Virtual proc " + proc.getProcId() + "is assigned to " + proc.getFrameId() + " not " + runningFrame.getFrameId(); - } - catch (Exception e) { - /* - * This will happen if the host goes off line and then - * comes back. 
In this case, we don't touch the frame - * since it might already be running somewhere else. We - * do however kill the proc. - */ - msg = "Virtual proc did not exist."; - } - - logger.info("warning, the proc " + - runningFrame.getResourceId() + " on host " + - report.getHost().getName() + " was running for " + - (runtimeSeconds / 60.0f) + " minutes " + - runningFrame.getJobName() + "/" + runningFrame.getFrameName() + - "but the DB did not " + - "reflect this " + - msg); - - DispatchSupport.accountingErrors.incrementAndGet(); - - try { - /* - * If the proc did exist unbook it if we can't - * verify its running something. - */ - boolean rqd_kill = false; - if (proc != null) { - - /* - * Check to see if the proc is an orphan. - */ - if (hostManager.isOprhan(proc)) { - dispatchSupport.clearVirtualProcAssignement(proc); - dispatchSupport.unbookProc(proc); - rqd_kill = true; - } - } - else { - /* Proc doesn't exist so a kill won't hurt */ - rqd_kill = true; - } - - if (rqd_kill) { - try { - killQueue.execute(new DispatchRqdKillFrame(report.getHost().getName(), - runningFrame.getFrameId(), - "OpenCue could not verify this frame.", - rqdClient)); - } catch (TaskRejectedException e) { - logger.warn("Unable to queue RQD kill, task rejected, " + e); - } - } + } + catch (Exception e) { + /* + * This will happen if the host goes offline and then + * comes back. In this case, we don't touch the frame + * since it might already be running somewhere else. We + * do however kill the proc. 
+ */ + msg = "Virtual proc did not exist."; + } - } catch (RqdClientException rqde) { - logger.warn("failed to kill " + - runningFrame.getJobName() + "/" + - runningFrame.getFrameName() + - " when trying to clear a failed " + - " frame verification, " + rqde); - - } catch (Exception e) { - CueExceptionUtil.logStackTrace("failed", e); - logger.warn("failed to verify " + - runningFrame.getJobName() + "/" + - runningFrame.getFrameName() + - " was running but the frame was " + - " unable to be killed, " + e); - } + DispatchSupport.accountingErrors.incrementAndGet(); + if (proc != null && hostManager.isOprhan(proc)) { + dispatchSupport.clearVirtualProcAssignement(proc); + dispatchSupport.unbookProc(proc); + proc = null; } - else { - runningFrames.add(runningFrame); + if (proc == null) { + if (killFrame(runningFrame.getFrameId(), + report.getHost().getName(), + KillCause.FrameVerificationFailure)) { + logger.info("FrameVerificationError, the proc " + + runningFrame.getResourceId() + " on host " + + report.getHost().getName() + " was running for " + + (runtimeSeconds / 60.0f) + " minutes " + + runningFrame.getJobName() + "/" + runningFrame.getFrameName() + + "but the DB did not " + + "reflect this. 
" + + msg); + } else { + logger.warn("FrameStuckWarning: frameId=" + runningFrame.getFrameId() + + " render_node=" + report.getHost().getName() + " - " + + runningFrame.getJobName() + "/" + runningFrame.getFrameName()); + } } } } @@ -850,14 +1004,6 @@ public void setJobManager(JobManager jobManager) { this.jobManager = jobManager; } - public JobManagerSupport getJobManagerSupport() { - return jobManagerSupport; - } - - public void setJobManagerSupport(JobManagerSupport jobManagerSupport) { - this.jobManagerSupport = jobManagerSupport; - } - public JobDao getJobDao() { return jobDao; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java index 34e8d13e6..6326086ae 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/HostReportQueue.java @@ -19,47 +19,104 @@ package com.imageworks.spcue.dispatcher; +import java.lang.ref.WeakReference; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.log4j.Logger; +import com.imageworks.spcue.grpc.report.HostReport; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.imageworks.spcue.dispatcher.commands.DispatchHandleHostReport; +import com.imageworks.spcue.util.CueUtil; public class HostReportQueue extends ThreadPoolExecutor { - private static final Logger logger = Logger.getLogger(HostReportQueue.class); - - private static final int THREAD_POOL_SIZE_INITIAL = 6; - private static final int THREAD_POOL_SIZE_MAX = 8; - private static final int QUEUE_SIZE_INITIAL = 1000; - + private static final Logger logger = LogManager.getLogger(HostReportQueue.class); 
private QueueRejectCounter rejectCounter = new QueueRejectCounter(); private AtomicBoolean isShutdown = new AtomicBoolean(false); + private int queueCapacity; + + private Cache hostMap = CacheBuilder.newBuilder() + .expireAfterWrite(1, TimeUnit.HOURS) + .build(); + + /** + * Wrapper around protobuf object HostReport to add reportTime + */ + private class HostReportWrapper{ + private final HostReport hostReport; + private final WeakReference reportTaskRef; + public long taskTime = System.currentTimeMillis(); + + public HostReportWrapper(HostReport hostReport, DispatchHandleHostReport reportTask) { + this.hostReport = hostReport; + this.reportTaskRef = new WeakReference<>(reportTask); + } - public HostReportQueue() { - super(THREAD_POOL_SIZE_INITIAL, THREAD_POOL_SIZE_MAX, 10 , TimeUnit.SECONDS, - new LinkedBlockingQueue(QUEUE_SIZE_INITIAL)); - this.setRejectedExecutionHandler(rejectCounter); + public HostReport getHostReport() { + return hostReport; + } + + public DispatchHandleHostReport getReportTask() { + return reportTaskRef.get(); + } + + public long getTaskTime() { + return taskTime; + } + } + public HostReportQueue(int threadPoolSizeInitial, int threadPoolSizeMax, int queueSize) { + super(threadPoolSizeInitial, threadPoolSizeMax, 10 , TimeUnit.SECONDS, + new LinkedBlockingQueue(queueSize)); + this.setRejectedExecutionHandler(rejectCounter); } - public void execute(DispatchHandleHostReport r) { + public void execute(DispatchHandleHostReport newReport) { if (isShutdown.get()) { return; } - if (getQueue().contains(r)) { - getQueue().remove(r); + HostReportWrapper oldWrappedReport = hostMap.getIfPresent(newReport.getKey()); + // If hostReport exists on the cache and there's also a task waiting to be executed + // replace the old report by the new one, but refrain from creating another task + if (oldWrappedReport != null) { + DispatchHandleHostReport oldReport = oldWrappedReport.getReportTask(); + if(oldReport != null) { + // Replace report, but keep the reference of
the existing task + hostMap.put(newReport.getKey(), + new HostReportWrapper(newReport.getHostReport(), oldReport)); + return; + } + } + hostMap.put(newReport.getKey(), + new HostReportWrapper(newReport.getHostReport(), newReport)); + super.execute(newReport); + } + + public HostReport removePendingHostReport(String key) { + if (key != null) { + HostReportWrapper r = hostMap.getIfPresent(key); + if (r != null) { + hostMap.asMap().remove(key, r); + return r.getHostReport(); + } } - super.execute(r); + return null; } public long getRejectedTaskCount() { return rejectCounter.getRejectCount(); } + public int getQueueCapacity() { + return queueCapacity; + } + public void shutdown() { if (!isShutdown.getAndSet(true)) { logger.info("Shutting down report pool, currently " + this.getActiveCount() + " active threads."); @@ -79,5 +136,9 @@ public void shutdown() { } } } + + public boolean isHealthy() { + return getQueue().remainingCapacity() > 0; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java index 55497b83e..288965a04 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/LocalDispatcher.java @@ -22,7 +22,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.EmptyResultDataAccessException; import com.imageworks.spcue.DispatchFrame; @@ -42,7 +43,7 @@ public class LocalDispatcher extends AbstractDispatcher implements Dispatcher { private static final Logger logger = - Logger.getLogger(LocalDispatcher.class); + LogManager.getLogger(LocalDispatcher.class); private BookingManager bookingManager; private JobManager jobManager; @@ -111,7 +112,8 @@ private List dispatchHost(DispatchHost host, JobInterface job, */ if 
(!lha.hasAdditionalResources(lha.getThreads() * 100, frame.minMemory, - frame.minGpu)) { + frame.minGpus, + frame.minGpuMemory)) { continue; } @@ -141,10 +143,11 @@ private List dispatchHost(DispatchHost host, JobInterface job, * This should stay here and not go into VirtualProc * or else the count will be off if you fail to book. */ - lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpuReserved); + lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved); if (!lha.hasAdditionalResources(lha.getThreads() * 100, Dispatcher.MEM_RESERVED_MIN, - Dispatcher.GPU_RESERVED_MIN)) { + Dispatcher.GPU_UNITS_RESERVED_MIN, + Dispatcher.MEM_GPU_RESERVED_MIN)) { break; } @@ -196,7 +199,8 @@ private List dispatchHost(DispatchHost host, LayerInterface layer, */ if (!lha.hasAdditionalResources(lha.getThreads() * 100, frame.minMemory, - frame.minGpu)) { + frame.minGpus, + frame.minGpuMemory)) { continue; } @@ -226,10 +230,11 @@ private List dispatchHost(DispatchHost host, LayerInterface layer, * This should stay here and not go into VirtualProc * or else the count will be off if you fail to book. 
*/ - lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpuReserved); + lha.useResources(proc.coresReserved, proc.memoryReserved, proc.gpusReserved, proc.gpuMemoryReserved); if (!lha.hasAdditionalResources(100, Dispatcher.MEM_RESERVED_MIN, - Dispatcher.GPU_RESERVED_MIN)) { + Dispatcher.GPU_UNITS_RESERVED_MIN, + Dispatcher.MEM_GPU_RESERVED_MIN)) { break; } @@ -272,7 +277,8 @@ private List dispatchHost(DispatchHost host, FrameInterface frame, DispatchFrame dframe = jobManager.getDispatchFrame(frame.getId()); if (!lha.hasAdditionalResources(lha.getMaxCoreUnits(), dframe.minMemory, - dframe.minGpu)) { + lha.getMaxGpuUnits(), + dframe.minGpuMemory)) { return procs; } @@ -382,7 +388,8 @@ private void prepHost(DispatchHost host, LocalHostAssignment lha) { host.isLocalDispatch = true; host.idleCores = lha.getIdleCoreUnits(); host.idleMemory = lha.getIdleMemory(); - host.idleGpu = lha.getIdleGpu(); + host.idleGpus = lha.getIdleGpuUnits(); + host.idleGpuMemory = lha.getIdleGpuMemory(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java index a9e0809f9..bb3d00716 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/QueueRejectCounter.java @@ -36,5 +36,9 @@ public long getRejectCount() { return rejectCounter.get(); } + public void clear() { + rejectCounter.set(0); + } + } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java index a3519f10e..e665345cd 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/RedirectManager.java @@ -21,7 +21,8 @@ import java.util.ArrayList; import java.util.List; -import org.apache.log4j.Logger; +import 
org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.GroupInterface; @@ -46,7 +47,7 @@ public class RedirectManager { - private static final Logger logger = Logger.getLogger(RedirectManager.class); + private static final Logger logger = LogManager.getLogger(RedirectManager.class); private JobDao jobDao; private ProcDao procDao; @@ -258,6 +259,8 @@ public boolean addRedirect(VirtualProc proc, GroupInterface group, DispatchHost host = hostManager.getDispatchHost(proc.getHostId()); host.idleCores = proc.coresReserved; host.idleMemory = proc.memoryReserved; + host.idleGpus = proc.gpusReserved; + host.idleGpuMemory = proc.gpuMemoryReserved; if (dispatchSupport.findDispatchJobs(host, group).size() < 1) { logger.info("Failed to find a pending job in group: " + group.getName()); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java index c829eb390..0d1141bc1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ResourceContainer.java @@ -27,19 +27,21 @@ public interface ResourceContainer { * * @param minCores * @param minMemory - * @param minGpu + * @param minGpus + * @param minGpuMemory * @return */ - public boolean hasAdditionalResources(int minCores, long minMemory, long minGpu); + public boolean hasAdditionalResources(int minCores, long minMemory, int minGpus, long minGpuMemory); /** * Subtract the given resources from the grand totals. 
* * @param coreUnits * @param memory - * @param gpu + * @param gpuUnits + * @param gpuMemory */ - public void useResources(int coreUnits, long memory, long gpu); + public void useResources(int coreUnits, long memory, int gpuUnits, long gpuMemory); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ThreadPoolTaskExecutorWrapper.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ThreadPoolTaskExecutorWrapper.java new file mode 100644 index 000000000..0090d3619 --- /dev/null +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/ThreadPoolTaskExecutorWrapper.java @@ -0,0 +1,64 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +package com.imageworks.spcue.dispatcher; + +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +import com.imageworks.spcue.util.CueUtil; + +/** + * Wrapper class of spring ThreadPoolTaskExecutor to initialize with the thread pool properties. 
+ */ +public class ThreadPoolTaskExecutorWrapper extends ThreadPoolTaskExecutor { + + private static final Logger logger = LogManager.getLogger(ThreadPoolTaskExecutorWrapper.class); + private static final long serialVersionUID = -2977068663355369141L; + + private int queueCapacity; + + private ThreadPoolTaskExecutorWrapper(String name, int corePoolSize, int maxPoolSize, + int queueCapacity) { + super(); + this.setMaxPoolSize(maxPoolSize); + this.setCorePoolSize(corePoolSize); + this.setQueueCapacity(queueCapacity); + this.queueCapacity = queueCapacity; + logger.info(name + + " core:" + getCorePoolSize() + + " max:" + getMaxPoolSize() + + " capacity:" + queueCapacity); + } + + @Autowired + public ThreadPoolTaskExecutorWrapper(Environment env, String name, String propertyKeyPrefix) { + this(name, + CueUtil.getIntProperty(env, propertyKeyPrefix, "core_pool_size"), + CueUtil.getIntProperty(env, propertyKeyPrefix, "max_pool_size"), + CueUtil.getIntProperty(env, propertyKeyPrefix, "queue_capacity")); + } + + public int getQueueCapacity() { + return queueCapacity; + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java index 6ddaa9a3b..64329fc0f 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHost.java @@ -19,18 +19,26 @@ package com.imageworks.spcue.dispatcher.commands; +import java.util.List; +import java.util.ArrayList; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.GroupInterface; import com.imageworks.spcue.JobInterface; import com.imageworks.spcue.ShowInterface; import com.imageworks.spcue.dispatcher.Dispatcher; +import com.imageworks.spcue.VirtualProc; /** * A command for booking a host. 
* * @category command */ -public class DispatchBookHost implements Runnable { +public class DispatchBookHost extends KeyRunnable { + private static final Logger logger = + LogManager.getLogger(DispatchBookHost.class); private ShowInterface show = null; private GroupInterface group = null; @@ -39,27 +47,32 @@ public class DispatchBookHost implements Runnable { private Dispatcher dispatcher; public DispatchHost getDispatchHost() { + this.setKey(host.getId()); return host; } public DispatchBookHost(DispatchHost host, Dispatcher d) { + super(host.getId()); this.host = host; this.dispatcher = d; } public DispatchBookHost(DispatchHost host, JobInterface job, Dispatcher d) { + super(host.getId() + "_job_" + job.getJobId()); this.host = host; this.job = job; this.dispatcher = d; } public DispatchBookHost(DispatchHost host, GroupInterface group, Dispatcher d) { + super(host.getId() + "_group_" + group.getGroupId()); this.host = host; this.group = group; this.dispatcher = d; } public DispatchBookHost(DispatchHost host, ShowInterface show, Dispatcher d) { + super(host.getId() + "_name_" + show.getName()); this.host = host; this.show = show; this.dispatcher = d; @@ -82,14 +95,16 @@ else if (job != null) { if (host.hasAdditionalResources( Dispatcher.CORE_POINTS_RESERVED_MIN, Dispatcher.MEM_RESERVED_MIN, - Dispatcher.GPU_RESERVED_MIN)) { + Dispatcher.GPU_UNITS_RESERVED_MIN, + Dispatcher.MEM_GPU_RESERVED_MIN)) { dispatcher.dispatchHost(host); } if (host.hasAdditionalResources( Dispatcher.CORE_POINTS_RESERVED_MIN, Dispatcher.MEM_RESERVED_MIN, - Dispatcher.GPU_RESERVED_MIN)) { + Dispatcher.GPU_UNITS_RESERVED_MIN, + Dispatcher.MEM_GPU_RESERVED_MIN)) { dispatcher.dispatchHostToAllShows(host); } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java index 3a742504c..737541a08 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchBookHostLocal.java @@ -22,12 +22,14 @@ import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.dispatcher.Dispatcher; -public class DispatchBookHostLocal implements Runnable { +public class DispatchBookHostLocal extends KeyRunnable { private DispatchHost host; private Dispatcher dispatcher; public DispatchBookHostLocal(DispatchHost host, Dispatcher d) { + super(host.getId()); + this.host = host; this.dispatcher = d; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java index ca6e4b8c2..cf6428f3a 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchDropDepends.java @@ -19,6 +19,7 @@ package com.imageworks.spcue.dispatcher.commands; + import com.imageworks.spcue.FrameInterface; import com.imageworks.spcue.JobInterface; import com.imageworks.spcue.LayerInterface; @@ -31,7 +32,7 @@ * * @category command */ -public class DispatchDropDepends implements Runnable { +public class DispatchDropDepends extends KeyRunnable { JobInterface job; LayerInterface layer; @@ -41,18 +42,21 @@ public class DispatchDropDepends implements Runnable { DependManager dependManager; public DispatchDropDepends(JobInterface job, DependTarget target, DependManager dependManager) { + super("disp_drop_dep_job_" + job.getJobId() + "_" + target.toString()); this.job = job; this.target = target; this.dependManager = dependManager; } public DispatchDropDepends(LayerInterface layer, DependTarget target, DependManager dependManager) { + super("disp_drop_dep_layer_" + layer.getLayerId() + "_" + target.toString()); this.layer = layer; this.target = target; this.dependManager 
= dependManager; } public DispatchDropDepends(FrameInterface frame, DependTarget target, DependManager dependManager) { + super("disp_drop_dep_frame_" + frame.getFrameId() + "_" + target.toString()); this.frame = frame; this.target = target; this.dependManager = dependManager; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java index 34292db49..32a2acf69 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchEatFrames.java @@ -23,18 +23,20 @@ import com.imageworks.spcue.dao.criteria.FrameSearchInterface; import com.imageworks.spcue.service.JobManagerSupport; + /** * A command for eating an array of frames * * @category command */ -public class DispatchEatFrames implements Runnable { +public class DispatchEatFrames extends KeyRunnable { private FrameSearchInterface search; private Source source; private JobManagerSupport jobManagerSupport; public DispatchEatFrames(FrameSearchInterface search, Source source, JobManagerSupport jobManagerSupport) { + super("disp_eat_frames_job_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); this.search = search; this.source = source; this.jobManagerSupport = jobManagerSupport; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java index c8f41131d..1a18f06ad 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchHandleHostReport.java @@ -28,7 +28,7 @@ * * @category command */ -public class DispatchHandleHostReport implements Runnable { +public class DispatchHandleHostReport extends KeyRunnable { private 
HostReport hostReport; private boolean isBootReport; @@ -36,12 +36,16 @@ public class DispatchHandleHostReport implements Runnable { public volatile int reportTime = (int) (System.currentTimeMillis() / 1000); public DispatchHandleHostReport(HostReport report, HostReportHandler rqdReportManager) { + super("disp_handle_host_report_" + report.hashCode() + + "_" + rqdReportManager.hashCode()); this.hostReport = report; this.isBootReport = false; this.hostReportHandler = rqdReportManager; } public DispatchHandleHostReport(BootReport report, HostReportHandler rqdReportManager) { + super("disp_handle_host_report_" + report.hashCode() + + "_" + rqdReportManager.hashCode()); HostReport hostReport = HostReport.newBuilder() .setHost(report.getHost()) .setCoreInfo(report.getCoreInfo()) diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java index 5cb0a0da7..1910321a8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchJobComplete.java @@ -28,13 +28,15 @@ * * @category command */ -public class DispatchJobComplete implements Runnable { +public class DispatchJobComplete extends KeyRunnable { private JobInterface job; private Source source; private boolean isManualKill; private JobManagerSupport jobManagerSupport; - public DispatchJobComplete(JobInterface job, Source source, boolean isManualKill, JobManagerSupport jobManagerSupport) { + public DispatchJobComplete(JobInterface job, Source source, boolean isManualKill, + JobManagerSupport jobManagerSupport) { + super("disp_job_complete_" + job.getJobId() + "_" + source.toString()); this.job = job; this.source = source; this.isManualKill = isManualKill; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java index 73ce9d97c..986d6bd05 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillFrames.java @@ -28,13 +28,14 @@ * * @category command */ -public class DispatchKillFrames implements Runnable { +public class DispatchKillFrames extends KeyRunnable { private FrameSearchInterface search; private JobManagerSupport jobManagerSupport; private Source source; public DispatchKillFrames(FrameSearchInterface search, Source source, JobManagerSupport jobManagerSupport) { + super("disp_kill_frames_" + source.toString() + "_" + jobManagerSupport.hashCode()); this.search = search; this.source = source; this.jobManagerSupport = jobManagerSupport; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java index 7c6464eff..d97966139 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchKillProcs.java @@ -19,18 +19,19 @@ package com.imageworks.spcue.dispatcher.commands; -import java.util.Collection; - import com.imageworks.spcue.Source; import com.imageworks.spcue.VirtualProc; import com.imageworks.spcue.service.JobManagerSupport; +import java.util.Collection; -public class DispatchKillProcs implements Runnable { +public class DispatchKillProcs extends KeyRunnable { private Collection procs; private JobManagerSupport jobManagerSupport; private Source source; public DispatchKillProcs(Collection procs, Source source, JobManagerSupport jobManagerSupport) { + super("disp_kill_procs_" + procs.hashCode() + "_" + source.toString() + + "_" + jobManagerSupport.hashCode()); this.procs = procs; this.source = source; this.jobManagerSupport = 
jobManagerSupport; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java index cfbcd3eb4..c3682866e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchLaunchJob.java @@ -25,12 +25,13 @@ /** * @category DispatchCommand */ -public class DispatchLaunchJob implements Runnable { +public class DispatchLaunchJob extends KeyRunnable { private JobLauncher jobLauncher; private JobSpec spec; public DispatchLaunchJob(JobSpec spec, JobLauncher jobLauncher) { + super("disp_launch_job_" + spec.getShow() + "_" + spec.getShot() + "_" + spec.getUid()); this.spec = spec; this.jobLauncher = jobLauncher; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java index b5c96740b..92ec0db29 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchMoveJobs.java @@ -26,13 +26,15 @@ import com.imageworks.spcue.JobInterface; import com.imageworks.spcue.service.GroupManager; -public class DispatchMoveJobs implements Runnable { +public class DispatchMoveJobs extends KeyRunnable { private GroupDetail group; private List jobs; private GroupManager groupManager; public DispatchMoveJobs(GroupDetail group, List jobs, GroupManager groupManager) { + super("disp_move_jobs_" + group.getGroupId() + "_dept_" + group.getDepartmentId() + + "_show_" + group.getShowId()); this.group = group; this.jobs = jobs; this.groupManager = groupManager; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java index c563a34c0..7e12eb90c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchNextFrame.java @@ -28,13 +28,14 @@ * * @category command */ -public class DispatchNextFrame implements Runnable { +public class DispatchNextFrame extends KeyRunnable { private VirtualProc proc; private DispatchJob job; private Dispatcher dispatcher; public DispatchNextFrame(DispatchJob j, VirtualProc p, Dispatcher d) { + super("disp_next_frame_" + j.getJobId() + "_" + p.getProcId()); this.job = j; this.proc = p; this.dispatcher = d; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java index b17710f51..528474929 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchReorderFrames.java @@ -25,7 +25,7 @@ import com.imageworks.spcue.service.JobManagerSupport; import com.imageworks.spcue.util.FrameSet; -public class DispatchReorderFrames implements Runnable { +public class DispatchReorderFrames extends KeyRunnable { private JobInterface job = null; private LayerInterface layer = null; @@ -33,14 +33,20 @@ public class DispatchReorderFrames implements Runnable { private Order order; private JobManagerSupport jobManagerSupport; - public DispatchReorderFrames(JobInterface job, FrameSet frameSet, Order order, JobManagerSupport jobManagerSupport) { + public DispatchReorderFrames(JobInterface job, FrameSet frameSet, Order order, + JobManagerSupport jobManagerSupport) { + super("disp_reorder_frames_job_" + job.getJobId() + + "_" + jobManagerSupport.toString()); this.job = job; this.frameSet = frameSet; this.order = order; 
this.jobManagerSupport = jobManagerSupport; } - public DispatchReorderFrames(LayerInterface layer, FrameSet frameSet, Order order, JobManagerSupport jobManagerSupport) { + public DispatchReorderFrames(LayerInterface layer, FrameSet frameSet, Order order, + JobManagerSupport jobManagerSupport) { + super("disp_reorder_frames_layer_" + layer.getLayerId() + + "_" + jobManagerSupport.toString()); this.layer = layer; this.frameSet = frameSet; this.order = order; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java index 5d9037032..8546423dd 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRetryFrames.java @@ -28,7 +28,7 @@ * * @category command */ -public class DispatchRetryFrames implements Runnable { +public class DispatchRetryFrames extends KeyRunnable { private FrameSearchInterface search; private Source source; @@ -36,6 +36,7 @@ public class DispatchRetryFrames implements Runnable { public DispatchRetryFrames(FrameSearchInterface search, Source source, JobManagerSupport jobManagerSupport) { + super("disp_retry_frames_" + search.hashCode() + "_" + source.toString()); this.search = search; this.source = source; this.jobManagerSupport = jobManagerSupport; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java index 4b7af8328..fe9bde60e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrame.java @@ -19,52 +19,40 @@ package com.imageworks.spcue.dispatcher.commands; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; import com.imageworks.spcue.VirtualProc; import com.imageworks.spcue.rqd.RqdClient; import com.imageworks.spcue.rqd.RqdClientException; -public class DispatchRqdKillFrame implements Runnable { +public class DispatchRqdKillFrame extends KeyRunnable { - private static final Logger logger = Logger.getLogger(DispatchRqdKillFrame.class); + private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrame.class); - private VirtualProc proc = null; private String message; - private String hostname; private String frameId; private final RqdClient rqdClient; public DispatchRqdKillFrame(String hostname, String frameId, String message, RqdClient rqdClient) { + super("disp_rqd_kill_frame_" + hostname + "_" + frameId + "_" + rqdClient.toString()); this.hostname = hostname; this.frameId = frameId; this.message = message; this.rqdClient = rqdClient; } - public DispatchRqdKillFrame(VirtualProc proc, String message, RqdClient rqdClient) { - this.proc = proc; - this.hostname = proc.hostName; - this.message = message; - this.rqdClient = rqdClient; - } - @Override public void run() { long startTime = System.currentTimeMillis(); try { - if (proc != null) { - rqdClient.killFrame(proc, message); - } - else { - rqdClient.killFrame(hostname, frameId, message); - } + rqdClient.killFrame(hostname, frameId, message); } catch (RqdClientException e) { logger.info("Failed to contact host " + hostname + ", " + e); - } - finally { + } finally { long elapsedTime = System.currentTimeMillis() - startTime; logger.info("RQD communication with " + hostname + " took " + elapsedTime + "ms"); diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java new file mode 100644 index 000000000..301f77479 --- /dev/null +++ 
b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchRqdKillFrameMemory.java @@ -0,0 +1,78 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +package com.imageworks.spcue.dispatcher.commands; + +import com.imageworks.spcue.FrameInterface; +import com.imageworks.spcue.VirtualProc; +import com.imageworks.spcue.dispatcher.DispatchSupport; +import com.imageworks.spcue.rqd.RqdClient; +import com.imageworks.spcue.rqd.RqdClientException; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; + +/** + * A runnable to communicate with rqd requesting for a frame to be killed due to memory issues. + *

+ * Before killing a frame, the database is updated to mark the frame status as EXIT_STATUS_MEMORY_FAILURE, + * this allows the FrameCompleteHandler to possibly retry the frame after increasing its memory requirements + */ +public class DispatchRqdKillFrameMemory extends KeyRunnable { + + private static final Logger logger = LogManager.getLogger(DispatchRqdKillFrameMemory.class); + + private String message; + private String hostname; + private DispatchSupport dispatchSupport; + private final RqdClient rqdClient; + private final boolean isTestMode; + + private FrameInterface frame; + + public DispatchRqdKillFrameMemory(String hostname, FrameInterface frame, String message, RqdClient rqdClient, + DispatchSupport dispatchSupport, boolean isTestMode) { + super("disp_rqd_kill_frame_" + frame.getFrameId() + "_" + rqdClient.toString()); + this.frame = frame; + this.hostname = hostname; + this.message = message; + this.rqdClient = rqdClient; + this.dispatchSupport = dispatchSupport; + this.isTestMode = isTestMode; + } + + @Override + public void run() { + long startTime = System.currentTimeMillis(); + try { + if (dispatchSupport.updateFrameMemoryError(frame) && !isTestMode) { + rqdClient.killFrame(hostname, frame.getFrameId(), message); + } else { + logger.warn("Could not update frame " + frame.getFrameId() + + " status to EXIT_STATUS_MEMORY_FAILURE. 
Canceling kill request!"); + } + } catch (RqdClientException e) { + logger.warn("Failed to contact host " + hostname + ", " + e); + } finally { + long elapsedTime = System.currentTimeMillis() - startTime; + logger.info("RQD communication with " + hostname + + " took " + elapsedTime + "ms"); + } + } +} + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java index 7016e526f..5294a203c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchSatisfyDepends.java @@ -30,7 +30,7 @@ * * @category command */ -public class DispatchSatisfyDepends implements Runnable { +public class DispatchSatisfyDepends extends KeyRunnable { private JobInterface job = null; private LayerInterface layer = null; @@ -39,21 +39,25 @@ public class DispatchSatisfyDepends implements Runnable { private JobManagerSupport jobManagerSupport; public DispatchSatisfyDepends(JobInterface job, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + job.getJobId() + "_" + jobManagerSupport.toString()); this.job = job; this.jobManagerSupport = jobManagerSupport; } public DispatchSatisfyDepends(LayerInterface layer, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + layer.getLayerId() + "_" + jobManagerSupport.toString()); this.layer = layer; this.jobManagerSupport = jobManagerSupport; } public DispatchSatisfyDepends(FrameInterface frame, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + frame.getFrameId() + "_" + jobManagerSupport.toString()); this.frame = frame; this.jobManagerSupport = jobManagerSupport; } public DispatchSatisfyDepends(FrameSearchInterface search, JobManagerSupport jobManagerSupport) { + super("disp_sat_deps_" + search.hashCode() + "_" + jobManagerSupport.hashCode()); this.search = search; 
this.jobManagerSupport = jobManagerSupport; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java new file mode 100644 index 000000000..b4eb11a07 --- /dev/null +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchShutdownJobIfCompleted.java @@ -0,0 +1,52 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +package com.imageworks.spcue.dispatcher.commands; + +import com.imageworks.spcue.JobInterface; +import com.imageworks.spcue.Source; +import com.imageworks.spcue.service.JobManagerSupport; + +/** + * A command for shutting down a job if it is completed. + * This is a workaround for when Cuebot failed to shutdown a job due to database access error. 
+ * + * @category command + */ +public class DispatchShutdownJobIfCompleted extends KeyRunnable { + private JobInterface job; + + private JobManagerSupport jobManagerSupport; + public DispatchShutdownJobIfCompleted(JobInterface job, JobManagerSupport jobManagerSupport) { + super("disp_st_job_comp_" + job.getJobId()); + this.job = job; + this.jobManagerSupport = jobManagerSupport; + } + + public void run() { + new DispatchCommandTemplate() { + public void wrapDispatchCommand() { + if (jobManagerSupport.isJobComplete(job)) { + jobManagerSupport.shutdownJob(job, new Source("natural"), false); + } + } + }.execute(); + } +} + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java index d40e5e185..b0430b892 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/DispatchStaggerFrames.java @@ -23,7 +23,7 @@ import com.imageworks.spcue.LayerInterface; import com.imageworks.spcue.service.JobManagerSupport; -public class DispatchStaggerFrames implements Runnable { +public class DispatchStaggerFrames extends KeyRunnable { private JobInterface job = null; private LayerInterface layer = null; @@ -32,6 +32,7 @@ public class DispatchStaggerFrames implements Runnable { private JobManagerSupport jobManagerSupport; public DispatchStaggerFrames(JobInterface job, String range, int stagger, JobManagerSupport jobManagerSupport) { + super("disp_stag_frames_" + job.getJobId() + "_" + range); this.job = job; this.range = range; this.stagger = stagger; @@ -39,6 +40,7 @@ public DispatchStaggerFrames(JobInterface job, String range, int stagger, JobMan } public DispatchStaggerFrames(LayerInterface layer, String range, int stagger, JobManagerSupport jobManagerSupport) { + super("disp_stag_frames_" + layer.getLayerId() + "_" + range); this.layer = 
layer; this.range = range; this.stagger = stagger; diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java new file mode 100644 index 000000000..bdbdb87da --- /dev/null +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/KeyRunnable.java @@ -0,0 +1,41 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + + +package com.imageworks.spcue.dispatcher.commands; + +import com.imageworks.spcue.DispatchHost; +import com.imageworks.spcue.dispatcher.Dispatcher; + +public abstract class KeyRunnable implements Runnable { + + private String key; + + public KeyRunnable(String key) { + this.key = key; + } + + public String getKey() { + return key; + } + + public void setKey(String key) { + this.key = key; + } +} + diff --git a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java index 1dfc2ca9d..15ab1384e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java +++ b/cuebot/src/main/java/com/imageworks/spcue/dispatcher/commands/ManageReparentHosts.java @@ -22,15 +22,17 @@ import java.util.List; import com.imageworks.spcue.AllocationInterface; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; import com.imageworks.spcue.HostInterface; import com.imageworks.spcue.service.HostManager; -public class ManageReparentHosts implements Runnable { +public class ManageReparentHosts extends KeyRunnable { AllocationInterface alloc; List hosts; HostManager hostManager; public ManageReparentHosts(AllocationInterface alloc, List hosts, HostManager hostManager) { + super(alloc.getAllocationId()); this.alloc = alloc; this.hosts = hosts; this.hostManager = hostManager; diff --git a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java index 6aa6866ea..40554904b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java +++ b/cuebot/src/main/java/com/imageworks/spcue/rqd/RqdClientGrpc.java @@ -23,7 +23,8 @@ import io.grpc.ManagedChannel; import io.grpc.ManagedChannelBuilder; import io.grpc.StatusRuntimeException; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import 
org.apache.logging.log4j.LogManager; import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; @@ -49,25 +50,31 @@ import com.imageworks.spcue.grpc.rqd.RunningFrameStatusResponse; public final class RqdClientGrpc implements RqdClient { - private static final Logger logger = Logger.getLogger(RqdClientGrpc.class); + private static final Logger logger = LogManager.getLogger(RqdClientGrpc.class); private final int rqdCacheSize; private final int rqdCacheExpiration; + private final int rqdCacheConcurrency; private final int rqdServerPort; + private final int rqdTaskDeadlineSeconds; private LoadingCache channelCache; private boolean testMode = false; - public RqdClientGrpc(int rqdServerPort, int rqdCacheSize, int rqdCacheExpiration) { + public RqdClientGrpc(int rqdServerPort, int rqdCacheSize, int rqdCacheExpiration, + int rqdCacheConcurrency, int rqdTaskDeadline) { this.rqdServerPort = rqdServerPort; this.rqdCacheSize = rqdCacheSize; this.rqdCacheExpiration = rqdCacheExpiration; + this.rqdCacheConcurrency = rqdCacheConcurrency; + this.rqdTaskDeadlineSeconds = rqdTaskDeadline; } private void buildChannelCache() { this.channelCache = CacheBuilder.newBuilder() .maximumSize(rqdCacheSize) + .concurrencyLevel(rqdCacheConcurrency) .expireAfterAccess(rqdCacheExpiration, TimeUnit.MINUTES) .removalListener(new RemovalListener() { @Override @@ -80,8 +87,9 @@ public void onRemoval(RemovalNotification removal){ new CacheLoader() { @Override public ManagedChannel load(String host) throws Exception { - ManagedChannelBuilder channelBuilder = ManagedChannelBuilder.forAddress( - host, rqdServerPort).usePlaintext(); + ManagedChannelBuilder channelBuilder = ManagedChannelBuilder + .forAddress(host, rqdServerPort) + .usePlaintext(); return channelBuilder.build(); } }); @@ -92,7 +100,9 @@ private RqdInterfaceGrpc.RqdInterfaceBlockingStub getStub(String host) throws Ex buildChannelCache(); } ManagedChannel channel = channelCache.get(host); - return 
RqdInterfaceGrpc.newBlockingStub(channel); + return RqdInterfaceGrpc + .newBlockingStub(channel) + .withDeadlineAfter(rqdTaskDeadlineSeconds, TimeUnit.SECONDS); } private RunningFrameGrpc.RunningFrameBlockingStub getRunningFrameStub(String host) throws ExecutionException { @@ -100,7 +110,9 @@ private RunningFrameGrpc.RunningFrameBlockingStub getRunningFrameStub(String hos buildChannelCache(); } ManagedChannel channel = channelCache.get(host); - return RunningFrameGrpc.newBlockingStub(channel); + return RunningFrameGrpc + .newBlockingStub(channel) + .withDeadlineAfter(rqdTaskDeadlineSeconds, TimeUnit.SECONDS); } public void setHostLock(HostInterface host, LockState lock) { @@ -111,7 +123,7 @@ public void setHostLock(HostInterface host, LockState lock) { logger.debug("Locking RQD host"); lockHost(host); } else { - logger.debug("Unkown LockState passed to setHostLock."); + logger.debug("Unknown LockState passed to setHostLock."); } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java b/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java index f7d5437af..d2c8b17e1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/CueStatic.java @@ -44,17 +44,17 @@ public class CueStatic extends CueInterfaceGrpc.CueInterfaceImplBase { public void getSystemStats(CueGetSystemStatsRequest request, StreamObserver responseObserver) { SystemStats stats = SystemStats.newBuilder() - .setDispatchThreads(dispatchQueue.getActiveThreadCount()) - .setDispatchWaiting(dispatchQueue.getWaitingCount()) + .setDispatchThreads(dispatchQueue.getActiveCount()) + .setDispatchWaiting(dispatchQueue.getSize()) .setDispatchRemainingCapacity(dispatchQueue.getRemainingCapacity()) - .setDispatchExecuted(dispatchQueue.getTotalDispatched()) - .setDispatchRejected(dispatchQueue.getTotalRejected()) + .setDispatchExecuted(dispatchQueue.getCompletedTaskCount()) + 
.setDispatchRejected(dispatchQueue.getRejectedTaskCount()) - .setManageThreads(manageQueue.getActiveThreadCount()) - .setManageWaiting(manageQueue.getWaitingCount()) + .setManageThreads(manageQueue.getActiveCount()) + .setManageWaiting(manageQueue.getSize()) .setManageRemainingCapacity(manageQueue.getRemainingCapacity()) - .setManageExecuted(manageQueue.getTotalDispatched()) - .setManageRejected(manageQueue.getTotalRejected()) + .setManageExecuted(manageQueue.getCompletedTaskCount()) + .setManageRejected(manageQueue.getRejectedTaskCount()) .setReportThreads(reportQueue.getActiveCount()) .setReportWaiting(reportQueue.getQueue().size()) @@ -62,12 +62,12 @@ public void getSystemStats(CueGetSystemStatsRequest request, .setReportExecuted(reportQueue.getTaskCount()) .setReportRejected(reportQueue.getRejectedTaskCount()) - .setBookingWaiting(bookingQueue.getQueue().size()) - .setBookingRemainingCapacity(bookingQueue.getQueue().remainingCapacity()) + .setBookingWaiting(bookingQueue.getSize()) + .setBookingRemainingCapacity(bookingQueue.getRemainingCapacity()) .setBookingThreads(bookingQueue.getActiveCount()) .setBookingExecuted(bookingQueue.getCompletedTaskCount()) .setBookingRejected(bookingQueue.getRejectedTaskCount()) - .setBookingSleepMillis(bookingQueue.sleepTime()) + .setBookingSleepMillis(0) .setHostBalanceSuccess(DispatchSupport.balanceSuccess.get()) .setHostBalanceFailed(DispatchSupport.balanceFailed.get()) @@ -76,7 +76,7 @@ public void getSystemStats(CueGetSystemStatsRequest request, .setClearedProcs(DispatchSupport.clearedProcs.get()) .setBookingRetries(DispatchSupport.bookingRetries.get()) .setBookingErrors(DispatchSupport.bookingErrors.get()) - .setBookedProcs( DispatchSupport.bookedProcs.get()) + .setBookedProcs(DispatchSupport.bookedProcs.get()) // TODO(gregdenton) Reimplement these with gRPC. 
(Issue #69) // .setReqForData(IceServer.dataRequests.get()) diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java index 02781bbcd..46edc2025 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageAllocation.java @@ -24,6 +24,8 @@ import com.imageworks.spcue.grpc.facility.AllocFindResponse; import com.imageworks.spcue.grpc.facility.AllocGetAllRequest; import com.imageworks.spcue.grpc.facility.AllocGetAllResponse; +import com.imageworks.spcue.grpc.facility.AllocGetDefaultRequest; +import com.imageworks.spcue.grpc.facility.AllocGetDefaultResponse; import com.imageworks.spcue.grpc.facility.AllocGetHostsRequest; import com.imageworks.spcue.grpc.facility.AllocGetHostsResponse; import com.imageworks.spcue.grpc.facility.AllocGetRequest; @@ -34,6 +36,8 @@ import com.imageworks.spcue.grpc.facility.AllocReparentHostsResponse; import com.imageworks.spcue.grpc.facility.AllocSetBillableRequest; import com.imageworks.spcue.grpc.facility.AllocSetBillableResponse; +import com.imageworks.spcue.grpc.facility.AllocSetDefaultRequest; +import com.imageworks.spcue.grpc.facility.AllocSetDefaultResponse; import com.imageworks.spcue.grpc.facility.AllocSetNameRequest; import com.imageworks.spcue.grpc.facility.AllocSetNameResponse; import com.imageworks.spcue.grpc.facility.AllocSetTagRequest; @@ -122,10 +126,18 @@ public void get(AllocGetRequest request, StreamObserver respon } } + private AllocationEntity findAllocationDetail(String facility, String name) { + // If they pass name in the format ., just remove the facility. 
+ if (CueUtil.verifyAllocationNameFormat(name)) { + name = CueUtil.splitAllocationName(name)[1]; + } + return adminManager.findAllocationDetail(facility, name); + } + @Override public void delete( AllocDeleteRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = adminManager.findAllocationDetail( + AllocationEntity alloc = findAllocationDetail( request.getAllocation().getFacility(), request.getAllocation().getName()); adminManager.deleteAllocation(alloc); responseObserver.onNext(AllocDeleteResponse.newBuilder().build()); @@ -186,7 +198,7 @@ public void reparentHosts( public void setBillable( AllocSetBillableRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = adminManager.findAllocationDetail( + AllocationEntity alloc = findAllocationDetail( request.getAllocation().getFacility(), request.getAllocation().getName()); adminManager.setAllocationBillable(alloc, request.getValue()); responseObserver.onNext(AllocSetBillableResponse.newBuilder().build()); @@ -196,7 +208,7 @@ public void setBillable( @Override public void setName( AllocSetNameRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = adminManager.findAllocationDetail( + AllocationEntity alloc = findAllocationDetail( request.getAllocation().getFacility(), request.getAllocation().getName()); adminManager.setAllocationName(alloc, request.getName()); responseObserver.onNext(AllocSetNameResponse.newBuilder().build()); @@ -206,13 +218,35 @@ public void setName( @Override public void setTag( AllocSetTagRequest request, StreamObserver responseObserver) { - AllocationEntity alloc = adminManager.findAllocationDetail( + AllocationEntity alloc = findAllocationDetail( request.getAllocation().getFacility(), request.getAllocation().getName()); adminManager.setAllocationTag(alloc, request.getTag()); responseObserver.onNext(AllocSetTagResponse.newBuilder().build()); responseObserver.onCompleted(); } + @Override + public void getDefault( + 
AllocGetDefaultRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = adminManager.getDefaultAllocation(); + responseObserver.onNext(AllocGetDefaultResponse.newBuilder() + .setAllocation(whiteboard.getAllocation(alloc.id)) + .build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefault( + AllocSetDefaultRequest request, + StreamObserver responseObserver) { + AllocationEntity alloc = findAllocationDetail( + request.getAllocation().getFacility(), request.getAllocation().getName()); + adminManager.setDefaultAllocation(alloc); + responseObserver.onNext(AllocSetDefaultResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + public AdminManager getAdminManager() { return adminManager; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java index f92d03dea..057be1843 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDeed.java @@ -30,10 +30,6 @@ import com.imageworks.spcue.grpc.host.DeedGetOwnerRequest; import com.imageworks.spcue.grpc.host.DeedGetOwnerResponse; import com.imageworks.spcue.grpc.host.DeedInterfaceGrpc; -import com.imageworks.spcue.grpc.host.DeedSetBlackoutTimeEnabledRequest; -import com.imageworks.spcue.grpc.host.DeedSetBlackoutTimeEnabledResponse; -import com.imageworks.spcue.grpc.host.DeedSetBlackoutTimeRequest; -import com.imageworks.spcue.grpc.host.DeedSetBlackoutTimeResponse; import com.imageworks.spcue.grpc.host.Host; import com.imageworks.spcue.grpc.host.Owner; import com.imageworks.spcue.service.OwnerManager; @@ -65,23 +61,6 @@ public void getOwner(DeedGetOwnerRequest request, StreamObserver responseObserver) { - ownerManager.setBlackoutTime(toEntity(request.getDeed()), request.getStartTime(), request.getStopTime()); - responseObserver.onNext(DeedSetBlackoutTimeResponse.newBuilder().build()); - 
responseObserver.onCompleted(); - } - - @Override - public void setBlackoutTimeEnabled(DeedSetBlackoutTimeEnabledRequest request, - StreamObserver responseObserver) { - ownerManager.setBlackoutTimeEnabled(toEntity(request.getDeed()), request.getEnabled()); - responseObserver.onNext(DeedSetBlackoutTimeEnabledResponse.newBuilder().build()); - responseObserver.onCompleted(); - } - - public OwnerManager getOwnerManager() { return ownerManager; } @@ -104,9 +83,6 @@ private DeedEntity toEntity(Deed deed) { entity.host = deed.getHost(); entity.owner = deed.getOwner(); entity.show = deed.getShow(); - entity.isBlackoutEnabled = deed.getBlackout(); - entity.blackoutStart = deed.getBlackoutStartTime(); - entity.blackoutStop = deed.getBlackoutStopTime(); return entity; } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java index bb3837c26..9fad37ef1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageDepend.java @@ -21,11 +21,13 @@ import io.grpc.Status; import io.grpc.stub.StreamObserver; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.EmptyResultDataAccessException; import com.imageworks.spcue.LightweightDependency; import com.imageworks.spcue.dispatcher.DispatchQueue; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; import com.imageworks.spcue.grpc.depend.DependGetDependRequest; import com.imageworks.spcue.grpc.depend.DependGetDependResponse; import com.imageworks.spcue.grpc.depend.DependInterfaceGrpc; @@ -38,7 +40,7 @@ public class ManageDepend extends DependInterfaceGrpc.DependInterfaceImplBase { - private static final Logger logger = Logger.getLogger(ManageDepend.class); + private static final Logger logger = LogManager.getLogger(ManageDepend.class); private 
DependManager dependManager; private DispatchQueue manageQueue; @@ -62,7 +64,8 @@ public void getDepend(DependGetDependRequest request, StreamObserver responseObserver) { LightweightDependency depend = dependManager.getDepend(request.getDepend().getId()); - manageQueue.execute(new Runnable() { + String key = "manage_dep_sat_req_" + request.getDepend().getId(); + manageQueue.execute(new KeyRunnable(key) { public void run() { try { logger.info("dropping dependency: " + depend.id); diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java index 15f6163bc..8749a5787 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFilter.java @@ -31,6 +31,7 @@ import com.imageworks.spcue.dao.FilterDao; import com.imageworks.spcue.dao.GroupDao; import com.imageworks.spcue.dispatcher.DispatchQueue; +import com.imageworks.spcue.dispatcher.commands.KeyRunnable; import com.imageworks.spcue.grpc.filter.Action; import com.imageworks.spcue.grpc.filter.ActionSeq; import com.imageworks.spcue.grpc.filter.Filter; @@ -125,7 +126,8 @@ public void createMatcher(FilterCreateMatcherRequest request, @Override public void delete(FilterDeleteRequest request, StreamObserver responseObserver) { FilterEntity filter = getFilterEntity(request.getFilter()); - manageQueue.execute(new Runnable() { + String key = "manage_filter_del_req_" + filter.getId(); + manageQueue.execute(new KeyRunnable(key) { public void run() { filterManager.deleteFilter(filter); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java index 7cc811cfb..27511fb94 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageFrame.java @@ -269,7 +269,7 @@ public void 
addRenderPartition(FrameAddRenderPartitionRequest request, lha.setThreads(request.getThreads()); lha.setMaxCoreUnits(request.getMaxCores() * 100); lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpu(request.getMaxGpu()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); lha.setType(RenderPartitionType.FRAME_PARTITION); if (localBookingSupport.bookLocal(frame, request.getHost(), request.getUsername(), lha)) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java index 8fd7e10ce..b8f3cd43e 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageGroup.java @@ -55,6 +55,10 @@ import com.imageworks.spcue.grpc.job.GroupSetDefJobMaxCoresResponse; import com.imageworks.spcue.grpc.job.GroupSetDefJobMinCoresRequest; import com.imageworks.spcue.grpc.job.GroupSetDefJobMinCoresResponse; +import com.imageworks.spcue.grpc.job.GroupSetDefJobMaxGpusRequest; +import com.imageworks.spcue.grpc.job.GroupSetDefJobMaxGpusResponse; +import com.imageworks.spcue.grpc.job.GroupSetDefJobMinGpusRequest; +import com.imageworks.spcue.grpc.job.GroupSetDefJobMinGpusResponse; import com.imageworks.spcue.grpc.job.GroupSetDefJobPriorityRequest; import com.imageworks.spcue.grpc.job.GroupSetDefJobPriorityResponse; import com.imageworks.spcue.grpc.job.GroupSetDeptRequest; @@ -65,6 +69,10 @@ import com.imageworks.spcue.grpc.job.GroupSetMaxCoresResponse; import com.imageworks.spcue.grpc.job.GroupSetMinCoresRequest; import com.imageworks.spcue.grpc.job.GroupSetMinCoresResponse; +import com.imageworks.spcue.grpc.job.GroupSetMaxGpusRequest; +import com.imageworks.spcue.grpc.job.GroupSetMaxGpusResponse; +import com.imageworks.spcue.grpc.job.GroupSetMinGpusRequest; +import com.imageworks.spcue.grpc.job.GroupSetMinGpusResponse; import com.imageworks.spcue.grpc.job.GroupSetNameRequest; import 
com.imageworks.spcue.grpc.job.GroupSetNameResponse; import com.imageworks.spcue.grpc.job.Job; @@ -189,6 +197,24 @@ public void setDefaultJobMinCores(GroupSetDefJobMinCoresRequest request, StreamO responseObserver.onCompleted(); } + @Override + public void setDefaultJobMaxGpus(GroupSetDefJobMaxGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMaxGpus(group, request.getMaxGpus()); + responseObserver.onNext(GroupSetDefJobMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultJobMinGpus(GroupSetDefJobMinGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupDefaultJobMinGpus(group, request.getMinGpus()); + responseObserver.onNext(GroupSetDefJobMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + @Override public void setName(GroupSetNameRequest request, StreamObserver responseObserver) { GroupInterface group = getGroupInterface(request.getGroup()); @@ -262,6 +288,24 @@ public void setMinCores(GroupSetMinCoresRequest request, responseObserver.onCompleted(); } + @Override + public void setMaxGpus(GroupSetMaxGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMaxGpus(group, request.getMaxGpus()); + responseObserver.onNext(GroupSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setMinGpus(GroupSetMinGpusRequest request, + StreamObserver responseObserver) { + GroupInterface group = getGroupInterface(request.getGroup()); + groupManager.setGroupMinGpus(group, request.getMinGpus()); + responseObserver.onNext(GroupSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + public GroupDao getGroupDao() { return groupDao; } diff --git 
a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java index 77998e416..7728c4cb1 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageHost.java @@ -140,7 +140,7 @@ public void getHost(HostGetHostRequest request, StreamObserver responseObserver) { try { responseObserver.onNext(HostGetHostResponse.newBuilder() - .setHost(whiteboard.findHost(request.getId())) + .setHost(whiteboard.getHost(request.getId())) .build()); responseObserver.onCompleted(); } catch (EmptyResultDataAccessException e) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java index dd00476cd..e3cfa5178 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageJob.java @@ -24,7 +24,10 @@ import io.grpc.Status; import io.grpc.stub.StreamObserver; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.dao.EmptyResultDataAccessException; import com.imageworks.spcue.BuildableJob; @@ -49,6 +52,7 @@ import com.imageworks.spcue.dispatcher.commands.DispatchReorderFrames; import com.imageworks.spcue.dispatcher.commands.DispatchRetryFrames; import com.imageworks.spcue.dispatcher.commands.DispatchSatisfyDepends; +import com.imageworks.spcue.dispatcher.commands.DispatchShutdownJobIfCompleted; import com.imageworks.spcue.dispatcher.commands.DispatchStaggerFrames; import com.imageworks.spcue.grpc.comment.Comment; import com.imageworks.spcue.grpc.job.FrameSeq; @@ -124,12 +128,18 @@ import com.imageworks.spcue.grpc.job.JobSetGroupResponse; import 
com.imageworks.spcue.grpc.job.JobSetMaxCoresRequest; import com.imageworks.spcue.grpc.job.JobSetMaxCoresResponse; +import com.imageworks.spcue.grpc.job.JobSetMaxGpusRequest; +import com.imageworks.spcue.grpc.job.JobSetMaxGpusResponse; import com.imageworks.spcue.grpc.job.JobSetMaxRetriesRequest; import com.imageworks.spcue.grpc.job.JobSetMaxRetriesResponse; import com.imageworks.spcue.grpc.job.JobSetMinCoresRequest; import com.imageworks.spcue.grpc.job.JobSetMinCoresResponse; +import com.imageworks.spcue.grpc.job.JobSetMinGpusRequest; +import com.imageworks.spcue.grpc.job.JobSetMinGpusResponse; import com.imageworks.spcue.grpc.job.JobSetPriorityRequest; import com.imageworks.spcue.grpc.job.JobSetPriorityResponse; +import com.imageworks.spcue.grpc.job.JobShutdownIfCompletedRequest; +import com.imageworks.spcue.grpc.job.JobShutdownIfCompletedResponse; import com.imageworks.spcue.grpc.job.JobStaggerFramesRequest; import com.imageworks.spcue.grpc.job.JobStaggerFramesResponse; import com.imageworks.spcue.grpc.job.LayerSeq; @@ -149,8 +159,10 @@ import com.imageworks.spcue.util.Convert; import com.imageworks.spcue.util.FrameSet; +import static com.imageworks.spcue.servant.ServantUtil.attemptChange; + public class ManageJob extends JobInterfaceGrpc.JobInterfaceImplBase { - private static final Logger logger = Logger.getLogger(ManageJob.class); + private static final Logger logger = LogManager.getLogger(ManageJob.class); private Whiteboard whiteboard; private JobManager jobManager; private GroupManager groupManager; @@ -166,6 +178,9 @@ public class ManageJob extends JobInterfaceGrpc.JobInterfaceImplBase { private JobInterface job; private FrameSearchFactory frameSearchFactory; private JobSearchFactory jobSearchFactory; + private final String property = "frame.finished_jobs_readonly"; + @Autowired + private Environment env; @Override public void findJob(JobFindJobRequest request, StreamObserver responseObserver) { @@ -350,9 +365,11 @@ public void resume(JobResumeRequest 
request, StreamObserver r public void setMaxCores(JobSetMaxCoresRequest request, StreamObserver responseObserver) { try{ setupJobData(request.getJob()); - jobDao.updateMaxCores(job, Convert.coresToWholeCoreUnits(request.getVal())); - responseObserver.onNext(JobSetMaxCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateMaxCores(job, Convert.coresToWholeCoreUnits(request.getVal())); + responseObserver.onNext(JobSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -377,11 +394,26 @@ public void setMinCores(JobSetMinCoresRequest request, StreamObserver responseObserver) { + public void setMaxGpus(JobSetMaxGpusRequest request, StreamObserver responseObserver) { + try{ + setupJobData(request.getJob()); + jobDao.updateMaxGpus(job, request.getVal()); + responseObserver.onNext(JobSetMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void setMinGpus(JobSetMinGpusRequest request, StreamObserver responseObserver) { try{ setupJobData(request.getJob()); - jobDao.updatePriority(job, request.getVal()); - responseObserver.onNext(JobSetPriorityResponse.newBuilder().build()); + jobDao.updateMinGpus(job, request.getVal()); + responseObserver.onNext(JobSetMinGpusResponse.newBuilder().build()); responseObserver.onCompleted(); } catch (EmptyResultDataAccessException e) { @@ -391,6 +423,23 @@ public void setPriority(JobSetPriorityRequest request, StreamObserver responseObserver) { + try{ + setupJobData(request.getJob()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updatePriority(job, request.getVal()); + 
responseObserver.onNext(JobSetPriorityResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to find job data") + .asRuntimeException()); + } + } + @Override public void getCurrent(JobGetCurrentRequest request, StreamObserver responseObserver) { try { @@ -412,13 +461,15 @@ public void getCurrent(JobGetCurrentRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - manageQueue.execute( - new DispatchEatFrames( - frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(JobEatFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchEatFrames( + frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), + jobManagerSupport)); + responseObserver.onNext(JobEatFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -431,13 +482,15 @@ public void eatFrames(JobEatFramesRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - manageQueue.execute( - new DispatchKillFrames( - frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(JobKillFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchKillFrames( + frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), + jobManagerSupport)); + responseObserver.onNext(JobKillFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch 
(EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -451,11 +504,13 @@ public void markDoneFrames(JobMarkDoneFramesRequest request, StreamObserver responseObserver) { try{ setupJobData(request.getJob()); - manageQueue.execute( - new DispatchSatisfyDepends( - frameSearchFactory.create(job, request.getReq()), jobManagerSupport)); - responseObserver.onNext(JobMarkDoneFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchSatisfyDepends( + frameSearchFactory.create(job, request.getReq()), jobManagerSupport)); + responseObserver.onNext(JobMarkDoneFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -468,13 +523,15 @@ public void markDoneFrames(JobMarkDoneFramesRequest request, public void retryFrames(JobRetryFramesRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - manageQueue.execute( - new DispatchRetryFrames( - frameSearchFactory.create(job, request.getReq()), - new Source(request.toString()), - jobManagerSupport)); - responseObserver.onNext(JobRetryFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchRetryFrames( + frameSearchFactory.create(job, request.getReq()), + new Source(request.toString()), + jobManagerSupport)); + responseObserver.onNext(JobRetryFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -487,9 +544,11 @@ public void retryFrames(JobRetryFramesRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - jobDao.updateAutoEat(job, request.getValue()); - 
responseObserver.onNext(JobSetAutoEatResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateAutoEat(job, request.getValue()); + responseObserver.onNext(JobSetAutoEatResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -503,13 +562,15 @@ public void createDependencyOnFrame(JobCreateDependencyOnFrameRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - JobOnFrame depend = new JobOnFrame(job, - jobManager.getFrameDetail(request.getFrame().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnFrameResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnFrame depend = new JobOnFrame(job, + jobManager.getFrameDetail(request.getFrame().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnFrameResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -523,13 +584,15 @@ public void createDependencyOnJob(JobCreateDependencyOnJobRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - JobOnJob depend = new JobOnJob(job, - jobManager.getJobDetail(request.getOnJob().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnJobResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnJob depend = new JobOnJob(job, + jobManager.getJobDetail(request.getOnJob().getId())); + 
dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnJobResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -543,13 +606,15 @@ public void createDependencyOnLayer(JobCreateDependencyOnLayerRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - JobOnLayer depend = new JobOnLayer(job, - jobManager.getLayerDetail(request.getLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(JobCreateDependencyOnLayerResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobOnLayer depend = new JobOnLayer(job, + jobManager.getLayerDetail(request.getLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(JobCreateDependencyOnLayerResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -634,9 +699,11 @@ public void getUpdatedFrames(JobGetUpdatedFramesRequest request, StreamObserver< public void setMaxRetries(JobSetMaxRetriesRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - jobDao.updateMaxFrameRetries(job, request.getMaxRetries()); - responseObserver.onNext(JobSetMaxRetriesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateMaxFrameRetries(job, request.getMaxRetries()); + responseObserver.onNext(JobSetMaxRetriesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -686,9 +753,11 @@ public void 
getComments(JobGetCommentsRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - manageQueue.execute(new DispatchDropDepends(job, request.getTarget(), dependManager)); - responseObserver.onNext(JobDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchDropDepends(job, request.getTarget(), dependManager)); + responseObserver.onNext(JobDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -701,9 +770,11 @@ public void dropDepends(JobDropDependsRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - jobDao.updateParent(job, groupManager.getGroupDetail(request.getGroupId())); - responseObserver.onNext(JobSetGroupResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobDao.updateParent(job, groupManager.getGroupDetail(request.getGroupId())); + responseObserver.onNext(JobSetGroupResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -717,10 +788,12 @@ public void markAsWaiting(JobMarkAsWaitingRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - jobManagerSupport.markFramesAsWaiting( - frameSearchFactory.create(job, request.getReq()), new Source(request.toString())); - responseObserver.onNext(JobMarkAsWaitingResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + jobManagerSupport.markFramesAsWaiting( + frameSearchFactory.create(job, request.getReq()), new Source(request.toString())); + 
responseObserver.onNext(JobMarkAsWaitingResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -734,9 +807,27 @@ public void reorderFrames(JobReorderFramesRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - manageQueue.execute(new DispatchReorderFrames(job, - new FrameSet(request.getRange()), request.getOrder(), jobManagerSupport)); - responseObserver.onNext(JobReorderFramesResponse.newBuilder().build()); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute(new DispatchReorderFrames(job, + new FrameSet(request.getRange()), request.getOrder(), jobManagerSupport)); + responseObserver.onNext(JobReorderFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to find job data") + .asRuntimeException()); + } + } + + @Override + public void shutdownIfCompleted(JobShutdownIfCompletedRequest request, + StreamObserver responseObserver) { + try { + setupJobData(request.getJob()); + manageQueue.execute(new DispatchShutdownJobIfCompleted(job, jobManagerSupport)); + responseObserver.onNext(JobShutdownIfCompletedResponse.newBuilder().build()); responseObserver.onCompleted(); } catch (EmptyResultDataAccessException e) { @@ -751,10 +842,12 @@ public void staggerFrames(JobStaggerFramesRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - manageQueue.execute( - new DispatchStaggerFrames(job, request.getRange(), request.getStagger(), jobManagerSupport)); - responseObserver.onNext(JobStaggerFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + manageQueue.execute( + new DispatchStaggerFrames(job, request.getRange(), request.getStagger(), 
jobManagerSupport)); + responseObserver.onNext(JobStaggerFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL @@ -767,30 +860,33 @@ public void staggerFrames(JobStaggerFramesRequest request, public void addRenderPartition(JobAddRenderPartRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setJobId(job.getId()); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpu(request.getMaxGpu()); - lha.setType(RenderPartitionType.JOB_PARTITION); - - if (localBookingSupport.bookLocal(job, request.getHost(), request.getUsername(), lha)) { - try { - RenderPartition renderPart = whiteboard.getRenderPartition(lha); - responseObserver.onNext(JobAddRenderPartResponse.newBuilder() - .setRenderPartition(renderPart) - .build()); - responseObserver.onCompleted(); - } catch (EmptyResultDataAccessException e) { + if (attemptChange(env, property, jobManager, job, responseObserver)) { + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setJobId(job.getId()); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuUnits(request.getMaxGpus()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.JOB_PARTITION); + + if (localBookingSupport.bookLocal(job, request.getHost(), request.getUsername(), lha)) { + try { + RenderPartition renderPart = whiteboard.getRenderPartition(lha); + responseObserver.onNext(JobAddRenderPartResponse.newBuilder() + .setRenderPartition(renderPart) + .build()); + responseObserver.onCompleted(); + } catch (EmptyResultDataAccessException e) { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to allocate render 
partition to host.") + .asRuntimeException()); + } + } else { responseObserver.onError(Status.INTERNAL - .withDescription("Failed to allocate render partition to host.") + .withDescription("Failed to find suitable frames.") .asRuntimeException()); } - } else { - responseObserver.onError(Status.INTERNAL - .withDescription("Failed to find suitable frames.") - .asRuntimeException()); } } catch (EmptyResultDataAccessException e) { @@ -804,10 +900,12 @@ public void addRenderPartition(JobAddRenderPartRequest request, StreamObserver responseObserver) { try { setupJobData(request.getJob()); - JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); - filterManager.runFiltersOnJob(jobDetail); - responseObserver.onNext(JobRunFiltersResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, job, responseObserver)) { + JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); + filterManager.runFiltersOnJob(jobDetail); + responseObserver.onNext(JobRunFiltersResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } catch (EmptyResultDataAccessException e) { responseObserver.onError(Status.INTERNAL diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java index 126639a1b..7f61bc287 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageLayer.java @@ -22,6 +22,8 @@ import com.google.protobuf.Descriptors; import io.grpc.Status; import io.grpc.stub.StreamObserver; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.dao.EmptyResultDataAccessException; import com.imageworks.spcue.LayerDetail; @@ -94,8 +96,12 @@ import com.imageworks.spcue.grpc.job.LayerSetMaxCoresResponse; import com.imageworks.spcue.grpc.job.LayerSetMinCoresRequest; import 
com.imageworks.spcue.grpc.job.LayerSetMinCoresResponse; -import com.imageworks.spcue.grpc.job.LayerSetMinGpuRequest; -import com.imageworks.spcue.grpc.job.LayerSetMinGpuResponse; +import com.imageworks.spcue.grpc.job.LayerSetMaxGpusRequest; +import com.imageworks.spcue.grpc.job.LayerSetMaxGpusResponse; +import com.imageworks.spcue.grpc.job.LayerSetMinGpusRequest; +import com.imageworks.spcue.grpc.job.LayerSetMinGpusResponse; +import com.imageworks.spcue.grpc.job.LayerSetMinGpuMemoryRequest; +import com.imageworks.spcue.grpc.job.LayerSetMinGpuMemoryResponse; import com.imageworks.spcue.grpc.job.LayerSetMinMemoryRequest; import com.imageworks.spcue.grpc.job.LayerSetMinMemoryResponse; import com.imageworks.spcue.grpc.job.LayerSetTagsRequest; @@ -119,6 +125,8 @@ import com.imageworks.spcue.util.Convert; import com.imageworks.spcue.util.FrameSet; +import static com.imageworks.spcue.servant.ServantUtil.attemptChange; + public class ManageLayer extends LayerInterfaceGrpc.LayerInterfaceImplBase { private LayerDetail layer; @@ -131,6 +139,9 @@ public class ManageLayer extends LayerInterfaceGrpc.LayerInterfaceImplBase { private Whiteboard whiteboard; private LocalBookingSupport localBookingSupport; private FrameSearchFactory frameSearchFactory; + private final String property = "layer.finished_jobs_readonly"; + @Autowired + private Environment env; @Override public void findLayer(LayerFindLayerRequest request, StreamObserver responseObserver) { @@ -165,10 +176,12 @@ public void getLayer(LayerGetLayerRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchEatFrames(frameSearch, - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerEatFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchEatFrames(frameSearch, + new Source(request.toString()), 
jobManagerSupport)); + responseObserver.onNext(LayerEatFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override @@ -184,109 +197,142 @@ public void getFrames(LayerGetFramesRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchKillFrames(frameSearch, - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerKillFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchKillFrames(frameSearch, + new Source(request.toString()), jobManagerSupport)); + responseObserver.onNext(LayerKillFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void markdoneFrames(LayerMarkdoneFramesRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchSatisfyDepends(layer, jobManagerSupport)); - responseObserver.onNext(LayerMarkdoneFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchSatisfyDepends(layer, jobManagerSupport)); + responseObserver.onNext(LayerMarkdoneFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void retryFrames(LayerRetryFramesRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchRetryFrames(frameSearch, - new Source(request.toString()), jobManagerSupport)); - responseObserver.onNext(LayerRetryFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchRetryFrames(frameSearch, + new Source(request.toString()), jobManagerSupport)); + 
responseObserver.onNext(LayerRetryFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setTags(LayerSetTagsRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.updateLayerTags(layer, new HashSet<>(request.getTagsList())); - responseObserver.onNext(LayerSetTagsResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerTags(layer, new HashSet<>(request.getTagsList())); + responseObserver.onNext(LayerSetTagsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setMinCores(LayerSetMinCoresRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - jobManager.setLayerMinCores(layer, Convert.coresToCoreUnits(request.getCores())); - responseObserver.onNext(LayerSetMinCoresResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMinCores(layer, Convert.coresToCoreUnits(request.getCores())); + responseObserver.onNext(LayerSetMinCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMinGpus(LayerSetMinGpusRequest request, StreamObserver responseObserver) { + updateLayer(request.getLayer()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMinGpus(layer, request.getMinGpus()); + responseObserver.onNext(LayerSetMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setMinMemory(LayerSetMinMemoryRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.updateLayerMinMemory(layer, request.getMemory()); - responseObserver.onNext(LayerSetMinMemoryResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, 
property, jobManager, layer, responseObserver)) { + layerDao.updateLayerMinMemory(layer, request.getMemory()); + responseObserver.onNext(LayerSetMinMemoryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override - public void setMinGpu(LayerSetMinGpuRequest request, StreamObserver responseObserver) { + public void setMinGpuMemory(LayerSetMinGpuMemoryRequest request, + StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.updateLayerMinGpu(layer, request.getGpu()); - responseObserver.onNext(LayerSetMinGpuResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateLayerMinGpuMemory(layer, request.getGpuMemory()); + responseObserver.onNext(LayerSetMinGpuMemoryResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void createDependencyOnFrame(LayerCreateDependOnFrameRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - LayerOnFrame depend = new LayerOnFrame(layer, jobManager.getFrameDetail(request.getFrame().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnFrameResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnFrame depend = new LayerOnFrame(layer, jobManager.getFrameDetail(request.getFrame().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnFrameResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } @Override public void createDependencyOnJob(LayerCreateDependOnJobRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - LayerOnJob depend = new LayerOnJob(layer, jobManager.getJobDetail(request.getJob().getId())); - 
dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnJobResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnJob depend = new LayerOnJob(layer, jobManager.getJobDetail(request.getJob().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnJobResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } @Override public void createDependencyOnLayer(LayerCreateDependOnLayerRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - LayerOnLayer depend = new LayerOnLayer(layer, jobManager.getLayerDetail(request.getDependOnLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateDependOnLayerResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LayerOnLayer depend = new LayerOnLayer(layer, jobManager.getLayerDetail(request.getDependOnLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateDependOnLayerResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } @Override public void createFrameByFrameDependency(LayerCreateFrameByFrameDependRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - FrameByFrame depend = new FrameByFrame(layer, jobManager.getLayerDetail(request.getDependLayer().getId())); - dependManager.createDepend(depend); - responseObserver.onNext(LayerCreateFrameByFrameDependResponse.newBuilder() - .setDepend(whiteboard.getDepend(depend)) - .build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + 
FrameByFrame depend = new FrameByFrame(layer, jobManager.getLayerDetail(request.getDependLayer().getId())); + dependManager.createDepend(depend); + responseObserver.onNext(LayerCreateFrameByFrameDependResponse.newBuilder() + .setDepend(whiteboard.getDepend(depend)) + .build()); + responseObserver.onCompleted(); + } } @Override @@ -313,93 +359,112 @@ public void getWhatThisDependsOn(LayerGetWhatThisDependsOnRequest request, public void dropDepends(LayerDropDependsRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchDropDepends(layer, request.getTarget(), dependManager)); - responseObserver.onNext(LayerDropDependsResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchDropDepends(layer, request.getTarget(), dependManager)); + responseObserver.onNext(LayerDropDependsResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void dropLimit(LayerDropLimitRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.dropLimit(layer, request.getLimitId()); - responseObserver.onNext(LayerDropLimitResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.dropLimit(layer, request.getLimitId()); + responseObserver.onNext(LayerDropLimitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void reorderFrames(LayerReorderFramesRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchReorderFrames(layer, new FrameSet(request.getRange()), request.getOrder(), - jobManagerSupport)); - responseObserver.onNext(LayerReorderFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, 
responseObserver)) { + manageQueue.execute(new DispatchReorderFrames(layer, new FrameSet(request.getRange()), request.getOrder(), + jobManagerSupport)); + responseObserver.onNext(LayerReorderFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void staggerFrames(LayerStaggerFramesRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - manageQueue.execute(new DispatchStaggerFrames(layer, request.getRange(), request.getStagger(), - jobManagerSupport)); - responseObserver.onNext(LayerStaggerFramesResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + manageQueue.execute(new DispatchStaggerFrames(layer, request.getRange(), request.getStagger(), + jobManagerSupport)); + responseObserver.onNext(LayerStaggerFramesResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setThreadable(LayerSetThreadableRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.updateThreadable(layer, request.getThreadable()); - responseObserver.onNext(LayerSetThreadableResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateThreadable(layer, request.getThreadable()); + responseObserver.onNext(LayerSetThreadableResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setTimeout(LayerSetTimeoutRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.updateTimeout(layer, request.getTimeout()); - responseObserver.onNext(LayerSetTimeoutResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateTimeout(layer, request.getTimeout()); + 
responseObserver.onNext(LayerSetTimeoutResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setTimeoutLLU(LayerSetTimeoutLLURequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.updateTimeoutLLU(layer, request.getTimeoutLlu()); - responseObserver.onNext(LayerSetTimeoutLLUResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.updateTimeoutLLU(layer, request.getTimeoutLlu()); + responseObserver.onNext(LayerSetTimeoutLLUResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void addLimit(LayerAddLimitRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - layerDao.addLimit(layer, request.getLimitId()); - responseObserver.onNext(LayerAddLimitResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + layerDao.addLimit(layer, request.getLimitId()); + responseObserver.onNext(LayerAddLimitResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void addRenderPartition(LayerAddRenderPartitionRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - LocalHostAssignment lha = new LocalHostAssignment(); - lha.setThreads(request.getThreads()); - lha.setMaxCoreUnits(request.getMaxCores() * 100); - lha.setMaxMemory(request.getMaxMemory()); - lha.setMaxGpu(request.getMaxGpu()); - lha.setType(RenderPartitionType.LAYER_PARTITION); - if (localBookingSupport.bookLocal(layer, request.getHost(), request.getUsername(), lha)) { - RenderPartition partition = whiteboard.getRenderPartition(lha); - responseObserver.onNext(LayerAddRenderPartitionResponse.newBuilder() - .setRenderPartition(partition) - .build()); - responseObserver.onCompleted(); - } else { - responseObserver.onError(Status.INTERNAL - 
.withDescription("Failed to find suitable frames.") - .asRuntimeException()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + LocalHostAssignment lha = new LocalHostAssignment(); + lha.setThreads(request.getThreads()); + lha.setMaxCoreUnits(request.getMaxCores() * 100); + lha.setMaxMemory(request.getMaxMemory()); + lha.setMaxGpuUnits(request.getMaxGpus()); + lha.setMaxGpuMemory(request.getMaxGpuMemory()); + lha.setType(RenderPartitionType.LAYER_PARTITION); + if (localBookingSupport.bookLocal(layer, request.getHost(), request.getUsername(), lha)) { + RenderPartition partition = whiteboard.getRenderPartition(lha); + responseObserver.onNext(LayerAddRenderPartitionResponse.newBuilder() + .setRenderPartition(partition) + .build()); + responseObserver.onCompleted(); + } else { + responseObserver.onError(Status.INTERNAL + .withDescription("Failed to find suitable frames.") + .asRuntimeException()); + } } } @@ -408,9 +473,11 @@ public void addRenderPartition(LayerAddRenderPartitionRequest request, public void registerOutputPath(LayerRegisterOutputPathRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - jobManager.registerLayerOutput(layer, request.getSpec()); - responseObserver.onNext(LayerRegisterOutputPathResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.registerLayerOutput(layer, request.getSpec()); + responseObserver.onNext(LayerRegisterOutputPathResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override @@ -436,16 +503,28 @@ public void getOutputPaths(LayerGetOutputPathsRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - jobManager.enableMemoryOptimizer(layer, request.getValue()); - responseObserver.onNext(LayerEnableMemoryOptimizerResponse.newBuilder().build()); - responseObserver.onCompleted(); + if (attemptChange(env, property, jobManager, 
layer, responseObserver)) { + jobManager.enableMemoryOptimizer(layer, request.getValue()); + responseObserver.onNext(LayerEnableMemoryOptimizerResponse.newBuilder().build()); + responseObserver.onCompleted(); + } } @Override public void setMaxCores(LayerSetMaxCoresRequest request, StreamObserver responseObserver) { updateLayer(request.getLayer()); - jobManager.setLayerMaxCores(layer, Convert.coresToWholeCoreUnits(request.getCores())); - responseObserver.onNext(LayerSetMaxCoresResponse.newBuilder().build()); + if (attemptChange(env, property, jobManager, layer, responseObserver)) { + jobManager.setLayerMaxCores(layer, Convert.coresToWholeCoreUnits(request.getCores())); + responseObserver.onNext(LayerSetMaxCoresResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + } + + @Override + public void setMaxGpus(LayerSetMaxGpusRequest request, StreamObserver responseObserver) { + updateLayer(request.getLayer()); + jobManager.setLayerMaxGpus(layer, request.getMaxGpus()); + responseObserver.onNext(LayerSetMaxGpusResponse.newBuilder().build()); responseObserver.onCompleted(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java index 88e53ee95..413f1982c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageRenderPartition.java @@ -45,7 +45,7 @@ public void delete(RenderPartDeleteRequest request, StreamObserver responseObserver) { LocalHostAssignment localJobAssign = getLocalHostAssignment(request.getRenderPartition()); - bookingManager.setMaxResources(localJobAssign, request.getCores(), request.getMemory(), request.getGpu()); + bookingManager.setMaxResources(localJobAssign, request.getCores(), request.getMemory(), request.getGpus(), request.getGpuMemory()); responseObserver.onNext(RenderPartSetMaxResourcesResponse.newBuilder().build()); 
responseObserver.onCompleted(); } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java index eae767006..8cd9029c8 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageService.java @@ -55,11 +55,14 @@ public void createService(ServiceCreateServiceRequest request, service.minCores = request.getData().getMinCores(); service.maxCores = request.getData().getMaxCores(); service.minMemory = request.getData().getMinMemory(); - service.minGpu = request.getData().getMinGpu(); + service.minGpus = request.getData().getMinGpus(); + service.maxGpus = request.getData().getMaxGpus(); + service.minGpuMemory = request.getData().getMinGpuMemory(); service.tags = Sets.newLinkedHashSet(request.getData().getTagsList()); service.threadable = request.getData().getThreadable(); service.timeout = request.getData().getTimeout(); service.timeout_llu = request.getData().getTimeoutLlu(); + service.minMemoryIncrease = request.getData().getMinMemoryIncrease(); serviceManager.createService(service); responseObserver.onNext(ServiceCreateServiceResponse.newBuilder() .setService(whiteboard.getService(service.getId())) @@ -129,11 +132,14 @@ private ServiceEntity toServiceEntity(Service service) { entity.minCores = service.getMinCores(); entity.maxCores = service.getMaxCores(); entity.minMemory = service.getMinMemory(); - entity.minGpu = service.getMinGpu(); + entity.minGpus = service.getMinGpus(); + entity.maxGpus = service.getMaxGpus(); + entity.minGpuMemory = service.getMinGpuMemory(); entity.tags = new LinkedHashSet<> (service.getTagsList()); entity.threadable = service.getThreadable(); entity.timeout = service.getTimeout(); entity.timeout_llu = service.getTimeoutLlu(); + entity.minMemoryIncrease = service.getMinMemoryIncrease(); return entity; } } diff --git 
a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java index bd90575b5..6d2db02fe 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageServiceOverride.java @@ -23,7 +23,7 @@ import io.grpc.stub.StreamObserver; -import com.imageworks.spcue.ServiceEntity; +import com.imageworks.spcue.ServiceOverrideEntity; import com.imageworks.spcue.grpc.service.Service; import com.imageworks.spcue.grpc.service.ServiceOverrideDeleteRequest; import com.imageworks.spcue.grpc.service.ServiceOverrideDeleteResponse; @@ -39,7 +39,8 @@ public class ManageServiceOverride extends ServiceOverrideInterfaceGrpc.ServiceO @Override public void delete(ServiceOverrideDeleteRequest request, StreamObserver responseObserver) { - serviceManager.deleteService(toServiceEntity(request.getService())); + // Passing null on showId as the interface doesn't require a showId in this situation + serviceManager.deleteService(toServiceOverrideEntity(request.getService(), null)); responseObserver.onNext(ServiceOverrideDeleteResponse.newBuilder().build()); responseObserver.onCompleted(); } @@ -47,7 +48,8 @@ public void delete(ServiceOverrideDeleteRequest request, @Override public void update(ServiceOverrideUpdateRequest request, StreamObserver responseObserver) { - serviceManager.updateService(toServiceEntity(request.getService())); + // Passing null on showId as the interface doesn't require a showId in this situation + serviceManager.updateService(toServiceOverrideEntity(request.getService(), null)); responseObserver.onNext(ServiceOverrideUpdateResponse.newBuilder().build()); responseObserver.onCompleted(); } @@ -60,18 +62,22 @@ public void setServiceManager(ServiceManager serviceManager) { this.serviceManager = serviceManager; } - private ServiceEntity toServiceEntity(Service service) { - ServiceEntity entity = new 
ServiceEntity(); + private ServiceOverrideEntity toServiceOverrideEntity(Service service, String showId){ + ServiceOverrideEntity entity = new ServiceOverrideEntity(); entity.id = service.getId(); entity.name = service.getName(); entity.minCores = service.getMinCores(); entity.maxCores = service.getMaxCores(); entity.minMemory = service.getMinMemory(); - entity.minGpu = service.getMinGpu(); + entity.minGpus = service.getMinGpus(); + entity.maxGpus = service.getMaxGpus(); + entity.minGpuMemory = service.getMinGpuMemory(); entity.tags = new LinkedHashSet<>(service.getTagsList()); entity.threadable = service.getThreadable(); + entity.showId = showId; entity.timeout = service.getTimeout(); entity.timeout_llu = service.getTimeoutLlu(); + entity.minMemoryIncrease = service.getMinMemoryIncrease(); return entity; } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java index 6dd94a5ff..0c496b25d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ManageShow.java @@ -93,6 +93,10 @@ import com.imageworks.spcue.grpc.show.ShowSetDefaultMaxCoresResponse; import com.imageworks.spcue.grpc.show.ShowSetDefaultMinCoresRequest; import com.imageworks.spcue.grpc.show.ShowSetDefaultMinCoresResponse; +import com.imageworks.spcue.grpc.show.ShowSetDefaultMaxGpusRequest; +import com.imageworks.spcue.grpc.show.ShowSetDefaultMaxGpusResponse; +import com.imageworks.spcue.grpc.show.ShowSetDefaultMinGpusRequest; +import com.imageworks.spcue.grpc.show.ShowSetDefaultMinGpusResponse; import com.imageworks.spcue.grpc.subscription.Subscription; import com.imageworks.spcue.grpc.subscription.SubscriptionSeq; import com.imageworks.spcue.service.AdminManager; @@ -257,6 +261,24 @@ public void setDefaultMinCores(ShowSetDefaultMinCoresRequest request, responseObserver.onCompleted(); } + @Override + public void 
setDefaultMaxGpus(ShowSetDefaultMaxGpusRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMaxGpus(show, request.getMaxGpus()); + responseObserver.onNext(ShowSetDefaultMaxGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + + @Override + public void setDefaultMinGpus(ShowSetDefaultMinGpusRequest request, + StreamObserver responseObserver) { + ShowEntity show = getShowEntity(request.getShow()); + showDao.updateShowDefaultMinGpus(show, request.getMinGpus()); + responseObserver.onNext(ShowSetDefaultMinGpusResponse.newBuilder().build()); + responseObserver.onCompleted(); + } + @Override public void findFilter(ShowFindFilterRequest request, StreamObserver responseObserver) { @@ -361,9 +383,12 @@ public void createServiceOverride(ShowCreateServiceOverrideRequest request, service.minCores = requestService.getMinCores(); service.maxCores = requestService.getMaxCores(); service.minMemory = requestService.getMinMemory(); - service.minGpu = requestService.getMinGpu(); + service.minGpus = requestService.getMinGpus(); + service.maxGpus = requestService.getMaxGpus(); + service.minGpuMemory = requestService.getMinGpuMemory(); service.tags = Sets.newLinkedHashSet(requestService.getTagsList()); service.threadable = requestService.getThreadable(); + service.minMemoryIncrease = requestService.getMinMemoryIncrease(); serviceManager.createService(service); ServiceOverride serviceOverride = whiteboard.getServiceOverride(show, service.name); responseObserver.onNext(ShowCreateServiceOverrideResponse.newBuilder() diff --git a/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java b/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java index a5146947e..a88c8e87d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servant/ServantUtil.java @@ -21,10 +21,19 @@ import java.util.ArrayList; 
import java.util.List; +import java.util.Objects; +import com.imageworks.spcue.JobDetail; +import com.imageworks.spcue.JobInterface; import com.imageworks.spcue.LayerInterface; +import com.imageworks.spcue.grpc.job.JobState; import com.imageworks.spcue.grpc.job.Layer; import com.imageworks.spcue.grpc.job.LayerSeq; +import com.imageworks.spcue.service.JobManager; + +import io.grpc.Status; +import io.grpc.stub.StreamObserver; +import org.springframework.core.env.Environment; public class ServantUtil { @@ -44,5 +53,24 @@ public static List convertLayerFilterList(LayerSeq layers) { } return result; } + + private static boolean isJobFinished(Environment env, String property, JobManager jobManager, JobInterface job) { + if (env.getProperty(property, String.class) != null && + Objects.equals(env.getProperty(property, String.class), "true")) { + JobDetail jobDetail = jobManager.getJobDetail(job.getJobId()); + return jobDetail.state == JobState.FINISHED; + } + return false; + } + + public static boolean attemptChange(Environment env, String property, JobManager jobManager, JobInterface job, StreamObserver responseObserver) { + if (ServantUtil.isJobFinished(env, property, jobManager, job)) { + responseObserver.onError(Status.FAILED_PRECONDITION + .withDescription("Finished jobs are readonly") + .asRuntimeException()); + return false; + } + return true; + } } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java index dd7b914cd..755b39317 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManager.java @@ -40,6 +40,7 @@ public interface AdminManager { ShowEntity getShowEntity(String id); void setShowActive(ShowInterface show, boolean value); void updateShowCommentEmail(ShowInterface s, String[] emails); + void updateShowsStatus(); /* * Facilities @@ -56,6 +57,8 @@ public interface 
AdminManager { void deleteAllocation(AllocationInterface alloc); void setAllocationName(AllocationInterface a, String name); void setAllocationTag(AllocationInterface a, String tag); + AllocationEntity getDefaultAllocation(); + void setDefaultAllocation(AllocationInterface a); AllocationEntity findAllocationDetail(String facility, String name); AllocationEntity getAllocationDetail(String id); void setAllocationBillable(AllocationInterface alloc, boolean value); diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java index 193de98a5..8f1f66133 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/AdminManagerService.java @@ -19,7 +19,8 @@ package com.imageworks.spcue.service; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @@ -46,7 +47,7 @@ public class AdminManagerService implements AdminManager { @SuppressWarnings("unused") - private static final Logger logger = Logger.getLogger(AdminManagerService.class); + private static final Logger logger = LogManager.getLogger(AdminManagerService.class); private ShowDao showDao; @@ -106,6 +107,17 @@ public void setAllocationTag(AllocationInterface a, String tag) { allocationDao.updateAllocationTag(a, tag); } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly=true) + public AllocationEntity getDefaultAllocation() { + return allocationDao.getDefaultAllocationEntity(); + } + + @Override + public void setDefaultAllocation(AllocationInterface a) { + allocationDao.setDefaultAllocation(a); + } + @Override @Transactional(propagation = Propagation.REQUIRED, readOnly=true) public ShowEntity findShowEntity(String name) { @@ 
-124,6 +136,11 @@ public void updateShowCommentEmail(ShowInterface s, String[] emails) { showDao.updateShowCommentEmail(s, emails); } + @Override + public void updateShowsStatus() { + showDao.updateShowsStatus(); + } + public SubscriptionInterface createSubscription(SubscriptionEntity sub) { subscriptionDao.insertSubscription(sub); return sub; diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java index 51a04bfa3..b5cfb8455 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManager.java @@ -122,9 +122,10 @@ public void createLocalHostAssignment(DispatchHost host, * @param l * @param maxCoreUnits * @param maxMemory - * @param maxGpu + * @param maxGpuUnits + * @param maxGpuMemory */ - void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, long maxGpu); + void setMaxResources(LocalHostAssignment l, int maxCoreUnits, long maxMemory, int maxGpuUnits, long maxGpuMemory); /** * Remove a LocalHostAssignment if there are no procs assigned to it. @@ -133,13 +134,6 @@ public void createLocalHostAssignment(DispatchHost host, */ void removeInactiveLocalHostAssignment(LocalHostAssignment lha); - /** - * - * @param host - * @return - */ - boolean isBlackOutTime(HostInterface host); - /** * Return true if the host is running more cores than the maximum allowed. 
* diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java index 91abdab4b..1322b622d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/BookingManagerService.java @@ -21,7 +21,8 @@ import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.EmptyResultDataAccessException; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @@ -49,7 +50,7 @@ public class BookingManagerService implements BookingManager { @SuppressWarnings("unused") private static final Logger logger = - Logger.getLogger(BookingManagerService.class); + LogManager.getLogger(BookingManagerService.class); private BookingQueue bookingQueue; private BookingDao bookingDao; @@ -72,7 +73,7 @@ public boolean hasActiveLocalFrames(HostInterface host) { @Override public void setMaxResources(LocalHostAssignment l, int maxCoreUnits, - long maxMemory, long maxGpu) { + long maxMemory, int maxGpuUnits, long maxGpuMemory) { HostInterface host = hostDao.getHost(l.getHostId()); @@ -84,8 +85,12 @@ public void setMaxResources(LocalHostAssignment l, int maxCoreUnits, bookingDao.updateMaxMemory(l, maxMemory); } - if (maxGpu > 0) { - bookingDao.updateMaxGpu(l, maxGpu); + if (maxGpuUnits > 0) { + bookingDao.updateMaxGpus(l, maxGpuUnits); + } + + if (maxGpuMemory > 0) { + bookingDao.updateMaxGpuMemory(l, maxGpuMemory); } } @@ -172,12 +177,6 @@ public void createLocalHostAssignment(DispatchHost host, FrameInterface frame, bookingDao.insertLocalHostAssignment(host, frame, lja); } - @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isBlackOutTime(HostInterface host) { - return 
bookingDao.isBlackoutTime(host); - } - @Override @Transactional(propagation = Propagation.REQUIRED, readOnly=true) public boolean hasResourceDeficit(HostInterface host) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java index 10533c542..faee9dff9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManager.java @@ -23,6 +23,8 @@ import com.imageworks.spcue.HostInterface; import com.imageworks.spcue.JobInterface; +import java.util.List; + public interface CommentManager { /** @@ -47,6 +49,26 @@ public interface CommentManager { */ public void deleteComment(String id); + /** + * Deletes comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return boolean: returns true if one or more comments where deleted + */ + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject); + + /** + * Get comments using host, user, and subject + * + * @param host + * @param user + * @param subject + * @return List + */ + public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject); + /** * * @param id diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java index cc9a016ef..b6d4430ec 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/CommentManagerService.java @@ -28,6 +28,8 @@ import com.imageworks.spcue.ShowEntity; import com.imageworks.spcue.dao.CommentDao; +import java.util.List; + @Transactional public class CommentManagerService implements CommentManager { @@ -55,6 +57,16 @@ public void deleteComment(String id) { commentDao.deleteComment(id); } + 
@Transactional(propagation = Propagation.REQUIRED) + public boolean deleteCommentByHostUserAndSubject(HostInterface host, String user, String subject) { + return commentDao.deleteCommentByHostUserAndSubject(host, user, subject); + } + + @Transactional(propagation = Propagation.REQUIRED) + public List getCommentsByHostUserAndSubject(HostInterface host, String user, String subject) { + return commentDao.getCommentsByHostUserAndSubject(host, user, subject); + } + @Transactional(propagation = Propagation.REQUIRED) public void setCommentSubject(String id, String subject) { commentDao.updateCommentSubject(id, subject); diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java index 0d09ca44c..87d1ea158 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DepartmentManagerService.java @@ -37,12 +37,10 @@ import com.imageworks.spcue.ShowInterface; import com.imageworks.spcue.TaskEntity; import com.imageworks.spcue.TaskInterface; -import com.imageworks.spcue.TrackitTaskDetail; import com.imageworks.spcue.dao.JobDao; import com.imageworks.spcue.dao.PointDao; import com.imageworks.spcue.dao.ShowDao; import com.imageworks.spcue.dao.TaskDao; -import com.imageworks.spcue.dao.TrackitDao; import com.imageworks.spcue.util.CueUtil; @@ -56,7 +54,6 @@ public class DepartmentManagerService implements DepartmentManager { private TaskDao taskDao; private ShowDao showDao; private JobDao jobDao; - private TrackitDao trackitDao; @Override public void createDepartmentConfig(PointDetail renderPoint) { @@ -176,61 +173,6 @@ public List getManagedPointConfs() { @Override @Transactional(propagation = Propagation.NOT_SUPPORTED) public void updateManagedTasks(PointInterface pd) { - if (env.getRequiredProperty("cue.trackit.enabled", Boolean.class)) { - - ShowInterface show = 
showDao.getShowDetail(pd.getShowId()); - PointDetail p = pointDao.getPointConfDetail(pd.getPointId()); - pointDao.updatePointConfUpdateTime(p); - - /* - * First calculate raw point ratios, which will be used to calculate - * the normalized proc point values - */ - float totalRawPoints = 0f; - float rawPoints = 0f; - - List tasks = trackitDao.getTasks(show.getName(), p.tiTask); - HashMap rawCache = new HashMap(tasks.size()); - - for (TrackitTaskDetail task : tasks) { - if (!IN_PROGRESS_TASK_STATUS.contains(task.status)) { - continue; - } - rawPoints = ((task.frameCount / 10f) / task.weeks); - rawCache.put(task.shot, rawPoints); - totalRawPoints = totalRawPoints + rawPoints; - } - - /* - * Now create TaskDetail objects which will be merged into - * the current data set. Tasks with a 0 minCores value will - * be deleted. - */ - float normalizedRawPoints = 0f; - if (totalRawPoints != 0) { - normalizedRawPoints = p.cores / totalRawPoints; - } - - for (TrackitTaskDetail task : tasks) { - - TaskEntity td = new TaskEntity(); - td.pointId = p.getPointId(); - td.deptId = p.getDepartmentId(); - td.showId = p.getShowId(); - td.shot = task.shot; - - if (!IN_PROGRESS_TASK_STATUS.contains(task.status)) { - td.minCoreUnits = 0; - } else { - td.minCoreUnits = (int) ((rawCache.get(task.shot) * normalizedRawPoints) + 0.5f); - if (td.minCoreUnits < CueUtil.ONE_CORE) { - td.minCoreUnits = CueUtil.ONE_CORE; - } - } - taskDao.mergeTask(td); - syncJobsWithTask(td); - } - } } @Override @@ -323,14 +265,6 @@ public void setTaskDao(TaskDao taskDao) { this.taskDao = taskDao; } - public TrackitDao getTrackitDao() { - return trackitDao; - } - - public void setTrackitDao(TrackitDao trackitDao) { - this.trackitDao = trackitDao; - } - public ShowDao getShowDao() { return showDao; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java index 187e81331..2a82c099d 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/DependManagerService.java @@ -23,7 +23,8 @@ import java.util.List; import java.util.Set; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.DataIntegrityViolationException; import org.springframework.dao.DataRetrievalFailureException; import org.springframework.transaction.annotation.Propagation; @@ -63,7 +64,7 @@ @Transactional public class DependManagerService implements DependManager { - private static final Logger logger = Logger.getLogger(DependManagerService.class); + private static final Logger logger = LogManager.getLogger(DependManagerService.class); private DependDao dependDao; private JobDao jobDao; diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java index ef9c2d32a..b25e7d520 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/EmailSupport.java @@ -33,7 +33,8 @@ import java.util.Map; import java.util.Properties; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.apache.velocity.Template; import org.apache.velocity.VelocityContext; import org.apache.velocity.app.VelocityEngine; @@ -65,7 +66,7 @@ public class EmailSupport { private final Map imageMap; - private static final Logger logger = Logger.getLogger(EmailSupport.class); + private static final Logger logger = LogManager.getLogger(EmailSupport.class); public EmailSupport() { diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java index f27541884..eb49a9c51 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/FilterManagerService.java @@ -23,7 +23,8 @@ import java.util.List; import java.util.regex.Pattern; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @@ -62,7 +63,7 @@ @Transactional public class FilterManagerService implements FilterManager { - private static final Logger logger = Logger.getLogger(FilterManagerService.class); + private static final Logger logger = LogManager.getLogger(FilterManagerService.class); private ActionDao actionDao; private MatcherDao matcherDao; @@ -396,10 +397,14 @@ public boolean applyAction(ActionEntity action, JobDetail job, Context context) layerDao.updateMinMemory(job, (int) action.intValue, LayerType.RENDER); break; - case SET_ALL_RENDER_LAYER_CORES: + case SET_ALL_RENDER_LAYER_MIN_CORES: layerDao.updateMinCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); break; + case SET_ALL_RENDER_LAYER_MAX_CORES: + layerDao.updateMaxCores(job, Convert.coresToCoreUnits(action.floatValue), LayerType.RENDER); + break; + case SET_MEMORY_OPTIMIZER: List layers = layerDao.getLayers(job); for (LayerInterface layer : layers) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java index 9017304f8..2e3cf70be 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManager.java @@ -34,6 +34,10 @@ public interface GroupManager { void setGroupMinCores(GroupInterface g, int coreUnits); void setGroupDefaultJobMinCores(GroupInterface g, int coreUnits); void setGroupDefaultJobMaxCores(GroupInterface g, int coreUnits); + void 
setGroupMaxGpus(GroupInterface g, int gpuUnits); + void setGroupMinGpus(GroupInterface g, int gpuUnits); + void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits); + void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits); void setGroupDefaultJobPriority(GroupInterface g, int priority); /** diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java index 7e785c0ea..89fc25193 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/GroupManagerService.java @@ -78,6 +78,32 @@ public void setGroupMinCores(GroupInterface g, int coreUnits) { groupDao.updateMinCores(g,coreUnits); } + @Override + public void setGroupDefaultJobMaxGpus(GroupInterface g, int gpuUnits) { + groupDao.updateDefaultJobMaxGpus(g,gpuUnits); + if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMaxGpus(g, gpuUnits); + } + } + + @Override + public void setGroupDefaultJobMinGpus(GroupInterface g, int gpuUnits) { + groupDao.updateDefaultJobMinGpus(g,gpuUnits); + if (gpuUnits != CueUtil.FEATURE_DISABLED && !groupDao.isManaged(g)) { + jobDao.updateMinGpus(g, gpuUnits); + } + } + + @Override + public void setGroupMaxGpus(GroupInterface g, int gpuUnits) { + groupDao.updateMaxGpus(g, gpuUnits); + } + + @Override + public void setGroupMinGpus(GroupInterface g, int gpuUnits) { + groupDao.updateMinGpus(g, gpuUnits); + } + @Override public void setGroupParent(GroupInterface group, GroupInterface newParent) { groupDao.updateGroupParent(group, newParent); diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java index 0eb6d1213..74c256729 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java +++ 
b/cuebot/src/main/java/com/imageworks/spcue/service/HistoricalSupport.java @@ -21,12 +21,13 @@ import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.imageworks.spcue.JobInterface; public class HistoricalSupport { - private static final Logger logger = Logger.getLogger(HistoricalSupport.class); + private static final Logger logger = LogManager.getLogger(HistoricalSupport.class); private HistoricalManager historicalManager; diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java index 19704e65e..e62d8647b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HostManager.java @@ -63,13 +63,12 @@ public interface HostManager { void setHostState(HostInterface host, HardwareState state); /** - * Return true if the host is swapping hard enough - * that killing frames will save the entire machine. + * Updates the free temporary directory (mcp) of a host. 
* - * @param host - * @return + * @param host HostInterface + * @param freeTempDir Long */ - boolean isSwapping(HostInterface host); + void setHostFreeTempDir(HostInterface host, Long freeTempDir); DispatchHost createHost(HostReport report); DispatchHost createHost(RenderHost host); @@ -112,8 +111,8 @@ public interface HostManager { * @param freeSwap * @param totalMcp * @param freeMcp - * @param totalGpu - * @param freeGpu + * @param totalGpuMemory + * @param freeGpuMemory * @param load * @param bootTime * @param os @@ -122,7 +121,7 @@ void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, long totalSwap, long freeSwap, long totalMcp, long freeMcp, - long totalGpu, long freeGpu, + long totalGpuMemory, long freeGpuMemory, int load, Timestamp bootTime, String os); @@ -212,6 +211,11 @@ void setHostStatistics(HostInterface host, */ int getStrandedCoreUnits(HostInterface h); + /** + * Return the number of stranded cores on the host. + */ + int getStrandedGpuUnits(HostInterface h); + /** * Return true of the host prefers a particular show. 
* diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java index ee081ecc7..36de34a1c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/HostManagerService.java @@ -22,7 +22,9 @@ import java.sql.Timestamp; import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.dao.EmptyResultDataAccessException; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @@ -56,7 +58,7 @@ @Transactional public class HostManagerService implements HostManager { - private static final Logger logger = Logger.getLogger(HostManagerService.class); + private static final Logger logger = LogManager.getLogger(HostManagerService.class); private HostDao hostDao; private RqdClient rqdClient; @@ -92,9 +94,8 @@ public void setHostState(HostInterface host, HardwareState state) { } @Override - @Transactional(propagation = Propagation.REQUIRED, readOnly=true) - public boolean isSwapping(HostInterface host) { - return hostDao.isKillMode(host); + public void setHostFreeTempDir(HostInterface host, Long freeTempDir) { + hostDao.updateHostFreeTempDir(host, freeTempDir); } public void rebootWhenIdle(HostInterface host) { @@ -123,7 +124,7 @@ public void setHostStatistics(HostInterface host, long totalMemory, long freeMemory, long totalSwap, long freeSwap, long totalMcp, long freeMcp, - long totalGpu, long freeGpu, + long totalGpuMemory, long freeGpuMemory, int load, Timestamp bootTime, String os) { @@ -131,7 +132,7 @@ public void setHostStatistics(HostInterface host, totalMemory, freeMemory, totalSwap, freeSwap, totalMcp, freeMcp, - totalGpu, freeGpu, + totalGpuMemory, freeGpuMemory, load, bootTime, os); } @@ -152,7 +153,28 @@ 
public DispatchHost createHost(HostReport report) { @Transactional(propagation = Propagation.REQUIRED) public DispatchHost createHost(RenderHost rhost) { - return createHost(rhost, getDefaultAllocationDetail()); + // Find suitable allocation with facility and tags. + AllocationEntity alloc = null; + if (rhost.getTagsCount() > 0) { + String facility = rhost.getFacility(); + for (String tag : rhost.getTagsList()) { + try { + alloc = allocationDao.findAllocationEntity(facility, tag); + logger.info("set " + rhost.getName() + + " to the given allocation " + alloc.getName()); + break; + } + catch (EmptyResultDataAccessException e) { + // Allocation doesn't exist. ignore. + } + } + } + if (alloc == null) { + alloc = getDefaultAllocationDetail(); + logger.info("set " + rhost.getName() + + " to the default allocation " + alloc.getName()); + } + return createHost(rhost, alloc); } @Transactional(propagation = Propagation.REQUIRED) @@ -246,6 +268,12 @@ public int getStrandedCoreUnits(HostInterface h) { return hostDao.getStrandedCoreUnits(h); } + @Override + @Transactional(propagation = Propagation.REQUIRED, readOnly=true) + public int getStrandedGpuUnits(HostInterface h) { + return hostDao.getStrandedGpus(h); + } + @Override @Transactional(propagation = Propagation.REQUIRED, readOnly=true) public boolean verifyRunningProc(String procId, String frameId) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java b/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java index bc5be1779..ce231331b 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JmsMover.java @@ -29,7 +29,8 @@ import com.google.gson.Gson; import com.google.gson.GsonBuilder; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.core.env.Environment; 
import org.springframework.jms.JmsException; @@ -39,7 +40,7 @@ import com.imageworks.spcue.util.CueExceptionUtil; public class JmsMover extends ThreadPoolExecutor { - private static final Logger logger = Logger.getLogger(JmsMover.class); + private static final Logger logger = LogManager.getLogger(JmsMover.class); private final Gson gson = new GsonBuilder().serializeNulls().create(); @Autowired diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java index e84bebb16..f46616115 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobLauncher.java @@ -23,7 +23,8 @@ import java.util.HashSet; import java.util.Set; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.beans.BeansException; import org.springframework.context.ApplicationContext; import org.springframework.context.ApplicationContextAware; @@ -43,7 +44,7 @@ * Job launching functions. 
*/ public class JobLauncher implements ApplicationContextAware { - private static final Logger logger = Logger.getLogger(JobLauncher.class); + private static final Logger logger = LogManager.getLogger(JobLauncher.class); private ApplicationContext context; private JobManager jobManager; @@ -109,7 +110,8 @@ public void launch(final JobSpec spec) { lha.setThreads(d.localThreadNumber); lha.setMaxCoreUnits(d.localMaxCores * 100); lha.setMaxMemory(d.localMaxMemory); - lha.setMaxGpu(d.localMaxGpu); + lha.setMaxGpuUnits(d.localMaxGpus); + lha.setMaxGpuMemory(d.localMaxGpuMemory); lha.setType(RenderPartitionType.JOB_PARTITION); try { diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java index 4ab1b2120..dfb4873b9 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManager.java @@ -453,6 +453,22 @@ public interface JobManager { */ void setLayerMinCores(LayerInterface layer, int coreUnits); + /** + * Update the max gpu value for the given layer. + * + * @param layer + * @param gpuUnits + */ + void setLayerMaxGpus(LayerInterface layer, int gpuUnits); + + /** + * Update the min gpu value for the given layer. + * + * @param layer + * @param gpuUnits + */ + void setLayerMinGpus(LayerInterface layer, int gpuUnits); + /** * Add a limit to the given layer. 
* diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java index 68821ed64..c1ca1bdfc 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerService.java @@ -22,7 +22,8 @@ import java.util.List; import com.google.common.collect.Sets; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.DataAccessException; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @@ -65,11 +66,12 @@ import com.imageworks.spcue.util.CueUtil; import com.imageworks.spcue.util.FrameSet; import com.imageworks.spcue.util.JobLogUtil; +import com.imageworks.spcue.util.Convert; @Transactional public class JobManagerService implements JobManager { - private static final Logger logger = Logger.getLogger(JobManagerService.class); + private static final Logger logger = LogManager.getLogger(JobManagerService.class); private JobDao jobDao; private ShowDao showDao; @@ -173,6 +175,13 @@ public void launchJobSpec(JobSpec spec) { for (BuildableJob job: spec.getJobs()) { JobDetail d = createJob(job); + if (job.maxCoresOverride != null) { + jobDao.updateMaxCores(d, + Convert.coresToWholeCoreUnits(job.maxCoresOverride.intValue())); + } + if (job.maxGpusOverride != null) { + jobDao.updateMaxGpus(d, job.maxGpusOverride.intValue()); + } if (job.getPostJob() != null) { BuildableJob postJob = job.getPostJob(); postJob.env.put("CUE_PARENT_JOB_ID", d.id); @@ -450,6 +459,16 @@ public void setLayerMaxCores(LayerInterface layer, int coreUnits) { layerDao.updateLayerMaxCores(layer, coreUnits); } + @Override + public void setLayerMinGpus(LayerInterface layer, int gpu) { + layerDao.updateLayerMinGpus(layer, gpu); + } + + @Override + public void 
setLayerMaxGpus(LayerInterface layer, int gpu) { + layerDao.updateLayerMaxGpus(layer, gpu); + } + @Override public void addLayerLimit(LayerInterface layer, String limitId) { layerDao.addLimit(layer, limitId); diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java index e6c03c221..b2db74d59 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobManagerSupport.java @@ -22,7 +22,8 @@ import java.util.Collection; import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.DataAccessException; import org.springframework.dao.EmptyResultDataAccessException; @@ -55,7 +56,7 @@ * A non-transaction support class for managing jobs. */ public class JobManagerSupport { - private static final Logger logger = Logger.getLogger(JobManagerSupport.class); + private static final Logger logger = LogManager.getLogger(JobManagerSupport.class); private JobManager jobManager; private DependManager dependManager; @@ -212,6 +213,10 @@ public void satisfyWhatDependsOn(FrameSearchInterface request) { } } + public boolean isJobComplete(JobInterface job) { + return jobManager.isJobComplete(job); + } + /* * Destructive functions require a extra Source argument which contains * information about the user making the call. 
This information is diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java b/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java index 1e498eaf4..269a9f4af 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/JobSpec.java @@ -33,7 +33,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.jdom.Document; import org.jdom.Element; import org.jdom.input.SAXBuilder; @@ -57,7 +58,7 @@ import com.imageworks.spcue.util.CueUtil; public class JobSpec { - private static final Logger logger = Logger.getLogger(JobSpec.class); + private static final Logger logger = LogManager.getLogger(JobSpec.class); private String facility; @@ -286,11 +287,17 @@ private BuildableJob handleJobTag(Element jobTag) { if (local.getAttributeValue("cores") != null) job.localMaxCores = Integer.parseInt(local.getAttributeValue("cores")); if (local.getAttributeValue("memory") != null) - job.localMaxMemory = Integer.parseInt(local.getAttributeValue("memory")); + job.localMaxMemory = Long.parseLong(local.getAttributeValue("memory")); if (local.getAttributeValue("threads") != null) job.localThreadNumber = Integer.parseInt(local.getAttributeValue("threads")); - if (local.getAttributeValue("gpu") != null) - job.localMaxGpu = Integer.parseInt(local.getAttributeValue("gpu")); + if (local.getAttributeValue("gpus") != null) + job.localMaxGpus = Integer.parseInt(local.getAttributeValue("gpus")); + if (local.getAttributeValue("gpu") != null) { + logger.warn(job.name + " localbook has the deprecated gpu. 
Use gpu_memory."); + job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu")); + } + if (local.getAttributeValue("gpu_memory") != null) + job.localMaxGpuMemory = Long.parseLong(local.getAttributeValue("gpu_memory")); } job.maxCoreUnits = 20000; @@ -322,10 +329,25 @@ private BuildableJob handleJobTag(Element jobTag) { } } + if (jobTag.getChildTextTrim("maxcores") != null) { + buildableJob.maxCoresOverride = Integer.valueOf(jobTag + .getChildTextTrim("maxcores")); + } + if (jobTag.getChildTextTrim("maxgpus") != null) { + buildableJob.maxGpusOverride = Integer.valueOf(jobTag + .getChildTextTrim("maxgpus")); + } + if (jobTag.getChildTextTrim("priority") != null) { job.priority = Integer.valueOf(jobTag.getChildTextTrim("priority")); } + + Element envTag = jobTag.getChild("env"); + if (envTag != null) { + handleEnvironmentTag(envTag, buildableJob.env); + } + handleLayerTags(buildableJob, jobTag); if (buildableJob.getBuildableLayers().size() > MAX_LAYERS) { @@ -338,11 +360,6 @@ private BuildableJob handleJobTag(Element jobTag) { + " has no layers"); } - Element envTag = jobTag.getChild("env"); - if (envTag != null) { - handleEnvironmentTag(envTag, buildableJob.env); - } - return buildableJob; } @@ -423,11 +440,12 @@ private void handleLayerTags(BuildableJob buildableJob, Element jobTag) { determineResourceDefaults(layerTag, buildableJob, layer); determineChunkSize(layerTag, layer); determineMinimumCores(layerTag, layer); + determineMinimumGpus(layerTag, layer); determineThreadable(layerTag, layer); determineTags(buildableJob, layer, layerTag); determineMinimumMemory(buildableJob, layerTag, layer, buildableLayer); - determineMinimumGpu(buildableJob, layerTag, layer); + determineMinimumGpuMemory(buildableJob, layerTag, layer); // set a timeout value on the layer if (layerTag.getChildTextTrim("timeout") != null) { @@ -521,44 +539,53 @@ else if (minMemory < Dispatcher.MEM_RESERVED_MIN) { } /** - * If the gpu option is set, set minimumGpu to that supplied value + 
* If the gpu_memory option is set, set minimumGpuMemory to that supplied value * * @param layerTag * @param layer */ - private void determineMinimumGpu(BuildableJob buildableJob, Element layerTag, + private void determineMinimumGpuMemory(BuildableJob buildableJob, Element layerTag, LayerDetail layer) { - if (layerTag.getChildTextTrim("gpu") == null) { + String gpu = layerTag.getChildTextTrim("gpu"); + String gpuMemory = layerTag.getChildTextTrim("gpu_memory"); + if (gpu == null && gpuMemory == null) { return; } - long minGpu; - String memory = layerTag.getChildTextTrim("gpu").toLowerCase(); + String memory = null; + if (gpu != null) { + logger.warn(buildableJob.detail.name + "/" + layer.name + + " has the deprecated gpu. Use gpu_memory."); + memory = gpu.toLowerCase(); + } + if (gpuMemory != null) + memory = gpuMemory.toLowerCase(); + long minGpuMemory; try { - minGpu = convertMemoryInput(memory); + minGpuMemory = convertMemoryInput(memory); // Some quick sanity checks to make sure gpu memory hasn't gone // over or under reasonable defaults. - if (minGpu> Dispatcher.GPU_RESERVED_MAX) { + if (minGpuMemory > Dispatcher.MEM_GPU_RESERVED_MAX) { throw new SpecBuilderException("Gpu memory requirements exceed " + "maximum. Are you specifying the correct units?"); } - else if (minGpu < Dispatcher.GPU_RESERVED_MIN) { + else if (minGpuMemory < Dispatcher.MEM_GPU_RESERVED_MIN) { logger.warn(buildableJob.detail.name + "/" + layer.name + "Specified too little gpu memory, defaulting to: " + - Dispatcher.GPU_RESERVED_MIN); - minGpu = Dispatcher.GPU_RESERVED_MIN; + Dispatcher.MEM_GPU_RESERVED_MIN); + minGpuMemory = Dispatcher.MEM_GPU_RESERVED_MIN; } - layer.minimumGpu = minGpu; + layer.minimumGpuMemory = minGpuMemory; } catch (Exception e) { logger.info("Error setting gpu memory for " + buildableJob.detail.name + "/" + layer.name + " failed, reason: " + e + ". 
Using default."); - layer.minimumGpu = Dispatcher.GPU_RESERVED_DEFAULT; + layer.minimumGpuMemory = Dispatcher.MEM_GPU_RESERVED_DEFAULT; } } @@ -590,14 +617,27 @@ private void determineMinimumCores(Element layerTag, LayerDetail layer) { corePoints = Integer.valueOf(cores); } - if (corePoints < Dispatcher.CORE_POINTS_RESERVED_MIN - || corePoints > Dispatcher.CORE_POINTS_RESERVED_MAX) { + if (corePoints < Dispatcher.CORE_POINTS_RESERVED_MIN) { corePoints = Dispatcher.CORE_POINTS_RESERVED_DEFAULT; } layer.minimumCores = corePoints; } + /** + * Gpu is a int. + * + * If no gpu value is specified, we default to the value of + * Dispatcher.GPU_RESERVED_DEFAULT + */ + private void determineMinimumGpus(Element layerTag, LayerDetail layer) { + + String gpus = layerTag.getChildTextTrim("gpus"); + if (gpus != null) { + layer.minimumGpus = Integer.valueOf(gpus); + } + } + private void determineChunkSize(Element layerTag, LayerDetail layer) { layer.chunkSize = Integer.parseInt(layerTag.getChildTextTrim("chunk")); } @@ -702,7 +742,9 @@ private void determineResourceDefaults(Element layerTag, layer.maximumCores = primaryService.maxCores; layer.minimumCores = primaryService.minCores; layer.minimumMemory = primaryService.minMemory; - layer.minimumGpu = primaryService.minGpu; + layer.maximumGpus = primaryService.maxGpus; + layer.minimumGpus = primaryService.minGpus; + layer.minimumGpuMemory = primaryService.minGpuMemory; layer.tags.addAll(primaryService.tags); layer.services.addAll(services); layer.limits.addAll(limits); @@ -918,6 +960,11 @@ private BuildableJob initPostJob(BuildableJob parent) { job.deptName = parent.detail.deptName; BuildableJob postJob = new BuildableJob(job); + + for (String key : parent.env.keySet()) { + postJob.env.put(key, parent.env.get(key)); + } + return postJob; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java index a242382bb..7bf7db136 
100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/LocalBookingSupport.java @@ -19,7 +19,8 @@ package com.imageworks.spcue.service; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.FrameInterface; @@ -37,7 +38,7 @@ */ public class LocalBookingSupport { - private static final Logger logger = Logger.getLogger(LocalBookingSupport.class); + private static final Logger logger = LogManager.getLogger(LocalBookingSupport.class); private HostManager hostManager; private LocalDispatcher localDispatcher; diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java b/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java index a1b1ecadf..84049bd61 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/MaintenanceManagerSupport.java @@ -21,7 +21,10 @@ import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.core.env.Environment; import org.springframework.jdbc.CannotGetJdbcConnectionException; import com.imageworks.spcue.FrameInterface; @@ -29,6 +32,7 @@ import com.imageworks.spcue.PointDetail; import com.imageworks.spcue.VirtualProc; import com.imageworks.spcue.dao.FrameDao; +import com.imageworks.spcue.dao.HostDao; import com.imageworks.spcue.dao.MaintenanceDao; import com.imageworks.spcue.dao.ProcDao; import com.imageworks.spcue.dispatcher.DispatchSupport; @@ -39,7 +43,10 @@ public class MaintenanceManagerSupport { - private static final Logger logger = Logger.getLogger(MaintenanceManagerSupport.class); + private static 
final Logger logger = LogManager.getLogger(MaintenanceManagerSupport.class); + + @Autowired + private Environment env; private MaintenanceDao maintenanceDao; @@ -47,6 +54,8 @@ public class MaintenanceManagerSupport { private FrameDao frameDao; + private HostDao hostDao; + private JobManager jobManager; private DispatchSupport dispatchSupport; @@ -90,6 +99,12 @@ public void checkHardwareState() { int hosts = maintenanceDao.setUpHostsToDown(); if (hosts > 0) { clearDownProcs(); + + boolean autoDeleteDownHosts = env.getProperty( + "maintenance.auto_delete_down_hosts", Boolean.class, false); + if (autoDeleteDownHosts) { + hostDao.deleteDownHosts(); + } } clearOrphanedProcs(); } finally { @@ -193,6 +208,10 @@ public void setFrameDao(FrameDao frameDao) { this.frameDao = frameDao; } + public void setHostDao(HostDao hostDao) { + this.hostDao = hostDao; + } + public DispatchSupport getDispatchSupport() { return dispatchSupport; } diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java index ca57a52e7..d639405ee 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManager.java @@ -81,13 +81,6 @@ public interface OwnerManager { */ DeedEntity takeOwnership(OwnerEntity owner, HostInterface host); - /** - * - * @param deed - * @param value - */ - void setBlackoutTimeEnabled(DeedEntity deed, boolean value); - /** * * @param id @@ -95,14 +88,6 @@ public interface OwnerManager { */ DeedEntity getDeed(String id); - /** - * - * @param deed - * @param startSeconds - * @param stopSeconds - */ - void setBlackoutTime(DeedEntity deed, int startSeconds, int stopSeconds); - /** * Deletes a deed for the specified host. 
* diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java index 30273968f..d3fb63ee6 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/OwnerManagerService.java @@ -70,16 +70,6 @@ public DeedEntity getDeed(String id) { return deedDao.getDeed(id); } - @Override - public void setBlackoutTime(DeedEntity deed, int startSeconds, int stopSeconds) { - deedDao.setBlackoutTime(deed, startSeconds, stopSeconds); - } - - @Override - public void setBlackoutTimeEnabled(DeedEntity deed, boolean value) { - deedDao.updateBlackoutTimeEnabled(deed, value); - } - @Override public DeedEntity takeOwnership(OwnerEntity owner, HostInterface host) { if (!hostDao.isNimbyHost(host)) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java b/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java index 23f164c53..51bf4211d 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/RedirectService.java @@ -20,7 +20,8 @@ import javax.annotation.Resource; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.dao.CannotSerializeTransactionException; import org.springframework.dao.DuplicateKeyException; import org.springframework.transaction.PlatformTransactionManager; @@ -37,7 +38,7 @@ public class RedirectService { private static final Logger logger = - Logger.getLogger(RedirectService.class); + LogManager.getLogger(RedirectService.class); @Resource private PlatformTransactionManager txManager; diff --git a/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java b/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java index 694a58643..5fe08e1f6 
100644 --- a/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java +++ b/cuebot/src/main/java/com/imageworks/spcue/service/WhiteboardService.java @@ -21,7 +21,8 @@ import java.util.List; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.transaction.annotation.Propagation; import org.springframework.transaction.annotation.Transactional; @@ -111,7 +112,7 @@ public class WhiteboardService implements Whiteboard { @SuppressWarnings("unused") - private static final Logger logger = Logger.getLogger(WhiteboardService.class); + private static final Logger logger = LogManager.getLogger(WhiteboardService.class); private WhiteboardDao whiteboardDao; diff --git a/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java b/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java index 284b6739a..76040b7bd 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java +++ b/cuebot/src/main/java/com/imageworks/spcue/servlet/JobLaunchServlet.java @@ -25,7 +25,8 @@ import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; import org.springframework.web.servlet.FrameworkServlet; import com.imageworks.spcue.BuildableJob; @@ -39,7 +40,7 @@ @SuppressWarnings("serial") public class JobLaunchServlet extends FrameworkServlet { - private static final Logger logger = Logger.getLogger(JobLaunchServlet.class); + private static final Logger logger = LogManager.getLogger(JobLaunchServlet.class); private JobLauncher jobLauncher; diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java index e947815a2..3879914b1 100644 --- 
a/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/CueExceptionUtil.java @@ -23,7 +23,8 @@ import java.io.StringWriter; import java.io.Writer; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; /** * Utility class for handling and logging exceptions @@ -52,7 +53,7 @@ public static String getStackTrace(Throwable aThrowable) { * @return String */ public static void logStackTrace(String msg, Throwable aThrowable) { - Logger error_logger = Logger.getLogger(CueExceptionUtil.class); + Logger error_logger = LogManager.getLogger(CueExceptionUtil.class); error_logger.info("Caught unexpected exception caused by: " + aThrowable); error_logger.info("StackTrace: \n" + getStackTrace(aThrowable)); if (aThrowable.getCause() != null) { diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java index a7d89e7ee..88b325483 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/CueUtil.java @@ -35,6 +35,7 @@ import java.util.regex.Pattern; import javax.activation.DataHandler; import javax.activation.DataSource; +import javax.annotation.PostConstruct; import javax.mail.BodyPart; import javax.mail.Message; import javax.mail.Session; @@ -45,18 +46,27 @@ import javax.mail.internet.MimeMultipart; import javax.mail.util.ByteArrayDataSource; -import org.apache.log4j.Logger; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.LogManager; +import org.springframework.core.env.Environment; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Component; import com.imageworks.spcue.LayerInterface; import com.imageworks.spcue.SpcueRuntimeException; import com.imageworks.spcue.dispatcher.Dispatcher; + /** * CueUtil is set of common methods used 
throughout the application. */ +@Component public final class CueUtil { - private static final Logger logger = Logger.getLogger(CueUtil.class); + private static final Logger logger = LogManager.getLogger(CueUtil.class); + private static String smtpHost = ""; + @Autowired + private Environment env; /** * Commonly used macros for gigabyte values in KB. @@ -87,6 +97,11 @@ public final class CueUtil { */ public static final int ONE_HOUR = 3600; + @PostConstruct + public void init() { + CueUtil.smtpHost = this.env.getRequiredProperty("smtp_host", String.class); + } + /** * Return true if the given name is formatted as a valid * allocation name. Allocation names should be facility.unique_name. @@ -156,7 +171,7 @@ public static int findChunk(List dependOnFrames, int dependErFrame) { public static void sendmail(String to, String from, String subject, StringBuilder body, Map images) { try { Properties props = System.getProperties(); - props.put("mail.smtp.host", "smtp"); + props.put("mail.smtp.host", CueUtil.smtpHost); Session session = Session.getDefaultInstance(props, null); Message msg = new MimeMessage(session); msg.setFrom(new InternetAddress(from)); @@ -188,6 +203,8 @@ public static void sendmail(String to, String from, String subject, StringBuilde msg.setContent(mimeMultipart); msg.setHeader("X-Mailer", "OpenCueMailer"); msg.setSentDate(new Date()); + Transport transport = session.getTransport("smtp"); + transport.connect(CueUtil.smtpHost, null, null); Transport.send(msg); } catch (Exception e) { @@ -207,11 +224,11 @@ public static final String KbToMb(long kb) { return String.format("%dMB", kb / 1024); } - public static final long convertKbToFakeKb64bit(int Kb) { + public static final long convertKbToFakeKb64bit(long Kb) { return (long) (Math.ceil((Kb * 0.0009765625) * 0.0009765625) * 1048576) - Dispatcher.MEM_RESERVED_SYSTEM; } - public static final long convertKbToFakeKb32bit(int Kb) { + public static final long convertKbToFakeKb32bit(long Kb) { return (long) 
(Math.floor((Kb * 0.0009765625) * 0.0009765625) * 1048576) - Dispatcher.MEM_RESERVED_SYSTEM; } @@ -235,10 +252,10 @@ public final static String buildFrameName(LayerInterface layer, int num) { return String.format("%04d-%s", num, layer.getName()); } - public final static String buildProcName(String host, int cores) { - return String.format(Locale.ROOT, "%s/%4.2f", host, Convert.coreUnitsToCores(cores)); - + public final static String buildProcName(String host, int cores, int gpus) { + return String.format(Locale.ROOT, "%s/%4.2f/%d", host, Convert.coreUnitsToCores(cores), gpus); } + /** * for logging how long an operation took * @@ -351,5 +368,17 @@ public static List normalizeFrameRange(FrameSet frameSet, int chunkSize return Collections.unmodifiableList( new ArrayList(result)); } -} + /** + * Get "{prefix}.{key}" property int value + * + * @param env + * @param prefix Example "dispatcher.report_queue" + * @param key Example "core_pool_size" + */ + public static int getIntProperty(Environment env, String prefix, String key) + throws IllegalStateException { + Integer value = env.getRequiredProperty(prefix + "." + key, Integer.class); + return value.intValue(); + } +} diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java b/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java index 58713d49d..2e601f72c 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/FrameRange.java @@ -62,7 +62,7 @@ public class FrameRange { * set of frames unique from the first set. This process is repeated until interleaveSize * reaches 1. * - * Example: 1-10:5 == 1, 6, 2, 4, 8, 10, 3, 5, 7, 9. + * Example: 1-10:5 == 1, 6, 3, 5 ,7 ,9, 2, 4, 8, 10. 
*/ public FrameRange(String frameRange) { frameList = parseFrameRange(frameRange); @@ -166,10 +166,9 @@ private static ImmutableList getSteppedRange( private static ImmutableList getInterleavedRange(Integer start, Integer end, Integer step) { validateStepSign(start, end, step); Set interleavedFrames = new LinkedHashSet<>(); - int incrValue = step / abs(step); + while (abs(step) > 0) { interleavedFrames.addAll(getIntRange(start, end, step)); - start += incrValue; step /= 2; } return ImmutableList.copyOf(interleavedFrames); diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java index e7cfe1dff..c223ebbc0 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/JobLogUtil.java @@ -38,9 +38,9 @@ public boolean createJobLogDirectory(String path) { return f.isDirectory(); } - public String getJobLogDir(String show, String shot) { + public String getJobLogDir(String show, String shot, String os) { StringBuilder sb = new StringBuilder(512); - sb.append(getJobLogRootDir()); + sb.append(getJobLogRootDir(os)); sb.append("/"); sb.append(show); sb.append("/"); @@ -51,7 +51,7 @@ public String getJobLogDir(String show, String shot) { public String getJobLogPath(JobDetail job) { StringBuilder sb = new StringBuilder(512); - sb.append(getJobLogDir(job.showName, job.shot)); + sb.append(getJobLogDir(job.showName, job.shot, job.os)); sb.append("/"); sb.append(job.name); sb.append("--"); @@ -59,8 +59,11 @@ public String getJobLogPath(JobDetail job) { return sb.toString(); } - public String getJobLogRootDir() { - return env.getRequiredProperty("log.frame-log-root", String.class); + public String getJobLogRootDir(String os) { + try { + return env.getRequiredProperty(String.format("log.frame-log-root.%s", os), String.class); + } catch (IllegalStateException e) { + return env.getRequiredProperty("log.frame-log-root.default_os", 
String.class); + } } -} - +} \ No newline at end of file diff --git a/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java b/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java index a7233777e..ddfbabc61 100644 --- a/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java +++ b/cuebot/src/main/java/com/imageworks/spcue/util/SqlUtil.java @@ -19,6 +19,8 @@ package com.imageworks.spcue.util; +import com.google.protobuf.ByteString; + import java.sql.ResultSet; import java.sql.SQLException; import java.util.UUID; @@ -132,5 +134,14 @@ public static String getString(ResultSet rs, int index) throws SQLException { return value; } } + + public static ByteString getByteString(ResultSet rs, String field) throws SQLException { + byte[] data = rs.getBytes(field); + if (rs.wasNull()) { + return ByteString.copyFrom("".getBytes()); + } else { + return ByteString.copyFrom(data); + } + } } diff --git a/cuebot/src/main/resources/conf/ddl/oracle/demo_data.sql b/cuebot/src/main/resources/conf/ddl/oracle/demo_data.sql deleted file mode 100644 index fc97a477f..000000000 --- a/cuebot/src/main/resources/conf/ddl/oracle/demo_data.sql +++ /dev/null @@ -1,83 +0,0 @@ -Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000000','testing',2000000,100,0,0,0,0,1,1,1) - --- SPLIT HERE! -Insert into SHOW_ALIAS (PK_SHOW_ALIAS,PK_SHOW,STR_NAME) values ('00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000','test') - --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0','Lighting',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1','Animation',0) --- SPLIT HERE! 
-Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2','Hair',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA3','Cloth',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA4','Layout',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA5','FX',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA6','Pipeline',0) - --- SPLIT HERE! -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1','local',1) --- SPLIT HERE! -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0','cloud',0) - --- SPLIT HERE! -Insert into FOLDER (PK_FOLDER,PK_PARENT_FOLDER,PK_SHOW,STR_NAME,B_DEFAULT,PK_DEPT,INT_JOB_MIN_CORES,INT_JOB_MAX_CORES,INT_JOB_PRIORITY,F_ORDER,B_EXCLUDE_MANAGED) values ('A0000000-0000-0000-0000-000000000000',null,'00000000-0000-0000-0000-000000000000','testing',1,'AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA6',-1,-1,-1,1,0) - --- SPLIT HERE! -Insert into POINT (PK_POINT,PK_DEPT,PK_SHOW,STR_TI_TASK,INT_CORES,B_MANAGED,INT_MIN_CORES,FLOAT_TIER) values ('FFEEDDCC-AAAA-AAAA-AAAA-AAAAAAAAAAA0','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA6','00000000-0000-0000-0000-000000000000',null,0,0,0,0) - --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000000','local.general',0,0,'general','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000001','local.desktop',0,0,'desktop','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1',0,1) --- SPLIT HERE! 
-Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000002','local.unassigned',0,1,'unassigned','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000003','gcp.cloud',1,0,'cloud','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000004','gcp.unassigned',1,0,'unassigned','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0',0,1) - --- SPLIT HERE! -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000','00000000-0000-0000-0000-000000000000',100000,100000,0,0) --- SPLIT HERE! -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000002','00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000',100000,100000,0,0) --- SPLIT HERE! -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000003','00000000-0000-0000-0000-000000000003','00000000-0000-0000-0000-000000000000',100000,100000,0,0) - --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0','default',0,100,3355443,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1','prman',0,100,3355443,'general | desktop') --- SPLIT HERE! 
-Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2','arnold',1,100,3355443,'general | desktop | cloud') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA3','shell',0,100,3355443,'general | util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA4','maya',0,100,2097152,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA5','houdini',0,100,3355443,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA7','katana',1,100,2097152,'general | desktop | util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8','shake',0,100,2097152,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA9','nuke',0,100,2097152,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA11','preprocess',0,10,393216,'util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA12','postprocess',0,10,524288,'util') - --- SPLIT HERE! -Insert into CONFIG (PK_CONFIG,STR_KEY,INT_VALUE,LONG_VALUE,STR_VALUE,B_VALUE) values ('00000000-0000-0000-0000-000000000005','MAX_FRAME_RETRIES',16,0,null,0) - --- SPLIT HERE! 
-Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000002','LOCK_HARDWARE_STATE_CHECK',0,30) --- SPLIT HERE! -Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000001','LOCK_HISTORICAL_TRANSFER',0,3600) --- SPLIT HERE! -Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000003','LOCK_ORPHANED_PROC_CHECK',0,30) --- SPLIT HERE! -Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000005','LOCK_TASK_UPDATE',1240618998852,3600) diff --git a/cuebot/src/main/resources/conf/ddl/oracle/migrations/.keep b/cuebot/src/main/resources/conf/ddl/oracle/migrations/.keep deleted file mode 100644 index e69de29bb..000000000 diff --git a/cuebot/src/main/resources/conf/ddl/oracle/migrations/V2__Add_limit_table.sql b/cuebot/src/main/resources/conf/ddl/oracle/migrations/V2__Add_limit_table.sql deleted file mode 100644 index ee0e6d5ad..000000000 --- a/cuebot/src/main/resources/conf/ddl/oracle/migrations/V2__Add_limit_table.sql +++ /dev/null @@ -1,19 +0,0 @@ - -CREATE TABLE "LIMIT_RECORD" ( - "PK_LIMIT_RECORD" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(255 BYTE) NOT NULL, - "INT_MAX_VALUE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_HOST_LIMIT" NUMBER(1,0) DEFAULT 0 NOT NULL -); - -CREATE TABLE "LAYER_LIMIT" ( - "PK_LAYER_LIMIT" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_LIMIT_RECORD" VARCHAR2(36 BYTE) NOT NULL -); - -CREATE TRIGGER "BEFORE_DELETE_LAYER_DROP_LIMIT" BEFORE DELETE ON layer -FOR EACH ROW -BEGIN - DELETE FROM layer_limit WHERE pk_layer=:old.pk_layer; -END; diff --git a/cuebot/src/main/resources/conf/ddl/oracle/schema.sql b/cuebot/src/main/resources/conf/ddl/oracle/schema.sql deleted file mode 100644 index dde36df9e..000000000 --- a/cuebot/src/main/resources/conf/ddl/oracle/schema.sql +++ /dev/null @@ -1,2737 +0,0 @@ -CREATE 
TABLE "HISTORY_PERIOD_BAK" - ( "PK" VARCHAR2(32 BYTE), - "DT_BEGIN" DATE NOT NULL, - "DT_END" DATE NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "FRAME_HISTORY" - ( "PK_FRAME_HISTORY" RAW(16) DEFAULT sys_guid() NOT NULL, - "PK_FRAME" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(256 BYTE) NOT NULL, - "STR_STATE" VARCHAR2(24 BYTE) NOT NULL, - "INT_MEM_RESERVED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_MAX_USED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORES" NUMBER(16,0) DEFAULT 100 NOT NULL, - "STR_HOST" VARCHAR2(64 BYTE) DEFAULT NULL, - "INT_EXIT_STATUS" NUMBER(8,0) DEFAULT -1 NOT NULL, - "PK_ALLOC" VARCHAR2(36 BYTE), - "INT_TS_STARTED" NUMBER(12,0) NOT NULL, - "INT_TS_STOPPED" NUMBER(12,0) DEFAULT 0 NOT NULL, - "INT_CHECKPOINT_COUNT" NUMBER(6,0) DEFAULT 0 NOT NULL, - "DT_LAST_MODIFIED" DATE NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "HISTORY_PERIOD" - ( "PK" VARCHAR2(32 BYTE) DEFAULT sys_guid(), - "DT_BEGIN" DATE DEFAULT to_date('01-JAN-2000','DD-MON-YYYY') NOT NULL, - "DT_END" DATE DEFAULT sysdate NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "DUPLICATE_CURSORS" - ( "DT_RECORDED" DATE, - "INST_ID" NUMBER, - "LNG_COUNT" NUMBER - ) --- SPLIT HERE! -CREATE TABLE "UNCOMMITTED_TRANSACTIONS_BAK" - ( "INST_ID" NUMBER, - "SID" NUMBER, - "SERIAL#" NUMBER, - "USERNAME" VARCHAR2(30 BYTE), - "MACHINE" VARCHAR2(64 BYTE), - "MODULE" VARCHAR2(48 BYTE), - "SERVICE_NAME" VARCHAR2(64 BYTE), - "DURATION" NUMBER, - "DT_RECORDED" DATE - ) --- SPLIT HERE! -CREATE TABLE "UNCOMMITTED_TRANSACTIONS" - ( "INST_ID" NUMBER, - "SID" NUMBER, - "SERIAL#" NUMBER, - "USERNAME" VARCHAR2(30 BYTE), - "MACHINE" VARCHAR2(64 BYTE), - "MODULE" VARCHAR2(48 BYTE), - "SERVICE_NAME" VARCHAR2(64 BYTE), - "DURATION" NUMBER, - "DT_RECORDED" DATE DEFAULT sysdate - ) --- SPLIT HERE! -CREATE TABLE "TEST" - ( "COL1" VARCHAR2(32 BYTE) - ) --- SPLIT HERE! 
-CREATE TABLE "LAYER_OUTPUT" - ( "PK_LAYER_OUTPUT" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "STR_FILESPEC" VARCHAR2(2048 BYTE) NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "SHOW_SERVICE" - ( "PK_SHOW_SERVICE" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "B_THREADABLE" NUMBER(1,0) NOT NULL, - "INT_CORES_MIN" NUMBER(8,0) NOT NULL, - "INT_MEM_MIN" NUMBER(16,0) NOT NULL, - "STR_TAGS" VARCHAR2(128 BYTE) NOT NULL, - "INT_CORES_MAX" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_GPU_MIN" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "SQLN_EXPLAIN_PLAN" - ( "STATEMENT_ID" VARCHAR2(30 BYTE), - "TIMESTAMP" DATE, - "REMARKS" VARCHAR2(80 BYTE), - "OPERATION" VARCHAR2(30 BYTE), - "OPTIONS" VARCHAR2(30 BYTE), - "OBJECT_NODE" VARCHAR2(128 BYTE), - "OBJECT_OWNER" VARCHAR2(30 BYTE), - "OBJECT_NAME" VARCHAR2(30 BYTE), - "OBJECT_INSTANCE" NUMBER(*,0), - "OBJECT_TYPE" VARCHAR2(30 BYTE), - "OPTIMIZER" VARCHAR2(255 BYTE), - "SEARCH_COLUMNS" NUMBER(*,0), - "ID" NUMBER(*,0), - "PARENT_ID" NUMBER(*,0), - "POSITION" NUMBER(*,0), - "COST" NUMBER(*,0), - "CARDINALITY" NUMBER(*,0), - "BYTES" NUMBER(*,0), - "OTHER_TAG" VARCHAR2(255 BYTE), - "PARTITION_START" VARCHAR2(255 BYTE), - "PARTITION_STOP" VARCHAR2(255 BYTE), - "PARTITION_ID" NUMBER(*,0), - "OTHER" LONG, - "DISTRIBUTION" VARCHAR2(30 BYTE) - ) --- SPLIT HERE! -CREATE TABLE "DEED" - ( "PK_DEED" VARCHAR2(36 BYTE) NOT NULL, - "PK_OWNER" VARCHAR2(36 BYTE) NOT NULL, - "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "B_BLACKOUT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "INT_BLACKOUT_START" NUMBER(12,0), - "INT_BLACKOUT_STOP" NUMBER(12,0) - ) --- SPLIT HERE! 
-CREATE TABLE "OWNER" - ( "PK_OWNER" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_USERNAME" VARCHAR2(64 BYTE) NOT NULL, - "TS_CREATED" TIMESTAMP (6) WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL, - "TS_UPDATED" TIMESTAMP (6) WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "HOST_LOCAL" - ( "PK_HOST_LOCAL" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE), - "PK_FRAME" VARCHAR2(36 BYTE), - "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "TS_CREATED" TIMESTAMP (6) WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL, - "TS_UPDATED" TIMESTAMP (6) WITH TIME ZONE, - "INT_MEM_MAX" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MEM_IDLE" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CORES_MAX" NUMBER(16,0) DEFAULT 100 NOT NULL, - "INT_CORES_IDLE" NUMBER(16,0) DEFAULT 100 NOT NULL, - "INT_THREADS" NUMBER(4,0) DEFAULT 1 NOT NULL, - "FLOAT_TIER" NUMBER(16,2) DEFAULT 0 NOT NULL, - "B_ACTIVE" NUMBER(1,0) DEFAULT 1 NOT NULL, - "STR_TYPE" VARCHAR2(36 BYTE) NOT NULL, - "INT_GPU_IDLE" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_GPU_MAX" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "SERVICE" - ( "PK_SERVICE" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "B_THREADABLE" NUMBER(1,0) NOT NULL, - "INT_CORES_MIN" NUMBER(8,0) NOT NULL, - "INT_MEM_MIN" NUMBER(16,0) NOT NULL, - "STR_TAGS" VARCHAR2(128 BYTE) NOT NULL, - "INT_CORES_MAX" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_GPU_MIN" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "JOB_LOCAL" - ( "PK_JOB_LOCAL" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "STR_SOURCE" VARCHAR2(255 BYTE) NOT NULL, - "TS_CREATED" TIMESTAMP (6) WITH TIME ZONE DEFAULT SYSTIMESTAMP NOT NULL, - "INT_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_CORES" NUMBER(16,0) NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "TASK" - ( "PK_TASK" VARCHAR2(36 BYTE) NOT NULL, - "PK_POINT" VARCHAR2(36 BYTE) NOT NULL, - "STR_SHOT" VARCHAR2(36 BYTE) NOT NULL, - "INT_MIN_CORES" NUMBER(16,0) DEFAULT 100 NOT NULL, - "INT_ADJUST_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "POINT" - ( "PK_POINT" VARCHAR2(36 BYTE) NOT NULL, - "PK_DEPT" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_TI_TASK" VARCHAR2(36 BYTE), - "INT_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "B_MANAGED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "INT_MIN_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "FLOAT_TIER" NUMBER(16,2) DEFAULT 0 NOT NULL, - "TS_UPDATED" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "LAYER_MEM" - ( "PK_LAYER_MEM" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "INT_MAX_RSS" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_VSS" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "JOB_MEM" - ( "PK_JOB_MEM" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_MAX_RSS" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_VSS" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "FOLDER_RESOURCE" - ( "PK_FOLDER_RESOURCE" VARCHAR2(36 BYTE) NOT NULL, - "PK_FOLDER" VARCHAR2(36 BYTE) NOT NULL, - "INT_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_CORES" NUMBER(16,0) DEFAULT -1 NOT NULL, - "INT_MIN_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "FLOAT_TIER" NUMBER(16,2) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "SHOW_ALIAS" - ( "PK_SHOW_ALIAS" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(16 BYTE) NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "DEPT" - ( "PK_DEPT" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "B_DEFAULT" NUMBER(1,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "FACILITY" - ( "PK_FACILITY" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "B_DEFAULT" NUMBER(1,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "JOB_POST" - ( "PK_JOB_POST" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "PK_POST_JOB" VARCHAR2(36 BYTE) NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "LAYER_HISTORY" - ( "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(512 BYTE) NOT NULL, - "STR_TYPE" VARCHAR2(16 BYTE) NOT NULL, - "INT_CORES_MIN" NUMBER(38,0) DEFAULT 100 NOT NULL, - "INT_MEM_MIN" NUMBER(38,0) DEFAULT 4194304 NOT NULL, - "INT_CORE_TIME_SUCCESS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORE_TIME_FAIL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_FRAME_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_LAYER_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_WAITING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEAD_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEPEND_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_EATEN_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_SUCCEEDED_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_RUNNING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MAX_RSS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_ARCHIVED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "DT_LAST_MODIFIED" DATE NOT NULL, - "STR_SERVICES" VARCHAR2(128) - ) --- SPLIT HERE! 
-CREATE TABLE "JOB_HISTORY" - ( "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(512 BYTE) NOT NULL, - "STR_SHOT" VARCHAR2(64 BYTE) NOT NULL, - "STR_USER" VARCHAR2(36 BYTE) NOT NULL, - "INT_CORE_TIME_SUCCESS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORE_TIME_FAIL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_FRAME_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_LAYER_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_WAITING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEAD_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEPEND_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_EATEN_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_SUCCEEDED_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_RUNNING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MAX_RSS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_ARCHIVED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "PK_FACILITY" VARCHAR2(36 BYTE) NOT NULL, - "PK_DEPT" VARCHAR2(36 BYTE) NOT NULL, - "INT_TS_STARTED" NUMBER(12,0) NOT NULL, - "INT_TS_STOPPED" NUMBER(12,0) DEFAULT 0 NOT NULL, - "DT_LAST_MODIFIED" DATE NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "TASK_LOCK" - ( "PK_TASK_LOCK" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "INT_LOCK" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_TIMEOUT" NUMBER(38,0) DEFAULT 30 NOT NULL, - "TS_LASTRUN" TIMESTAMP (6) DEFAULT systimestamp NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "MATTHEW_STATS_TAB" - ( "STATID" VARCHAR2(30 BYTE), - "TYPE" CHAR(1 BYTE), - "VERSION" NUMBER, - "FLAGS" NUMBER, - "C1" VARCHAR2(30 BYTE), - "C2" VARCHAR2(30 BYTE), - "C3" VARCHAR2(30 BYTE), - "C4" VARCHAR2(30 BYTE), - "C5" VARCHAR2(30 BYTE), - "N1" NUMBER, - "N2" NUMBER, - "N3" NUMBER, - "N4" NUMBER, - "N5" NUMBER, - "N6" NUMBER, - "N7" NUMBER, - "N8" NUMBER, - "N9" NUMBER, - "N10" NUMBER, - "N11" NUMBER, - "N12" NUMBER, - "D1" DATE, - "R1" RAW(32), - "R2" RAW(32), - "CH1" VARCHAR2(1000 BYTE) - ) --- SPLIT HERE! 
-CREATE TABLE "HOST_TAG" - ( "PK_HOST_TAG" VARCHAR2(36 BYTE) NOT NULL, - "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "STR_TAG" VARCHAR2(36 BYTE) NOT NULL, - "STR_TAG_TYPE" VARCHAR2(24 BYTE) DEFAULT 'Hardware' NOT NULL, - "B_CONSTANT" NUMBER(1,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "JOB_USAGE" - ( "PK_JOB_USAGE" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_CORE_TIME_SUCCESS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORE_TIME_FAIL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_FRAME_SUCCESS_COUNT" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_FRAME_FAIL_COUNT" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_FAIL" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_HIGH" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_SUCCESS" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "JOB_RESOURCE" - ( "PK_JOB_RESOURCE" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_CORES" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MAX_RSS" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_VSS" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MIN_CORES" NUMBER(16,0) DEFAULT 100 NOT NULL, - "INT_MAX_CORES" NUMBER(16,0) DEFAULT 10000 NOT NULL, - "FLOAT_TIER" NUMBER(16,2) DEFAULT 0 NOT NULL, - "INT_PRIORITY" NUMBER(16,0) DEFAULT 1 NOT NULL, - "INT_LOCAL_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "JOB_STAT" - ( "PK_JOB_STAT" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_WAITING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_RUNNING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEAD_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEPEND_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_EATEN_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_SUCCEEDED_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CHECKPOINT_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "SUBSCRIPTION" - ( "PK_SUBSCRIPTION" VARCHAR2(36 BYTE) NOT NULL, - "PK_ALLOC" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "INT_SIZE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_BURST" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "FLOAT_TIER" NUMBER(16,2) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "SHOW" - ( "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "B_PAUSED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "INT_DEFAULT_MIN_CORES" NUMBER(16,0) DEFAULT 100 NOT NULL, - "INT_DEFAULT_MAX_CORES" NUMBER(16,0) DEFAULT 10000 NOT NULL, - "INT_FRAME_INSERT_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_JOB_INSERT_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_FRAME_SUCCESS_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_FRAME_FAIL_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_BOOKING_ENABLED" NUMBER(1,0) DEFAULT 1 NOT NULL, - "B_DISPATCH_ENABLED" NUMBER(1,0) DEFAULT 1 NOT NULL, - "B_ACTIVE" NUMBER(1,0) DEFAULT 1 NOT NULL, - "STR_COMMENT_EMAIL" VARCHAR2(1024 BYTE) - ) --- SPLIT HERE! 
-CREATE TABLE "PROC" - ( "PK_PROC" VARCHAR2(36 BYTE) NOT NULL, - "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE), - "PK_SHOW" VARCHAR2(36 BYTE), - "PK_LAYER" VARCHAR2(36 BYTE), - "PK_FRAME" VARCHAR2(36 BYTE), - "INT_CORES_RESERVED" NUMBER(38,0) NOT NULL, - "INT_MEM_RESERVED" NUMBER(38,0) NOT NULL, - "INT_MEM_USED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_MAX_USED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_UNBOOKED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "INT_MEM_PRE_RESERVED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_VIRT_USED" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_VIRT_MAX_USED" NUMBER(16,0) DEFAULT 0 NOT NULL, - "STR_REDIRECT" VARCHAR2(265 BYTE), - "B_LOCAL" NUMBER(1,0) DEFAULT 0 NOT NULL, - "TS_PING" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "TS_BOOKED" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "TS_DISPATCHED" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "INT_GPU_RESERVED" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "MATCHER" - ( "PK_MATCHER" VARCHAR2(36 BYTE) NOT NULL, - "PK_FILTER" VARCHAR2(36 BYTE) NOT NULL, - "STR_SUBJECT" VARCHAR2(64 BYTE) NOT NULL, - "STR_MATCH" VARCHAR2(64 BYTE) NOT NULL, - "STR_VALUE" VARCHAR2(4000 BYTE) NOT NULL, - "TS_CREATED" TIMESTAMP (6) DEFAULT systimestamp NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "LAYER_USAGE" - ( "PK_LAYER_USAGE" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_CORE_TIME_SUCCESS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORE_TIME_FAIL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_FRAME_SUCCESS_COUNT" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_FRAME_FAIL_COUNT" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_FAIL" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_HIGH" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_LOW" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_CLOCK_TIME_SUCCESS" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "LAYER_STAT" - ( "PK_LAYER_STAT" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_TOTAL_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_WAITING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_RUNNING_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEAD_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DEPEND_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_EATEN_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_SUCCEEDED_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CHECKPOINT_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "LAYER_RESOURCE" - ( "PK_LAYER_RESOURCE" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "INT_CORES" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MAX_RSS" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_VSS" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "LAYER_ENV" - ( "PK_LAYER_ENV" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE), - "PK_JOB" VARCHAR2(36 BYTE), - "STR_KEY" VARCHAR2(36 BYTE), - "STR_VALUE" VARCHAR2(2048 BYTE) - ) --- SPLIT HERE! -CREATE TABLE "LAYER" - ( "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(256 BYTE) NOT NULL, - "STR_CMD" VARCHAR2(4000 BYTE) NOT NULL, - "STR_RANGE" VARCHAR2(4000 BYTE) NOT NULL, - "INT_CHUNK_SIZE" NUMBER(38,0) DEFAULT 1 NOT NULL, - "INT_DISPATCH_ORDER" NUMBER(38,0) DEFAULT 1 NOT NULL, - "INT_CORES_MIN" NUMBER(38,0) DEFAULT 100 NOT NULL, - "INT_MEM_MIN" NUMBER(38,0) DEFAULT 4194304 NOT NULL, - "STR_TAGS" VARCHAR2(4000 BYTE) DEFAULT '' NOT NULL, - "STR_TYPE" VARCHAR2(16 BYTE) NOT NULL, - "B_THREADABLE" NUMBER(1,0) DEFAULT 1 NOT NULL, - "STR_SERVICES" VARCHAR2(128 BYTE) DEFAULT 'default' NOT NULL, - "B_OPTIMIZE" NUMBER(1,0) DEFAULT 1 NOT NULL, - "INT_CORES_MAX" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_GPU_MIN" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "JOB_ENV" - ( "PK_JOB_ENV" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE), - "STR_KEY" VARCHAR2(36 BYTE), - "STR_VALUE" VARCHAR2(2048 BYTE) - ) --- SPLIT HERE! -CREATE TABLE "JOB" - ( "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "PK_FOLDER" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(255 BYTE) NOT NULL, - "STR_VISIBLE_NAME" VARCHAR2(255 BYTE), - "STR_SHOT" VARCHAR2(64 BYTE) NOT NULL, - "STR_USER" VARCHAR2(32 BYTE) NOT NULL, - "STR_STATE" VARCHAR2(16 BYTE) NOT NULL, - "STR_LOG_DIR" VARCHAR2(4000 BYTE) DEFAULT '' NOT NULL, - "INT_UID" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_PAUSED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "B_AUTOEAT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "INT_FRAME_COUNT" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_LAYER_COUNT" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_RETRIES" NUMBER(4,0) DEFAULT 3 NOT NULL, - "B_AUTO_BOOK" NUMBER(1,0) DEFAULT 1 NOT NULL, - "B_AUTO_UNBOOK" NUMBER(1,0) DEFAULT 1 NOT NULL, - "B_COMMENT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "STR_EMAIL" VARCHAR2(256 BYTE), - "PK_FACILITY" VARCHAR2(36 BYTE) NOT NULL, - "PK_DEPT" VARCHAR2(36 BYTE) NOT NULL, - "TS_STARTED" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "TS_STOPPED" TIMESTAMP (6) WITH TIME ZONE, - "INT_MIN_CORES" NUMBER(16,0) DEFAULT 100 NOT NULL, - "INT_MAX_CORES" NUMBER(16,0) DEFAULT 20000 NOT NULL, - "STR_SHOW" VARCHAR2(32 BYTE) DEFAULT 'none' NOT NULL, - "TS_UPDATED" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "STR_OS" VARCHAR2(12 BYTE) DEFAULT 'rhel40' NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "HOST_STAT" - ( "PK_HOST_STAT" VARCHAR2(36 BYTE) NOT NULL, - "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "INT_MEM_TOTAL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_FREE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_SWAP_TOTAL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_SWAP_FREE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MCP_TOTAL" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MCP_FREE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_LOAD" NUMBER(38,0) DEFAULT 0 NOT NULL, - "TS_PING" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "TS_BOOTED" TIMESTAMP (6) WITH TIME ZONE DEFAULT systimestamp NOT NULL, - "STR_STATE" VARCHAR2(32 BYTE) DEFAULT 'Up' NOT NULL, - "STR_OS" VARCHAR2(12 BYTE) DEFAULT 'rhel40' NOT NULL, - "INT_GPU_TOTAL" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_GPU_FREE" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "HOST" - ( "PK_HOST" VARCHAR2(36 BYTE) NOT NULL, - "PK_ALLOC" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(30 BYTE) NOT NULL, - "STR_LOCK_STATE" VARCHAR2(36 BYTE) NOT NULL, - "B_NIMBY" NUMBER(1,0) DEFAULT 0 NOT NULL, - "TS_CREATED" TIMESTAMP (6) DEFAULT systimestamp NOT NULL, - "INT_CORES" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_PROCS" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_CORES_IDLE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_IDLE" NUMBER(38,0) DEFAULT 0 NOT NULL, - "B_UNLOCK_BOOT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "B_UNLOCK_IDLE" NUMBER(1,0) DEFAULT 0 NOT NULL, - "B_REBOOT_IDLE" NUMBER(1,0) DEFAULT 0 NOT NULL, - "STR_TAGS" VARCHAR2(128 BYTE), - "STR_FQDN" VARCHAR2(128 BYTE), - "B_COMMENT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "INT_THREAD_MODE" NUMBER(1,0) DEFAULT 0 NOT NULL, - "STR_LOCK_SOURCE" VARCHAR2(128 BYTE), - "INT_GPU" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_GPU_IDLE" NUMBER(10,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "FRAME" - ( "PK_FRAME" VARCHAR2(36 BYTE) NOT NULL, - "PK_LAYER" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(256 BYTE) NOT NULL, - "STR_STATE" VARCHAR2(24 BYTE) NOT NULL, - "INT_NUMBER" NUMBER(38,0) NOT NULL, - "INT_DEPEND_COUNT" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_EXIT_STATUS" NUMBER(38,0) DEFAULT -1 NOT NULL, - "INT_RETRIES" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_RESERVED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_MAX_USED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_MEM_USED" NUMBER(38,0) DEFAULT 0 NOT NULL, - "INT_DISPATCH_ORDER" NUMBER(38,0) DEFAULT 0 NOT NULL, - "STR_HOST" VARCHAR2(256 BYTE), - "INT_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_LAYER_ORDER" NUMBER(16,0) NOT NULL, - "TS_STARTED" TIMESTAMP (6) WITH TIME ZONE, - "TS_STOPPED" TIMESTAMP (6) WITH TIME ZONE, - "TS_LAST_RUN" TIMESTAMP (6) WITH TIME ZONE, - "TS_UPDATED" TIMESTAMP (6) WITH TIME ZONE, - "INT_VERSION" NUMBER(16,0) DEFAULT 0, - "STR_CHECKPOINT_STATE" VARCHAR2(12 BYTE) DEFAULT 'Disabled' NOT NULL, - "INT_CHECKPOINT_COUNT" NUMBER(6,0) DEFAULT 0 NOT NULL, - "INT_GPU_RESERVED" NUMBER(10,0) DEFAULT 0 NOT NULL, - "INT_TOTAL_PAST_CORE_TIME" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "FOLDER_LEVEL" - ( "PK_FOLDER_LEVEL" VARCHAR2(36 BYTE) NOT NULL, - "PK_FOLDER" VARCHAR2(36 BYTE) NOT NULL, - "INT_LEVEL" NUMBER(38,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "FOLDER" - ( "PK_FOLDER" VARCHAR2(36 BYTE) NOT NULL, - "PK_PARENT_FOLDER" VARCHAR2(36 BYTE), - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "INT_PRIORITY" NUMBER(38,0) DEFAULT 1 NOT NULL, - "B_DEFAULT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "PK_DEPT" VARCHAR2(36 BYTE) NOT NULL, - "INT_JOB_MIN_CORES" NUMBER(16,0) DEFAULT -1 NOT NULL, - "INT_JOB_MAX_CORES" NUMBER(16,0) DEFAULT -1 NOT NULL, - "INT_JOB_PRIORITY" NUMBER(16,0) DEFAULT -1 NOT NULL, - "INT_MIN_CORES" NUMBER(16,0) DEFAULT 0 NOT NULL, - "INT_MAX_CORES" NUMBER(16,0) DEFAULT -1 NOT NULL, - "B_EXCLUDE_MANAGED" NUMBER(1,0) DEFAULT 0 NOT NULL, - "F_ORDER" NUMBER(16,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "FILTER" - ( "PK_FILTER" VARCHAR2(36 BYTE) NOT NULL, - "PK_SHOW" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(128 BYTE) NOT NULL, - "STR_TYPE" VARCHAR2(16 BYTE) NOT NULL, - "F_ORDER" NUMBER(6,2) DEFAULT 0.0 NOT NULL, - "B_ENABLED" NUMBER(1,0) DEFAULT 1 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "DEPEND" - ( "PK_DEPEND" VARCHAR2(36 BYTE) NOT NULL, - "PK_PARENT" VARCHAR2(36 BYTE), - "PK_JOB_DEPEND_ON" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB_DEPEND_ER" VARCHAR2(36 BYTE) NOT NULL, - "PK_FRAME_DEPEND_ON" VARCHAR2(36 BYTE), - "PK_FRAME_DEPEND_ER" VARCHAR2(36 BYTE), - "PK_LAYER_DEPEND_ON" VARCHAR2(36 BYTE), - "PK_LAYER_DEPEND_ER" VARCHAR2(36 BYTE), - "STR_TYPE" VARCHAR2(36 BYTE) NOT NULL, - "B_ACTIVE" NUMBER(1,0) DEFAULT 1 NOT NULL, - "B_ANY" NUMBER(1,0) DEFAULT 0 NOT NULL, - "TS_CREATED" TIMESTAMP (6) DEFAULT systimestamp NOT NULL, - "TS_SATISFIED" TIMESTAMP (6), - "STR_TARGET" VARCHAR2(20 BYTE) DEFAULT 'Internal' NOT NULL, - "STR_SIGNATURE" VARCHAR2(36 BYTE) NOT NULL, - "B_COMPOSITE" NUMBER(1,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! 
-CREATE TABLE "CONFIG" - ( "PK_CONFIG" VARCHAR2(36 BYTE) NOT NULL, - "STR_KEY" VARCHAR2(36 BYTE) NOT NULL, - "INT_VALUE" NUMBER(38,0) DEFAULT 0, - "LONG_VALUE" NUMBER(38,0) DEFAULT 0, - "STR_VALUE" VARCHAR2(255 BYTE) DEFAULT '', - "B_VALUE" NUMBER(1,0) DEFAULT 0 - ) --- SPLIT HERE! -CREATE TABLE "COMMENTS" - ( "PK_COMMENT" VARCHAR2(36 BYTE) NOT NULL, - "PK_JOB" VARCHAR2(36 BYTE), - "PK_HOST" VARCHAR2(36 BYTE), - "TS_CREATED" TIMESTAMP (6) DEFAULT systimestamp NOT NULL, - "STR_USER" VARCHAR2(36 BYTE) NOT NULL, - "STR_SUBJECT" VARCHAR2(128 BYTE) NOT NULL, - "STR_MESSAGE" VARCHAR2(4000 BYTE) NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "ALLOC" - ( "PK_ALLOC" VARCHAR2(36 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(36 BYTE) NOT NULL, - "B_ALLOW_EDIT" NUMBER(1,0) DEFAULT 1 NOT NULL, - "B_DEFAULT" NUMBER(1,0) DEFAULT 0 NOT NULL, - "STR_TAG" VARCHAR2(24 BYTE), - "B_BILLABLE" NUMBER(1,0) DEFAULT 1 NOT NULL, - "PK_FACILITY" VARCHAR2(36 BYTE) NOT NULL, - "B_ENABLED" NUMBER(1,0) DEFAULT 1 - ) --- SPLIT HERE! -CREATE TABLE "ACTION" - ( "PK_ACTION" VARCHAR2(36 BYTE) NOT NULL, - "PK_FILTER" VARCHAR2(36 BYTE) NOT NULL, - "PK_FOLDER" VARCHAR2(36 BYTE), - "STR_ACTION" VARCHAR2(24 BYTE) NOT NULL, - "STR_VALUE_TYPE" VARCHAR2(24 BYTE) NOT NULL, - "STR_VALUE" VARCHAR2(4000 BYTE), - "INT_VALUE" NUMBER(38,0), - "B_VALUE" NUMBER(1,0), - "TS_CREATED" TIMESTAMP (6) DEFAULT systimestamp NOT NULL, - "FLOAT_VALUE" NUMBER(6,2), - "B_STOP" NUMBER(1,0) DEFAULT 0 NOT NULL - ) --- SPLIT HERE! -CREATE TABLE "REDIRECT" - ( "PK_PROC" VARCHAR2(36 BYTE) NOT NULL, - "STR_GROUP_ID" VARCHAR(36 BYTE) NOT NULL, - "INT_TYPE" NUMBER(38,0) NOT NULL, - "STR_DESTINATION_ID" VARCHAR2(512 BYTE) NOT NULL, - "STR_NAME" VARCHAR2(512 BYTE) NOT NULL, - "LNG_CREATION_TIME" NUMBER(38,0) NOT NULL - ) - - --- SPLIT HERE! - COMMENT ON COLUMN "JOB_HISTORY"."INT_CORE_TIME_SUCCESS" IS 'seconds per core succeeded' --- SPLIT HERE! - COMMENT ON COLUMN "JOB_HISTORY"."INT_CORE_TIME_FAIL" IS 'seconds per core failed' --- SPLIT HERE! 
- COMMENT ON COLUMN "JOB_HISTORY"."INT_MAX_RSS" IS 'maximum kilobytes of rss memory used by a single frame' --- SPLIT HERE! - COMMENT ON COLUMN "LAYER_HISTORY"."INT_CORE_TIME_SUCCESS" IS 'seconds per core succeeded' --- SPLIT HERE! - COMMENT ON COLUMN "LAYER_HISTORY"."INT_CORE_TIME_FAIL" IS 'seconds per core failed' --- SPLIT HERE! - COMMENT ON COLUMN "LAYER_HISTORY"."INT_MAX_RSS" IS 'maximum kilobytes of rss memory used by a single frame' --- SPLIT HERE! - COMMENT ON COLUMN "FRAME_HISTORY"."INT_MEM_RESERVED" IS 'kilobytes of memory reserved' --- SPLIT HERE! - COMMENT ON COLUMN "FRAME_HISTORY"."INT_MEM_MAX_USED" IS 'maximum kilobytes of rss memory used' --- SPLIT HERE! - COMMENT ON COLUMN "FRAME_HISTORY"."INT_CORES" IS '100 cores per physical core' - --- SPLIT HERE! -CREATE PACKAGE HISTORY AS - - procedure period_shift; - procedure period_shift(piEndDate in date); - procedure period_clear; - -END HISTORY; --- SPLIT HERE! -CREATE FUNCTION CALCULATE_CORE_HOURS -(int_ts_started NUMERIC, int_ts_stopped NUMERIC, -int_start_report NUMERIC, int_stop_report NUMERIC, -int_job_stopped NUMERIC, int_cores NUMBER) -RETURN NUMBER IS - int_started NUMERIC(12,0); - int_stopped NUMERIC(12,0); -BEGIN - IF int_cores = 0 THEN - RETURN 0; - END IF; - - int_started := int_ts_started; - int_stopped := int_ts_stopped; - - IF int_stopped = 0 THEN - int_stopped := int_job_stopped; - END IF; - - IF int_stopped = 0 OR int_stopped > int_stop_report THEN - int_stopped := int_stop_report; - END IF; - - IF int_started < int_start_report THEN - int_started := int_start_report; - END IF; - RETURN ((int_stopped - int_started) * (int_cores / 100) / 3600); -END; --- SPLIT HERE! -CREATE FUNCTION "INTERVAL_TO_SECONDS" -( intrvl IN DSINTERVAL_UNCONSTRAINED -) RETURN NUMBER AS -BEGIN - RETURN EXTRACT(DAY FROM intrvl) * 86400 - + EXTRACT(HOUR FROM intrvl) * 3600 - + EXTRACT(MINUTE FROM intrvl) * 60 - + EXTRACT(SECOND FROM intrvl); -END INTERVAL_TO_SECONDS; --- SPLIT HERE! 
-CREATE FUNCTION "EPOCH" -( t IN TIMESTAMP WITH TIME ZONE -) RETURN NUMBER AS - epoch_date TIMESTAMP(0) WITH TIME ZONE := TIMESTAMP '1970-01-01 00:00:00.00 +00:00'; - epoch_sec NUMERIC(12,0); - delta INTERVAL DAY(9) TO SECOND(0); -BEGIN - delta := t - epoch_date; - RETURN INTERVAL_TO_SECONDS(delta); -END EPOCH; --- SPLIT HERE! -CREATE FUNCTION epoch_to_ts(seconds IN NUMBER) -RETURN TIMESTAMP AS -BEGIN - RETURN TO_TIMESTAMP('19700101000000','YYYYMMDDHH24MISS TZH:TZM') - + NUMTODSINTERVAL(seconds, 'SECOND'); -END; --- SPLIT HERE! -CREATE FUNCTION FIND_DURATION -(ts_started TIMESTAMP, ts_stopped TIMESTAMP) -RETURN NUMBER IS - t_interval INTERVAL DAY TO SECOND; - t_stopped TIMESTAMP(0); -BEGIN - - IF ts_started IS NULL THEN - RETURN 0; - END IF; - - IF ts_stopped IS NULL THEN - t_stopped := systimestamp; - ELSE - t_stopped := ts_stopped; - END IF; - - t_interval := t_stopped - ts_started; - - RETURN ROUND((EXTRACT(DAY FROM t_interval) * 86400 - + EXTRACT(HOUR FROM t_interval) * 3600 - + EXTRACT(MINUTE FROM t_interval) * 60 - + EXTRACT(SECOND FROM t_interval))); -END; --- SPLIT HERE! -CREATE FUNCTION genkey RETURN VARCHAR2 IS - str_result VARCHAR2(36); - guid VARCHAR2(36) := sys_guid(); -BEGIN - str_result := SUBSTR(guid, 0,8) || '-' || SUBSTR(guid,8,4) - || '-' || SUBSTR(guid,12,4) || '-' || SUBSTR(guid,16,4) || '-' || SUBSTR(guid,20,12); - RETURN str_result; -END; --- SPLIT HERE! - -CREATE FUNCTION render_weeks -(dt_end DATE) -RETURN NUMBER IS - int_weeks NUMERIC; -BEGIN - int_weeks := (dt_end - (next_day(sysdate,'sunday')+7)) / 7.0; - IF int_weeks < 1 THEN - RETURN 1; - ELSE - RETURN int_weeks; - END IF; -END; --- SPLIT HERE! 
-CREATE FUNCTION soft_tier(int_cores IN NUMERIC, int_min_cores IN NUMERIC) -RETURN NUMBER AS -BEGIN - IF int_cores IS NULL THEN - RETURN 0; - END IF; - IF int_min_cores = 0 OR int_cores >= int_min_cores THEN - RETURN 1; - ELSE - IF int_cores = 0 THEN - return int_min_cores * -1; - ELSE - RETURN int_cores / int_min_cores; - END IF; - END IF; -END; --- SPLIT HERE! -CREATE FUNCTION tier(int_cores IN NUMERIC, int_min_cores IN NUMERIC) -RETURN NUMBER AS -BEGIN - - IF int_min_cores = 0 THEN - RETURN (int_cores / 100) + 1; - ELSE - IF int_cores = 0 THEN - return int_min_cores * -1; - ELSE - RETURN int_cores / int_min_cores; - END IF; - END IF; -END; --- SPLIT HERE! -CREATE PROCEDURE recalculate_subs -IS -BEGIN - /** - * concatenates all tags in host_tag and sets host.str_tags - **/ - UPDATE subscription SET int_cores = 0; - for r in (select proc.pk_show, alloc.pk_alloc, sum(proc.int_cores_reserved) as c from proc, host, alloc - where proc.pk_host = host.pk_host AND host.pk_alloc = alloc.pk_alloc - group by proc.pk_show, alloc.pk_alloc) LOOP - UPDATE subscription SET int_cores = r.c WHERE pk_alloc=r.pk_alloc AND pk_show=r.pk_show; - - END LOOP; -END; --- SPLIT HERE! -CREATE PROCEDURE recalculate_tags(str_host_id IN VARCHAR2) -IS - str_tag VARCHAR2(256) := ''; -BEGIN - /** - * concatenates all tags in host_tag and sets host.str_tags - **/ - FOR tag IN (SELECT str_tag FROM host_tag WHERE pk_host=str_host_id ORDER BY str_tag_type ASC, str_tag ASC) LOOP - str_tag := str_tag || ' ' || tag.str_tag; - END LOOP; - - EXECUTE IMMEDIATE 'UPDATE host SET str_tags=trim(:1) WHERE pk_host=:2' - USING str_tag, str_host_id; -END; --- SPLIT HERE! 
-CREATE PROCEDURE recurse_folder_parent_change(str_folder_id IN VARCHAR2, str_parent_folder_id IN VARCHAR2) -IS - int_parent_level NUMBER(38); -BEGIN - SELECT int_level+1 INTO - int_parent_level - FROM - folder_level - WHERE - pk_folder = str_parent_folder_id; - - UPDATE - folder_level - SET - int_level = int_parent_level - WHERE - pk_folder = str_folder_id; - - FOR subfolder IN (SELECT pk_folder FROM folder WHERE pk_parent_folder = str_folder_id) LOOP - recurse_folder_parent_change(subfolder.pk_folder, str_folder_id); - END LOOP; -END; --- SPLIT HERE! -CREATE PROCEDURE rename_allocs -IS -BEGIN - FOR alloc IN (SELECT alloc.pk_alloc, alloc.str_name AS aname,facility.str_name AS fname FROM alloc,facility - WHERE alloc.pk_facility = facility.pk_facility) LOOP - EXECUTE IMMEDIATE 'UPDATE alloc SET str_name=:1 WHERE pk_alloc=:2' USING - alloc.fname || '.' || alloc.aname, alloc.pk_alloc; - END LOOP; -END; --- SPLIT HERE! -CREATE PROCEDURE reorder_filters(p_str_show_id IN VARCHAR2) IS - f_new_order NUMBER(16,0) := 1.0; -BEGIN - FOR r_filter IN (SELECT pk_filter FROM filter WHERE pk_show=p_str_show_id ORDER BY f_order ASC) LOOP - UPDATE filter SET f_order=f_new_order WHERE pk_filter = r_filter.pk_filter; - f_new_order := f_new_order + 1.0; - END LOOP; -END; --- SPLIT HERE! -CREATE PROCEDURE tmp_populate_folder IS -BEGIN - FOR t in (select pk_folder, pk_show, sum(int_cores) AS c from job, job_resource where job.pk_job = job_resource.pk_job GROUP by pk_folder, pk_show) LOOP - UPDATE folder_resource SET int_cores = t.c WHERE pk_folder = t.pk_folder; - COMMIT; - END LOOP; -END; --- SPLIT HERE! -CREATE PROCEDURE tmp_populate_point IS -BEGIN - FOR t in (select pk_dept, pk_show, sum(int_cores) AS c from job, job_resource where job.pk_job = job_resource.pk_job GROUP by pk_dept, pk_show) LOOP - UPDATE point SET int_cores = t.c WHERE pk_show = t.pk_show AND pk_dept = t.pk_dept; - END LOOP; -END; --- SPLIT HERE! 
-CREATE PROCEDURE tmp_populate_sub IS -BEGIN - FOR t in (select proc.pk_show, host.pk_alloc, sum(int_cores_reserved) AS c from proc,host where - proc.pk_host = host.pk_host GROUP BY proc.pk_show, host.pk_alloc) LOOP - UPDATE subscription SET int_cores = t.c WHERE pk_show = t.pk_show AND pk_alloc = t.pk_alloc; - END LOOP; -END; - --- SPLIT HERE! -CREATE UNIQUE INDEX "C_ACTION_PK" ON "ACTION" ("PK_ACTION") --- SPLIT HERE! -CREATE INDEX "I_ACTION_PK_FILTER" ON "ACTION" ("PK_FILTER") --- SPLIT HERE! -CREATE INDEX "I_ACTION_PK_GROUP" ON "ACTION" ("PK_FOLDER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_ALLOC_PK" ON "ALLOC" ("PK_ALLOC") --- SPLIT HERE! -CREATE INDEX "I_ALLOC_PK_FACILITY" ON "ALLOC" ("PK_FACILITY") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_ALLOC_NAME_UNIQ" ON "ALLOC" ("STR_NAME") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_COMMENT_PK" ON "COMMENTS" ("PK_COMMENT") --- SPLIT HERE! -CREATE INDEX "I_COMMENT_PK_JOB" ON "COMMENTS" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_COMMENT_PK_HOST" ON "COMMENTS" ("PK_HOST") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_PKCONFIG" ON "CONFIG" ("PK_CONFIG") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_SHOW_UK" ON "CONFIG" ("STR_KEY") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_DEPEND_SIGNATURE" ON "DEPEND" ("STR_SIGNATURE") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_ON_LAYER" ON "DEPEND" ("PK_LAYER_DEPEND_ON") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_ER_LAYER" ON "DEPEND" ("PK_LAYER_DEPEND_ER") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_STR_TARGET" ON "DEPEND" ("STR_TARGET") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_ON_FRAME" ON "DEPEND" ("PK_FRAME_DEPEND_ON") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_STR_TYPE" ON "DEPEND" ("STR_TYPE") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_ER_FRAME" ON "DEPEND" ("PK_FRAME_DEPEND_ER") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_B_COMPOSITE" ON "DEPEND" ("B_COMPOSITE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_DEPEND_PK" ON "DEPEND" ("PK_DEPEND") --- SPLIT HERE! 
-CREATE INDEX "I_DEPEND_PKPARENT" ON "DEPEND" ("PK_PARENT") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_PK_ON_JOB" ON "DEPEND" ("PK_JOB_DEPEND_ON") --- SPLIT HERE! -CREATE INDEX "I_DEPEND_PK_ER_JOB" ON "DEPEND" ("PK_JOB_DEPEND_ER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FILTER_PK" ON "FILTER" ("PK_FILTER") --- SPLIT HERE! -CREATE INDEX "I_FILTERS_PK_SHOW" ON "FILTER" ("PK_SHOW") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FOLDER_UK" ON "FOLDER" ("PK_PARENT_FOLDER", "STR_NAME") --- SPLIT HERE! -CREATE INDEX "I_FOLDER_PKPARENTFOLDER" ON "FOLDER" ("PK_PARENT_FOLDER") --- SPLIT HERE! -CREATE INDEX "I_FOLDER_PKSHOW" ON "FOLDER" ("PK_SHOW") --- SPLIT HERE! -CREATE INDEX "I_FOLDER_STRNAME" ON "FOLDER" ("STR_NAME") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FOLDER_PK" ON "FOLDER" ("PK_FOLDER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FOLDER_LEVEL_PK" ON "FOLDER_LEVEL" ("PK_FOLDER_LEVEL") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FOLDER_LEVEL_UK" ON "FOLDER_LEVEL" ("PK_FOLDER") --- SPLIT HERE! -CREATE INDEX "I_FRAME_STATE_JOB" ON "FRAME" ("STR_STATE", "PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_FRAME_DISPATCH_IDX" ON "FRAME" ("INT_DISPATCH_ORDER", "INT_LAYER_ORDER") --- SPLIT HERE! -CREATE INDEX "I_FRAME_PK_JOB" ON "FRAME" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FRAME_PK" ON "FRAME" ("PK_FRAME") --- SPLIT HERE! -CREATE INDEX "I_FRAME_PKJOBLAYER" ON "FRAME" ("PK_LAYER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FRAME_STR_NAME_UNQ" ON "FRAME" ("STR_NAME", "PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_FRAME_INT_GPU_RESERVED" ON "FRAME" ("INT_GPU_RESERVED") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_STR_HOST_FQDN_UK" ON "HOST" ("STR_FQDN") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_HOST_PK" ON "HOST" ("PK_HOST") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_HOST_UK" ON "HOST" ("STR_NAME") --- SPLIT HERE! -CREATE INDEX "I_HOST_PKALLOC" ON "HOST" ("PK_ALLOC") --- SPLIT HERE! -CREATE INDEX "I_HOST_STRLOCKSTATE" ON "HOST" ("STR_LOCK_STATE") --- SPLIT HERE! 
-CREATE INDEX "I_HOST_INT_GPU" ON "HOST" ("INT_GPU") --- SPLIT HERE! -CREATE INDEX "I_HOST_INT_GPU_IDLE" ON "HOST" ("INT_GPU_IDLE") --- SPLIT HERE! -CREATE INDEX "I_HOST_STAT_INT_GPU_TOTAL" ON "HOST_STAT" ("INT_GPU_TOTAL") --- SPLIT HERE! -CREATE INDEX "I_HOST_STAT_INT_GPU_FREE" ON "HOST_STAT" ("INT_GPU_FREE") --- SPLIT HERE! -CREATE INDEX "I_HOST_STAT_STR_OS" ON "HOST_STAT" ("STR_OS") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_HOSTSTAT_PK" ON "HOST_STAT" ("PK_HOST_STAT") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_HOST_STAT_PK_HOST_UK" ON "HOST_STAT" ("PK_HOST") --- SPLIT HERE! -CREATE INDEX "I_BOOKING_3" ON "JOB" ("STR_STATE", "B_PAUSED", "PK_SHOW", "PK_FACILITY") --- SPLIT HERE! -CREATE INDEX "I_JOB_STR_OS" ON "JOB" ("STR_OS") --- SPLIT HERE! -CREATE INDEX "I_JOB_PK_DEPT" ON "JOB" ("PK_DEPT") --- SPLIT HERE! -CREATE INDEX "I_JOB_PK_FACILITY" ON "JOB" ("PK_FACILITY") --- SPLIT HERE! -CREATE INDEX "I_JOB_STR_SHOT" ON "JOB" ("STR_SHOT") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_PK" ON "JOB" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_UK" ON "JOB" ("STR_VISIBLE_NAME") --- SPLIT HERE! -CREATE INDEX "I_JOB_PKGROUP" ON "JOB" ("PK_FOLDER") --- SPLIT HERE! -CREATE INDEX "I_JOB_PKSHOW" ON "JOB" ("PK_SHOW") --- SPLIT HERE! -CREATE INDEX "I_JOB_STR_NAME" ON "JOB" ("STR_NAME") --- SPLIT HERE! -CREATE INDEX "I_JOB_STR_STATE" ON "JOB" ("STR_STATE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_ENV_PK" ON "JOB_ENV" ("PK_JOB_ENV") --- SPLIT HERE! -CREATE INDEX "I_JOB_ENV_PK_JOB" ON "JOB_ENV" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYER_B_THREADABLE" ON "LAYER" ("B_THREADABLE") --- SPLIT HERE! -CREATE INDEX "I_LAYER_CORES_MEM" ON "LAYER" ("INT_CORES_MIN", "INT_MEM_MIN") --- SPLIT HERE! -CREATE INDEX "I_LAYER_CORES_MEM_THREAD" ON "LAYER" ("INT_CORES_MIN", "INT_MEM_MIN", "B_THREADABLE") --- SPLIT HERE! -CREATE INDEX "I_LAYER_MEM_MIN" ON "LAYER" ("INT_MEM_MIN") --- SPLIT HERE! -CREATE INDEX "I_LAYER_INT_DISPATCH_ORDER" ON "LAYER" ("INT_DISPATCH_ORDER") --- SPLIT HERE! 
-CREATE UNIQUE INDEX "C_LAYER_PK" ON "LAYER" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_LAYER_PKJOB" ON "LAYER" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYER_STRNAME" ON "LAYER" ("STR_NAME") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYER_STR_NAME_UNQ" ON "LAYER" ("STR_NAME", "PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYER_INT_GPU_MIN" ON "LAYER" ("INT_GPU_MIN") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYER_ENV_PK" ON "LAYER_ENV" ("PK_LAYER_ENV") --- SPLIT HERE! -CREATE INDEX "I_LAYER_ENV_PK_JOB" ON "LAYER_ENV" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYER_ENV_PK_LAYER" ON "LAYER_ENV" ("PK_LAYER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYERRESOURCE_PK" ON "LAYER_RESOURCE" ("PK_LAYER_RESOURCE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYERRESOURCE_UK" ON "LAYER_RESOURCE" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_LAYER_RESOURCE_PK_JOB" ON "LAYER_RESOURCE" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_LAYER_STAT_PK_LAYER" ON "LAYER_STAT" ("PK_LAYER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYERSTAT_PK" ON "LAYER_STAT" ("PK_LAYER_STAT") --- SPLIT HERE! -CREATE INDEX "I_LAYERSTAT_PKJOB" ON "LAYER_STAT" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYERSTAT_INT_WAITING_COUNT" ON "LAYER_STAT" (CASE WHEN "INT_WAITING_COUNT" > 0 THEN 1 ELSE NULL END, CASE WHEN "INT_WAITING_COUNT" > 0 THEN "PK_LAYER" ELSE NULL END) --- SPLIT HERE! -CREATE INDEX "I_LAYER_USAGE_PK_JOB" ON "LAYER_USAGE" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYER_USAGE_PK" ON "LAYER_USAGE" ("PK_LAYER_USAGE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYER_USAGE_PK_LAYER_UK" ON "LAYER_USAGE" ("PK_LAYER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_MATCHER_PK" ON "MATCHER" ("PK_MATCHER") --- SPLIT HERE! -CREATE INDEX "I_MATCHER_PK_FILTER" ON "MATCHER" ("PK_FILTER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PROC_PK" ON "PROC" ("PK_PROC") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PROC_UK" ON "PROC" ("PK_FRAME") --- SPLIT HERE! 
-CREATE INDEX "I_PROC_PKHOST" ON "PROC" ("PK_HOST") --- SPLIT HERE! -CREATE INDEX "I_PROC_PKJOB" ON "PROC" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_PROC_PKLAYER" ON "PROC" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_PROC_PKSHOW" ON "PROC" ("PK_SHOW") --- SPLIT HERE! -CREATE INDEX "I_PROC_INT_GPU_RESERVED" ON "PROC" ("INT_GPU_RESERVED") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_SHOW_PK" ON "SHOW" ("PK_SHOW") --- SPLIT HERE! -CREATE INDEX "I_SUB_TIER" ON "SUBSCRIPTION" ("FLOAT_TIER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_SUBSCRIPTION_PK" ON "SUBSCRIPTION" ("PK_SUBSCRIPTION") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_SUBSCRIPTION_UK" ON "SUBSCRIPTION" ("PK_SHOW", "PK_ALLOC") --- SPLIT HERE! -CREATE INDEX "I_SUBSCRIPTION_PKALLOC" ON "SUBSCRIPTION" ("PK_ALLOC") --- SPLIT HERE! -CREATE INDEX "I_JOB_STAT_INT_WAITING_COUNT" ON "JOB_STAT" ("INT_WAITING_COUNT") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_JOB_STAT_PK_JOB" ON "JOB_STAT" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_STAT_PK" ON "JOB_STAT" ("PK_JOB_STAT") --- SPLIT HERE! -CREATE INDEX "I_JOB_RESOURCE_MIN_MAX" ON "JOB_RESOURCE" ("INT_MIN_CORES", "INT_MAX_CORES") --- SPLIT HERE! -CREATE INDEX "I_JOB_TIER" ON "JOB_RESOURCE" ("FLOAT_TIER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_RESOURCE_PK" ON "JOB_RESOURCE" ("PK_JOB_RESOURCE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_RESOURCE_UK" ON "JOB_RESOURCE" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_JOB_RESOURCE_CORES" ON "JOB_RESOURCE" ("INT_CORES") --- SPLIT HERE! -CREATE INDEX "I_JOB_RESOURCE_MAX_C" ON "JOB_RESOURCE" ("INT_MAX_CORES") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_USAGE_PK" ON "JOB_USAGE" ("PK_JOB_USAGE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_USAGE_PK_JOB_UNIQ" ON "JOB_USAGE" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_HOST_TAG_PK" ON "HOST_TAG" ("PK_HOST_TAG") --- SPLIT HERE! -CREATE INDEX "I_HOST_TAG_PK_HOST" ON "HOST_TAG" ("PK_HOST") --- SPLIT HERE! 
-CREATE INDEX "I_HOST_STR_TAG_TYPE" ON "HOST_TAG" ("STR_TAG_TYPE") --- SPLIT HERE! -CREATE INDEX "MATTHEW_STATS_TAB" ON "MATTHEW_STATS_TAB" ("STATID", "TYPE", "C5", "C1", "C2", "C3", "C4", "VERSION") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_TASK_LOCK_PK" ON "TASK_LOCK" ("PK_TASK_LOCK") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_HISTORY_PK" ON "JOB_HISTORY" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_PK_SHOW" ON "JOB_HISTORY" ("PK_SHOW") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_B_ARCHIVED" ON "JOB_HISTORY" ("B_ARCHIVED") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_TS_START_STOP" ON "JOB_HISTORY" ("INT_TS_STARTED", "INT_TS_STOPPED") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_STR_NAME" ON "JOB_HISTORY" ("STR_NAME") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_STR_SHOT" ON "JOB_HISTORY" ("STR_SHOT") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_STR_USER" ON "JOB_HISTORY" ("STR_USER") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_PK_DEPT" ON "JOB_HISTORY" ("PK_DEPT") --- SPLIT HERE! -CREATE INDEX "I_JOB_HISTORY_PK_FACILITY" ON "JOB_HISTORY" ("PK_FACILITY") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYER_HISTORY_PK" ON "LAYER_HISTORY" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_LAYER_HISTORY_STR_NAME" ON "LAYER_HISTORY" ("STR_NAME") --- SPLIT HERE! -CREATE INDEX "I_LAYER_HISTORY_STR_TYPE" ON "LAYER_HISTORY" ("STR_TYPE") --- SPLIT HERE! -CREATE INDEX "I_LAYER_HISTORY_PK_JOB" ON "LAYER_HISTORY" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYER_HISTORY_B_ARCHIVED" ON "LAYER_HISTORY" ("B_ARCHIVED") --- SPLIT HERE! -CREATE INDEX "I_JOB_POST_PK_POST_JOB" ON "JOB_POST" ("PK_POST_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_POST_PK" ON "JOB_POST" ("PK_JOB_POST") --- SPLIT HERE! -CREATE INDEX "I_JOB_POST_PK_JOB" ON "JOB_POST" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FACILITY_PK" ON "FACILITY" ("PK_FACILITY") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_DEPT_PK" ON "DEPT" ("PK_DEPT") --- SPLIT HERE! 
-CREATE INDEX "I_SHOW_ALIAS_PK_SHOW" ON "SHOW_ALIAS" ("PK_SHOW") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_SHOW_ALIAS_PK" ON "SHOW_ALIAS" ("PK_SHOW_ALIAS") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_FOLDER_RESOURCE_PK" ON "FOLDER_RESOURCE" ("PK_FOLDER_RESOURCE") --- SPLIT HERE! -CREATE INDEX "I_FOLDER_RES_INT_MAX_CORES" ON "FOLDER_RESOURCE" ("INT_MAX_CORES") --- SPLIT HERE! -CREATE INDEX "I_FOLDER_RESOURCE_FL_TIER" ON "FOLDER_RESOURCE" ("FLOAT_TIER") --- SPLIT HERE! -CREATE INDEX "I_FOLDERRESOURCE_PKFOLDER" ON "FOLDER_RESOURCE" ("PK_FOLDER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_JOB_MEM_PK" ON "JOB_MEM" ("PK_JOB_MEM") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_JOB_MEM_PK_JOB" ON "JOB_MEM" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_JOB_MEM_INT_MAX_RSS" ON "JOB_MEM" ("INT_MAX_RSS") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_LAYER_MEM_PK" ON "LAYER_MEM" ("PK_LAYER_MEM") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_LAYER_MEM_PK_LAYER" ON "LAYER_MEM" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_LAYER_MEM_PK_JOB" ON "LAYER_MEM" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_LAYER_MEM_INT_MAX_RSS" ON "LAYER_MEM" ("INT_MAX_RSS") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_POINT_PK" ON "POINT" ("PK_POINT") --- SPLIT HERE! -CREATE INDEX "I_POINT_PK_DEPT" ON "POINT" ("PK_DEPT") --- SPLIT HERE! -CREATE INDEX "I_POINT_PK_SHOW" ON "POINT" ("PK_SHOW") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_POINT_PK_SHOW_DEPT" ON "POINT" ("PK_SHOW", "PK_DEPT") --- SPLIT HERE! -CREATE INDEX "I_POINT_TIER" ON "POINT" ("FLOAT_TIER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_TASK_PK" ON "TASK" ("PK_TASK") --- SPLIT HERE! -CREATE INDEX "I_TASK_PK_POINT" ON "TASK" ("PK_POINT") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_TASK_UNIQ" ON "TASK" ("STR_SHOT", "PK_POINT") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_JOB_LOCAL" ON "JOB_LOCAL" ("PK_JOB_LOCAL") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_JOB_LOCAL_PK_JOB" ON "JOB_LOCAL" ("PK_JOB") --- SPLIT HERE! 
-CREATE UNIQUE INDEX "I_JOB_LOCAL_PK_HOST" ON "JOB_LOCAL" ("PK_HOST") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_SERVICE" ON "SERVICE" ("PK_SERVICE") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_SERVICE_STR_NAME" ON "SERVICE" ("STR_NAME") --- SPLIT HERE! -CREATE INDEX "I_SERVICE_INT_GPU_MIN" ON "SERVICE" ("INT_GPU_MIN") --- SPLIT HERE! -CREATE INDEX "I_HOST_LOCAL" ON "HOST_LOCAL" ("PK_HOST") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_HOST_LOCAL" ON "HOST_LOCAL" ("PK_HOST_LOCAL") --- SPLIT HERE! -CREATE INDEX "I_HOST_LOCAL_PK_JOB" ON "HOST_LOCAL" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_HOST_LOCAL_UNIQUE" ON "HOST_LOCAL" ("PK_HOST", "PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_HOST_LOCAL_INT_GPU_IDLE" ON "HOST_LOCAL" ("INT_GPU_IDLE") --- SPLIT HERE! -CREATE INDEX "I_HOST_LOCAL_INT_GPU_MAX" ON "HOST_LOCAL" ("INT_GPU_MAX") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_OWNER" ON "OWNER" ("PK_OWNER") --- SPLIT HERE! -CREATE INDEX "I_OWNER_PK_SHOW" ON "OWNER" ("PK_SHOW") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_OWNER_STR_USERNAME" ON "OWNER" ("STR_USERNAME") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_DEED" ON "DEED" ("PK_DEED") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_DEED_PK_HOST" ON "DEED" ("PK_HOST") --- SPLIT HERE! -CREATE INDEX "I_DEED_PK_OWNER" ON "DEED" ("PK_OWNER") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_SHOW_SERVICE" ON "SHOW_SERVICE" ("PK_SHOW_SERVICE") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_SHOW_SERVICE_STR_NAME" ON "SHOW_SERVICE" ("STR_NAME", "PK_SHOW") --- SPLIT HERE! -CREATE INDEX "I_SHOW_SERVICE_INT_GPU_MIN" ON "SHOW_SERVICE" ("INT_GPU_MIN") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_PK_LAYER_OUTPUT" ON "LAYER_OUTPUT" ("PK_LAYER_OUTPUT") --- SPLIT HERE! -CREATE INDEX "I_LAYER_OUTPUT_PK_LAYER" ON "LAYER_OUTPUT" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_LAYER_OUTPUT_PK_JOB" ON "LAYER_OUTPUT" ("PK_JOB") --- SPLIT HERE! -CREATE UNIQUE INDEX "I_LAYER_OUTPUT_UNIQUE" ON "LAYER_OUTPUT" ("PK_LAYER", "STR_FILESPEC") --- SPLIT HERE! 
- -CREATE INDEX "I_FRAME_HISTORY_TS_START_STOP" ON "FRAME_HISTORY" ("INT_TS_STARTED", "INT_TS_STOPPED") --- SPLIT HERE! - -CREATE INDEX "I_FRAME_HISTORY_INT_EXIT_STAT" ON "FRAME_HISTORY" ("INT_EXIT_STATUS") --- SPLIT HERE! -CREATE INDEX "I_FRAME_HISTORY_INT_TS_STOPPED" ON "FRAME_HISTORY" ("INT_TS_STOPPED") --- SPLIT HERE! -CREATE INDEX "I_FRAME_HISTORY_PK_ALLOC" ON "FRAME_HISTORY" ("PK_ALLOC") --- SPLIT HERE! -CREATE INDEX "I_FRAME_HISTORY_PK_FRAME" ON "FRAME_HISTORY" ("PK_FRAME") --- SPLIT HERE! -CREATE INDEX "I_FRAME_HISTORY_PK_JOB" ON "FRAME_HISTORY" ("PK_JOB") --- SPLIT HERE! -CREATE INDEX "I_FRAME_HISTORY_PK_LAYER" ON "FRAME_HISTORY" ("PK_LAYER") --- SPLIT HERE! -CREATE INDEX "I_FRAME_HISTORY_STR_STATE" ON "FRAME_HISTORY" ("STR_STATE") --- SPLIT HERE! -CREATE UNIQUE INDEX "C_REDIRECT_PK" ON "REDIRECT" ("PK_PROC") --- SPLIT HERE! -CREATE INDEX "I_REDIRECT_GROUP" ON "REDIRECT" ("STR_GROUP_ID") --- SPLIT HERE! -CREATE INDEX "I_REDIRECT_CREATE" ON "REDIRECT" ("LNG_CREATION_TIME") --- SPLIT HERE! - -ALTER TABLE "ACTION" ADD CONSTRAINT "C_ACTION_PK" PRIMARY KEY ("PK_ACTION") - USING INDEX "C_ACTION_PK" --- SPLIT HERE! -ALTER TABLE "ALLOC" ADD CONSTRAINT "C_ALLOC_NAME_UNIQ" UNIQUE ("STR_NAME") - USING INDEX "C_ALLOC_NAME_UNIQ" --- SPLIT HERE! -ALTER TABLE "ALLOC" ADD CONSTRAINT "C_ALLOC_PK" PRIMARY KEY ("PK_ALLOC") - USING INDEX "C_ALLOC_PK" --- SPLIT HERE! -ALTER TABLE "COMMENTS" ADD CONSTRAINT "C_COMMENT_PK" PRIMARY KEY ("PK_COMMENT") - USING INDEX "C_COMMENT_PK" --- SPLIT HERE! -ALTER TABLE "CONFIG" ADD CONSTRAINT "C_PK_PKCONFIG" PRIMARY KEY ("PK_CONFIG") - USING INDEX "C_PK_PKCONFIG" --- SPLIT HERE! -ALTER TABLE "CONFIG" ADD CONSTRAINT "C_SHOW_UK" UNIQUE ("STR_KEY") - USING INDEX "C_SHOW_UK" --- SPLIT HERE! -ALTER TABLE "DEPEND" ADD CONSTRAINT "C_DEPEND_PK" PRIMARY KEY ("PK_DEPEND") - USING INDEX "C_DEPEND_PK" --- SPLIT HERE! -ALTER TABLE "FILTER" ADD CONSTRAINT "C_FILTER_PK" PRIMARY KEY ("PK_FILTER") - USING INDEX "C_FILTER_PK" --- SPLIT HERE! 
-ALTER TABLE "FOLDER" ADD CONSTRAINT "C_FOLDER_UK" UNIQUE ("PK_PARENT_FOLDER", "STR_NAME") - USING INDEX "C_FOLDER_UK" --- SPLIT HERE! -ALTER TABLE "FOLDER" ADD CONSTRAINT "C_FOLDER_PK" PRIMARY KEY ("PK_FOLDER") - USING INDEX "C_FOLDER_PK" --- SPLIT HERE! -ALTER TABLE "FOLDER_LEVEL" ADD CONSTRAINT "C_FOLDER_LEVEL_PK" PRIMARY KEY ("PK_FOLDER_LEVEL") - USING INDEX "C_FOLDER_LEVEL_PK" --- SPLIT HERE! -ALTER TABLE "FOLDER_LEVEL" ADD CONSTRAINT "C_FOLDER_LEVEL_UK" UNIQUE ("PK_FOLDER") - USING INDEX "C_FOLDER_LEVEL_UK" --- SPLIT HERE! -ALTER TABLE "FRAME" ADD CONSTRAINT "C_FRAME_PK" PRIMARY KEY ("PK_FRAME") - USING INDEX "C_FRAME_PK" --- SPLIT HERE! -ALTER TABLE "FRAME" ADD CONSTRAINT "C_FRAME_STR_NAME_UNQ" UNIQUE ("STR_NAME", "PK_JOB") - USING INDEX "C_FRAME_STR_NAME_UNQ" --- SPLIT HERE! -ALTER TABLE "HOST" ADD CONSTRAINT "C_STR_HOST_FQDN_UK" UNIQUE ("STR_FQDN") - USING INDEX "C_STR_HOST_FQDN_UK" --- SPLIT HERE! -ALTER TABLE "HOST" ADD CONSTRAINT "C_HOST_PK" PRIMARY KEY ("PK_HOST") - USING INDEX "C_HOST_PK" --- SPLIT HERE! -ALTER TABLE "HOST" ADD CONSTRAINT "C_HOST_UK" UNIQUE ("STR_NAME") - USING INDEX "C_HOST_UK" --- SPLIT HERE! -ALTER TABLE "HOST_STAT" ADD CONSTRAINT "C_HOSTSTAT_PK" PRIMARY KEY ("PK_HOST_STAT") - USING INDEX "C_HOSTSTAT_PK" --- SPLIT HERE! -ALTER TABLE "HOST_STAT" ADD CONSTRAINT "C_HOST_STAT_PK_HOST_UK" UNIQUE ("PK_HOST") - USING INDEX "C_HOST_STAT_PK_HOST_UK" --- SPLIT HERE! -ALTER TABLE "JOB" ADD CONSTRAINT "C_JOB_PK" PRIMARY KEY ("PK_JOB") - USING INDEX "C_JOB_PK" --- SPLIT HERE! -ALTER TABLE "JOB" ADD CONSTRAINT "C_JOB_UK" UNIQUE ("STR_VISIBLE_NAME") - USING INDEX "C_JOB_UK" --- SPLIT HERE! -ALTER TABLE "JOB_ENV" ADD CONSTRAINT "C_JOB_ENV_PK" PRIMARY KEY ("PK_JOB_ENV") - USING INDEX "C_JOB_ENV_PK" --- SPLIT HERE! -ALTER TABLE "JOB_LOCAL" ADD CONSTRAINT "C_PK_JOB_LOCAL" PRIMARY KEY ("PK_JOB_LOCAL") - USING INDEX "C_PK_JOB_LOCAL" --- SPLIT HERE! 
-ALTER TABLE "SERVICE" ADD CONSTRAINT "C_PK_SERVICE" PRIMARY KEY ("PK_SERVICE") - USING INDEX "C_PK_SERVICE" --- SPLIT HERE! -ALTER TABLE "HOST_LOCAL" ADD CONSTRAINT "C_PK_HOST_LOCAL" PRIMARY KEY ("PK_HOST_LOCAL") - USING INDEX "C_PK_HOST_LOCAL" --- SPLIT HERE! -ALTER TABLE "OWNER" ADD CONSTRAINT "C_PK_OWNER" PRIMARY KEY ("PK_OWNER") - USING INDEX "C_PK_OWNER" --- SPLIT HERE! -ALTER TABLE "DEED" ADD CONSTRAINT "C_PK_DEED" PRIMARY KEY ("PK_DEED") - USING INDEX "C_PK_DEED" --- SPLIT HERE! -ALTER TABLE "SHOW_SERVICE" ADD CONSTRAINT "C_PK_SHOW_SERVICE" PRIMARY KEY ("PK_SHOW_SERVICE") - USING INDEX "C_PK_SHOW_SERVICE" --- SPLIT HERE! -ALTER TABLE "LAYER_OUTPUT" ADD CONSTRAINT "C_PK_LAYER_OUTPUT" PRIMARY KEY ("PK_LAYER_OUTPUT") - USING INDEX "C_PK_LAYER_OUTPUT" --- SPLIT HERE! -ALTER TABLE "LAYER" ADD CONSTRAINT "C_LAYER_PK" PRIMARY KEY ("PK_LAYER") - USING INDEX "C_LAYER_PK" --- SPLIT HERE! -ALTER TABLE "LAYER" ADD CONSTRAINT "C_LAYER_STR_NAME_UNQ" UNIQUE ("STR_NAME", "PK_JOB") - USING INDEX "C_LAYER_STR_NAME_UNQ" --- SPLIT HERE! -ALTER TABLE "LAYER_ENV" ADD CONSTRAINT "C_LAYER_ENV_PK" PRIMARY KEY ("PK_LAYER_ENV") - USING INDEX "C_LAYER_ENV_PK" --- SPLIT HERE! -ALTER TABLE "LAYER_RESOURCE" ADD CONSTRAINT "C_LAYERRESOURCE_PK" PRIMARY KEY ("PK_LAYER_RESOURCE") - USING INDEX "C_LAYERRESOURCE_PK" --- SPLIT HERE! -ALTER TABLE "LAYER_RESOURCE" ADD CONSTRAINT "C_LAYERRESOURCE_UK" UNIQUE ("PK_LAYER") - USING INDEX "C_LAYERRESOURCE_UK" --- SPLIT HERE! -ALTER TABLE "LAYER_STAT" ADD CONSTRAINT "C_LAYERSTAT_PK" PRIMARY KEY ("PK_LAYER_STAT") - USING INDEX "C_LAYERSTAT_PK" --- SPLIT HERE! -ALTER TABLE "LAYER_USAGE" ADD CONSTRAINT "C_LAYER_USAGE_PK" PRIMARY KEY ("PK_LAYER_USAGE") - USING INDEX "C_LAYER_USAGE_PK" --- SPLIT HERE! -ALTER TABLE "LAYER_USAGE" ADD CONSTRAINT "C_LAYER_USAGE_PK_LAYER_UK" UNIQUE ("PK_LAYER") - USING INDEX "C_LAYER_USAGE_PK_LAYER_UK" --- SPLIT HERE! 
-ALTER TABLE "MATCHER" ADD CONSTRAINT "C_MATCHER_PK" PRIMARY KEY ("PK_MATCHER") - USING INDEX "C_MATCHER_PK" --- SPLIT HERE! -ALTER TABLE "PROC" ADD CONSTRAINT "C_PROC_PK" PRIMARY KEY ("PK_PROC") - USING INDEX "C_PROC_PK" --- SPLIT HERE! -ALTER TABLE "PROC" ADD CONSTRAINT "C_PROC_UK" UNIQUE ("PK_FRAME") - USING INDEX "C_PROC_UK" --- SPLIT HERE! -ALTER TABLE "SHOW" ADD CONSTRAINT "C_SHOW_PK" PRIMARY KEY ("PK_SHOW") - USING INDEX "C_SHOW_PK" --- SPLIT HERE! -ALTER TABLE "SUBSCRIPTION" ADD CONSTRAINT "C_SUBSCRIPTION_PK" PRIMARY KEY ("PK_SUBSCRIPTION") - USING INDEX "C_SUBSCRIPTION_PK" --- SPLIT HERE! -ALTER TABLE "SUBSCRIPTION" ADD CONSTRAINT "C_SUBSCRIPTION_UK" UNIQUE ("PK_SHOW", "PK_ALLOC") - USING INDEX "C_SUBSCRIPTION_UK" --- SPLIT HERE! -ALTER TABLE "JOB_STAT" ADD CONSTRAINT "C_JOB_STAT_PK" PRIMARY KEY ("PK_JOB_STAT") - USING INDEX "C_JOB_STAT_PK" --- SPLIT HERE! -ALTER TABLE "JOB_RESOURCE" ADD CONSTRAINT "C_JOB_RESOURCE_PK" PRIMARY KEY ("PK_JOB_RESOURCE") - USING INDEX "C_JOB_RESOURCE_PK" --- SPLIT HERE! -ALTER TABLE "JOB_RESOURCE" ADD CONSTRAINT "C_JOB_RESOURCE_UK" UNIQUE ("PK_JOB") - USING INDEX "C_JOB_RESOURCE_UK" --- SPLIT HERE! -ALTER TABLE "JOB_USAGE" ADD CONSTRAINT "C_JOB_USAGE_PK" PRIMARY KEY ("PK_JOB_USAGE") - USING INDEX "C_JOB_USAGE_PK" --- SPLIT HERE! -ALTER TABLE "JOB_USAGE" ADD CONSTRAINT "C_JOB_USAGE_PK_JOB_UNIQ" UNIQUE ("PK_JOB") - USING INDEX "C_JOB_USAGE_PK_JOB_UNIQ" --- SPLIT HERE! -ALTER TABLE "HOST_TAG" ADD CONSTRAINT "C_HOST_TAG_PK" PRIMARY KEY ("PK_HOST_TAG") - USING INDEX "C_HOST_TAG_PK" --- SPLIT HERE! -ALTER TABLE "TASK_LOCK" ADD CONSTRAINT "C_TASK_LOCK_PK" PRIMARY KEY ("PK_TASK_LOCK") - USING INDEX "C_TASK_LOCK_PK" --- SPLIT HERE! -ALTER TABLE "JOB_HISTORY" ADD CONSTRAINT "C_JOB_HISTORY_PK" PRIMARY KEY ("PK_JOB") - USING INDEX "C_JOB_HISTORY_PK" --- SPLIT HERE! -ALTER TABLE "LAYER_HISTORY" ADD CONSTRAINT "C_LAYER_HISTORY_PK" PRIMARY KEY ("PK_LAYER") - USING INDEX "C_LAYER_HISTORY_PK" --- SPLIT HERE! 
-ALTER TABLE "JOB_POST" ADD CONSTRAINT "C_JOB_POST_PK" PRIMARY KEY ("PK_JOB_POST") - USING INDEX "C_JOB_POST_PK" --- SPLIT HERE! -ALTER TABLE "FACILITY" ADD CONSTRAINT "C_FACILITY_PK" PRIMARY KEY ("PK_FACILITY") - USING INDEX "C_FACILITY_PK" --- SPLIT HERE! -ALTER TABLE "DEPT" ADD CONSTRAINT "C_DEPT_PK" PRIMARY KEY ("PK_DEPT") - USING INDEX "C_DEPT_PK" --- SPLIT HERE! -ALTER TABLE "SHOW_ALIAS" ADD CONSTRAINT "C_SHOW_ALIAS_PK" PRIMARY KEY ("PK_SHOW_ALIAS") - USING INDEX "C_SHOW_ALIAS_PK" --- SPLIT HERE! -ALTER TABLE "FOLDER_RESOURCE" ADD CONSTRAINT "C_FOLDER_RESOURCE_PK" PRIMARY KEY ("PK_FOLDER_RESOURCE") - USING INDEX "C_FOLDER_RESOURCE_PK" --- SPLIT HERE! -ALTER TABLE "JOB_MEM" ADD CONSTRAINT "C_JOB_MEM_PK" PRIMARY KEY ("PK_JOB_MEM") - USING INDEX "C_JOB_MEM_PK" --- SPLIT HERE! -ALTER TABLE "LAYER_MEM" ADD CONSTRAINT "C_LAYER_MEM_PK" PRIMARY KEY ("PK_LAYER_MEM") - USING INDEX "C_LAYER_MEM_PK" --- SPLIT HERE! -ALTER TABLE "POINT" ADD CONSTRAINT "C_POINT_PK" PRIMARY KEY ("PK_POINT") - USING INDEX "C_POINT_PK" --- SPLIT HERE! -ALTER TABLE "POINT" ADD CONSTRAINT "C_POINT_PK_SHOW_DEPT" UNIQUE ("PK_SHOW", "PK_DEPT") - USING INDEX "C_POINT_PK_SHOW_DEPT" --- SPLIT HERE! -ALTER TABLE "TASK" ADD CONSTRAINT "C_TASK_PK" PRIMARY KEY ("PK_TASK") - USING INDEX "C_TASK_PK" --- SPLIT HERE! -ALTER TABLE "TASK" ADD CONSTRAINT "C_TASK_UNIQ" UNIQUE ("STR_SHOT", "PK_POINT") - USING INDEX "C_TASK_UNIQ" --- SPLIT HERE! -ALTER TABLE "HISTORY_PERIOD" ADD CONSTRAINT "C_HISTORY_PERIOD_PK" PRIMARY KEY ("PK") - USING INDEX (CREATE UNIQUE INDEX "C_HISTORY_PERIOD_PK" ON "HISTORY_PERIOD" ("PK")) --- SPLIT HERE! -ALTER TABLE "FRAME_HISTORY" ADD CONSTRAINT "C_FRAME_HISTORY_PK" PRIMARY KEY ("PK_FRAME_HISTORY") - USING INDEX (CREATE UNIQUE INDEX "C_FRAME_HISTORY_PK" ON "FRAME_HISTORY" ("PK_FRAME_HISTORY")) --- SPLIT HERE! -ALTER TABLE "REDIRECT" ADD CONSTRAINT "C_REDIRECT_PK" PRIMARY KEY ("PK_PROC") - USING INDEX "C_REDIRECT_PK" --- SPLIT HERE! 
- -CREATE VIEW "VS_SHOW_RESOURCE" ("PK_SHOW", "INT_CORES") AS - SELECT - job.pk_show, - SUM(int_cores) AS int_cores - FROM - job, - job_resource - WHERE - job.pk_job = job_resource.pk_job - AND - job.str_state='Pending' - GROUP BY - job.pk_show - --- SPLIT HERE! -CREATE VIEW "VS_SHOW_STAT" ("PK_SHOW", "INT_PENDING_COUNT", "INT_RUNNING_COUNT", "INT_DEAD_COUNT", "INT_JOB_COUNT") AS - SELECT - job.pk_show, - SUM(int_waiting_count+int_depend_count) AS int_pending_count, - SUM(int_running_count) AS int_running_count, - SUM(int_dead_count) AS int_dead_count, - COUNT(1) AS int_job_count - FROM - job_stat, - job - WHERE - job_stat.pk_job = job.pk_job - AND - job.str_state = 'Pending' - GROUP BY job.pk_show - --- SPLIT HERE! -CREATE VIEW "VS_JOB_RESOURCE" ("PK_JOB", "INT_PROCS", "INT_CORES", "INT_MEM_RESERVED") AS - SELECT - job.pk_job, - COUNT(proc.pk_proc) AS int_procs, - COALESCE(SUM(int_cores_reserved),0) AS int_cores, - COALESCE(SUM(int_mem_reserved),0) AS int_mem_reserved - FROM - job LEFT JOIN proc ON (proc.pk_job = job.pk_job) - GROUP BY - job.pk_job - --- SPLIT HERE! 
-CREATE VIEW "VS_ALLOC_USAGE" ("PK_ALLOC", "INT_CORES", "INT_IDLE_CORES", "INT_RUNNING_CORES", "INT_LOCKED_CORES", "INT_AVAILABLE_CORES", "INT_HOSTS", "INT_LOCKED_HOSTS", "INT_DOWN_HOSTS") AS - SELECT - alloc.pk_alloc, - NVL(SUM(host.int_cores),0) AS int_cores, - NVL(SUM(host.int_cores_idle),0) AS int_idle_cores, - NVL(SUM(host.int_cores - host.int_cores_idle),0) as int_running_cores, - NVL((SELECT SUM(int_cores) FROM host WHERE host.pk_alloc=alloc.pk_alloc AND (str_lock_state='NimbyLocked' OR str_lock_state='Locked')),0) AS int_locked_cores, - NVL((SELECT SUM(int_cores_idle) FROM host h,host_stat hs WHERE h.pk_host = hs.pk_host AND h.pk_alloc=alloc.pk_alloc AND h.str_lock_state='Open' AND hs.str_state ='Up'),0) AS int_available_cores, - COUNT(host.pk_host) AS int_hosts, - (SELECT COUNT(*) FROM host WHERE host.pk_alloc=alloc.pk_alloc AND str_lock_state='Locked') AS int_locked_hosts, - (SELECT COUNT(*) FROM host h,host_stat hs WHERE h.pk_host = hs.pk_host AND h.pk_alloc=alloc.pk_alloc AND hs.str_state='Down') AS int_down_hosts - FROM - alloc LEFT JOIN host ON (alloc.pk_alloc = host.pk_alloc) - GROUP BY - alloc.pk_alloc - --- SPLIT HERE! -CREATE VIEW "VS_FOLDER_COUNTS" ("PK_FOLDER", "INT_DEPEND_COUNT", "INT_WAITING_COUNT", "INT_RUNNING_COUNT", "INT_DEAD_COUNT", "INT_CORES", "INT_JOB_COUNT") AS - SELECT - folder.pk_folder, - NVL(SUM(int_depend_count),0) AS int_depend_count, - NVL(SUM(int_waiting_count),0) AS int_waiting_count, - NVL(SUM(int_running_count),0) AS int_running_count, - NVL(SUM(int_dead_count),0) AS int_dead_count, - NVL(SUM(int_cores),0) AS int_cores, - NVL(COUNT(job.pk_job),0) AS int_job_count -FROM - folder - LEFT JOIN - job ON (folder.pk_folder = job.pk_folder AND job.str_state='Pending') - LEFT JOIN - job_stat ON (job.pk_job = job_stat.pk_job) - LEFT JOIN - job_resource ON (job.pk_job = job_resource.pk_job) - GROUP BY - folder.pk_folder - --- SPLIT HERE! 
-CREATE VIEW "VS_WAITING" ("PK_SHOW") AS - SELECT - job.pk_show - FROM - job_resource jr, - job_stat, - job - WHERE - job_stat.pk_job = job.pk_job - AND - jr.pk_job = job.pk_job - AND - job.str_state = 'Pending' - AND - job.b_paused = 0 - AND - jr.int_max_cores - jr.int_cores >= 100 - AND - job_stat.int_waiting_count != 0 - - GROUP BY job.pk_show - --- SPLIT HERE! -CREATE VIEW "V_HISTORY_FRAME" ("PK_FRAME_HISTORY", "PK_FRAME", "PK_LAYER", "PK_JOB", "STR_NAME", "STR_STATE", "INT_MEM_RESERVED", "INT_MEM_MAX_USED", "INT_CORES", "STR_HOST", "INT_EXIT_STATUS", "STR_ALLOC_NAME", "B_ALLOC_BILLABLE", "STR_FACILITY_NAME", "INT_TS_STARTED", "INT_TS_STOPPED", "INT_CHECKPOINT_COUNT", "STR_SHOW_NAME", "DT_LAST_MODIFIED") AS - select -fh.PK_FRAME_HISTORY, -fh.PK_FRAME, -fh.PK_LAYER, -fh.PK_JOB, -fh.STR_NAME, -fh.STR_STATE, -fh.INT_MEM_RESERVED, -fh.INT_MEM_MAX_USED, -fh.INT_CORES, -fh.STR_HOST, -fh.INT_EXIT_STATUS, -a.STR_NAME STR_ALLOC_NAME, -a.B_BILLABLE B_ALLOC_BILLABLE, -f.STR_NAME STR_FACILITY_NAME, -fh.INT_TS_STARTED, -fh.INT_TS_STOPPED, -fh.INT_CHECKPOINT_COUNT, -null str_show_name, -fh.dt_last_modified -from frame_history fh, job_history jh, alloc a, facility f -where fh.pk_job = jh.pk_job -and fh.pk_alloc = a.pk_alloc (+) -and a.pk_facility = f.pk_facility (+) -and fh.dt_last_modified >= ( - select dt_begin - from history_period -) -and fh.dt_last_modified < ( - select dt_end - from history_period -) --- SPLIT HERE! 
-CREATE VIEW "V_HISTORY_JOB" ("PK_JOB", "STR_NAME", "STR_SHOT", "STR_USER", "INT_CORE_TIME_SUCCESS", "INT_CORE_TIME_FAIL", "INT_FRAME_COUNT", "INT_LAYER_COUNT", "INT_WAITING_COUNT", "INT_DEAD_COUNT", "INT_DEPEND_COUNT", "INT_EATEN_COUNT", "INT_SUCCEEDED_COUNT", "INT_RUNNING_COUNT", "INT_MAX_RSS", "B_ARCHIVED", "STR_FACILITY_NAME", "STR_DEPT_NAME", "INT_TS_STARTED", "INT_TS_STOPPED", "STR_SHOW_NAME", "DT_LAST_MODIFIED") AS - select -jh.PK_JOB, -jh.STR_NAME, -jh.STR_SHOT, -jh.STR_USER, -jh.INT_CORE_TIME_SUCCESS, -jh.INT_CORE_TIME_FAIL, -jh.INT_FRAME_COUNT, -jh.INT_LAYER_COUNT, -jh.INT_WAITING_COUNT, -jh.INT_DEAD_COUNT, -jh.INT_DEPEND_COUNT, -jh.INT_EATEN_COUNT, -jh.INT_SUCCEEDED_COUNT, -jh.INT_RUNNING_COUNT, -jh.INT_MAX_RSS, -jh.B_ARCHIVED, -f.str_name STR_FACILITY_NAME, -d.str_name str_dept_name, -jh.INT_TS_STARTED, -jh.INT_TS_STOPPED, -s.str_name str_show_name, -jh.dt_last_modified -from job_history jh, show s, facility f, dept d -where jh.pk_show = s.pk_show -and jh.pk_facility = f.pk_facility -and jh.pk_dept = d.pk_dept -and ( - jh.dt_last_modified >= ( - select dt_begin - from history_period - ) - or - jh.int_ts_stopped = 0 -) --- SPLIT HERE! 
-CREATE VIEW "V_HISTORY_LAYER" ("PK_LAYER", "PK_JOB", "STR_NAME", "STR_TYPE", "INT_CORES_MIN", "INT_MEM_MIN", "INT_CORE_TIME_SUCCESS", "INT_CORE_TIME_FAIL", "INT_FRAME_COUNT", "INT_LAYER_COUNT", "INT_WAITING_COUNT", "INT_DEAD_COUNT", "INT_DEPEND_COUNT", "INT_EATEN_COUNT", "INT_SUCCEEDED_COUNT", "INT_RUNNING_COUNT", "INT_MAX_RSS", "B_ARCHIVED", "STR_SERVICES", "STR_SHOW_NAME", "DT_LAST_MODIFIED") AS - select -lh.PK_LAYER, -lh.PK_JOB, -lh.STR_NAME, -lh.STR_TYPE, -lh.INT_CORES_MIN, -lh.INT_MEM_MIN, -lh.INT_CORE_TIME_SUCCESS, -lh.INT_CORE_TIME_FAIL, -lh.INT_FRAME_COUNT, -lh.INT_LAYER_COUNT, -lh.INT_WAITING_COUNT, -lh.INT_DEAD_COUNT, -lh.INT_DEPEND_COUNT, -lh.INT_EATEN_COUNT, -lh.INT_SUCCEEDED_COUNT, -lh.INT_RUNNING_COUNT, -lh.INT_MAX_RSS, -lh.B_ARCHIVED, -lh.STR_SERVICES, -s.str_name str_show_name, -lh.dt_last_modified -from layer_history lh, job_history jh, show s -where lh.pk_job = jh.pk_job -and jh.pk_show = s.pk_show -and jh.dt_last_modified >= ( - select dt_begin - from history_period -) -and jh.dt_last_modified < ( - select dt_end - from history_period -) --- SPLIT HERE! 
-CREATE PACKAGE BODY HISTORY AS - - procedure period_shift as - vTemp date - begin - - begin - - select dt_end - into vTemp - from history_period - - update history_period - set dt_begin = vTemp, - dt_end = ( - select sysdate from dual - ) - - exception - when no_data_found then - insert into history_period (pk) values (sys_guid()) - select dt_end - into vTemp - from history_period - when others then - raise - end - - end period_shift - - procedure period_shift(piEndDate in date) as - vTemp date - begin - - begin - - select dt_end - into vTemp - from history_period - - update history_period - set dt_begin = vTemp, - dt_end = ( - select nvl(piEndDate, sysdate) from dual - ) - - exception - when no_data_found then - insert into history_period (pk) values (sys_guid()) - select dt_end - into vTemp - from history_period - when others then - raise - end - - end period_shift - - - procedure period_clear as - begin - - delete from history_period - insert into history_period (pk) values (sys_guid()) - - end period_clear - -END HISTORY --- SPLIT HERE! - - -ALTER TABLE "ACTION" ADD CONSTRAINT "C_ACTION_PK_FILTER" FOREIGN KEY ("PK_FILTER") - REFERENCES "FILTER" ("PK_FILTER") --- SPLIT HERE! -ALTER TABLE "ACTION" ADD CONSTRAINT "C_ACTION_PK_FOLDER" FOREIGN KEY ("PK_FOLDER") - REFERENCES "FOLDER" ("PK_FOLDER") --- SPLIT HERE! -ALTER TABLE "ALLOC" ADD CONSTRAINT "C_ALLOC_PK_FACILITY" FOREIGN KEY ("PK_FACILITY") - REFERENCES "FACILITY" ("PK_FACILITY") --- SPLIT HERE! -ALTER TABLE "COMMENTS" ADD CONSTRAINT "C_COMMENT_PK_HOST" FOREIGN KEY ("PK_HOST") - REFERENCES "HOST" ("PK_HOST") --- SPLIT HERE! -ALTER TABLE "COMMENTS" ADD CONSTRAINT "C_COMMENT_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "FILTER" ADD CONSTRAINT "C_FILTER_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "FOLDER" ADD CONSTRAINT "C_FOLDER_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! 
-ALTER TABLE "FOLDER" ADD CONSTRAINT "C_FOLDER_PK_DEPT" FOREIGN KEY ("PK_DEPT") - REFERENCES "DEPT" ("PK_DEPT") --- SPLIT HERE! -ALTER TABLE "FOLDER_LEVEL" ADD CONSTRAINT "C_FOLDER_LEVEL_PK_FOLDER" FOREIGN KEY ("PK_FOLDER") - REFERENCES "FOLDER" ("PK_FOLDER") --- SPLIT HERE! -ALTER TABLE "FRAME" ADD CONSTRAINT "C_FRAME_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "FRAME" ADD CONSTRAINT "C_FRAME_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "HOST" ADD CONSTRAINT "C_HOST_PK_ALLOC" FOREIGN KEY ("PK_ALLOC") - REFERENCES "ALLOC" ("PK_ALLOC") --- SPLIT HERE! -ALTER TABLE "HOST_STAT" ADD CONSTRAINT "C_HOST_STAT_PK_HOST" FOREIGN KEY ("PK_HOST") - REFERENCES "HOST" ("PK_HOST") --- SPLIT HERE! -ALTER TABLE "JOB" ADD CONSTRAINT "C_JOB_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "JOB" ADD CONSTRAINT "C_JOB_PK_FOLDER" FOREIGN KEY ("PK_FOLDER") - REFERENCES "FOLDER" ("PK_FOLDER") --- SPLIT HERE! -ALTER TABLE "JOB" ADD CONSTRAINT "C_JOB_PK_FACILITY" FOREIGN KEY ("PK_FACILITY") - REFERENCES "FACILITY" ("PK_FACILITY") --- SPLIT HERE! -ALTER TABLE "JOB" ADD CONSTRAINT "C_JOB_PK_DEPT" FOREIGN KEY ("PK_DEPT") - REFERENCES "DEPT" ("PK_DEPT") --- SPLIT HERE! -ALTER TABLE "JOB_ENV" ADD CONSTRAINT "C_JOB_ENV_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER" ADD CONSTRAINT "C_LAYER_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER_ENV" ADD CONSTRAINT "C_LAYER_ENV_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "LAYER_ENV" ADD CONSTRAINT "C_LAYER_ENV_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER_RESOURCE" ADD CONSTRAINT "C_LAYER_RESOURCE_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! 
-ALTER TABLE "LAYER_RESOURCE" ADD CONSTRAINT "C_LAYER_RESOURCE_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "LAYER_STAT" ADD CONSTRAINT "C_LAYER_STAT_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER_STAT" ADD CONSTRAINT "C_LAYER_STAT_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "LAYER_USAGE" ADD CONSTRAINT "C_LAYER_USAGE_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER_USAGE" ADD CONSTRAINT "C_LAYER_USAGE_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "MATCHER" ADD CONSTRAINT "C_MATCHER_PK_FILTER" FOREIGN KEY ("PK_FILTER") - REFERENCES "FILTER" ("PK_FILTER") --- SPLIT HERE! -ALTER TABLE "PROC" ADD CONSTRAINT "C_PROC_PK_FRAME" FOREIGN KEY ("PK_FRAME") - REFERENCES "FRAME" ("PK_FRAME") --- SPLIT HERE! -ALTER TABLE "PROC" ADD CONSTRAINT "C_PROC_PK_HOST" FOREIGN KEY ("PK_HOST") - REFERENCES "HOST" ("PK_HOST") --- SPLIT HERE! -ALTER TABLE "SUBSCRIPTION" ADD CONSTRAINT "C_SUBSCRIPTION_PK_ALLOC" FOREIGN KEY ("PK_ALLOC") - REFERENCES "ALLOC" ("PK_ALLOC") --- SPLIT HERE! -ALTER TABLE "SUBSCRIPTION" ADD CONSTRAINT "C_SUBSCRIPTION_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "JOB_STAT" ADD CONSTRAINT "C_JOB_STAT_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "JOB_RESOURCE" ADD CONSTRAINT "C_JOB_RESOURCE_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "JOB_USAGE" ADD CONSTRAINT "C_JOB_USAGE_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "JOB_HISTORY" ADD CONSTRAINT "C_JOB_HISTORY_PK_FACILITY" FOREIGN KEY ("PK_FACILITY") - REFERENCES "FACILITY" ("PK_FACILITY") --- SPLIT HERE! 
-ALTER TABLE "JOB_HISTORY" ADD CONSTRAINT "C_JOB_HISTORY_PK_DEPT" FOREIGN KEY ("PK_DEPT") - REFERENCES "DEPT" ("PK_DEPT") --- SPLIT HERE! -ALTER TABLE "JOB_HISTORY" ADD CONSTRAINT "C_JOB_HISTORY_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "LAYER_HISTORY" ADD CONSTRAINT "C_LAYER_HISTORY_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB_HISTORY" ("PK_JOB") ON DELETE CASCADE --- SPLIT HERE! -ALTER TABLE "JOB_POST" ADD CONSTRAINT "C_JOB_POST_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "JOB_POST" ADD CONSTRAINT "C_JOB_POST_PK_POST_JOB" FOREIGN KEY ("PK_POST_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "SHOW_ALIAS" ADD CONSTRAINT "C_SHOW_ALIAS_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "FOLDER_RESOURCE" ADD CONSTRAINT "C_FOLDER_RESOURCE_PK_FOLDER" FOREIGN KEY ("PK_FOLDER") - REFERENCES "FOLDER" ("PK_FOLDER") --- SPLIT HERE! -ALTER TABLE "JOB_MEM" ADD CONSTRAINT "C_JOB_MEM_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER_MEM" ADD CONSTRAINT "C_LAYER_MEM_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "LAYER_MEM" ADD CONSTRAINT "C_LAYER_MEM_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "POINT" ADD CONSTRAINT "C_POINT_PK_DEPT" FOREIGN KEY ("PK_DEPT") - REFERENCES "DEPT" ("PK_DEPT") --- SPLIT HERE! -ALTER TABLE "POINT" ADD CONSTRAINT "C_POINT_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "TASK" ADD CONSTRAINT "C_TASK_PK_POINT" FOREIGN KEY ("PK_POINT") - REFERENCES "POINT" ("PK_POINT") --- SPLIT HERE! -ALTER TABLE "JOB_LOCAL" ADD CONSTRAINT "C_JOB_LOCAL_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! 
-ALTER TABLE "JOB_LOCAL" ADD CONSTRAINT "C_JOB_LOCAL_PK_HOST" FOREIGN KEY ("PK_HOST") - REFERENCES "HOST" ("PK_HOST") --- SPLIT HERE! -ALTER TABLE "HOST_LOCAL" ADD CONSTRAINT "C_HOST_LOCAL_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "HOST_LOCAL" ADD CONSTRAINT "C_HOST_LOCAL_PK_HOST" FOREIGN KEY ("PK_HOST") - REFERENCES "HOST" ("PK_HOST") --- SPLIT HERE! -ALTER TABLE "OWNER" ADD CONSTRAINT "C_OWNER_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "DEED" ADD CONSTRAINT "C_DEED_PK_HOST" FOREIGN KEY ("PK_HOST") - REFERENCES "HOST" ("PK_HOST") --- SPLIT HERE! -ALTER TABLE "SHOW_SERVICE" ADD CONSTRAINT "C_SHOW_SERVICE_PK_SHOW" FOREIGN KEY ("PK_SHOW") - REFERENCES "SHOW" ("PK_SHOW") --- SPLIT HERE! -ALTER TABLE "LAYER_OUTPUT" ADD CONSTRAINT "C_LAYER_OUTPUT_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER" ("PK_LAYER") --- SPLIT HERE! -ALTER TABLE "LAYER_OUTPUT" ADD CONSTRAINT "C_LAYER_OUTPUT_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB" ("PK_JOB") --- SPLIT HERE! -ALTER TABLE "FRAME_HISTORY" ADD CONSTRAINT "C_FRAME_HISTORY_PK_JOB" FOREIGN KEY ("PK_JOB") - REFERENCES "JOB_HISTORY" ("PK_JOB") ON DELETE CASCADE --- SPLIT HERE! -ALTER TABLE "FRAME_HISTORY" ADD CONSTRAINT "C_FRAME_HISTORY_PK_LAYER" FOREIGN KEY ("PK_LAYER") - REFERENCES "LAYER_HISTORY" ("PK_LAYER") ON DELETE CASCADE --- SPLIT HERE! -ALTER TABLE "FRAME_HISTORY" ADD CONSTRAINT "C_FRAME_HISTORY_PK_ALLOC" FOREIGN KEY ("PK_ALLOC") - REFERENCES "ALLOC" ("PK_ALLOC") --- SPLIT HERE! - -CREATE TRIGGER tbiu_layer_history -before insert or update -on layer_history -referencing new as new old as old -for each row -begin - :new.dt_last_modified := sysdate; -end; --- SPLIT HERE! 
- -CREATE TRIGGER "AFTER_JOB_MOVED" AFTER UPDATE ON job -FOR EACH ROW - WHEN (NEW.pk_folder != OLD.pk_folder) DECLARE - int_core_count NUMERIC(16,0); -BEGIN - SELECT int_cores INTO int_core_count - FROM job_resource WHERE pk_job = :new.pk_job; - - IF int_core_count > 0 THEN - UPDATE folder_resource SET int_cores = int_cores + int_core_count - WHERE pk_folder = :new.pk_folder; - - UPDATE folder_resource SET int_cores = int_cores - int_core_count - WHERE pk_folder = :old.pk_folder; - END IF; -END; --- SPLIT HERE! - -CREATE TRIGGER "BEFORE_DELETE_JOB" BEFORE DELETE ON job -FOR EACH ROW -DECLARE - TYPE StatType IS RECORD ( - int_core_time_success NUMERIC(38), - int_core_time_fail NUMERIC(38), - int_waiting_count NUMERIC(38), - int_dead_count NUMERIC(38), - int_depend_count NUMERIC(38), - int_eaten_count NUMERIC(38), - int_succeeded_count NUMERIC(38), - int_running_count NUMERIC(38), - int_max_rss NUMERIC(38) - ); - js StatType; - -BEGIN - SELECT - job_usage.int_core_time_success, - job_usage.int_core_time_fail, - job_stat.int_waiting_count, - job_stat.int_dead_count, - job_stat.int_depend_count, - job_stat.int_eaten_count, - job_stat.int_succeeded_count, - job_stat.int_running_count, - job_mem.int_max_rss - INTO - js - FROM - job_mem, - job_usage, - job_stat - WHERE - job_usage.pk_job = job_mem.pk_job - AND - job_stat.pk_job = job_mem.pk_job - AND - job_mem.pk_job = :old.pk_job; - - UPDATE - job_history - SET - pk_dept = :old.pk_dept, - int_core_time_success = js.int_core_time_success, - int_core_time_fail = js.int_core_time_fail, - int_frame_count = :old.int_frame_count, - int_layer_count = :old.int_layer_count, - int_waiting_count = js.int_waiting_count, - int_dead_count = js.int_dead_count, - int_depend_count = js.int_depend_count, - int_eaten_count = js.int_eaten_count, - int_succeeded_count = js.int_succeeded_count, - int_running_count = js.int_running_count, - int_max_rss = js.int_max_rss, - b_archived = 1, - int_ts_stopped = nvl(epoch(:old.ts_stopped), 
epoch(systimestamp)) - WHERE - pk_job = :old.pk_job; - - delete from depend where pk_job_depend_on=:old.pk_job or pk_job_depend_er=:old.pk_job; - delete from frame where pk_job=:old.pk_job; - delete from layer where pk_job=:old.pk_job; - delete from job_env WHERE pk_job=:old.pk_job; - delete from job_stat WHERE pk_job=:old.pk_job; - delete from job_resource WHERE pk_job=:old.pk_job; - delete from job_usage WHERE pk_job=:old.pk_job; - delete from job_mem WHERE pk_job=:old.pk_job; - delete from comments WHERE pk_job=:old.pk_job; - -END; --- SPLIT HERE! - -CREATE TRIGGER "AFTER_JOB_FINISHED" AFTER UPDATE ON job -FOR EACH ROW - WHEN (old.str_state = 'Pending' AND new.str_state = 'Finished') DECLARE - ts NUMERIC(12,0) := epoch(systimestamp); - TYPE StatType IS RECORD ( - int_core_time_success NUMERIC(38), - int_core_time_fail NUMERIC(38), - int_waiting_count NUMERIC(38), - int_dead_count NUMERIC(38), - int_depend_count NUMERIC(38), - int_eaten_count NUMERIC(38), - int_succeeded_count NUMERIC(38), - int_running_count NUMERIC(38), - int_max_rss NUMERIC(38) - ); - - TYPE LayerStatType IS RECORD ( - int_core_time_success NUMERIC(38), - int_core_time_fail NUMERIC(38), - int_total_count NUMERIC(38), - int_waiting_count NUMERIC(38), - int_dead_count NUMERIC(38), - int_depend_count NUMERIC(38), - int_eaten_count NUMERIC(38), - int_succeeded_count NUMERIC(38), - int_running_count NUMERIC(38), - int_max_rss NUMERIC(38) - ); - js StatType; - ls LayerStatType; -BEGIN - SELECT - job_usage.int_core_time_success, - job_usage.int_core_time_fail, - job_stat.int_waiting_count, - job_stat.int_dead_count, - job_stat.int_depend_count, - job_stat.int_eaten_count, - job_stat.int_succeeded_count, - job_stat.int_running_count, - job_mem.int_max_rss - INTO - js - FROM - job_mem, - job_usage, - job_stat - WHERE - job_usage.pk_job = job_mem.pk_job - AND - job_stat.pk_job = job_mem.pk_job - AND - job_mem.pk_job = :new.pk_job; - - UPDATE - job_history - SET - pk_dept = :new.pk_dept, - 
int_core_time_success = js.int_core_time_success, - int_core_time_fail = js.int_core_time_fail, - int_frame_count = :new.int_frame_count, - int_layer_count = :new.int_layer_count, - int_waiting_count = js.int_waiting_count, - int_dead_count = js.int_dead_count, - int_depend_count = js.int_depend_count, - int_eaten_count = js.int_eaten_count, - int_succeeded_count = js.int_succeeded_count, - int_running_count = js.int_running_count, - int_max_rss = js.int_max_rss, - int_ts_stopped = ts - WHERE - pk_job = :new.pk_job; - - FOR one_layer in (SELECT pk_layer from layer where pk_job = :new.pk_job) LOOP - SELECT - layer_usage.int_core_time_success, - layer_usage.int_core_time_fail, - layer_stat.int_total_count, - layer_stat.int_waiting_count, - layer_stat.int_dead_count, - layer_stat.int_depend_count, - layer_stat.int_eaten_count, - layer_stat.int_succeeded_count, - layer_stat.int_running_count, - layer_mem.int_max_rss - INTO - ls - FROM - layer_mem, - layer_usage, - layer_stat - WHERE - layer_usage.pk_layer = layer_mem.pk_layer - AND - layer_stat.pk_layer = layer_mem.pk_layer - AND - layer_mem.pk_layer = one_layer.pk_layer; - - UPDATE - layer_history - SET - int_core_time_success = ls.int_core_time_success, - int_core_time_fail = ls.int_core_time_fail, - int_frame_count = ls.int_total_count, - int_waiting_count = ls.int_waiting_count, - int_dead_count = ls.int_dead_count, - int_depend_count = ls.int_depend_count, - int_eaten_count = ls.int_eaten_count, - int_succeeded_count = ls.int_succeeded_count, - int_running_count = ls.int_running_count, - int_max_rss = ls.int_max_rss - WHERE - pk_layer = one_layer.pk_layer; - END LOOP; - - /** - * Delete any local core assignements from this job. - **/ - DELETE FROM job_local WHERE pk_job=:new.pk_job; - -END; --- SPLIT HERE! 
- -CREATE TRIGGER "AFTER_INSERT_JOB" AFTER INSERT ON job -FOR EACH ROW -BEGIN - INSERT INTO job_stat (pk_job_stat,pk_job) VALUES(:new.pk_job,:new.pk_job); - INSERT INTO job_resource (pk_job_resource,pk_job) VALUES(:new.pk_job,:new.pk_job); - INSERT INTO job_usage (pk_job_usage,pk_job) VALUES(:new.pk_job,:new.pk_job); - INSERT INTO job_mem (pk_job_mem,pk_job) VALUES (:new.pk_job,:new.pk_job); - - INSERT INTO job_history - (pk_job, pk_show, pk_facility, pk_dept, str_name, str_shot, str_user, int_ts_started) - VALUES - (:new.pk_job, :new.pk_show, :new.pk_facility, :new.pk_dept, - :new.str_name, :new.str_shot, :new.str_user, epoch(systimestamp)); -END; --- SPLIT HERE! - -CREATE TRIGGER "AFTER_JOB_DEPT_UPDATE" AFTER UPDATE ON job -FOR EACH ROW - WHEN (NEW.pk_dept != OLD.pk_dept AND new.str_state='Pending') DECLARE - int_running_cores NUMERIC(16,0); -BEGIN - /** - * Handles the accounting for moving a job between departments. - **/ - SELECT int_cores INTO int_running_cores - FROM job_resource WHERE pk_job = :new.pk_job; - - IF int_running_cores > 0 THEN - UPDATE point SET int_cores = int_cores + int_running_cores - WHERE pk_dept = :new.pk_dept AND pk_show = :new.pk_show; - - UPDATE point SET int_cores = int_cores - int_running_cores - WHERE pk_dept = :old.pk_dept AND pk_show = :old.pk_show; - END IF; - -END; --- SPLIT HERE! - -CREATE TRIGGER "VERIFY_HOST_LOCAL" BEFORE UPDATE ON host_local -FOR EACH ROW - WHEN ((NEW.int_cores_max = OLD.int_cores_max AND NEW.int_mem_max = OLD.int_mem_max) AND -(NEW.int_cores_idle != OLD.int_cores_idle OR NEW.int_mem_idle != OLD.int_mem_idle)) BEGIN - /** - * Check to see if the new cores exceeds max cores. This check is only - * done if NEW.int_max_cores is equal to OLD.int_max_cores and - * NEW.int_cores > OLD.int_cores, otherwise this error will be thrown - * when people lower the max. 
- **/ - IF :NEW.int_cores_idle < 0 THEN - Raise_application_error(-20021, 'host local doesnt have enough idle cores.'); - END IF; - - IF :NEW.int_mem_idle < 0 THEN - Raise_application_error(-20021, 'host local doesnt have enough idle memory'); - END IF; - -END; --- SPLIT HERE! - -CREATE TRIGGER "TIER_HOST_LOCAL" BEFORE UPDATE ON host_local -FOR EACH ROW -BEGIN - :new.float_tier := tier(:new.int_cores_max - :new.int_cores_idle,:new.int_cores_max); -END; --- SPLIT HERE! - -CREATE TRIGGER "AFTER_INSERT_LAYER" AFTER INSERT ON layer -FOR EACH ROW -BEGIN - - INSERT INTO layer_stat (pk_layer_stat, pk_layer, pk_job) VALUES (:new.pk_layer, :new.pk_layer, :new.pk_job); - INSERT INTO layer_resource (pk_layer_resource, pk_layer, pk_job) VALUES (:new.pk_layer, :new.pk_layer, :new.pk_job); - INSERT INTO layer_usage (pk_layer_usage, pk_layer, pk_job) VALUES (:new.pk_layer, :new.pk_layer, :new.pk_job); - INSERT INTO layer_mem (pk_layer_mem, pk_layer, pk_job) VALUES (:new.pk_layer, :new.pk_layer, :new.pk_job); - - INSERT INTO layer_history - (pk_layer, pk_job, str_name, str_type, int_cores_min, int_mem_min, b_archived, str_services) - VALUES - (:new.pk_layer, :new.pk_job, :new.str_name, :new.str_type, :new.int_cores_min, :new.int_mem_min, 0, :new.str_services); -END; --- SPLIT HERE! 
- -CREATE TRIGGER "BEFORE_DELETE_LAYER" BEFORE DELETE ON layer -FOR EACH ROW -DECLARE - TYPE StatType IS RECORD ( - int_core_time_success NUMERIC(38), - int_core_time_fail NUMERIC(38), - int_total_count NUMERIC(38), - int_waiting_count NUMERIC(38), - int_dead_count NUMERIC(38), - int_depend_count NUMERIC(38), - int_eaten_count NUMERIC(38), - int_succeeded_count NUMERIC(38), - int_running_count NUMERIC(38), - int_max_rss NUMERIC(38) - ); - js StatType; - -BEGIN - SELECT - layer_usage.int_core_time_success, - layer_usage.int_core_time_fail, - layer_stat.int_total_count, - layer_stat.int_waiting_count, - layer_stat.int_dead_count, - layer_stat.int_depend_count, - layer_stat.int_eaten_count, - layer_stat.int_succeeded_count, - layer_stat.int_running_count, - layer_mem.int_max_rss - INTO - js - FROM - layer_mem, - layer_usage, - layer_stat - WHERE - layer_usage.pk_layer = layer_mem.pk_layer - AND - layer_stat.pk_layer = layer_mem.pk_layer - AND - layer_mem.pk_layer = :old.pk_layer; - - UPDATE - layer_history - SET - int_core_time_success = js.int_core_time_success, - int_core_time_fail = js.int_core_time_fail, - int_frame_count = js.int_total_count, - int_waiting_count = js.int_waiting_count, - int_dead_count = js.int_dead_count, - int_depend_count = js.int_depend_count, - int_eaten_count = js.int_eaten_count, - int_succeeded_count = js.int_succeeded_count, - int_running_count = js.int_running_count, - int_max_rss = js.int_max_rss, - b_archived = 1 - WHERE - pk_layer = :old.pk_layer; - - delete from layer_resource where pk_layer=:old.pk_layer; - delete from layer_stat where pk_layer=:old.pk_layer; - delete from layer_usage where pk_layer=:old.pk_layer; - delete from layer_env where pk_layer=:old.pk_layer; - delete from layer_mem where pk_layer=:old.pk_layer; - delete from layer_output where pk_layer=:old.pk_layer; -END; --- SPLIT HERE! 
- -CREATE TRIGGER tbiu_job_history -before insert or update -on job_history -referencing new as new old as old -for each row -begin - :new.dt_last_modified := sysdate; -end; --- SPLIT HERE! - -CREATE TRIGGER "VERIFY_HOST_RESOURCES" BEFORE UPDATE ON host -FOR EACH ROW - WHEN (new.int_cores_idle != old.int_cores_idle OR new.int_mem_idle != old.int_mem_idle) BEGIN - IF :new.int_cores_idle < 0 THEN - Raise_application_error(-20011, 'unable to allocate additional core units'); - END IF; - - If :new.int_mem_idle < 0 THEN - Raise_application_error(-20012, 'unable to allocate additional memory'); - END IF; - - If :new.int_gpu_idle < 0 THEN - Raise_application_error(-20013, 'unable to allocate additional gpu memory'); - END IF; - -END; --- SPLIT HERE! - -CREATE TRIGGER "BEFORE_DELETE_HOST" BEFORE DELETE ON host -FOR EACH ROW -BEGIN - delete from host_stat WHERE pk_host = :old.pk_host; - delete from host_tag WHERE pk_host = :old.pk_host; - delete from deed WHERE pk_host = :old.pk_host; -END; --- SPLIT HERE! - -CREATE TRIGGER "VERIFY_JOB_RESOURCES" BEFORE UPDATE ON job_resource -FOR EACH ROW - WHEN ( NEW.int_max_cores = OLD.int_max_cores AND NEW.int_cores > OLD.int_cores) BEGIN - /** - * Check to see if the new cores exceeds max cores. This check is only - * done if NEW.int_max_cores is equal to OLD.int_max_cores and - * NEW.int_cores > OLD.int_cores, otherwise this error will be thrown - * at the wrong time. - **/ - IF :NEW.int_cores > :NEW.int_max_cores THEN - Raise_application_error(-20021, 'job has exceeded max cores'); - END IF; -END; --- SPLIT HERE! - -CREATE TRIGGER "TIER_JOB" BEFORE UPDATE ON job_resource -FOR EACH ROW -BEGIN - /** calculates new tier **/ - :new.float_tier := tier(:new.int_cores,:new.int_min_cores); -END; --- SPLIT HERE! - -CREATE TRIGGER "VERIFY_JOB_LOCAL" BEFORE UPDATE ON job_local -FOR EACH ROW - WHEN ( NEW.int_max_cores = OLD.int_max_cores AND NEW.int_cores > OLD.int_cores) BEGIN - /** - * Check to see if the new cores exceeds max cores. 
This check is only - * done if NEW.int_max_cores is equal to OLD.int_max_cores and - * NEW.int_cores > OLD.int_cores, otherwise this error will be thrown - * when people lower the max. - **/ - IF :NEW.int_cores > :NEW.int_max_cores THEN - Raise_application_error(-20021, 'job local has exceeded max cores'); - END IF; -END; --- SPLIT HERE! - -CREATE TRIGGER "TIER_FOLDER" BEFORE UPDATE ON folder_resource -FOR EACH ROW -BEGIN - /** calculates new tier **/ - :new.float_tier := soft_tier(:new.int_cores,:new.int_min_cores); -END; --- SPLIT HERE! - -CREATE TRIGGER "BEFORE_DELETE_FOLDER" BEFORE DELETE ON folder -FOR EACH ROW -BEGIN - DELETE FROM folder_level WHERE pk_folder = :old.pk_folder; - DELETE FROM folder_resource WHERE pk_folder = :old.pk_folder; -END; --- SPLIT HERE! - -CREATE TRIGGER "AFTER_INSERT_FOLDER" AFTER INSERT ON folder -FOR EACH ROW -DECLARE - int_level NUMERIC(16,0) :=0; -BEGIN - IF :new.pk_parent_folder IS NOT NULL THEN - SELECT folder_level.int_level + 1 INTO int_level FROM folder_level WHERE pk_folder = :new.pk_parent_folder; - END IF; - INSERT INTO folder_level (pk_folder_level,pk_folder,int_level) VALUES (:new.pk_folder, :new.pk_folder, int_level); - INSERT INTO folder_resource (pk_folder_resource,pk_folder) VALUES (:new.pk_folder, :new.pk_folder); -END; --- SPLIT HERE! - -CREATE TRIGGER "BEFORE_INSERT_FOLDER" BEFORE INSERT ON folder -FOR EACH ROW -BEGIN - IF :new.pk_parent_folder IS NULL THEN - :new.b_default := 1; - END IF; -END; --- SPLIT HERE! - -CREATE TRIGGER "BEFORE_INSERT_PROC" BEFORE INSERT ON proc -FOR EACH ROW -BEGIN - IF :new.int_cores_reserved <= 0 THEN - Raise_application_error(-20010, 'failed to allocate proc, tried to allocate 0 cores'); - END IF; -END; --- SPLIT HERE! 
- -CREATE TRIGGER "UPDATE_PROC_UPDATE_LAYER" AFTER UPDATE ON proc -FOR EACH ROW - WHEN (new.pk_layer != old.pk_layer) BEGIN - FOR lr IN ( - SELECT - pk_layer - FROM - layer_stat - WHERE - pk_layer IN (:old.pk_layer,:new.pk_layer) - ORDER BY layer_stat.pk_layer DESC - ) LOOP - - IF lr.pk_layer = :old.pk_layer THEN - - UPDATE layer_resource SET - int_cores = int_cores - :old.int_cores_reserved - WHERE - pk_layer = :old.pk_layer; - - ELSE - - UPDATE layer_resource SET - int_cores = int_cores + :new.int_cores_reserved - WHERE - pk_layer = :new.pk_layer; - END IF; - - END LOOP; -END; --- SPLIT HERE! - -CREATE TRIGGER "UPGRADE_PROC_MEMORY_USAGE" AFTER UPDATE ON proc -FOR EACH ROW - WHEN (NEW.int_mem_reserved != OLD.int_mem_reserved) BEGIN - UPDATE host SET - int_mem_idle = int_mem_idle - (:new.int_mem_reserved - :old.int_mem_reserved) - WHERE - pk_host = :new.pk_host; -END; --- SPLIT HERE! - -CREATE TRIGGER "UPDATE_FRAME_WAIT_TO_DEP" BEFORE UPDATE ON frame -FOR EACH ROW - WHEN (NEW.int_depend_count > 0 AND NEW.str_state IN ('Dead','Succeeded','Waiting','Checkpoint')) BEGIN - :NEW.str_state := 'Depend'; - :NEW.ts_updated := systimestamp; - :NEW.int_version := :NEW.int_version + 1; -END; --- SPLIT HERE! - -CREATE TRIGGER "UPDATE_FRAME_EATEN" BEFORE UPDATE ON frame -FOR EACH ROW - WHEN (NEW.str_state='Eaten' AND OLD.str_state='Succeeded') BEGIN - :NEW.str_state :='Succeeded'; -END; --- SPLIT HERE! - -CREATE TRIGGER "UPDATE_FRAME_DEP_TO_WAIT" BEFORE UPDATE ON frame -FOR EACH ROW - WHEN (OLD.int_depend_count > 0 AND NEW.int_depend_count < 1 AND OLD.str_state='Depend') BEGIN - :NEW.str_state := 'Waiting'; - :NEW.ts_updated := systimestamp; - :NEW.int_version := :NEW.int_version + 1; -END; --- SPLIT HERE! 
- -CREATE TRIGGER "FRAME_HISTORY_OPEN" AFTER UPDATE ON frame -FOR EACH ROW - WHEN (NEW.str_state != OLD.str_state) DECLARE - str_pk_alloc VARCHAR2(36) := null; - int_checkpoint integer := 0; -BEGIN - - IF :old.str_state = 'Running' THEN - - IF :new.int_exit_status = 299 THEN - - EXECUTE IMMEDIATE - 'DELETE FROM - frame_history - WHERE - int_ts_stopped = 0 AND pk_frame=:1' - USING - :new.pk_frame; - - ELSE - If :new.str_state = 'Checkpoint' THEN - int_checkpoint := 1; - END IF; - - EXECUTE IMMEDIATE - 'UPDATE - frame_history - SET - int_mem_max_used=:1, - int_ts_stopped=:2, - int_exit_status=:3, - int_checkpoint_count=:4 - WHERE - int_ts_stopped = 0 AND pk_frame=:5' - USING - :new.int_mem_max_used, - epoch(systimestamp), - :new.int_exit_status, - int_checkpoint, - :new.pk_frame; - END IF; - END IF; - - IF :new.str_state = 'Running' THEN - - SELECT pk_alloc INTO str_pk_alloc FROM host WHERE str_name=:new.str_host; - - EXECUTE IMMEDIATE - 'INSERT INTO - frame_history - ( - pk_frame, - pk_layer, - pk_job, - str_name, - str_state, - int_cores, - int_mem_reserved, - str_host, - int_ts_started, - pk_alloc - ) - VALUES - (:1,:2,:3,:4,:5,:6,:7,:8,:9,:10)' - USING :new.pk_frame, - :new.pk_layer, - :new.pk_job, - :new.str_name, - 'Running', - :new.int_cores, - :new.int_mem_reserved, - :new.str_host, - epoch(systimestamp), - str_pk_alloc; - END IF; - - -EXCEPTION - /** - * When we first roll this out then job won't be in the historical - * table, so frames on existing jobs will fail unless we catch - * and eat the exceptions. - **/ - WHEN OTHERS THEN - NULL; -END; --- SPLIT HERE! - -CREATE TRIGGER "UPDATE_FRAME_CHECKPOINT_STATE" BEFORE UPDATE ON frame -FOR EACH ROW - WHEN (NEW.str_state='Waiting' AND OLD.str_state='Running' AND NEW.str_checkpoint_state IN ('Enabled', 'Copying')) BEGIN - :NEW.str_state :='Checkpoint'; -END; --- SPLIT HERE! 
- -CREATE TRIGGER "UPDATE_FRAME_STATUS_COUNTS" AFTER UPDATE ON frame -FOR EACH ROW - WHEN (old.str_state != 'Setup' AND old.str_state != new.str_state) DECLARE - s_old_status_col VARCHAR2(32); - s_new_status_col VARCHAR2(32); -BEGIN - s_old_status_col := 'int_' || :old.str_state || '_count'; - s_new_status_col := 'int_' || :new.str_state || '_count'; - - EXECUTE IMMEDIATE 'UPDATE layer_stat SET ' || s_old_status_col || '=' || s_old_status_col || ' -1, ' - || s_new_status_col || ' = ' || s_new_status_col || '+1 WHERE pk_layer=:1' USING :new.pk_layer; - - EXECUTE IMMEDIATE 'UPDATE job_stat SET ' || s_old_status_col || '=' || s_old_status_col || ' -1, ' - || s_new_status_col || ' = ' || s_new_status_col || '+1 WHERE pk_job=:1' USING :new.pk_job; -END; --- SPLIT HERE! - -CREATE TRIGGER "VERIFY_SUBSCRIPTION" BEFORE UPDATE ON subscription -FOR EACH ROW - WHEN ( NEW.int_burst = OLD.int_burst AND NEW.int_cores > OLD.int_cores) BEGIN - /** - * Check to see if adding more procs will push the show over - * its subscription size. This check is only done when - * new.int_burst = old.int_burst and new.int_cores > old.int cores, - * otherwise this error would be thrown at the wrong time. - **/ - IF :NEW.int_cores > :NEW.int_burst THEN - Raise_application_error(-20022, 'subscription has exceeded burst size'); - END IF; -END; --- SPLIT HERE! - -CREATE TRIGGER "TIER_SUBSCRIPTION" BEFORE UPDATE ON subscription -FOR EACH ROW -BEGIN - /* calcultes a soft tier */ - :new.float_tier := tier(:new.int_cores, :new.int_size); -END; --- SPLIT HERE! - -CREATE TRIGGER "POINT_TIER" BEFORE UPDATE ON point -FOR EACH ROW -BEGIN - /* calcultes a soft tier */ - :new.float_tier := soft_tier(:new.int_cores, :new.int_min_cores); -END; --- SPLIT HERE! - -CREATE TRIGGER TBIU_FRAME_HISTORY -before INSERT OR UPDATE -ON frame_history -REFERENCING NEW AS NEW OLD AS OLD -FOR EACH row -BEGIN - :new.dt_last_modified := sysdate; -END; --- SPLIT HERE! - -CALL ctx_ddl.create_index_set('tag_set') --- SPLIT HERE! 
-CALL ctx_ddl.add_index('tag_set','str_name') --- SPLIT HERE! -create index i_host_str_tags ON host (str_tags) INDEXTYPE IS ctxsys.ctxcat parameters ('INDEX SET tag_set') diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V10__Change_mem_type.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V10__Change_mem_type.sql new file mode 100644 index 000000000..31f0c2afa --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V10__Change_mem_type.sql @@ -0,0 +1,10 @@ +-- Change memory type from INT to BIGINT. + +ALTER TABLE "job_mem" ALTER COLUMN "int_max_rss" TYPE BIGINT; +ALTER TABLE "job_mem" ALTER COLUMN "int_max_vss" TYPE BIGINT; +ALTER TABLE "job_resource" ALTER COLUMN "int_max_rss" TYPE BIGINT; +ALTER TABLE "job_resource" ALTER COLUMN "int_max_vss" TYPE BIGINT; +ALTER TABLE "layer_mem" ALTER COLUMN "int_max_rss" TYPE BIGINT; +ALTER TABLE "layer_mem" ALTER COLUMN "int_max_vss" TYPE BIGINT; +ALTER TABLE "layer_resource" ALTER COLUMN "int_max_rss" TYPE BIGINT; +ALTER TABLE "layer_resource" ALTER COLUMN "int_max_vss" TYPE BIGINT; diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V11__Support_multiple_GPU.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V11__Support_multiple_GPU.sql new file mode 100644 index 000000000..acb292586 --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V11__Support_multiple_GPU.sql @@ -0,0 +1,1078 @@ +-- Support multiple GPU + +-- frame_history + +ALTER TABLE frame_history ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE frame_history ADD COLUMN int_gpu_mem_reserved BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE frame_history ADD COLUMN int_gpu_mem_max_used BIGINT DEFAULT 0 NOT NULL; + + +-- show_service + +ALTER TABLE show_service RENAME COLUMN int_gpu_min TO int_gpu_mem_min; +ALTER TABLE show_service ALTER COLUMN int_gpu_mem_min TYPE BIGINT; +ALTER TABLE show_service ADD COLUMN int_gpus_min INT DEFAULT 0 NOT NULL; +ALTER TABLE show_service 
ADD COLUMN int_gpus_max INT DEFAULT 0 NOT NULL;
+
+ALTER INDEX i_show_service_int_gpu_min RENAME TO i_show_service_int_gpu_mem_min;
+CREATE INDEX i_show_service_int_gpus_min ON show_service (int_gpus_min);
+
+
+-- host_local
+
+DROP TRIGGER verify_host_local ON host_local;
+ALTER TABLE host_local ALTER COLUMN int_mem_max TYPE BIGINT;
+ALTER TABLE host_local ALTER COLUMN int_mem_idle TYPE BIGINT;
+ALTER TABLE host_local RENAME COLUMN int_gpu_idle TO int_gpu_mem_idle;
+ALTER TABLE host_local ALTER COLUMN int_gpu_mem_idle TYPE BIGINT;
+ALTER TABLE host_local RENAME COLUMN int_gpu_max TO int_gpu_mem_max;
+ALTER TABLE host_local ALTER COLUMN int_gpu_mem_max TYPE BIGINT;
+ALTER TABLE host_local ADD COLUMN int_gpus_idle INT DEFAULT 0 NOT NULL;
+ALTER TABLE host_local ADD COLUMN int_gpus_max INT DEFAULT 0 NOT NULL;
+
+CREATE INDEX i_host_local_int_gpus_idle ON host_local (int_gpus_idle);
+CREATE INDEX i_host_local_int_gpus_max ON host_local (int_gpus_max);
+
+
+-- service
+
+ALTER TABLE service RENAME COLUMN int_gpu_min TO int_gpu_mem_min;
+ALTER TABLE service ALTER COLUMN int_gpu_mem_min TYPE BIGINT;
+ALTER TABLE service ADD COLUMN int_gpus_min INT DEFAULT 0 NOT NULL;
+ALTER TABLE service ADD COLUMN int_gpus_max INT DEFAULT 0 NOT NULL;
+
+ALTER INDEX i_service_int_gpu_min RENAME TO i_service_int_gpu_mem_min;
+CREATE INDEX i_service_int_gpus_min ON service (int_gpus_min);
+
+
+-- job_local
+
+ALTER TABLE job_local ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL;
+ALTER TABLE job_local ADD COLUMN int_max_gpus INT DEFAULT 0 NOT NULL;
+
+
+-- task
+
+ALTER TABLE task ADD COLUMN int_min_gpus INT DEFAULT 0 NOT NULL;
+ALTER TABLE task ADD COLUMN int_adjust_gpus INT DEFAULT 0 NOT NULL;
+
+
+-- point
+
+ALTER TABLE point ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL;
+ALTER TABLE point ADD COLUMN int_min_gpus INT DEFAULT 0 NOT NULL;
+
+
+-- folder_resource
+
+ALTER TABLE folder_resource ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL;
+ALTER TABLE folder_resource ADD COLUMN int_max_gpus INT 
DEFAULT -1 NOT NULL; +ALTER TABLE folder_resource ADD COLUMN int_min_gpus INT DEFAULT 0 NOT NULL; + +CREATE INDEX i_folder_res_int_max_gpus ON folder_resource (int_max_gpus); + + +-- layer_history + +ALTER TABLE layer_history ADD COLUMN int_gpus_min INT DEFAULT 0 NOT NULL; +ALTER TABLE layer_history ADD COLUMN int_gpu_time_success BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE layer_history ADD COLUMN int_gpu_time_fail BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE layer_history ADD COLUMN int_gpu_mem_min BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE layer_history ADD COLUMN int_gpu_mem_max BIGINT DEFAULT 0 NOT NULL; + + +-- job_history + +ALTER TABLE job_history ADD COLUMN int_gpu_time_success BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE job_history ADD COLUMN int_gpu_time_fail BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE job_history ADD COLUMN int_gpu_mem_max BIGINT DEFAULT 0 NOT NULL; + + +-- job_usage + +ALTER TABLE job_usage ADD COLUMN int_gpu_time_success BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE job_usage ADD COLUMN int_gpu_time_fail BIGINT DEFAULT 0 NOT NULL; + + +-- job_resource + +ALTER TABLE job_resource ALTER COLUMN int_max_rss TYPE BIGINT; +ALTER TABLE job_resource ALTER COLUMN int_max_vss TYPE BIGINT; +ALTER TABLE job_resource ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE job_resource ADD COLUMN int_min_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE job_resource ADD COLUMN int_max_gpus INT DEFAULT 100 NOT NULL; +ALTER TABLE job_resource ADD COLUMN int_local_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE job_resource ADD COLUMN int_gpu_mem_max BIGINT DEFAULT 0 NOT NULL; + +CREATE INDEX i_job_resource_gpus_min_max ON job_resource (int_min_gpus, int_max_gpus); +CREATE INDEX i_job_resource_gpus ON job_resource (int_gpus); +CREATE INDEX i_job_resource_max_gpus ON job_resource (int_max_gpus); + + +-- subscription + +ALTER TABLE subscription ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL; + + +-- show + +ALTER TABLE show ADD COLUMN int_default_min_gpus INT DEFAULT 100 NOT NULL; +ALTER TABLE 
show ADD COLUMN int_default_max_gpus INT DEFAULT 100000 NOT NULL; + + +-- proc + +ALTER TABLE proc RENAME COLUMN int_gpu_reserved TO int_gpu_mem_reserved; +ALTER TABLE proc ALTER COLUMN int_gpu_mem_reserved TYPE BIGINT; +ALTER TABLE proc ADD COLUMN int_gpus_reserved INT DEFAULT 0 NOT NULL; +ALTER TABLE proc ADD COLUMN int_gpu_mem_used BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE proc ADD COLUMN int_gpu_mem_max_used BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE proc ADD COLUMN int_gpu_mem_pre_reserved BIGINT DEFAULT 0 NOT NULL; + +ALTER INDEX i_proc_int_gpu_reserved RENAME TO i_proc_int_gpu_mem_reserved; + + +-- layer_usage + +ALTER TABLE layer_usage ADD COLUMN int_gpu_time_success BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE layer_usage ADD COLUMN int_gpu_time_fail BIGINT DEFAULT 0 NOT NULL; + + +-- layer_mem + +ALTER TABLE layer_mem ALTER COLUMN int_max_rss TYPE BIGINT; +ALTER TABLE layer_mem ALTER COLUMN int_max_vss TYPE BIGINT; +ALTER TABLE layer_mem ADD COLUMN int_gpu_mem_max BIGINT DEFAULT 0 NOT NULL; + + +-- layer_resource + +ALTER TABLE layer_resource ALTER COLUMN int_max_rss TYPE BIGINT; +ALTER TABLE layer_resource ALTER COLUMN int_max_vss TYPE BIGINT; +ALTER TABLE layer_resource ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE layer_resource ADD COLUMN int_gpu_mem_max BIGINT DEFAULT 0 NOT NULL; + + +-- layer + +ALTER TABLE layer RENAME COLUMN int_gpu_min TO int_gpu_mem_min; +ALTER TABLE layer ALTER COLUMN int_gpu_mem_min TYPE BIGINT; +ALTER TABLE layer ADD COLUMN int_gpus_min BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE layer ADD COLUMN int_gpus_max BIGINT DEFAULT 0 NOT NULL; + +ALTER INDEX i_layer_int_gpu_min RENAME TO i_layer_int_gpu_mem_min; +CREATE INDEX i_layer_cores_gpus_mem ON layer (int_cores_min, int_gpus_min, int_mem_min, int_gpu_mem_min); +CREATE INDEX i_layer_cores_gpus_mem_thread ON layer (int_cores_min, int_gpus_min, int_mem_min, int_gpu_mem_min, b_threadable); + + +-- job_mem + +ALTER TABLE job_mem ALTER COLUMN int_max_rss TYPE BIGINT; +ALTER TABLE 
job_mem ALTER COLUMN int_max_vss TYPE BIGINT; +ALTER TABLE job_mem ADD COLUMN int_gpu_mem_max BIGINT DEFAULT 0 NOT NULL; + + +-- job + +ALTER TABLE job ADD COLUMN int_min_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE job ADD COLUMN int_max_gpus INT DEFAULT 100000 NOT NULL; + + +-- host_stat + +ALTER TABLE host_stat RENAME COLUMN int_gpu_total TO int_gpu_mem_total; +ALTER TABLE host_stat ALTER COLUMN int_gpu_mem_total TYPE BIGINT; +ALTER TABLE host_stat RENAME COLUMN int_gpu_free TO int_gpu_mem_free; +ALTER TABLE host_stat ALTER COLUMN int_gpu_mem_free TYPE BIGINT; + +ALTER INDEX i_host_stat_int_gpu_total RENAME TO i_host_stat_int_gpu_mem_total; +ALTER INDEX i_host_stat_int_gpu_free RENAME TO i_host_stat_int_gpu_mem_free; + + +-- host + +ALTER TABLE host RENAME COLUMN int_gpu TO int_gpu_mem; +ALTER TABLE host ALTER COLUMN int_gpu_mem TYPE BIGINT; +ALTER TABLE host RENAME COLUMN int_gpu_idle TO int_gpu_mem_idle; +ALTER TABLE host ALTER COLUMN int_gpu_mem_idle TYPE BIGINT; +ALTER TABLE host ADD COLUMN int_gpus BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE host ADD COLUMN int_gpus_idle BIGINT DEFAULT 0 NOT NULL; + +CREATE INDEX i_host_int_gpu_mem ON host (int_gpu_mem); +CREATE INDEX i_host_int_gpu_mem_idle ON host (int_gpu_mem_idle); +CREATE INDEX i_host_int_gpus ON host (int_gpus); +CREATE INDEX i_host_int_gpus_idle ON host (int_gpus_idle); + + +-- frame + +ALTER TABLE frame RENAME COLUMN int_gpu_reserved TO int_gpu_mem_reserved; +ALTER TABLE frame ALTER COLUMN int_gpu_mem_reserved TYPE BIGINT; +ALTER TABLE frame ADD COLUMN int_gpu_mem_used BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE frame ADD COLUMN int_gpu_mem_max_used BIGINT DEFAULT 0 NOT NULL; +ALTER TABLE frame ADD COLUMN int_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE frame ADD COLUMN int_total_past_gpu_time INT DEFAULT 0 NOT NULL; + +ALTER INDEX i_frame_int_gpu_reserved RENAME TO i_frame_int_gpu_mem_reserved; + + +-- folder + +ALTER TABLE folder ADD COLUMN int_job_min_gpus INT DEFAULT -1 NOT NULL; +ALTER TABLE folder ADD 
COLUMN int_job_max_gpus INT DEFAULT -1 NOT NULL; +ALTER TABLE folder ADD COLUMN int_min_gpus INT DEFAULT 0 NOT NULL; +ALTER TABLE folder ADD COLUMN int_max_gpus INT DEFAULT -1 NOT NULL; + + +-- Views + +DROP VIEW vs_show_resource; +CREATE VIEW vs_show_resource (pk_show, int_cores, int_gpus) AS + SELECT + job.pk_show, + SUM(int_cores) AS int_cores, SUM(int_gpus) AS int_gpus + FROM + job, + job_resource + WHERE + job.pk_job = job_resource.pk_job + AND + job.str_state='PENDING' + GROUP BY + job.pk_show; + + +DROP VIEW vs_job_resource; +CREATE VIEW vs_job_resource (pk_job, int_procs, int_cores, int_gpus, int_mem_reserved) AS + SELECT + job.pk_job, + COUNT(proc.pk_proc) AS int_procs, + COALESCE(SUM(int_cores_reserved),0) AS int_cores, + COALESCE(SUM(int_gpus_reserved),0) AS int_gpus, + COALESCE(SUM(int_mem_reserved),0) AS int_mem_reserved + FROM + job LEFT JOIN proc ON (proc.pk_job = job.pk_job) + GROUP BY + job.pk_job; + + +DROP VIEW vs_alloc_usage; +CREATE VIEW vs_alloc_usage (pk_alloc, int_cores, int_idle_cores, int_running_cores, int_locked_cores, int_available_cores, int_gpus, int_idle_gpus, int_running_gpus, int_locked_gpus, int_available_gpus, int_hosts, int_locked_hosts, int_down_hosts) AS + SELECT + alloc.pk_alloc, + COALESCE(SUM(host.int_cores),0) AS int_cores, + COALESCE(SUM(host.int_cores_idle),0) AS int_idle_cores, + COALESCE(SUM(host.int_cores - host.int_cores_idle),0) as int_running_cores, + COALESCE((SELECT SUM(int_cores) FROM host WHERE host.pk_alloc=alloc.pk_alloc AND (str_lock_state='NIMBY_LOCKED' OR str_lock_state='LOCKED')),0) AS int_locked_cores, + COALESCE((SELECT SUM(int_cores_idle) FROM host h,host_stat hs WHERE h.pk_host = hs.pk_host AND h.pk_alloc=alloc.pk_alloc AND h.str_lock_state='OPEN' AND hs.str_state ='UP'),0) AS int_available_cores, + COALESCE(SUM(host.int_gpus),0) AS int_gpus, + COALESCE(SUM(host.int_gpus_idle),0) AS int_idle_gpus, + COALESCE(SUM(host.int_gpus - host.int_gpus_idle),0) as int_running_gpus, + COALESCE((SELECT 
SUM(int_gpus) FROM host WHERE host.pk_alloc=alloc.pk_alloc AND (str_lock_state='NIMBY_LOCKED' OR str_lock_state='LOCKED')),0) AS int_locked_gpus, + COALESCE((SELECT SUM(int_gpus_idle) FROM host h,host_stat hs WHERE h.pk_host = hs.pk_host AND h.pk_alloc=alloc.pk_alloc AND h.str_lock_state='OPEN' AND hs.str_state ='UP'),0) AS int_available_gpus, + COUNT(host.pk_host) AS int_hosts, + (SELECT COUNT(*) FROM host WHERE host.pk_alloc=alloc.pk_alloc AND str_lock_state='LOCKED') AS int_locked_hosts, + (SELECT COUNT(*) FROM host h,host_stat hs WHERE h.pk_host = hs.pk_host AND h.pk_alloc=alloc.pk_alloc AND hs.str_state='DOWN') AS int_down_hosts + FROM + alloc LEFT JOIN host ON (alloc.pk_alloc = host.pk_alloc) + GROUP BY + alloc.pk_alloc; + + +DROP VIEW vs_folder_counts; +CREATE VIEW vs_folder_counts (pk_folder, int_depend_count, int_waiting_count, int_running_count, int_dead_count, int_cores, int_gpus, int_job_count) AS + SELECT + folder.pk_folder, + COALESCE(SUM(int_depend_count),0) AS int_depend_count, + COALESCE(SUM(int_waiting_count),0) AS int_waiting_count, + COALESCE(SUM(int_running_count),0) AS int_running_count, + COALESCE(SUM(int_dead_count),0) AS int_dead_count, + COALESCE(SUM(int_cores),0) AS int_cores, + COALESCE(SUM(int_gpus),0) AS int_gpus, + COALESCE(COUNT(job.pk_job),0) AS int_job_count +FROM + folder + LEFT JOIN + job ON (folder.pk_folder = job.pk_folder AND job.str_state='PENDING') + LEFT JOIN + job_stat ON (job.pk_job = job_stat.pk_job) + LEFT JOIN + job_resource ON (job.pk_job = job_resource.pk_job) + GROUP BY + folder.pk_folder; + + +DROP VIEW v_history_frame; +CREATE VIEW v_history_frame (pk_frame_history, pk_frame, pk_layer, pk_job, str_name, str_state, + int_mem_reserved, int_mem_max_used, int_cores, int_gpu_mem_reserved, int_gpu_mem_max_used, int_gpus, + str_host, int_exit_status, str_alloc_name, + b_alloc_billable, str_facility_name, int_ts_started, int_ts_stopped, int_checkpoint_count, + str_show_name, dt_last_modified) AS + SELECT + 
fh.PK_FRAME_HISTORY, + fh.PK_FRAME, + fh.PK_LAYER, + fh.PK_JOB, + fh.STR_NAME, + fh.STR_STATE, + fh.INT_MEM_RESERVED, + fh.INT_MEM_MAX_USED, + fh.INT_CORES, + fh.INT_GPU_MEM_RESERVED, + fh.INT_GPU_MEM_MAX_USED, + fh.INT_GPUS, + fh.STR_HOST, + fh.INT_EXIT_STATUS, + a.STR_NAME STR_ALLOC_NAME, + a.B_BILLABLE B_ALLOC_BILLABLE, + f.STR_NAME STR_FACILITY_NAME, + fh.INT_TS_STARTED, + fh.INT_TS_STOPPED, + fh.INT_CHECKPOINT_COUNT, + null str_show_name, + fh.dt_last_modified + FROM frame_history fh + JOIN job_history jh + ON fh.pk_job = jh.pk_job + LEFT OUTER JOIN alloc a + ON fh.pk_alloc = a.pk_alloc + LEFT OUTER JOIN facility f + ON a.pk_facility = f.pk_facility + WHERE fh.dt_last_modified >= (SELECT dt_begin FROM history_period) + AND fh.dt_last_modified < (SELECT dt_end FROM history_period); + + +DROP VIEW v_history_job; +CREATE VIEW v_history_job (pk_job, str_name, str_shot, str_user, int_core_time_success, int_core_time_fail, int_gpu_time_success, int_gpu_time_fail, int_frame_count, int_layer_count, int_waiting_count, int_dead_count, int_depend_count, int_eaten_count, int_succeeded_count, int_running_count, int_max_rss, int_gpu_mem_max, b_archived, str_facility_name, str_dept_name, int_ts_started, int_ts_stopped, str_show_name, dt_last_modified) AS + select +jh.PK_JOB, +jh.STR_NAME, +jh.STR_SHOT, +jh.STR_USER, +jh.INT_CORE_TIME_SUCCESS, +jh.INT_CORE_TIME_FAIL, +jh.INT_GPU_TIME_SUCCESS, +jh.INT_GPU_TIME_FAIL, +jh.INT_FRAME_COUNT, +jh.INT_LAYER_COUNT, +jh.INT_WAITING_COUNT, +jh.INT_DEAD_COUNT, +jh.INT_DEPEND_COUNT, +jh.INT_EATEN_COUNT, +jh.INT_SUCCEEDED_COUNT, +jh.INT_RUNNING_COUNT, +jh.INT_MAX_RSS, +jh.INT_GPU_MEM_MAX, +jh.B_ARCHIVED, +f.str_name STR_FACILITY_NAME, +d.str_name str_dept_name, +jh.INT_TS_STARTED, +jh.INT_TS_STOPPED, +s.str_name str_show_name, +jh.dt_last_modified +from job_history jh, show s, facility f, dept d +where jh.pk_show = s.pk_show +and jh.pk_facility = f.pk_facility +and jh.pk_dept = d.pk_dept +and ( + jh.dt_last_modified >= ( + select dt_begin 
+ from history_period + ) + or + jh.int_ts_stopped = 0 +); + + +DROP VIEW v_history_layer; +CREATE VIEW v_history_layer (pk_layer, pk_job, str_name, str_type, int_cores_min, + int_mem_min, int_gpus_min, int_gpu_mem_min, int_core_time_success, int_core_time_fail, + int_gpu_time_success, int_gpu_time_fail, int_frame_count, int_layer_count, + int_waiting_count, int_dead_count, int_depend_count, int_eaten_count, int_succeeded_count, + int_running_count, int_max_rss, int_gpu_mem_max, b_archived, str_services, str_show_name, dt_last_modified) AS + SELECT +lh.PK_LAYER, +lh.PK_JOB, +lh.STR_NAME, +lh.STR_TYPE, +lh.INT_CORES_MIN, +lh.INT_MEM_MIN, +lh.INT_GPUS_MIN, +lh.INT_GPU_MEM_MIN, +lh.INT_CORE_TIME_SUCCESS, +lh.INT_CORE_TIME_FAIL, +lh.INT_GPU_TIME_SUCCESS, +lh.INT_GPU_TIME_FAIL, +lh.INT_FRAME_COUNT, +lh.INT_LAYER_COUNT, +lh.INT_WAITING_COUNT, +lh.INT_DEAD_COUNT, +lh.INT_DEPEND_COUNT, +lh.INT_EATEN_COUNT, +lh.INT_SUCCEEDED_COUNT, +lh.INT_RUNNING_COUNT, +lh.INT_MAX_RSS, +lh.INT_GPU_MEM_MAX, +lh.B_ARCHIVED, +lh.STR_SERVICES, +s.str_name str_show_name, +lh.dt_last_modified +from layer_history lh, job_history jh, show s +where lh.pk_job = jh.pk_job +and jh.pk_show = s.pk_show +and jh.dt_last_modified >= ( + select dt_begin + from history_period +) +and jh.dt_last_modified < ( + select dt_end + from history_period +); + + +-- Types + +ALTER TYPE JobStatType ADD ATTRIBUTE int_gpu_time_success BIGINT; +ALTER TYPE JobStatType ADD ATTRIBUTE int_gpu_time_fail BIGINT; +ALTER TYPE JobStatType ADD ATTRIBUTE int_gpu_mem_max BIGINT; + +ALTER TYPE LayerStatType ADD ATTRIBUTE int_gpu_time_success BIGINT; +ALTER TYPE LayerStatType ADD ATTRIBUTE int_gpu_time_fail BIGINT; +ALTER TYPE LayerStatType ADD ATTRIBUTE int_gpu_mem_max BIGINT; + + +-- Functions + +CREATE OR REPLACE FUNCTION recalculate_subs() +RETURNS VOID AS $body$ +DECLARE + r RECORD; +BEGIN + -- + -- concatenates all tags in host_tag and sets host.str_tags + -- + UPDATE subscription SET int_cores = 0; + UPDATE subscription SET 
int_gpus = 0; + FOR r IN + SELECT proc.pk_show, alloc.pk_alloc, sum(proc.int_cores_reserved) as c, sum(proc.int_gpus_reserved) as d + FROM proc, host, alloc + WHERE proc.pk_host = host.pk_host AND host.pk_alloc = alloc.pk_alloc + GROUP BY proc.pk_show, alloc.pk_alloc + LOOP + UPDATE subscription SET int_cores = r.c, int_gpus = r.d WHERE pk_alloc=r.pk_alloc AND pk_show=r.pk_show; + + END LOOP; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION tmp_populate_folder() +RETURNS VOID AS $body$ +DECLARE + t RECORD; +BEGIN + FOR t IN + SELECT pk_folder, pk_show, sum(int_cores) AS c, sum(int_gpus) AS d + FROM job, job_resource + WHERE job.pk_job = job_resource.pk_job + GROUP by pk_folder, pk_show + LOOP + UPDATE folder_resource SET int_cores = t.c, int_gpus = t.d WHERE pk_folder = t.pk_folder; + COMMIT; + END LOOP; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION tmp_populate_point() +RETURNS VOID AS $body$ +DECLARE + t RECORD; +BEGIN + FOR t IN + SELECT pk_dept, pk_show, sum(int_cores) AS c, sum(int_gpus) AS d + FROM job, job_resource + WHERE job.pk_job = job_resource.pk_job + GROUP BY pk_dept, pk_show + LOOP + UPDATE point SET int_cores = t.c , int_gpus = t.d WHERE pk_show = t.pk_show AND pk_dept = t.pk_dept; + END LOOP; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION tmp_populate_sub() +RETURNS VOID AS $body$ +DECLARE + t RECORD; +BEGIN + FOR t IN + SELECT proc.pk_show, host.pk_alloc, sum(int_cores_reserved) AS c, sum(int_gpus_reserved) AS d + FROM proc, host + WHERE proc.pk_host = host.pk_host + GROUP BY proc.pk_show, host.pk_alloc + LOOP + UPDATE subscription SET int_cores = t.c, int_gpus = t.d WHERE pk_show = t.pk_show AND pk_alloc = t.pk_alloc; + END LOOP; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__after_job_moved() +RETURNS TRIGGER AS $body$ +DECLARE + int_core_count INT; + int_gpu_count INT; +BEGIN + SELECT int_cores, int_gpus INTO int_core_count, int_gpu_count + FROM job_resource WHERE 
pk_job = NEW.pk_job; + + IF int_core_count > 0 THEN + UPDATE folder_resource SET int_cores = int_cores + int_core_count + WHERE pk_folder = NEW.pk_folder; + + UPDATE folder_resource SET int_cores = int_cores - int_core_count + WHERE pk_folder = OLD.pk_folder; + END IF; + + IF int_gpu_count > 0 THEN + UPDATE folder_resource SET int_gpus = int_gpus + int_gpu_count + WHERE pk_folder = NEW.pk_folder; + + UPDATE folder_resource SET int_gpus = int_gpus - int_gpu_count + WHERE pk_folder = OLD.pk_folder; + END IF; + RETURN NULL; +END +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__before_delete_job() +RETURNS TRIGGER AS $body$ +DECLARE + js JobStatType; +BEGIN + SELECT + job_usage.int_core_time_success, + job_usage.int_core_time_fail, + job_usage.int_gpu_time_success, + job_usage.int_gpu_time_fail, + job_stat.int_waiting_count, + job_stat.int_dead_count, + job_stat.int_depend_count, + job_stat.int_eaten_count, + job_stat.int_succeeded_count, + job_stat.int_running_count, + job_mem.int_max_rss, + job_mem.int_gpu_mem_max + INTO + js + FROM + job_mem, + job_usage, + job_stat + WHERE + job_usage.pk_job = job_mem.pk_job + AND + job_stat.pk_job = job_mem.pk_job + AND + job_mem.pk_job = OLD.pk_job; + + UPDATE + job_history + SET + pk_dept = OLD.pk_dept, + int_core_time_success = js.int_core_time_success, + int_core_time_fail = js.int_core_time_fail, + int_gpu_time_success = js.int_gpu_time_success, + int_gpu_time_fail = js.int_gpu_time_fail, + int_frame_count = OLD.int_frame_count, + int_layer_count = OLD.int_layer_count, + int_waiting_count = js.int_waiting_count, + int_dead_count = js.int_dead_count, + int_depend_count = js.int_depend_count, + int_eaten_count = js.int_eaten_count, + int_succeeded_count = js.int_succeeded_count, + int_running_count = js.int_running_count, + int_max_rss = js.int_max_rss, + int_gpu_mem_max = js.int_gpu_mem_max, + b_archived = true, + int_ts_stopped = COALESCE(epoch(OLD.ts_stopped), epoch(current_timestamp)) + WHERE + pk_job = 
OLD.pk_job; + + DELETE FROM depend WHERE pk_job_depend_on=OLD.pk_job OR pk_job_depend_er=OLD.pk_job; + DELETE FROM frame WHERE pk_job=OLD.pk_job; + DELETE FROM layer WHERE pk_job=OLD.pk_job; + DELETE FROM job_env WHERE pk_job=OLD.pk_job; + DELETE FROM job_stat WHERE pk_job=OLD.pk_job; + DELETE FROM job_resource WHERE pk_job=OLD.pk_job; + DELETE FROM job_usage WHERE pk_job=OLD.pk_job; + DELETE FROM job_mem WHERE pk_job=OLD.pk_job; + DELETE FROM comments WHERE pk_job=OLD.pk_job; + + RETURN OLD; +END +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__after_job_finished() +RETURNS TRIGGER AS $body$ +DECLARE + ts INT := cast(epoch(current_timestamp) as integer); + js JobStatType; + ls LayerStatType; + one_layer RECORD; +BEGIN + SELECT + job_usage.int_core_time_success, + job_usage.int_core_time_fail, + job_usage.int_gpu_time_success, + job_usage.int_gpu_time_fail, + job_stat.int_waiting_count, + job_stat.int_dead_count, + job_stat.int_depend_count, + job_stat.int_eaten_count, + job_stat.int_succeeded_count, + job_stat.int_running_count, + job_mem.int_max_rss, + job_mem.int_gpu_mem_max + INTO + js + FROM + job_mem, + job_usage, + job_stat + WHERE + job_usage.pk_job = job_mem.pk_job + AND + job_stat.pk_job = job_mem.pk_job + AND + job_mem.pk_job = NEW.pk_job; + + UPDATE + job_history + SET + pk_dept = NEW.pk_dept, + int_core_time_success = js.int_core_time_success, + int_core_time_fail = js.int_core_time_fail, + int_gpu_time_success = js.int_gpu_time_success, + int_gpu_time_fail = js.int_gpu_time_fail, + int_frame_count = NEW.int_frame_count, + int_layer_count = NEW.int_layer_count, + int_waiting_count = js.int_waiting_count, + int_dead_count = js.int_dead_count, + int_depend_count = js.int_depend_count, + int_eaten_count = js.int_eaten_count, + int_succeeded_count = js.int_succeeded_count, + int_running_count = js.int_running_count, + int_max_rss = js.int_max_rss, + int_gpu_mem_max = js.int_gpu_mem_max, + int_ts_stopped = ts + WHERE + pk_job = 
NEW.pk_job; + + FOR one_layer IN (SELECT pk_layer from layer where pk_job = NEW.pk_job) + LOOP + SELECT + layer_usage.int_core_time_success, + layer_usage.int_core_time_fail, + layer_usage.int_gpu_time_success, + layer_usage.int_gpu_time_fail, + layer_stat.int_total_count, + layer_stat.int_waiting_count, + layer_stat.int_dead_count, + layer_stat.int_depend_count, + layer_stat.int_eaten_count, + layer_stat.int_succeeded_count, + layer_stat.int_running_count, + layer_mem.int_max_rss, + layer_mem.int_gpu_mem_max + INTO + ls + FROM + layer_mem, + layer_usage, + layer_stat + WHERE + layer_usage.pk_layer = layer_mem.pk_layer + AND + layer_stat.pk_layer = layer_mem.pk_layer + AND + layer_mem.pk_layer = one_layer.pk_layer; + + UPDATE + layer_history + SET + int_core_time_success = ls.int_core_time_success, + int_core_time_fail = ls.int_core_time_fail, + int_gpu_time_success = ls.int_gpu_time_success, + int_gpu_time_fail = ls.int_gpu_time_fail, + int_frame_count = ls.int_total_count, + int_waiting_count = ls.int_waiting_count, + int_dead_count = ls.int_dead_count, + int_depend_count = ls.int_depend_count, + int_eaten_count = ls.int_eaten_count, + int_succeeded_count = ls.int_succeeded_count, + int_running_count = ls.int_running_count, + int_max_rss = ls.int_max_rss, + int_gpu_mem_max = ls.int_gpu_mem_max + WHERE + pk_layer = one_layer.pk_layer; + END LOOP; + + /** + * Delete any local core assignments from this job. + **/ + DELETE FROM job_local WHERE pk_job=NEW.pk_job; + + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__after_job_dept_update() +RETURNS TRIGGER AS $body$ +DECLARE + int_running_cores INT; + int_running_gpus INT; +BEGIN + /** + * Handles the accounting for moving a job between departments. 
+ **/ + SELECT int_cores, int_gpus INTO int_running_cores, int_running_gpus + FROM job_resource WHERE pk_job = NEW.pk_job; + + IF int_running_cores > 0 THEN + UPDATE point SET int_cores = int_cores + int_running_cores + WHERE pk_dept = NEW.pk_dept AND pk_show = NEW.pk_show; + + UPDATE point SET int_cores = int_cores - int_running_cores + WHERE pk_dept = OLD.pk_dept AND pk_show = OLD.pk_show; + END IF; + + IF int_running_gpus > 0 THEN + UPDATE point SET int_gpus = int_gpus + int_running_gpus + WHERE pk_dept = NEW.pk_dept AND pk_show = NEW.pk_show; + + UPDATE point SET int_gpus = int_gpus - int_running_gpus + WHERE pk_dept = OLD.pk_dept AND pk_show = OLD.pk_show; + END IF; + + RETURN NULL; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__verify_host_local() +RETURNS TRIGGER AS $body$ +BEGIN + /** + * Check to see if the new cores exceeds max cores. This check is only + * done if NEW.int_max_cores is equal to OLD.int_max_cores and + * NEW.int_cores > OLD.int_cores, otherwise this error will be thrown + * when people lower the max. 
+ **/ + IF NEW.int_cores_idle < 0 THEN + RAISE EXCEPTION 'host local doesnt have enough idle cores.'; + END IF; + + IF NEW.int_mem_idle < 0 THEN + RAISE EXCEPTION 'host local doesnt have enough idle memory'; + END IF; + + IF NEW.int_gpus_idle < 0 THEN + RAISE EXCEPTION 'host local doesnt have enough GPU idle cores.'; + END IF; + + IF NEW.int_gpu_mem_idle < 0 THEN + RAISE EXCEPTION 'host local doesnt have enough GPU idle memory.'; + END IF; + + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + +CREATE TRIGGER verify_host_local BEFORE UPDATE ON host_local +FOR EACH ROW + WHEN ((NEW.int_cores_max = OLD.int_cores_max AND NEW.int_mem_max = OLD.int_mem_max) AND + (NEW.int_cores_idle != OLD.int_cores_idle OR NEW.int_mem_idle != OLD.int_mem_idle) AND + (NEW.int_gpus_max = OLD.int_gpus_max AND NEW.int_gpu_mem_max = OLD.int_gpu_mem_max) AND + (NEW.int_gpus_idle != OLD.int_gpus_idle OR NEW.int_gpu_mem_idle != OLD.int_gpu_mem_idle)) + EXECUTE PROCEDURE trigger__verify_host_local(); + + +CREATE OR REPLACE FUNCTION trigger__after_insert_layer() +RETURNS TRIGGER AS $body$ +BEGIN + INSERT INTO layer_stat (pk_layer_stat, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + INSERT INTO layer_resource (pk_layer_resource, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + INSERT INTO layer_usage (pk_layer_usage, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + INSERT INTO layer_mem (pk_layer_mem, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + + INSERT INTO layer_history + (pk_layer, pk_job, str_name, str_type, int_cores_min, int_mem_min, int_gpus_min, int_gpu_mem_min, b_archived,str_services) + VALUES + (NEW.pk_layer, NEW.pk_job, NEW.str_name, NEW.str_type, NEW.int_cores_min, NEW.int_mem_min, NEW.int_gpus_min, NEW.int_gpu_mem_min, false, NEW.str_services); + + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__before_delete_layer() +RETURNS TRIGGER AS $body$ +DECLARE + js 
LayerStatType; +BEGIN + SELECT + layer_usage.int_core_time_success, + layer_usage.int_core_time_fail, + layer_usage.int_gpu_time_success, + layer_usage.int_gpu_time_fail, + layer_stat.int_total_count, + layer_stat.int_waiting_count, + layer_stat.int_dead_count, + layer_stat.int_depend_count, + layer_stat.int_eaten_count, + layer_stat.int_succeeded_count, + layer_stat.int_running_count, + layer_mem.int_max_rss, + layer_mem.int_gpu_mem_max + INTO + js + FROM + layer_mem, + layer_usage, + layer_stat + WHERE + layer_usage.pk_layer = layer_mem.pk_layer + AND + layer_stat.pk_layer = layer_mem.pk_layer + AND + layer_mem.pk_layer = OLD.pk_layer; + + UPDATE + layer_history + SET + int_core_time_success = js.int_core_time_success, + int_core_time_fail = js.int_core_time_fail, + int_gpu_time_success = js.int_gpu_time_success, + int_gpu_time_fail = js.int_gpu_time_fail, + int_frame_count = js.int_total_count, + int_waiting_count = js.int_waiting_count, + int_dead_count = js.int_dead_count, + int_depend_count = js.int_depend_count, + int_eaten_count = js.int_eaten_count, + int_succeeded_count = js.int_succeeded_count, + int_running_count = js.int_running_count, + int_max_rss = js.int_max_rss, + int_gpu_mem_max = js.int_gpu_mem_max, + b_archived = true + WHERE + pk_layer = OLD.pk_layer; + + DELETE FROM layer_resource where pk_layer=OLD.pk_layer; + DELETE FROM layer_stat where pk_layer=OLD.pk_layer; + DELETE FROM layer_usage where pk_layer=OLD.pk_layer; + DELETE FROM layer_env where pk_layer=OLD.pk_layer; + DELETE FROM layer_mem where pk_layer=OLD.pk_layer; + DELETE FROM layer_output where pk_layer=OLD.pk_layer; + + RETURN OLD; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__verify_host_resources() +RETURNS TRIGGER AS $body$ +BEGIN + IF NEW.int_cores_idle < 0 THEN + RAISE EXCEPTION 'unable to allocate additional core units'; + END IF; + + If NEW.int_mem_idle < 0 THEN + RAISE EXCEPTION 'unable to allocate additional memory'; + END IF; + + If 
NEW.int_gpus_idle < 0 THEN + RAISE EXCEPTION 'unable to allocate additional GPU units'; + END IF; + + If NEW.int_gpu_mem_idle < 0 THEN + RAISE EXCEPTION 'unable to allocate additional GPU memory'; + END IF; + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + +DROP TRIGGER verify_host_resources ON host; +CREATE TRIGGER verify_host_resources BEFORE UPDATE ON host +FOR EACH ROW + WHEN (NEW.int_cores_idle != OLD.int_cores_idle + OR NEW.int_mem_idle != OLD.int_mem_idle + OR NEW.int_gpus_idle != OLD.int_gpus_idle + OR NEW.int_gpu_mem_idle != OLD.int_gpu_mem_idle) + EXECUTE PROCEDURE trigger__verify_host_resources(); + + +CREATE OR REPLACE FUNCTION trigger__verify_job_resources() +RETURNS TRIGGER AS $body$ +BEGIN + /** + * Check to see if the new cores exceeds max cores. This check is only + * done if NEW.int_max_cores is equal to OLD.int_max_cores and + * NEW.int_cores > OLD.int_cores, otherwise this error will be thrown + * at the wrong time. + **/ + IF NEW.int_cores > NEW.int_max_cores THEN + RAISE EXCEPTION 'job has exceeded max cores'; + END IF; + IF NEW.int_gpus > NEW.int_max_gpus THEN + RAISE EXCEPTION 'job has exceeded max GPU units'; + END IF; + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + +DROP TRIGGER verify_job_resources ON job_resource; +CREATE TRIGGER verify_job_resources BEFORE UPDATE ON job_resource +FOR EACH ROW + WHEN (NEW.int_max_cores = OLD.int_max_cores AND NEW.int_cores > OLD.int_cores OR + NEW.int_max_gpus = OLD.int_max_gpus AND NEW.int_gpus > OLD.int_gpus) + EXECUTE PROCEDURE trigger__verify_job_resources(); + + +CREATE OR REPLACE FUNCTION trigger__update_proc_update_layer() +RETURNS TRIGGER AS $body$ +DECLARE + lr RECORD; +BEGIN + FOR lr IN ( + SELECT + pk_layer + FROM + layer_stat + WHERE + pk_layer IN (OLD.pk_layer, NEW.pk_layer) + ORDER BY layer_stat.pk_layer DESC + ) LOOP + + IF lr.pk_layer = OLD.pk_layer THEN + + UPDATE layer_resource SET + int_cores = int_cores - OLD.int_cores_reserved, + int_gpus = int_gpus - OLD.int_gpus_reserved + WHERE + 
pk_layer = OLD.pk_layer; + + ELSE + + UPDATE layer_resource SET + int_cores = int_cores + NEW.int_cores_reserved, + int_gpus = int_gpus + NEW.int_gpus_reserved + WHERE + pk_layer = NEW.pk_layer; + END IF; + + END LOOP; + RETURN NULL; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__frame_history_open() +RETURNS TRIGGER AS $body$ +DECLARE + str_pk_alloc VARCHAR(36) := null; + int_checkpoint INT := 0; +BEGIN + + IF OLD.str_state = 'RUNNING' THEN + + IF NEW.int_exit_status = 299 THEN + + EXECUTE 'DELETE FROM frame_history WHERE int_ts_stopped = 0 AND pk_frame=$1' USING + NEW.pk_frame; + + ELSE + If NEW.str_state = 'CHECKPOINT' THEN + int_checkpoint := 1; + END IF; + + EXECUTE + 'UPDATE + frame_history + SET + int_mem_max_used=$1, + int_gpu_mem_max_used=$2, + int_ts_stopped=$3, + int_exit_status=$4, + int_checkpoint_count=$5 + WHERE + int_ts_stopped = 0 AND pk_frame=$6' + USING + NEW.int_mem_max_used, + NEW.int_gpu_mem_max_used, + epoch(current_timestamp), + NEW.int_exit_status, + int_checkpoint, + NEW.pk_frame; + END IF; + END IF; + + IF NEW.str_state = 'RUNNING' THEN + + SELECT pk_alloc INTO str_pk_alloc FROM host WHERE str_name=NEW.str_host; + + EXECUTE + 'INSERT INTO + frame_history + ( + pk_frame, + pk_layer, + pk_job, + str_name, + str_state, + int_cores, + int_mem_reserved, + int_gpus, + int_gpu_mem_reserved, + str_host, + int_ts_started, + pk_alloc + ) + VALUES + ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12)' + USING NEW.pk_frame, + NEW.pk_layer, + NEW.pk_job, + NEW.str_name, + 'RUNNING', + NEW.int_cores, + NEW.int_mem_reserved, + NEW.int_gpus, + NEW.int_gpu_mem_reserved, + NEW.str_host, + epoch(current_timestamp), + str_pk_alloc; + END IF; + RETURN NULL; + +END; +$body$ +LANGUAGE PLPGSQL; diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V12__Add_limit_index.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V12__Add_limit_index.sql new file mode 100644 index 000000000..a6ec10696 --- /dev/null +++ 
b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V12__Add_limit_index.sql @@ -0,0 +1,4 @@ +-- Add limit index +CREATE INDEX i_layer_limit_pk_layer ON layer_limit (pk_layer); +CREATE INDEX i_layer_limit_pk_limit_record ON layer_limit (pk_limit_record); +CREATE INDEX i_limit_record_pk_limit_record ON limit_record (pk_limit_record); diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V13__Add_history_control.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V13__Add_history_control.sql new file mode 100644 index 000000000..16896d4c5 --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V13__Add_history_control.sql @@ -0,0 +1,397 @@ +-- Add history control + +CREATE OR REPLACE FUNCTION trigger__after_insert_job() +RETURNS TRIGGER AS $body$ +BEGIN + INSERT INTO job_stat (pk_job_stat,pk_job) VALUES(NEW.pk_job,NEW.pk_job); + INSERT INTO job_resource (pk_job_resource,pk_job) VALUES(NEW.pk_job,NEW.pk_job); + INSERT INTO job_usage (pk_job_usage,pk_job) VALUES(NEW.pk_job,NEW.pk_job); + INSERT INTO job_mem (pk_job_mem,pk_job) VALUES (NEW.pk_job,NEW.pk_job); + + IF NOT EXISTS (SELECT FROM config WHERE str_key='DISABLE_HISTORY') THEN + + INSERT INTO job_history + (pk_job, pk_show, pk_facility, pk_dept, str_name, str_shot, str_user, int_ts_started) + VALUES + (NEW.pk_job, NEW.pk_show, NEW.pk_facility, NEW.pk_dept, + NEW.str_name, NEW.str_shot, NEW.str_user, epoch(current_timestamp)); + + END IF; + + RETURN NULL; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__before_delete_job() +RETURNS TRIGGER AS $body$ +DECLARE + js JobStatType; +BEGIN + IF NOT EXISTS (SELECT FROM config WHERE str_key='DISABLE_HISTORY') THEN + + SELECT + job_usage.int_core_time_success, + job_usage.int_core_time_fail, + job_usage.int_gpu_time_success, + job_usage.int_gpu_time_fail, + job_stat.int_waiting_count, + job_stat.int_dead_count, + job_stat.int_depend_count, + job_stat.int_eaten_count, + job_stat.int_succeeded_count, + 
job_stat.int_running_count, + job_mem.int_max_rss, + job_mem.int_gpu_mem_max + INTO + js + FROM + job_mem, + job_usage, + job_stat + WHERE + job_usage.pk_job = job_mem.pk_job + AND + job_stat.pk_job = job_mem.pk_job + AND + job_mem.pk_job = OLD.pk_job; + + UPDATE + job_history + SET + pk_dept = OLD.pk_dept, + int_core_time_success = js.int_core_time_success, + int_core_time_fail = js.int_core_time_fail, + int_gpu_time_success = js.int_gpu_time_success, + int_gpu_time_fail = js.int_gpu_time_fail, + int_frame_count = OLD.int_frame_count, + int_layer_count = OLD.int_layer_count, + int_waiting_count = js.int_waiting_count, + int_dead_count = js.int_dead_count, + int_depend_count = js.int_depend_count, + int_eaten_count = js.int_eaten_count, + int_succeeded_count = js.int_succeeded_count, + int_running_count = js.int_running_count, + int_max_rss = js.int_max_rss, + int_gpu_mem_max = js.int_gpu_mem_max, + b_archived = true, + int_ts_stopped = COALESCE(epoch(OLD.ts_stopped), epoch(current_timestamp)) + WHERE + pk_job = OLD.pk_job; + + END IF; + + DELETE FROM depend WHERE pk_job_depend_on=OLD.pk_job OR pk_job_depend_er=OLD.pk_job; + DELETE FROM frame WHERE pk_job=OLD.pk_job; + DELETE FROM layer WHERE pk_job=OLD.pk_job; + DELETE FROM job_env WHERE pk_job=OLD.pk_job; + DELETE FROM job_stat WHERE pk_job=OLD.pk_job; + DELETE FROM job_resource WHERE pk_job=OLD.pk_job; + DELETE FROM job_usage WHERE pk_job=OLD.pk_job; + DELETE FROM job_mem WHERE pk_job=OLD.pk_job; + DELETE FROM comments WHERE pk_job=OLD.pk_job; + + RETURN OLD; +END +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__after_job_finished() +RETURNS TRIGGER AS $body$ +DECLARE + ts INT := cast(epoch(current_timestamp) as integer); + js JobStatType; + ls LayerStatType; + one_layer RECORD; +BEGIN + IF NOT EXISTS (SELECT FROM config WHERE str_key='DISABLE_HISTORY') THEN + + SELECT + job_usage.int_core_time_success, + job_usage.int_core_time_fail, + job_usage.int_gpu_time_success, + 
job_usage.int_gpu_time_fail, + job_stat.int_waiting_count, + job_stat.int_dead_count, + job_stat.int_depend_count, + job_stat.int_eaten_count, + job_stat.int_succeeded_count, + job_stat.int_running_count, + job_mem.int_max_rss, + job_mem.int_gpu_mem_max + INTO + js + FROM + job_mem, + job_usage, + job_stat + WHERE + job_usage.pk_job = job_mem.pk_job + AND + job_stat.pk_job = job_mem.pk_job + AND + job_mem.pk_job = NEW.pk_job; + + UPDATE + job_history + SET + pk_dept = NEW.pk_dept, + int_core_time_success = js.int_core_time_success, + int_core_time_fail = js.int_core_time_fail, + int_gpu_time_success = js.int_gpu_time_success, + int_gpu_time_fail = js.int_gpu_time_fail, + int_frame_count = NEW.int_frame_count, + int_layer_count = NEW.int_layer_count, + int_waiting_count = js.int_waiting_count, + int_dead_count = js.int_dead_count, + int_depend_count = js.int_depend_count, + int_eaten_count = js.int_eaten_count, + int_succeeded_count = js.int_succeeded_count, + int_running_count = js.int_running_count, + int_max_rss = js.int_max_rss, + int_gpu_mem_max = js.int_gpu_mem_max, + int_ts_stopped = ts + WHERE + pk_job = NEW.pk_job; + + FOR one_layer IN (SELECT pk_layer from layer where pk_job = NEW.pk_job) + LOOP + SELECT + layer_usage.int_core_time_success, + layer_usage.int_core_time_fail, + layer_usage.int_gpu_time_success, + layer_usage.int_gpu_time_fail, + layer_stat.int_total_count, + layer_stat.int_waiting_count, + layer_stat.int_dead_count, + layer_stat.int_depend_count, + layer_stat.int_eaten_count, + layer_stat.int_succeeded_count, + layer_stat.int_running_count, + layer_mem.int_max_rss, + layer_mem.int_gpu_mem_max + INTO + ls + FROM + layer_mem, + layer_usage, + layer_stat + WHERE + layer_usage.pk_layer = layer_mem.pk_layer + AND + layer_stat.pk_layer = layer_mem.pk_layer + AND + layer_mem.pk_layer = one_layer.pk_layer; + + UPDATE + layer_history + SET + int_core_time_success = ls.int_core_time_success, + int_core_time_fail = ls.int_core_time_fail, + 
int_gpu_time_success = ls.int_gpu_time_success, + int_gpu_time_fail = ls.int_gpu_time_fail, + int_frame_count = ls.int_total_count, + int_waiting_count = ls.int_waiting_count, + int_dead_count = ls.int_dead_count, + int_depend_count = ls.int_depend_count, + int_eaten_count = ls.int_eaten_count, + int_succeeded_count = ls.int_succeeded_count, + int_running_count = ls.int_running_count, + int_max_rss = ls.int_max_rss, + int_gpu_mem_max = ls.int_gpu_mem_max + WHERE + pk_layer = one_layer.pk_layer; + END LOOP; + + END IF; + + /** + * Delete any local core assignments from this job. + **/ + DELETE FROM job_local WHERE pk_job=NEW.pk_job; + + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__after_insert_layer() +RETURNS TRIGGER AS $body$ +BEGIN + INSERT INTO layer_stat (pk_layer_stat, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + INSERT INTO layer_resource (pk_layer_resource, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + INSERT INTO layer_usage (pk_layer_usage, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + INSERT INTO layer_mem (pk_layer_mem, pk_layer, pk_job) VALUES (NEW.pk_layer, NEW.pk_layer, NEW.pk_job); + + IF NOT EXISTS (SELECT FROM config WHERE str_key='DISABLE_HISTORY') THEN + + INSERT INTO layer_history + (pk_layer, pk_job, str_name, str_type, int_cores_min, int_mem_min, int_gpus_min, int_gpu_mem_min, b_archived,str_services) + VALUES + (NEW.pk_layer, NEW.pk_job, NEW.str_name, NEW.str_type, NEW.int_cores_min, NEW.int_mem_min, NEW.int_gpus_min, NEW.int_gpu_mem_min, false, NEW.str_services); + + END IF; + + RETURN NEW; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__before_delete_layer() +RETURNS TRIGGER AS $body$ +DECLARE + js LayerStatType; +BEGIN + IF NOT EXISTS (SELECT FROM config WHERE str_key='DISABLE_HISTORY') THEN + + SELECT + layer_usage.int_core_time_success, + layer_usage.int_core_time_fail, + 
layer_usage.int_gpu_time_success, + layer_usage.int_gpu_time_fail, + layer_stat.int_total_count, + layer_stat.int_waiting_count, + layer_stat.int_dead_count, + layer_stat.int_depend_count, + layer_stat.int_eaten_count, + layer_stat.int_succeeded_count, + layer_stat.int_running_count, + layer_mem.int_max_rss, + layer_mem.int_gpu_mem_max + INTO + js + FROM + layer_mem, + layer_usage, + layer_stat + WHERE + layer_usage.pk_layer = layer_mem.pk_layer + AND + layer_stat.pk_layer = layer_mem.pk_layer + AND + layer_mem.pk_layer = OLD.pk_layer; + + UPDATE + layer_history + SET + int_core_time_success = js.int_core_time_success, + int_core_time_fail = js.int_core_time_fail, + int_gpu_time_success = js.int_gpu_time_success, + int_gpu_time_fail = js.int_gpu_time_fail, + int_frame_count = js.int_total_count, + int_waiting_count = js.int_waiting_count, + int_dead_count = js.int_dead_count, + int_depend_count = js.int_depend_count, + int_eaten_count = js.int_eaten_count, + int_succeeded_count = js.int_succeeded_count, + int_running_count = js.int_running_count, + int_max_rss = js.int_max_rss, + int_gpu_mem_max = js.int_gpu_mem_max, + b_archived = true + WHERE + pk_layer = OLD.pk_layer; + + END IF; + + DELETE FROM layer_resource where pk_layer=OLD.pk_layer; + DELETE FROM layer_stat where pk_layer=OLD.pk_layer; + DELETE FROM layer_usage where pk_layer=OLD.pk_layer; + DELETE FROM layer_env where pk_layer=OLD.pk_layer; + DELETE FROM layer_mem where pk_layer=OLD.pk_layer; + DELETE FROM layer_output where pk_layer=OLD.pk_layer; + + RETURN OLD; +END; +$body$ +LANGUAGE PLPGSQL; + + +CREATE OR REPLACE FUNCTION trigger__frame_history_open() +RETURNS TRIGGER AS $body$ +DECLARE + str_pk_alloc VARCHAR(36) := null; + int_checkpoint INT := 0; +BEGIN + + IF NOT EXISTS (SELECT FROM config WHERE str_key='DISABLE_HISTORY') THEN + + IF OLD.str_state = 'RUNNING' THEN + + IF NEW.int_exit_status = 299 THEN + + EXECUTE 'DELETE FROM frame_history WHERE int_ts_stopped = 0 AND pk_frame=$1' USING + 
NEW.pk_frame; + + ELSE + If NEW.str_state = 'CHECKPOINT' THEN + int_checkpoint := 1; + END IF; + + EXECUTE + 'UPDATE + frame_history + SET + int_mem_max_used=$1, + int_gpu_mem_max_used=$2, + int_ts_stopped=$3, + int_exit_status=$4, + int_checkpoint_count=$5 + WHERE + int_ts_stopped = 0 AND pk_frame=$6' + USING + NEW.int_mem_max_used, + NEW.int_gpu_mem_max_used, + epoch(current_timestamp), + NEW.int_exit_status, + int_checkpoint, + NEW.pk_frame; + END IF; + END IF; + + IF NEW.str_state = 'RUNNING' THEN + + SELECT pk_alloc INTO str_pk_alloc FROM host WHERE str_name=NEW.str_host; + + EXECUTE + 'INSERT INTO + frame_history + ( + pk_frame, + pk_layer, + pk_job, + str_name, + str_state, + int_cores, + int_mem_reserved, + int_gpus, + int_gpu_mem_reserved, + str_host, + int_ts_started, + pk_alloc + ) + VALUES + ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12)' + USING NEW.pk_frame, + NEW.pk_layer, + NEW.pk_job, + NEW.str_name, + 'RUNNING', + NEW.int_cores, + NEW.int_mem_reserved, + NEW.int_gpus, + NEW.int_gpu_mem_reserved, + NEW.str_host, + epoch(current_timestamp), + str_pk_alloc; + END IF; + + END IF; + RETURN NULL; + +END; +$body$ +LANGUAGE PLPGSQL; diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V14__Add_bytea_field_Proc_table.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V14__Add_bytea_field_Proc_table.sql new file mode 100644 index 000000000..9b013b8ed --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V14__Add_bytea_field_Proc_table.sql @@ -0,0 +1,2 @@ +ALTER TABLE proc +ADD COLUMN bytea_children BYTEA; \ No newline at end of file diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V15__Add_min_memory_increase_service.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V15__Add_min_memory_increase_service.sql new file mode 100644 index 000000000..0b0655521 --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V15__Add_min_memory_increase_service.sql @@ -0,0 +1,5 @@ + +-- 
Add minimum memory increase - default 2G + +ALTER TABLE show_service ADD COLUMN int_min_memory_increase INT DEFAULT 2097152 NOT NULL; +ALTER TABLE service ADD COLUMN int_min_memory_increase INT DEFAULT 2097152 NOT NULL; diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V16__AddShowStats.sql b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V16__AddShowStats.sql new file mode 100644 index 000000000..f97c14f7c --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V16__AddShowStats.sql @@ -0,0 +1,33 @@ +CREATE TABLE show_stats ( + pk_show VARCHAR(36) NOT NULL, + int_frame_insert_count BIGINT DEFAULT 0 NOT NULL, + int_job_insert_count BIGINT DEFAULT 0 NOT NULL, + int_frame_success_count BIGINT DEFAULT 0 NOT NULL, + int_frame_fail_count BIGINT DEFAULT 0 NOT NULL +); + +INSERT INTO show_stats ( + pk_show, + int_frame_insert_count, + int_job_insert_count, + int_frame_success_count, + int_frame_fail_count +) SELECT + pk_show, + int_frame_insert_count, + int_job_insert_count, + int_frame_success_count, + int_frame_fail_count + FROM show; + +CREATE UNIQUE INDEX c_show_stats_pk ON show_stats (pk_show); +ALTER TABLE show_stats ADD CONSTRAINT c_show_stats_pk PRIMARY KEY + USING INDEX c_show_stats_pk; + + +-- Destructive changes. Please test changes above prior to executing this. 
+ALTER TABLE show + DROP COLUMN int_frame_insert_count, + DROP COLUMN int_job_insert_count, + DROP COLUMN int_frame_success_count, + DROP COLUMN int_frame_fail_count; diff --git a/cuebot/src/main/resources/conf/ddl/postgres/migrations/V18_Add_New_Indexes b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V18_Add_New_Indexes new file mode 100644 index 000000000..107f426a0 --- /dev/null +++ b/cuebot/src/main/resources/conf/ddl/postgres/migrations/V18_Add_New_Indexes @@ -0,0 +1,29 @@ + +--Performance issue, Created new index on column int_gpus_min + + +CREATE INDEX IF NOT EXISTS i_layer_int_gpu_mem_min + ON public.layer USING btree + (int_gpus_min ASC NULLS LAST) + TABLESPACE pg_default; + + +CREATE INDEX IF NOT EXISTS i_layer_int_gpu_mem_min_1 + ON public.layer USING btree + (int_gpu_min ASC NULLS LAST) + TABLESPACE pg_default; + + +create index concurrently i_layer_int_cores_max on layer(int_cores_max); + +create index concurrently i_job_resource_int_priority on job_resource(int_priority); + +create index concurrently i_job_int_min_cores on job(int_min_cores); + +create index concurrently i_layer_limit_pk_layer on layer_limit(pk_layer); + +create index concurrently i_folder_resource_int_cores on folder_resource(int_cores); + +create index concurrently i_job_ts_updated on job(ts_updated); + +create index concurrently i_layer_str_tags on layer(str_tags); diff --git a/cuebot/src/main/resources/conf/ddl/postgres/demo_data.sql b/cuebot/src/main/resources/conf/ddl/postgres/seed_data.sql similarity index 93% rename from cuebot/src/main/resources/conf/ddl/postgres/demo_data.sql rename to cuebot/src/main/resources/conf/ddl/postgres/seed_data.sql index ab2787dca..7b189174c 100644 --- a/cuebot/src/main/resources/conf/ddl/postgres/demo_data.sql +++ b/cuebot/src/main/resources/conf/ddl/postgres/seed_data.sql @@ -1,4 +1,6 @@ -Insert into SHOW 
(PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000000', 'testing', 200000, 100, 0, 0, 0, 0, true, true, true); +Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000000', 'testing', 200000, 100, true, true, true); + +Insert into SHOW_STATS (PK_SHOW,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT) values ('00000000-0000-0000-0000-000000000000',0,0,0,0); Insert into SHOW_ALIAS (PK_SHOW_ALIAS,PK_SHOW,STR_NAME) values ('00000000-0000-0000-0000-000000000001', '00000000-0000-0000-0000-000000000000', 'test'); @@ -63,8 +65,6 @@ Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN, Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA7','katana',true,100,2097152,'general | desktop | util'); -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8','shake',false,100,2097152,'general | desktop'); - Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA9','nuke',false,100,2097152,'general | desktop'); Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA11','preprocess',false,10,393216,'util'); diff --git a/cuebot/src/main/resources/conf/spring/applicationContext-dao-oracle.xml b/cuebot/src/main/resources/conf/spring/applicationContext-dao-oracle.xml deleted file mode 100644 index afe238bcf..000000000 --- a/cuebot/src/main/resources/conf/spring/applicationContext-dao-oracle.xml +++ /dev/null @@ 
-1,147 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cuebot/src/main/resources/conf/spring/applicationContext-service.xml b/cuebot/src/main/resources/conf/spring/applicationContext-service.xml index c30812aa5..5aedc91b3 100644 --- a/cuebot/src/main/resources/conf/spring/applicationContext-service.xml +++ b/cuebot/src/main/resources/conf/spring/applicationContext-service.xml @@ -46,6 +46,12 @@ ${grpc.rqd_cache_expiration} + + ${grpc.rqd_cache_concurrency} + + + ${grpc.rqd_task_deadline} + @@ -54,46 +60,94 @@ - - - - - - - - - - + + + ${booking_queue.threadpool.health_threshold} + + + ${healthy_threadpool.min_unhealthy_period_min} + + + ${booking_queue.threadpool.queue_capacity} + + + ${booking_queue.threadpool.core_pool_size} + + + ${booking_queue.threadpool.max_pool_size} + DispatchQueue - + + ${healthy_threadpool.health_threshold} + + + ${healthy_threadpool.min_unhealthy_period_min} + + + ${dispatch.threadpool.queue_capacity} + + + ${dispatch.threadpool.core_pool_size} + + + ${dispatch.threadpool.max_pool_size} + - + ManageQueue - + + ${healthy_threadpool.health_threshold} + + + ${healthy_threadpool.min_unhealthy_period_min} + + + ${dispatch.threadpool.queue_capacity} + + + ${dispatch.threadpool.core_pool_size} + + + ${dispatch.threadpool.max_pool_size} + - - - - - + - 300 + ${report_queue.threadPoolSizeInitial} + + + ${report_queue.threadPoolSizeMax} + + + ${report_queue.queueSize} + + + ${kill_queue.threadPoolSizeInitial} + + + ${kill_queue.threadPoolSizeMax} + + + ${kill_queue.queueSize} + + + @@ -109,7 +163,6 @@ - @@ -310,12 +363,15 @@ - + + + + @@ -328,7 +384,6 @@ - @@ -342,6 +397,7 @@ + @@ -379,6 +435,76 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + false @@ -428,6 +567,12 @@ + + + + + + diff --git a/cuebot/src/main/resources/conf/spring/applicationContext-trackit.xml b/cuebot/src/main/resources/conf/spring/applicationContext-trackit.xml index 0d8a913e8..a6f8e0f48 100644 --- a/cuebot/src/main/resources/conf/spring/applicationContext-trackit.xml +++ b/cuebot/src/main/resources/conf/spring/applicationContext-trackit.xml @@ -21,8 +21,4 @@ xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.5.xsd"> - - - - diff --git a/cuebot/src/main/resources/conf/spring/healthServlet-servlet.xml b/cuebot/src/main/resources/conf/spring/healthServlet-servlet.xml new file mode 100644 index 000000000..a6f8e0f48 --- /dev/null +++ b/cuebot/src/main/resources/conf/spring/healthServlet-servlet.xml @@ -0,0 +1,24 @@ + + + + + + + diff --git a/cuebot/src/main/resources/log4j.properties b/cuebot/src/main/resources/log4j.properties deleted file mode 100644 index 0eb102e58..000000000 --- a/cuebot/src/main/resources/log4j.properties +++ /dev/null @@ -1,43 +0,0 @@ -############################################################## -# SpCue Logging Configuration -############################################################## - -############################################################### -# Root Logger -# Logs Application wide INFO messages / Tomcat messges -############################################################### - -log4j.rootLogger=INFO, STDOUT, FILE -log4j.appender.STDOUT=org.apache.log4j.ConsoleAppender -log4j.appender.STDOUT.Threshold=WARN -log4j.appender.STDOUT.layout=org.apache.log4j.PatternLayout -log4j.appender.STDOUT.layout.ConversionPattern=%d %p %t %c - %m%n -log4j.appender.FILE=org.apache.log4j.RollingFileAppender -log4j.appender.FILE.File=logs/spcue.log -log4j.appender.FILE.MaxFileSize=10MB -log4j.appender.FILE.MaxBackupIndex=10 -log4j.appender.FILE.layout=org.apache.log4j.PatternLayout 
-log4j.appender.FILE.layout.ConversionPattern=%d %p %t %c - %m%n - -log4j.category.API=INFO, API -log4j.additivity.API=false -log4j.appender.API=org.apache.log4j.RollingFileAppender -log4j.appender.API.File=logs/api.log -log4j.appender.API.MaxFileSize=10MB -log4j.appender.API.MaxBackupIndex=20 -log4j.appender.API.layout=org.apache.log4j.PatternLayout -log4j.appender.API.layout.ConversionPattern=%d:%m%n - -log4j.logger.org.apache.catalina=INFO -log4j.logger.com.imageworks.spcue=DEBUG -log4j.logger.com.imageworks.spcue.dispatcher.RqdReportManagerService=DEBUG -log4j.logger.com.imageworks.spcue.service.HostManagerService=TRACE -log4j.logger.com.imageworks.spcue.dispatcher=TRACE -log4j.logger.com.imageworks.spcue.dao.oracle.DispatcherDaoJdbc=DEBUG - -#log4j.logger.org.springframework=DEBUG - -# Very verbose sql output: -#log4j.logger.org.springframework.jdbc.core=DEBUG -#log4j.logger.org.springframework.jdbc.core.JdbcTemplate=DEBUG -#log4j.logger.org.springframework.jdbc.core.StatementCreatorUtils=TRACE diff --git a/cuebot/src/main/resources/log4j2.properties b/cuebot/src/main/resources/log4j2.properties new file mode 100644 index 000000000..b924a762f --- /dev/null +++ b/cuebot/src/main/resources/log4j2.properties @@ -0,0 +1,103 @@ +############################################################## +# OpenCue Logging Configuration +############################################################## + +# Log4j uses "appenders" and "loggers". Loggers define the logging behavior within the +# application. Appenders deliver the log messages to the intended targets. Loggers must +# be associated with appenders in order for log messages to be written out. + +# Stdout. +appender.console.type = Console +appender.console.name = STDOUT +appender.console.layout.type = PatternLayout +appender.console.layout.pattern = %d %p %t %c - %m%n +appender.console.filter.threshold.type = ThresholdFilter +appender.console.filter.threshold.level = warn + +# Main log file. 
+appender.rolling.type = RollingFile +appender.rolling.name = FILE +appender.rolling.fileName = logs/spcue.log +appender.rolling.filePattern = logs/spcue.log.%i +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = %d %p %t %c - %m%n +appender.rolling.policies.type = Policies +appender.rolling.policies.size.type = SizeBasedTriggeringPolicy +appender.rolling.policies.size.size=10MB +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.max = 10 + +# API log file, for logging API requests only. +appender.api.type = RollingFile +appender.api.name = API +appender.api.fileName = logs/api.log +appender.api.filePattern = logs/api.log.%i +appender.api.layout.type = PatternLayout +appender.api.layout.pattern = %d:%m%n +appender.api.policies.type = Policies +appender.api.policies.size.type = SizeBasedTriggeringPolicy +appender.api.policies.size.size = 10MB +appender.api.strategy.type = DefaultRolloverStrategy +appender.api.strategy.max = 20 +appender.api.filter.threshold.type = ThresholdFilter +appender.api.filter.threshold.level = info + +# HEALTH log file +appender.health.type = RollingFile +appender.health.name = HEALTH +appender.health.fileName = logs/health.log +appender.health.filePattern = logs/health.log.%i +appender.health.layout.type = PatternLayout +appender.health.layout.pattern = %d:%m%n +appender.health.policies.type = Policies +appender.health.policies.size.type = SizeBasedTriggeringPolicy +appender.health.policies.size.size = 10MB +appender.health.strategy.type = DefaultRolloverStrategy +appender.health.strategy.max = 20 +appender.health.filter.threshold.type = ThresholdFilter +appender.health.filter.threshold.level = debug + +# Root-level logger. All messages will go to both stdout and the main log file, though they +# may not appear there if the appender filters based on log level. For example INFO messages +# will not appear by default in stdout as the default log level for that appender is WARN. 
+rootLogger.level = info +rootLogger.appenderRef.stdout.ref = STDOUT +rootLogger.appenderRef.file.ref = FILE + +# API logger. Does not inherit from the root logger, so only API requests will be logged. +logger.api.name = API +logger.api.level = info +logger.api.additivity = false +logger.api.appenderRef.api.ref = API + +# HEALTH logger. Does not inherit from the root logger, so only HEALTH requests will be logged. +logger.health.name = HEALTH +logger.health.level = debug +logger.health.additivity = false +logger.health.appenderRef.health.ref = HEALTH + +# Child loggers. These inherit from the root logger, so will be sent to the relevant appenders. +# This allows us to increase verbosity for specific modules. + +logger.catalina.name = org.apache.catalina +logger.catalina.level = info + +logger.spcue.name = com.imageworks.spcue +logger.spcue.level = debug + +logger.rqdReport.name = com.imageworks.spcue.dispatcher.RqdReportManagerService +logger.rqdReport.level = debug + +logger.hostManager.name = com.imageworks.spcue.service.HostManagerService +logger.hostManager.level = trace + +logger.dispatcher.name = com.imageworks.spcue.dispatcher +logger.dispatcher.level = trace + +# For very verbose sql output: +# logger.sql.name = org.springframework.jdbc.core +# logger.sql.level = debug +# logger.sqlJdbcTemplate.name = org.springframework.jdbc.core.JdbcTemplate +# logger.sqlJdbcTemplate.level = debug +# logger.sqlStatementCreator.name = org.springframework.jdbc.core.StatementCreatorUtils +# logger.sqlStatementCreator.level = trace diff --git a/cuebot/src/main/resources/opencue.properties b/cuebot/src/main/resources/opencue.properties index 20d85970b..b7f2a23ff 100644 --- a/cuebot/src/main/resources/opencue.properties +++ b/cuebot/src/main/resources/opencue.properties @@ -1,10 +1,6 @@ cue.proxy = tcp -h cuetest01-vm -p 9019 -t 10000:tcp -h cuetest02-vm -p 9019 -t 10000:tcp -h cuetest03-vm -p 9019 -t 10000 spring.velocity.checkTemplateLocation=false -# Oracle versions of 
config values will look like: -# datasource.cueDataSource.driverClassName=oracle.jdbc.OracleDriver -# datasource.cueDataSource.jdbc-url=jdbc:oracle:oci:@dbname - datasource.cue-data-source.driver-class-name=org.postgresql.Driver datasource.cue-data-source.jdbc-url=jdbc:postgresql://dbhost/dbname datasource.cue-data-source.username=cue @@ -13,46 +9,162 @@ datasource.cue-data-source.password=password # connection rebalancing. datasource.cue-data-source.maxAge=21600000 -cue.trackit.enabled=false -# If using Oracle trackit, ensure the drivers are installed and use the following config value: -# datasource.trackit-data-source.driver-class-name=oracle.jdbc.OracleDriver -datasource.trackit-data-source.jdbc-url=jdbc:oracle:oci:@dbname -datasource.trackit-data-source.username=element_ro -datasource.trackit-data-source.password=password -# Discard connections after 6 hours, this allows for gradual -# connection rebalancing. -datasource.trackit-data-source.max-age=21600000 - grpc.cue_port=${CUEBOT_GRPC_CUE_PORT:8443} grpc.rqd_server_port=${CUEBOT_GRPC_RQD_SERVER_PORT:8444} grpc.max_message_bytes=104857600 # Number of entries allowed in the RQD channel cache -grpc.rqd_cache_size=500 +grpc.rqd_cache_size=2000 # RQD Channel Cache Expiration in Minutes -grpc.rqd_cache_expiration=30 +grpc.rqd_cache_expiration=5 +# RQD Channel Cache expected concurrency +grpc.rqd_cache_concurrency=20 +# RQD Channel task deadline in seconds +grpc.rqd_task_deadline=10 + +# Healthy Threadpool Executor +booking_queue.threadpool.health_threshold=10 +booking_queue.threadpool.core_pool_size=10 +booking_queue.threadpool.max_pool_size=14 +booking_queue.threadpool.queue_capacity=2000 +dispatch.threadpool.core_pool_size=6 +dispatch.threadpool.max_pool_size=8 +dispatch.threadpool.queue_capacity=2000 +healthy_threadpool.health_threshold=6 +healthy_threadpool.min_unhealthy_period_min=3 +report_queue.threadPoolSizeInitial=6 +report_queue.threadPoolSizeMax=12 +# The queue size should be bigger then the expected 
amount of hosts +report_queue.queueSize=5000 +kill_queue.threadPoolSizeInitial=2 +kill_queue.threadPoolSizeMax=6 +kill_queue.queueSize=1000 + +# Turn on/off jobCompletion mailing module +mailing.enabled=true # Whether or not to enable publishing to a messaging topic. # Set to a boolean value. See com/imageworks/spcue/services/JmsMover.java. messaging.enabled=false -# Root directory for which logs will be stored. See com/imageworks/spcue/util/JobLogUtil.java. +# Default root directory for which logs will be stored if no other OS is defined. +# See com/imageworks/spcue/util/JobLogUtil.java. # Override this via environment variable (CUE_FRAME_LOG_DIR) or command line flag -# (--log.frame-log-root). Command line flag will be preferred if both are provided. -log.frame-log-root=${CUE_FRAME_LOG_DIR:/shots} +# (--log.frame-log-root.default_os). Command line flag will be preferred if both are provided. +log.frame-log-root.default_os=${CUE_FRAME_LOG_DIR:/shots} +# To set up root directories for other OS create new environment +# variable as `log.frame-log-root.[OS] where OS relates to str_os on the job table +# For example: +# - log.frame-log-root.linux=${CUE_FRAME_LOG_DIR:/shots} +# - log.frame-log-root.Windows=${CUE_FRAME_LOG_DIR:/S:} +# Note that for Windows, either forward or back slashes will work. However if CUE_FRAME_LOG_DIR is empty +# and S directory is in the root, the path will be broken due to the slash in front of S. Hence, if you +# are planning to use a folder in the root, use: +# - log.frame-log-root.Windows=${S:} # Maximum number of jobs to query. dispatcher.job_query_max=20 # Number of seconds before waiting to book the same job from a different host. # "0" disables the job_lock -dispatcher.job_lock_expire_seconds=0 +dispatcher.job_lock_expire_seconds=20 # Concurrency level to allow on the job lock cache -dispatcher.job_lock_concurrency_level=3 +dispatcher.job_lock_concurrency_level=14 # Maximum number of frames to query from the DB to attempt to dispatch. 
dispatcher.frame_query_max=20 # Maximum number of frames to book at one time on the same host. dispatcher.job_frame_dispatch_max=8 # Maximum number of frames to dispatch from a host at one time. dispatcher.host_frame_dispatch_max=12 +# Choose between different scheduling strategies: +# - PRIORITY_ONLY: Sort by priority only +# - FIFO: Whether or not to enable FIFO scheduling in the same priority. +# - BALANCED: Use a rank formula that takes into account time waiting, and number +# of cores required: rank = priority + (100 * (1 - (job.cores/job.int_min_cores))) + age in days +# layer limiting is also disabled in this mode for performance reasons +dispatcher.scheduling_mode=PRIORITY_ONLY + +# Number of threads to keep in the pool for launching job. +dispatcher.launch_queue.core_pool_size=1 +# Maximum number of threads to allow in the pool for launching job. +dispatcher.launch_queue.max_pool_size=1 +# Queue capacity for launching job. +dispatcher.launch_queue.queue_capacity=100 + +# Number of threads to keep in the pool for various operation. +dispatcher.dispatch_pool.core_pool_size=4 +# Maximum number of threads to allow in the pool for various operation. +dispatcher.dispatch_pool.max_pool_size=4 +# Queue capacity for various operation. +dispatcher.dispatch_pool.queue_capacity=500 + +# Number of threads to keep in the pool for management operation. +dispatcher.manage_pool.core_pool_size=8 +# Maximum number of threads to allow in the pool for management operation. +dispatcher.manage_pool.max_pool_size=8 +# Queue capacity for management operation. +dispatcher.manage_pool.queue_capacity=250 + +# Number of threads to keep in the pool for handling Host Report. +dispatcher.report_queue.core_pool_size=6 +# Maximum number of threads to allow in the pool for handling Host Report. +dispatcher.report_queue.max_pool_size=8 +# Queue capacity for handling Host Report. 
+dispatcher.report_queue.queue_capacity=1000 + +# The minimum amount of free space in the temporary directory (mcp) to book a host. +# E.g: 1G = 1048576 kB => dispatcher.min_bookable_free_temp_dir_kb=1048576 +# Default = -1 (deactivated) +# If equals to -1, it means the feature is turned off +dispatcher.min_bookable_free_temp_dir_kb=-1 + +# Number of threads to keep in the pool for kill frame operation. +dispatcher.kill_queue.core_pool_size=6 +# Maximum number of threads to allow in the pool for kill frame operation. +dispatcher.kill_queue.max_pool_size=8 +# Queue capacity for kill frame operation. +dispatcher.kill_queue.queue_capacity=1000 + +# Number of threads to keep in the pool for booking. +dispatcher.booking_queue.core_pool_size=6 +# Maximum number of threads to allow in the pool for booking. +dispatcher.booking_queue.max_pool_size=6 +# Queue capacity for booking. +dispatcher.booking_queue.queue_capacity=1000 + +# Percentage of used memory to consider a risk for triggering oom-killer +dispatcher.oom_max_safe_used_memory_threshold=0.95 + +# How much can a frame exceed its reserved memory. +# - 0.5 means 50% above reserve +# - -1.0 makes the feature inactive +# This feature is being kept inactive for now as we work on improving the +# frame retry logic (See commit comment for more details). +dispatcher.oom_frame_overboard_allowed_threshold=-1.0 + +# How many times should cuebot send a kill request for the same frame-host before reporting +# the frame as stuck +dispatcher.frame_kill_retry_limit=3 + +# Whether to satisfy dependents (*_ON_FRAME and *_ON_LAYER) only on Frame success +depend.satisfy_only_on_frame_success=true # Jobs will be archived to the history tables after being completed for this long. history.archive_jobs_cutoff_hours=72 + +# Delete down hosts automatically. +maintenance.auto_delete_down_hosts=false + +# Set hostname/IP of the smtp host. 
Will be used for mailing +smtp_host=smtp + +# Flags related to a job that runs periodically to deactivate shows that haven't been +# receiving jobs. +# A comma separated list of shows that won't be deactivated by the scheduled tasks +protected_shows=testing +# Number of days a show needs to be stale before it gets deactivated. +# -1 means shows should not get deactivated at all. +max_show_stale_days=-1 + +# These flags determine whether layers/frames will be readonly when job is finished. +# If flags are set as true, layers/frames cannot be retried, eaten, edited dependency on, etc. +layer.finished_jobs_readonly=false +frame.finished_jobs_readonly=false \ No newline at end of file diff --git a/cuebot/src/main/resources/public/dtd/cjsl-1.12.dtd b/cuebot/src/main/resources/public/dtd/cjsl-1.12.dtd new file mode 100644 index 000000000..222e04cfc --- /dev/null +++ b/cuebot/src/main/resources/public/dtd/cjsl-1.12.dtd @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cuebot/src/main/resources/public/dtd/cjsl-1.13.dtd b/cuebot/src/main/resources/public/dtd/cjsl-1.13.dtd new file mode 100644 index 000000000..ff7ad71e3 --- /dev/null +++ b/cuebot/src/main/resources/public/dtd/cjsl-1.13.dtd @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java b/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java index afbc6c6b5..e86126839 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java +++ b/cuebot/src/test/java/com/imageworks/spcue/config/TestAppConfig.java @@ -37,26 +37,11 @@ "classpath:conf/spring/applicationContext-grpcServer.xml", "classpath:conf/spring/applicationContext-service.xml", 
"classpath:conf/spring/applicationContext-jms.xml", - "classpath:conf/spring/applicationContext-trackit.xml", "classpath:conf/spring/applicationContext-criteria.xml"}) @EnableConfigurationProperties @PropertySource({"classpath:opencue.properties"}) public class TestAppConfig { - @Bean - @ConfigurationProperties(prefix="datasource.trackit-data-source") - public DataSource trackitDataSource() { - return DataSourceBuilder.create().build(); - } - - @Configuration - @Conditional(OracleDatabaseCondition.class) - @ImportResource({ - "classpath:conf/spring/applicationContext-oracle-datasource.xml", - "classpath:conf/spring/applicationContext-dao-oracle.xml" - }) - static class OracleEngineConfig {} - @Configuration @Conditional(PostgresDatabaseCondition.class) @ImportResource({ diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingOracleEngine.java b/cuebot/src/test/java/com/imageworks/spcue/test/AssumingOracleEngine.java deleted file mode 100644 index d705a7240..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingOracleEngine.java +++ /dev/null @@ -1,58 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -package com.imageworks.spcue.test; - -import org.junit.AssumptionViolatedException; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; - -import com.imageworks.spcue.config.DatabaseEngine; - - -public class AssumingOracleEngine implements TestRule { - - private DatabaseEngine dbEngine; - - public AssumingOracleEngine() {} - - @Override - public Statement apply(Statement base, Description description) { - return new Statement() { - @Override - public void evaluate() throws Throwable { - if (dbEngine == DatabaseEngine.ORACLE) { - base.evaluate(); - } else { - throw new AssumptionViolatedException( - "Current database engine is " + dbEngine.toString() + - ", test requires ORACLE. Skipping"); - } - } - }; - } - - public DatabaseEngine getDbEngine() { - return dbEngine; - } - - public void setDbEngine(DatabaseEngine dbEngine) { - this.dbEngine = dbEngine; - } -} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingTrackitEnabled.java b/cuebot/src/test/java/com/imageworks/spcue/test/AssumingTrackitEnabled.java deleted file mode 100644 index 6469ef710..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/AssumingTrackitEnabled.java +++ /dev/null @@ -1,50 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - -package com.imageworks.spcue.test; - -import org.junit.AssumptionViolatedException; -import org.junit.rules.TestRule; -import org.junit.runner.Description; -import org.junit.runners.model.Statement; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.core.env.Environment; - - -public class AssumingTrackitEnabled implements TestRule { - - @Autowired - private Environment env; - - public AssumingTrackitEnabled() {} - - @Override - public Statement apply(Statement base, Description description) { - return new Statement() { - @Override - public void evaluate() throws Throwable { - if (env.getRequiredProperty("cue.trackit.enabled", Boolean.class)) { - base.evaluate(); - } else { - throw new AssumptionViolatedException( - "Trackit is not enabled in this environment. Skipping"); - } - } - }; - } -} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetup.java b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetup.java deleted file mode 100644 index b46977064..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetup.java +++ /dev/null @@ -1,217 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test; - -import java.io.File; -import java.io.FileNotFoundException; -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Scanner; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -public final class TestDatabaseSetup { - private static final String USERNAME = "ct" + System.currentTimeMillis(); - private static final String PASSWORD = "password"; - private String sysPwd; - private String dbTns = "oraxelocal"; - private static AtomicBoolean setupComplete = new AtomicBoolean(false); - - public TestDatabaseSetup() { - String tns = System.getenv("CUEBOT_DB_TNS"); - if (tns != null) { - setDbTns(tns); - } - String pwd = System.getenv("CUEBOT_DB_SYS_PWD"); - if (pwd == null) { - throw new RuntimeException("CUEBOT_DB_SYS_PWD must be set in your environment"); - } - setSysPwd(pwd); - } - - private String getDbTns() { - return dbTns; - } - - private void setDbTns(String dbTns) { - this.dbTns = dbTns; - } - - private String getSysPwd() { - return sysPwd; - } - - private void setSysPwd(String sysPwd) { - this.sysPwd = sysPwd; - } - - public String getUsername() { - return USERNAME; - } - - public String getPassword() { - return PASSWORD; - } - - public String getUrl() { - return "jdbc:oracle:oci:@" + getDbTns(); - } - - public void create() throws Exception { - if (!setupComplete.compareAndSet(false, true)) { - return; - } - - if (System.getenv("TNS_ADMIN") == null) { - throw new RuntimeException("TNS_ADMIN must be set in your environment"); - } - System.setProperty("oracle.net.tns_admin", System.getenv("TNS_ADMIN")); - System.out.println("CREATING CUE3 TEST USER"); - Connection sysConn = DriverManager.getConnection( - "jdbc:oracle:oci:@" + getDbTns(), - "sys as sysdba", - getSysPwd() - ); - - purgeOldUsers(sysConn); 
- - Statement stmt = null; - try { - stmt = sysConn.createStatement(); - stmt.execute("CREATE USER " + USERNAME + " IDENTIFIED BY " + PASSWORD); - stmt.execute("GRANT CONNECT, RESOURCE, DBA TO " + USERNAME); - } finally { - if (stmt != null) { - stmt.close(); - } - - if (sysConn != null) { - sysConn.close(); - } - } - - // The spring junit runner doesn't want to call the destroy-method on this bean, even if we tell it to in the XML. As such, - // we're adding a shutdown hook here to ensure that the database gets cleaned up. Newer version of spring have a class-level - // @DirtiesContext annotation that you can use to tell spring to destroy everything after the test class runs. - Runtime.getRuntime().addShutdownHook(new Thread() { - @Override - public void run() { - try { - TestDatabaseSetup.this.destroy(); - } catch (Exception e) { - e.printStackTrace(); - } - } - - }); - - System.out.println("CREATING CUE3 TEST DATABASE " + USERNAME); - Connection conn = DriverManager.getConnection( - "jdbc:oracle:oci:@" + getDbTns(), - USERNAME, - PASSWORD - ); - stmt = null; - try { - stmt = conn.createStatement(); - - ClassLoader classLoader = getClass().getClassLoader(); - applySqlFile(new File(classLoader.getResource("conf/ddl/oracle/schema.sql").getFile()), stmt); - applySqlFile(new File(classLoader.getResource("conf/ddl/oracle/test_data.sql").getFile()), stmt); - } finally { - if (stmt != null) { - stmt.close(); - } - - if (conn != null) { - conn.close(); - } - } - } - - public void destroy() throws Exception { - System.out.println("DESTROYING CUE3 TEST DATABASE " + USERNAME); - try (Connection conn = DriverManager.getConnection( - "jdbc:oracle:oci:@" + getDbTns(), - "sys as sysdba", - getSysPwd() - )) { - purgeUser(conn, USERNAME); - } - } - - private void purgeOldUsers(Connection conn) throws SQLException { - long EXPIRE_TIME = TimeUnit.MILLISECONDS.convert(6, TimeUnit.HOURS); - long now = System.currentTimeMillis(); - Pattern ct_re = Pattern.compile("^CT(\\d+)$"); - - 
try ( - Statement stmt = conn.createStatement(); - ResultSet rs = stmt.executeQuery( - "SELECT username FROM dba_users WHERE username LIKE 'CT%'"); - ) { - while (rs.next()) { - String username = rs.getString(1); - Matcher m = ct_re.matcher(username); - if (!m.matches()) { - continue; - } - - long ts = Long.valueOf(m.group(1)); - if (ts >= now - EXPIRE_TIME) { - System.out.println("FOUND NON-EXPIRED USER " + username); - continue; - } - - System.out.println("REMOVING EXPIRED USER " + username); - purgeUser(conn, username); - } - } - } - - private void purgeUser(Connection conn, String username) throws SQLException { - try (Statement rmstmt = conn.createStatement()) { - rmstmt.execute("DROP USER " + username + " CASCADE"); - } - } - - private void applySqlFile(File sqlFile, Statement stmt) throws FileNotFoundException, SQLException { - // http://stackoverflow.com/a/18897411 - String dbCreateScript = new Scanner(sqlFile, "UTF-8").useDelimiter("\\A").next(); - String[] dbCreateScriptPieces = dbCreateScript.split("-- SPLIT HERE!"); - - for (String dbCreateScriptPiece : dbCreateScriptPieces) { - System.out.print("."); - try { - stmt.execute(dbCreateScriptPiece); - } catch (Exception e) { - System.out.println(dbCreateScriptPiece); - throw e; - } - } - System.out.println(); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java index 713af7cbc..49513708b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/TestDatabaseSetupPostgres.java @@ -18,7 +18,7 @@ import com.google.common.base.Charsets; import com.google.common.io.Resources; -import com.opentable.db.postgres.embedded.EmbeddedPostgres; +import io.zonky.test.db.postgres.embedded.EmbeddedPostgres; import org.flywaydb.core.Flyway; import java.net.URL; diff --git 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java index ca6d3c2c2..1e8f01669 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/FrameSearchTests.java @@ -235,7 +235,7 @@ public void filterByMemoryRange() { i -> { FrameInterface frame = frameDao.findFrame(layer, i); frameDao.updateFrameState(frame, FrameState.RUNNING); - frameDao.updateFrameMemoryUsage(frame, CueUtil.GB * 5, CueUtil.GB); + frameDao.updateFrameMemoryUsageAndLluTime(frame, CueUtil.GB * 5, CueUtil.GB, 0); }); FrameSearchInterface frameSearch = frameSearchFactory.create(); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java index 59f00df9a..78a13b321 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/criteria/ProcSearchTests.java @@ -50,6 +50,7 @@ import com.imageworks.spcue.service.HostManager; import com.imageworks.spcue.service.JobLauncher; import com.imageworks.spcue.service.JobManager; +import com.imageworks.spcue.util.CueUtil; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.Assert.assertEquals; @@ -209,11 +210,12 @@ private void launchJobs() { private RenderHost.Builder buildRenderHost() { return RenderHost.newBuilder() .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ActionDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ActionDaoTests.java deleted file mode 100644 index 6fa99a270..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ActionDaoTests.java +++ /dev/null @@ -1,208 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.ActionEntity; -import com.imageworks.spcue.FilterEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.ActionDao; -import com.imageworks.spcue.dao.FilterDao; -import com.imageworks.spcue.dao.GroupDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.grpc.filter.ActionType; -import com.imageworks.spcue.grpc.filter.ActionValueType; -import com.imageworks.spcue.grpc.filter.FilterType; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; - - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ActionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - ActionDao actionDao; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - GroupDao groupDao; - - @Resource - JobManager jobManager; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public FilterEntity buildFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = 
"00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testCreateAction() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.booleanValue = true; - a1.valueType = ActionValueType.BOOLEAN_TYPE; - actionDao.createAction(a1); - - ActionEntity a2 = new ActionEntity(); - a2.type = ActionType.MOVE_JOB_TO_GROUP; - a2.filterId = f.getFilterId(); - a2.groupValue = groupDao.getRootGroupId(getShow()); - a2.valueType = ActionValueType.GROUP_TYPE; - actionDao.createAction(a2); - - ActionEntity a3 = new ActionEntity(); - a3.type = ActionType.SET_JOB_MAX_CORES; - a3.filterId = f.getFilterId(); - a3.floatValue = 1f; - a3.valueType = ActionValueType.FLOAT_TYPE; - actionDao.createAction(a3); - - ActionEntity a4 = new ActionEntity(); - a4.type = ActionType.SET_JOB_MIN_CORES; - a4.filterId = f.getFilterId(); - a4.floatValue = 1; - a4.valueType = ActionValueType.FLOAT_TYPE; - actionDao.createAction(a4); - - ActionEntity a5 = new ActionEntity(); - a5.type = ActionType.STOP_PROCESSING; - a5.filterId = f.getFilterId(); - a5.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a5); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAction() { - - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - actionDao.deleteAction(a); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.valueType = 
ActionValueType.NONE_TYPE; - actionDao.createAction(a); - actionDao.getAction(a); - actionDao.getAction(a.getActionId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAction() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.name = null; - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - - a.floatValue = 1f; - a.type = ActionType.SET_JOB_MIN_CORES; - a.valueType = ActionValueType.FLOAT_TYPE; - - actionDao.updateAction(a); - - assertEquals(Integer.valueOf(1), - jdbcTemplate.queryForObject( - "SELECT float_value FROM action WHERE pk_action=?", - Integer.class, a.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActions() { - FilterEntity f = buildFilter(); - filterDao.insertFilter(f); - - ActionEntity a = new ActionEntity(); - a.type = ActionType.STOP_PROCESSING; - a.filterId = f.getFilterId(); - a.name = null; - a.valueType = ActionValueType.NONE_TYPE; - actionDao.createAction(a); - - actionDao.getActions(f); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/AllocationDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/AllocationDaoTests.java deleted file mode 100644 index 98dacfc5e..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/AllocationDaoTests.java +++ /dev/null @@ -1,185 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.AllocationEntity; -import com.imageworks.spcue.FacilityInterface; -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class AllocationDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - AllocationDao allocDao; - - @Resource - FacilityDao facilityDao; - - @Resource - AdminManager adminManager; - - public static final String ALLOC_FQN = "spi.test_alloc"; - public static final String ALLOC_NAME = "test_alloc"; - public static final 
String ALLOC_TAG = "test"; - - private AllocationEntity alloc; - - @Before - public void before() { - - alloc = new AllocationEntity(); - alloc.name = ALLOC_NAME; - alloc.tag = ALLOC_TAG; - - allocDao.insertAllocation( - facilityDao.getFacility("spi"), alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocation() { - allocDao.getAllocationEntity(alloc.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation() { - FacilityInterface f = facilityDao.getFacility("spi"); - allocDao.findAllocationEntity(f.getName(), ALLOC_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation2() { - FacilityInterface f = facilityDao.getFacility("spi"); - allocDao.findAllocationEntity(ALLOC_FQN); - } - - - @Test - @Transactional - @Rollback(true) - public void testDeleteAllocation() { - allocDao.deleteAllocation(alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteAllocationWithProc() { - - // Use the alloc so deleting triggers it just to be disaled. - ShowEntity show = adminManager.getShowEntity( - "00000000-0000-0000-0000-000000000000"); - adminManager.createSubscription(show, alloc, 10, 10); - allocDao.deleteAllocation(alloc); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? AND b_enabled = 0", - Integer.class, alloc.getAllocationId())); - - assertEquals(ALLOC_FQN, jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=? AND b_enabled = 0", - String.class, alloc.getAllocationId())); - - // Now re-enable it. - allocDao.insertAllocation(facilityDao.getDefaultFacility(), alloc); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM alloc WHERE pk_alloc=? 
AND b_enabled = 1", - Integer.class, alloc.getAllocationId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationName() { - allocDao.updateAllocationName(alloc, "frickjack"); - assertEquals("spi.frickjack", jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=?", - String.class, - alloc.getId())); - } - - @Test(expected = IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateAllocationNameBad() { - allocDao.updateAllocationName(alloc, "spi.frickjack"); - assertEquals("spi.frickjack", jdbcTemplate.queryForObject( - "SELECT str_name FROM alloc WHERE pk_alloc=?", - String.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationTag() { - allocDao.updateAllocationTag(alloc, "foo"); - assertEquals("foo",jdbcTemplate.queryForObject( - "SELECT str_tag FROM alloc WHERE pk_alloc=?", - String.class, alloc.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateAllocationBillable() { - allocDao.updateAllocationBillable(alloc, false); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_billable FROM alloc WHERE pk_alloc=?", - Integer.class, alloc.getId())); - - allocDao.updateAllocationBillable(alloc, true); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_billable FROM alloc WHERE pk_alloc=?", - Integer.class, alloc.getId())); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/BookingDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/BookingDaoTests.java deleted file mode 100644 index 58b47bfdb..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/BookingDaoTests.java +++ /dev/null @@ -1,456 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with 
the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.BookingDao; -import com.imageworks.spcue.dao.DispatcherDao; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dao.ProcDao; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.renderpartition.RenderPartition; -import com.imageworks.spcue.grpc.renderpartition.RenderPartitionType; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.service.Whiteboard; -import 
com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class BookingDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobManager jobManager; - - @Resource - HostDao hostDao; - - @Resource - BookingDao bookingDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - ProcDao procDao; - - @Resource - Whiteboard whiteboard; - - public DispatchHost createHost() { - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(195430) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .addTags("general") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail d = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.setJobPaused(d, false); - return d; - } - - @Test - @Transactional - @Rollback(true) - public void insertLocalJobAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - 
LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(1); - lja.setThreads(2); - - bookingDao.insertLocalHostAssignment(h, j, lja); - - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void insertLocalLayerAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - LayerInterface layer = jobManager.getLayers(j).get(0); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(1); - lja.setThreads(2); - - bookingDao.insertLocalHostAssignment(h, layer, lja); - - assertEquals(layer.getLayerId(), jdbcTemplate.queryForObject( - "SELECT pk_layer FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(RenderPartitionType.LAYER_PARTITION.toString(), - jdbcTemplate.queryForObject( - "SELECT 
str_type FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void insertLocalFrameAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - LayerInterface layer = jobManager.getLayers(j).get(0); - FrameInterface frame = jobManager.findFrame(layer, 1); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(1); - lja.setThreads(2); - - bookingDao.insertLocalHostAssignment(h, frame, lja); - - assertEquals(frame.getFrameId(), jdbcTemplate.queryForObject( - "SELECT pk_frame FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(RenderPartitionType.FRAME_PARTITION.toString(), - jdbcTemplate.queryForObject( - "SELECT str_type FROM host_local WHERE pk_host_local=?", - String.class, lja.getId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT int_threads FROM host_local WHERE pk_job=?", - Integer.class, 
j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_max FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_cores_idle FROM host_local WHERE pk_job=?", - Integer.class, j.getJobId())); - - assertEquals(Long.valueOf(CueUtil.GB4), jdbcTemplate.queryForObject( - "SELECT int_mem_idle FROM host_local WHERE pk_job=?", - Long.class, j.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLocalJobAssignment() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpu(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - - LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), - j.getJobId()); - - assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); - assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getMaxGpu(), lja2.getMaxGpu()); - assertEquals(lja.getThreads(), lja2.getThreads()); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetRenderPartition() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpu(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - - LocalHostAssignment lja2 = bookingDao.getLocalJobAssignment(h.getHostId(), - j.getJobId()); - - assertEquals(lja.getMaxCoreUnits(), 
lja2.getMaxCoreUnits()); - assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getThreads(), lja2.getThreads()); - assertEquals(lja.getMaxGpu(), lja2.getMaxGpu()); - - RenderPartition rp = whiteboard.getRenderPartition(lja2); - - assertEquals(lja2.getMaxCoreUnits(), rp.getMaxCores()); - assertEquals(lja2.getMaxMemory(), rp.getMaxMemory()); - assertEquals(lja2.getThreads(), rp.getThreads()); - logger.info("--------------------"); - logger.info(lja2.getMaxGpu()); - logger.info(rp.getMaxGpu()); - assertEquals(lja2.getMaxGpu(), rp.getMaxGpu()); - assertEquals(h.getName(), rp.getHost()); - assertEquals(j.getName(), rp.getJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetProcs() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpu(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - - assertEquals(0, procDao.findVirtualProcs(lja).size()); - } - - @Test - @Transactional - @Rollback(true) - public void updateMaxCores() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpu(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - assertTrue(bookingDao.updateMaxCores(lja, 100)); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM host_local WHERE pk_host=?", - Integer.class, h.getHostId())); - - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(100, lj2.getIdleCoreUnits()); - assertEquals(100, lj2.getMaxCoreUnits()); - - bookingDao.updateMaxCores(lja, 200); - - lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(200, lj2.getIdleCoreUnits()); - assertEquals(200, lj2.getMaxCoreUnits()); - } - - @Test - 
@Transactional - @Rollback(true) - public void updateMaxMemory() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpu(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - bookingDao.updateMaxMemory(lja, CueUtil.GB2); - - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - - bookingDao.updateMaxMemory(lja, CueUtil.GB4); - - lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(CueUtil.GB4, lj2.getIdleMemory()); - assertEquals(CueUtil.GB4, lj2.getMaxMemory()); -} - - @Test - @Transactional - @Rollback(true) - public void updateMaxGpu() { - - DispatchHost h = createHost(); - JobDetail j = launchJob(); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setMaxCoreUnits(200); - lja.setMaxMemory(CueUtil.GB4); - lja.setThreads(2); - lja.setMaxGpu(1); - - bookingDao.insertLocalHostAssignment(h, j, lja); - bookingDao.updateMaxMemory(lja, CueUtil.GB2); - - LocalHostAssignment lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(1, lj2.getMaxGpu()); - - bookingDao.updateMaxGpu(lja, 2); - - lj2 = bookingDao.getLocalJobAssignment(lja.id); - - assertEquals(CueUtil.GB2, lj2.getIdleMemory()); - assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(2, lj2.getMaxGpu()); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/CommentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/CommentDaoTests.java deleted file mode 100644 index eecd2b8e2..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/CommentDaoTests.java +++ /dev/null @@ -1,238 +0,0 @@ - -/* - * Copyright Contributors to the 
OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.CommentDetail; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.CommentDao; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class 
CommentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - CommentDao commentDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostManager hostManager; - - @Before - public void testMode() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteComment() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - commentDao.deleteComment(d.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetComment() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - - assertEquals(d.message,nd.message); - assertEquals(d.subject,nd.subject); - assertEquals(d.user,nd.user); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertCommentOnJob() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - - assertEquals(d.message,nd.message); - assertEquals(d.subject,nd.subject); - assertEquals(d.user,nd.user); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertCommentOnHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("boo") - .setBootTime(1192369572) - 
.setFreeMcp(76020) - .setFreeMem(15290520) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(19543) - .setTotalMem(15290520) - .setTotalSwap(2096) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(400) - .addTags("linux") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - DispatchHost h = hostManager.createHost(host); - commentDao.insertComment(h, d); - - assertNotNull(d.id); - - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - - assertEquals(d.message,nd.message); - assertEquals(d.subject,nd.subject); - assertEquals(d.user,nd.user); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateComment() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - - d.message = "no"; - d.subject = "no"; - - commentDao.updateComment(d); - - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - - assertEquals("no",nd.message); - assertEquals("no",nd.subject); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateCommentMessage() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - commentDao.updateCommentMessage(d.getId(), "no"); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no",nd.message); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateCommentSubject() { - - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - 
CommentDetail d = new CommentDetail(); - d.message = "a message"; - d.subject = "a subject"; - d.user = "user"; - - commentDao.insertComment(job, d); - commentDao.updateCommentSubject(d.getId(), "no"); - CommentDetail nd = commentDao.getCommentDetail(d.getId()); - assertEquals("no",nd.subject); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DeedDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DeedDaoTests.java deleted file mode 100644 index d174c089e..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DeedDaoTests.java +++ /dev/null @@ -1,212 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DeedEntity; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.OwnerEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DeedDao; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.OwnerManager; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DeedDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - OwnerManager ownerManager; - - @Resource - DeedDao deedDao; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(15290520) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(19543) - 
.setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("general") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testInsertDeed() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertEquals(host.getName(), d.host); - } - - @Test - @Transactional - @Rollback(true) - public void tesDeleteDeed() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertTrue(deedDao.deleteDeed(d)); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertFalse(deedDao.deleteDeed(d)); - } - - @Test - @Transactional - @Rollback(true) - public void tesGetDeed() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - DeedEntity d2 = deedDao.getDeed(d.id); - - assertEquals(d, d2); - } - - @Test - 
@Transactional - @Rollback(true) - public void tesGetDeeds() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - assertEquals(1, deedDao.getDeeds(o).size()); - assertEquals(d, deedDao.getDeeds(o).get(0)); - } - - - @Test - @Transactional - @Rollback(true) - public void testEnableDisableBlackoutTime() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - deedDao.updateBlackoutTimeEnabled(d, true); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_blackout FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - deedDao.updateBlackoutTimeEnabled(d, false); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_blackout FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testSetBlackOutTimes() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - deedDao.setBlackoutTime(d, 3600, 7200); - - assertEquals(Integer.valueOf(3600), jdbcTemplate.queryForObject( - "SELECT int_blackout_start FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertEquals(Integer.valueOf(7200), jdbcTemplate.queryForObject( - "SELECT int_blackout_stop FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DepartmentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DepartmentDaoTests.java deleted file mode 100644 index e309d1630..000000000 --- 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DepartmentDaoTests.java +++ /dev/null @@ -1,107 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DepartmentDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - DepartmentDao departmentDao; - - @Test - @Transactional - @Rollback(true) - public void 
testGetDepartment() { - String dept= "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; - assertEquals(dept, departmentDao.getDepartment(dept).getId()); - assertEquals(dept, departmentDao.getDepartment(dept).getDepartmentId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDepartment() { - String dept= "Hair"; - assertEquals(dept, departmentDao.findDepartment(dept).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testgetDefaultDepartment() { - assertEquals(jdbcTemplate.queryForObject( - "SELECT pk_dept FROM dept WHERE b_default=1", - String.class),departmentDao.getDefaultDepartment().getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testDepartmentExists() { - String dept= "Cloth"; - assertTrue(departmentDao.departmentExists(dept)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertDepartment() { - String deptName = "TestDept"; - departmentDao.insertDepartment(deptName); - DepartmentInterface d = departmentDao.findDepartment(deptName); - assertEquals(d.getName(), deptName); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteDepartment() { - String deptName = "TestDept"; - departmentDao.insertDepartment(deptName); - DepartmentInterface d = departmentDao.findDepartment(deptName); - assertEquals(d.getName(), deptName); - departmentDao.deleteDepartment(d); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DependDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DependDaoTests.java deleted file mode 100644 index e01ed32d4..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DependDaoTests.java +++ /dev/null @@ -1,455 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.FrameDetail; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LightweightDependency; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DependDao; -import com.imageworks.spcue.dao.FrameDao; -import com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.depend.FrameByFrame; -import com.imageworks.spcue.depend.FrameOnFrame; -import com.imageworks.spcue.depend.FrameOnJob; -import com.imageworks.spcue.depend.FrameOnLayer; -import com.imageworks.spcue.depend.JobOnFrame; -import com.imageworks.spcue.depend.JobOnJob; -import com.imageworks.spcue.depend.JobOnLayer; -import com.imageworks.spcue.depend.LayerOnFrame; -import com.imageworks.spcue.depend.LayerOnJob; -import com.imageworks.spcue.depend.LayerOnLayer; -import com.imageworks.spcue.depend.PreviousFrame; -import com.imageworks.spcue.grpc.depend.DependTarget; 
-import com.imageworks.spcue.grpc.depend.DependType; -import com.imageworks.spcue.service.DependManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.service.JobManagerSupport; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DependDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - DependDao dependDao; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - DependManager dependManager; - - @Resource - JobManagerSupport jobManagerSupport; - - @Resource - JobLauncher jobLauncher; - - @Before - public void launchTestJobs() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_depend_test.xml")); - } - - public JobDetail getJobA() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_a"); - } - - public JobDetail getJobB() { - return jobManager.findJobDetail("pipe-dev.cue-testuser_depend_test_b"); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnLayer() { - - 
JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); - JobOnLayer depend = new JobOnLayer(job_a, layer); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - JobOnFrame depend = new JobOnFrame(job_a, frame); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.JOB_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_b, "pass_1"); - - LayerOnJob depend = new LayerOnJob(layer, job_a); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - 
dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - LayerOnFrame depend = new LayerOnFrame(layer, frame); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.LAYER_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnJob() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnJob depend = new FrameOnJob(frame, job_a); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_JOB, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer = layerDao.findLayer(job_a, "pass_1"); - FrameDetail frame = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnLayer depend = new FrameOnLayer(frame,layer); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - 
assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_LAYER, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFrameByFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - FrameByFrame depend = new FrameByFrame(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_BY_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertPreviousFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - PreviousFrame depend = new PreviousFrame(layer_a, layer_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - 
assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.PREVIOUS_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - } - - @Test - @Transactional - @Rollback(true) - public void testReinsertFrameOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - LightweightDependency lwd = dependDao.getDepend(depend.getId()); - assertEquals(depend.getId(), lwd.getId()); - assertEquals(DependType.FRAME_ON_FRAME, lwd.type); - assertEquals(DependTarget.EXTERNAL, lwd.target); - assertTrue(lwd.active); - assertFalse(lwd.anyFrame); - - dependDao.setInactive(lwd); - - // Try to reinsert it now that the original is inactive. - depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnJob() { - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - JobOnJob depend = new JobOnJob(job_a, job_b); - dependDao.insertDepend(depend); - - assertEquals(1, dependDao.getWhatDependsOn(job_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(job_a).size()); - } - - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnLayer() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); - - assertEquals(1, dependDao.getWhatDependsOn(layer_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(layer_a).size()); - } - - @Test - @Transactional - @Rollback(true) - public void 
testGetWhatDependsOnLayerInactive() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - LayerInterface layer_a = layerDao.findLayer(job_a, "pass_1"); - LayerInterface layer_b = layerDao.findLayer(job_b, "pass_1"); - - LayerOnLayer depend = new LayerOnLayer(layer_a, layer_b); - dependDao.insertDepend(depend); - - dependDao.setInactive(dependDao.getDepend(depend.getId())); - - assertEquals(1, dependDao.getWhatDependsOn(layer_b, false).size()); - assertEquals(0, dependDao.getWhatDependsOn(layer_b, true).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnFrame() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - assertEquals(1, dependDao.getWhatDependsOn(frame_b).size()); - assertEquals(0, dependDao.getWhatDependsOn(frame_a).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatDependsOnFrameInactive() { - - JobDetail job_a = getJobA(); - JobDetail job_b = getJobB(); - - FrameDetail frame_a = frameDao.findFrameDetail(job_a, "0001-pass_1"); - FrameDetail frame_b = frameDao.findFrameDetail(job_b, "0001-pass_1"); - - FrameOnFrame depend = new FrameOnFrame(frame_a, frame_b); - dependDao.insertDepend(depend); - - dependDao.setInactive(dependDao.getDepend(depend.getId())); - - assertEquals(1, dependDao.getWhatDependsOn(frame_b, false).size()); - assertEquals(0, dependDao.getWhatDependsOn(frame_b, true).size()); - assertEquals(0, dependDao.getWhatDependsOn(frame_a, true).size()); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DispatcherDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DispatcherDaoTests.java deleted file mode 100644 index ef35e9558..000000000 --- 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/DispatcherDaoTests.java +++ /dev/null @@ -1,522 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.util.List; -import java.util.Set; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchFrame; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.LayerDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.dao.BookingDao; -import com.imageworks.spcue.dao.DispatcherDao; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dao.JobDao; -import 
com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.dao.ProcDao; -import com.imageworks.spcue.dispatcher.DispatchSupport; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.GroupManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class DispatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - HostDao hostDao; - - @Resource - ProcDao procDao; - - @Resource - LayerDao layerDao; - - @Resource - JobDao jobDao; - - @Resource - AllocationDao allocationDao; - - @Resource - JobManager jobManager; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - GroupManager groupManager; - - @Resource - Dispatcher dispatcher; - - @Resource - JobLauncher jobLauncher; - - @Resource - BookingDao bookingDao; - - private static final String HOSTNAME="beta"; - - public DispatchHost getHost() { - return hostDao.findDispatchHost(HOSTNAME); - } - - public JobDetail getJob1() { - return jobManager.findJobDetail( - "pipe-dev.cue-testuser_shell_dispatch_test_v1"); - } - - public JobDetail getJob2() { 
- return jobManager.findJobDetail( - "pipe-dev.cue-testuser_shell_dispatch_test_v2"); - } - - @Before - public void launchJob() { - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - jobLauncher.launch( - new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - } - - @Before - public void createHost() { - RenderHost host = RenderHost.newBuilder() - .setName(HOSTNAME) - .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(195430) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("test") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("SP_OS", "Linux") - .build(); - - hostManager.createHost(host, - adminManager.findAllocationDetail("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFrameByHost() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - for (LayerDetail layer: layerDao.getLayerDetails(job)) { - assertTrue(layer.tags.contains("general")); - } - - assertTrue(jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, - host.id).contains("general")); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - assertNotNull(frame); - assertEquals(frame.name, "0001-pass_1"); - } - - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFrameByProc() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - // TODO: fix the fact you can book the same proc on multiple frames - // probably just need to make sure you can't update a proc's frame - // assignment unless the frame id is null. 
- - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job, host); - assertNotNull(frame); - assertEquals("0001-pass_1", frame.name); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals("0001-pass_2", frame.name); - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals("0002-pass_1", frame.name); - dispatcher.dispatch(frame, proc); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProc() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - - // TODO: fix the fact you can book the same proc on multiple frames - // probably just need to make sure you can't update a proc's frame - // assignment unless the frame id is null. - - List frames = - dispatcherDao.findNextDispatchFrames(job, host,10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals(frame.name,"0001-pass_2"); - dispatcher.dispatch(frame, proc); - - frame = dispatcherDao.findNextDispatchFrame(job, proc); - assertNotNull(frame); - assertEquals(frame.name,"0002-pass_1"); - dispatcher.dispatch(frame, proc); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByHostAndJobLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - host.isLocalDispatch = true; - List frames = - dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByHostAndLayerLocal() { - DispatchHost host = getHost(); - JobDetail job = 
getJob1(); - LayerInterface layer = jobManager.getLayers(job).get(0); - host.isLocalDispatch = true; - - List frames = - dispatcherDao.findNextDispatchFrames(layer, host, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProcAndJobLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - host.isLocalDispatch = true; - List frames = - dispatcherDao.findNextDispatchFrames(job, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - proc.isLocalDispatch = true; - - frames = dispatcherDao.findNextDispatchFrames(job, proc, 10); - assertEquals(10, frames.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindNextDispatchFramesByProcAndLayerLocal() { - DispatchHost host = getHost(); - JobDetail job = getJob1(); - LayerInterface layer = jobManager.getLayers(job).get(0); - host.isLocalDispatch = true; - - List frames = - dispatcherDao.findNextDispatchFrames(layer, host, 10); - assertEquals(10, frames.size()); - - DispatchFrame frame = frames.get(0); - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - proc.isLocalDispatch = true; - - frames = dispatcherDao.findNextDispatchFrames(layer, proc, 10); - assertEquals(10, frames.size()); - } - - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobs() { - DispatchHost host = getHost(); - - assertTrue(jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job WHERE str_state='PENDING'", Integer.class) > 0); - - Set jobs = dispatcherDao.findDispatchJobs(host, 10); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByGroup() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - - assertNotNull(job); - assertNotNull(job.groupId); - - Set jobs = 
dispatcherDao.findDispatchJobs(host, - groupManager.getGroupDetail(job)); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByShow() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - assertNotNull(job); - - Set jobs = dispatcherDao.findDispatchJobs(host, - adminManager.findShowEntity("pipe"), 5); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testFindDispatchJobsByLocal() { - DispatchHost host = getHost(); - final JobDetail job = getJob1(); - assertNotNull(job); - - Set jobs = dispatcherDao.findLocalDispatchJobs(host); - assertEquals(0, jobs.size()); - - LocalHostAssignment lja = new LocalHostAssignment(); - lja.setThreads(1); - lja.setMaxMemory(CueUtil.GB16); - lja.setMaxCoreUnits(200); - lja.setMaxGpu(1); - bookingDao.insertLocalHostAssignment(host, job, lja); - - jobs = dispatcherDao.findLocalDispatchJobs(host); - assertTrue(jobs.size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testfindUnderProcedJob() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 1000); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean under = dispatcherDao.findUnderProcedJob(job1, proc); - assertTrue(under); - } - - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsTrue() { - DispatchHost host = getHost(); - JobDetail 
job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 100; - job2.priority = 200; - - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 0); - jobDao.updatePriority(job1, 100); - jobDao.updatePriority(job2, 200); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertTrue(isHigher); - } - - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsFalse() { - DispatchHost host = getHost(); - JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 20000; - job2.priority = 100; - - jobDao.updateMinCores(job1, 0); - jobDao.updateMinCores(job2, 0); - jobDao.updatePriority(job1, 20000); - jobDao.updatePriority(job2, 100); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertFalse(isHigher); - } - - @Test - @Transactional - @Rollback(true) - public void testHigherPriorityJobExistsMaxProcBound() { - DispatchHost host = getHost(); - 
JobDetail job1 = getJob1(); - JobDetail job2 = getJob2(); - job1.priority = 100; - job2.priority = 200; - - jobDao.updateMaxCores(job2, 0); - jobDao.updatePriority(job1, 100); - jobDao.updatePriority(job2, 200); - - DispatchFrame frame = dispatcherDao.findNextDispatchFrame(job1, host); - assertNotNull(frame); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job1.id)); - - assertEquals(JobState.PENDING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job2.id)); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.coresReserved = 100; - dispatcher.dispatch(frame, proc); - - boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); - assertFalse(isHigher); - } -} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FacilityInterfaceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FacilityInterfaceDaoTests.java deleted file mode 100644 index 84c7c4a16..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FacilityInterfaceDaoTests.java +++ /dev/null @@ -1,78 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class FacilityInterfaceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - FacilityDao facilityDao; - - @Test - @Transactional - @Rollback(true) - public void testGetDetaultFacility() { - assertEquals(jdbcTemplate.queryForObject( - "SELECT pk_facility FROM facility WHERE b_default=1", - String.class),facilityDao.getDefaultFacility().getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFacility() { - String id = "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"; - assertEquals(id, facilityDao.getFacility(id).getId()); - assertEquals(id, facilityDao.getFacility("spi").getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFacilityExists() { - assertTrue(facilityDao.facilityExists("spi")); - assertFalse(facilityDao.facilityExists("rambo")); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FilterDaoTests.java 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FilterDaoTests.java deleted file mode 100644 index 8e2a1c3be..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FilterDaoTests.java +++ /dev/null @@ -1,302 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.FilterEntity; -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.FilterDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.grpc.filter.FilterType; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, 
loader=AnnotationConfigContextLoader.class) -public class FilterDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - AdminManager adminManager; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface createShow() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - return show; - } - - public ShowInterface getShow() { - return showDao.findShowDetail("testtest"); - } - - public FilterEntity buildFilter(ShowInterface show) { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = show.getId(); - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testGetActiveFilters() { - filterDao.getActiveFilters(createShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilters() { - filterDao.getFilters(createShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterEnabled() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - filterDao.updateSetFilterEnabled(f, false); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_enabled FROM filter WHERE pk_filter=?", - Integer.class, f.getFilterId())); - filterDao.updateSetFilterEnabled(f, true); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_enabled FROM filter WHERE pk_filter=?", - Integer.class, f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterName() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - assertEquals(FILTER_NAME, jdbcTemplate.queryForObject( - "SELECT str_name FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - 
filterDao.updateSetFilterName(f, "TEST"); - assertEquals("TEST", jdbcTemplate.queryForObject( - "SELECT str_name FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterType() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - assertEquals(FilterType.MATCH_ANY.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - filterDao.updateSetFilterType(f, FilterType.MATCH_ALL); - assertEquals(FilterType.MATCH_ALL.toString(), jdbcTemplate.queryForObject( - "SELECT str_type FROM filter WHERE pk_filter=?", - String.class, - f.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSetFilterOrder() { - - ShowInterface show = createShow(); - int currentFilters = jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM filter WHERE pk_show=?", - Integer.class, show.getShowId()); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - assertEquals(Integer.valueOf(currentFilters+1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(currentFilters+2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - - filterDao.updateSetFilterOrder(f2,1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - filterDao.deleteFilter(f); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertFilter() { - FilterEntity f = 
buildFilter(createShow()); - filterDao.insertFilter(f); - } - - @Test - @Transactional - @Rollback(true) - public void testReorderFilters() { - buildFilter(createShow()); - filterDao.reorderFilters(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testLowerFilterOrder() { - - ShowInterface show = createShow(); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - - /** - * These could fail if the test DB has other filters. - */ - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - - filterDao.lowerFilterOrder(f2,1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testRaiseFilterOrder() { - - ShowInterface show = createShow(); - - FilterEntity f1 = buildFilter(show); - filterDao.insertFilter(f1); - - FilterEntity f2 = buildFilter(show); - f2.name = "TEST"; - filterDao.insertFilter(f2); - - /** - * These could fail if the test DB has other filters. 
- */ - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - - filterDao.raiseFilterOrder(f1, 1); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f1.getFilterId())); - - assertEquals(Integer.valueOf(2), jdbcTemplate.queryForObject( - "SELECT f_order FROM filter WHERE pk_filter=?", - Integer.class, f2.getFilterId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - - filterDao.getFilter(f); - filterDao.getFilter(f.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilter() { - FilterEntity f = buildFilter(createShow()); - filterDao.insertFilter(f); - - filterDao.findFilter(getShow(), FILTER_NAME); - } - -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FrameDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FrameDaoTests.java deleted file mode 100644 index f1532bcec..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/FrameDaoTests.java +++ /dev/null @@ -1,659 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.math.BigDecimal; -import java.util.Map; -import javax.annotation.Resource; - -import com.google.common.collect.ImmutableList; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.test.context.transaction.AfterTransaction; -import org.springframework.test.context.transaction.BeforeTransaction; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchFrame; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.FrameDetail; -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.dao.FrameDao; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.dao.ProcDao; -import com.imageworks.spcue.dao.criteria.FrameSearchFactory; -import com.imageworks.spcue.dao.criteria.FrameSearchInterface; -import com.imageworks.spcue.depend.FrameOnFrame; -import com.imageworks.spcue.dispatcher.DispatchSupport; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.job.CheckpointState; -import com.imageworks.spcue.grpc.job.FrameSearchCriteria; -import com.imageworks.spcue.grpc.job.FrameState; -import 
com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.DependManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class FrameDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - HostDao hostDao; - - @Resource - ProcDao procDao; - - @Resource - AllocationDao allocationDao; - - @Resource - HostManager hostManager; - - @Resource - DependManager dependManager; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - FrameSearchFactory frameSearchFactory; - - private static final String HOST = "beta"; - - public DispatchHost createHost() { - return hostDao.findDispatchHost(HOST); - } - - @BeforeTransaction - public void create() { - - RenderHost host = RenderHost.newBuilder() - .setName(HOST) - .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(195430) - .setTotalMem(8173264) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(1) - .setCoresPerProc(100) - .addAllTags(ImmutableList.of("mcore", "4core", "8g")) - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("freeGpu", "512") - .putAttributes("totalGpu", "512") - .build(); - - hostManager.createHost(host); - } - - @AfterTransaction - public void destroy() { - jdbcTemplate.update( - "DELETE FROM host WHERE str_name=?",HOST); - } - 
- public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testCheckRetries() { - JobDetail job = launchJob(); - frameDao.checkRetries(frameDao.findFrame(job,"0001-pass_1")); - // TODO: check to see if it actually works - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameDetail() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameDetail frame = frameDao.getFrameDetail(f); - frame = frameDao.getFrameDetail(f.getFrameId()); - assertEquals("0001-pass_1", frame.name); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrameDetail() { - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals("0001-pass_1", frame.name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrame() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface frame = frameDao.getFrame(f.getFrameId()); - assertEquals("0001-pass_1", frame.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameByLayer() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface f2 = frameDao.findFrame((LayerInterface)f, 1); - - assertEquals(f.getFrameId(), f2.getFrameId()); - assertEquals(f.getLayerId(), f2.getLayerId()); - assertEquals(f.getJobId(), f2.getJobId()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrame() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(f.getName(),"0001-pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrames() { - JobDetail job = launchJob(); - 
FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .addFrames("0001-pass_1") - .build()); - assertEquals(1, frameDao.findFrames(r).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFrameDetails() { - JobDetail job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .addFrames("0001-pass_1") - .build()); - assertEquals(1, frameDao.findFrameDetails(r).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testgetOrphanedFrames() { - assertEquals(0, frameDao.getOrphanedFrames().size()); - - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - - /* - * Update the first frame to the orphan state, which is a frame - * that is in the running state, has no corresponding proc entry - * and has not been udpated in the last 5 min. 
- */ - jdbcTemplate.update( - "UPDATE frame SET str_state='RUNNING', " + - "ts_updated=systimestamp - interval '301' second WHERE pk_frame=?", - f.getFrameId()); - - assertEquals(1, frameDao.getOrphanedFrames().size()); - assertTrue(frameDao.isOrphan(f)); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameState() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - assertTrue(frameDao.updateFrameState(f, FrameState.RUNNING)); - - assertEquals(FrameState.RUNNING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, - f.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testFailUpdateFrameState() { - JobDetail job = launchJob(); - FrameInterface f = frameDao.findFrame(job, "0001-pass_1"); - - /** Change the version so the update fails **/ - jdbcTemplate.update( - "UPDATE frame SET int_version = int_version + 1 WHERE pk_frame=?", - f.getFrameId()); - - assertEquals(false, frameDao.updateFrameState(f, FrameState.RUNNING)); - } - - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameStarted() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - assertEquals(FrameState.WAITING, frame.state); - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - frameDao.updateFrameStarted(proc, fd); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameStopped() { - - DispatchHost host = createHost(); - JobDetail job = 
launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - - assertEquals("0001-pass_1_preprocess",frame.getName()); - assertEquals(FrameState.WAITING, frame.state); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - frameDao.updateFrameStarted(proc, fd); - - try { - Thread.sleep(1001); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - DispatchFrame fd2 = frameDao.getDispatchFrame(frame.getId()); - assertTrue(frameDao.updateFrameStopped(fd2, FrameState.DEAD, 1, 1000l)); - - assertEquals(FrameState.DEAD.toString(),jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameFixed() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame fd = frameDao.getDispatchFrame(frame.getId()); - - assertEquals("0001-pass_1_preprocess",frame.getName()); - assertEquals(FrameState.WAITING, frame.state); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - frameDao.updateFrameStarted(proc, fd); - - try { - Thread.sleep(1001); - } catch 
(InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - frameDao.updateFrameState(frame, FrameState.WAITING); - frameDao.updateFrameFixed(proc, frame); - - assertEquals(FrameState.RUNNING.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchFrame() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - assertEquals(dframe.id, frame.id); - } - - @Test - @Transactional - @Rollback(true) - public void testMarkFrameAsWaiting() { - JobDetail job = launchJob(); - - FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - - frameDao.markFrameAsWaiting(f); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - } - - @Test - @Transactional - @Rollback(true) - public void testMarkFrameAsDepend() { - JobDetail job = launchJob(); - - FrameInterface f = frameDao.findFrameDetail(job, "0001-pass_1"); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - - assertEquals(Integer.valueOf(1), 
jdbcTemplate.queryForObject( - "SELECT b_active FROM depend WHERE pk_layer_depend_er=?", - Integer.class, f.getLayerId())); - - frameDao.markFrameAsWaiting(f); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - - /* - * Need to grab new version of frame - * object once the state has changed. - */ - f = frameDao.findFrameDetail(job, "0001-pass_1"); - - frameDao.markFrameAsDepend(f); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_depend_count FROM frame WHERE pk_frame=?", - Integer.class, f.getFrameId())); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindLongestFrame() { - JobDetail job = launchJob(); - frameDao.findLongestFrame(job); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindShortestFrame() { - JobDetail job = launchJob(); - frameDao.findShortestFrame(job); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void findHighestMemoryFrame() { - JobDetail job = launchJob(); - frameDao.findHighestMemoryFrame(job); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void findLowestMemoryFrame() { - JobDetail job = launchJob(); - frameDao.findLowestMemoryFrame(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDependentFrames() { - JobDetail job = launchJob(); - FrameInterface frame_a = frameDao.findFrame(job, "0001-pass_1"); - FrameInterface frame_b = frameDao.findFrame(job, "0002-pass_1"); - - dependManager.createDepend(new FrameOnFrame( - frame_a, frame_b)); - - assertEquals(1, frameDao.getDependentFrames( - dependManager.getWhatDependsOn(frame_b).get(0)).size(),1); - } 
- - @Test - @Transactional - @Rollback(true) - public void testGetResourceUsage() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - frameDao.getResourceUsage(dframe); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameCleared() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = host.allocationId; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - /* - * Only frames without active procs can be cleared. 
- */ - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.id); - assertFalse(frameDao.updateFrameCleared(dframe)); - - dispatchSupport.unbookProc(proc); - assertTrue(frameDao.updateFrameCleared(dframe)); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetStaleCheckpoints() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - assertEquals(0, frameDao.getStaleCheckpoints(300).size()); - jdbcTemplate.update("UPDATE frame SET str_state=?, " + - "ts_stopped=systimestamp - interval '400' second WHERE pk_frame=?", - FrameState.CHECKPOINT.toString(), frame.getFrameId()); - assertEquals(1, frameDao.getStaleCheckpoints(300).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testSetCheckpointState() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - frameDao.updateFrameCheckpointState(frame, CheckpointState.ENABLED); - - String state = jdbcTemplate.queryForObject( - "SELECT str_checkpoint_state FROM frame WHERE pk_frame=?", - String.class, frame.getFrameId()); - - assertEquals(CheckpointState.ENABLED.toString(), state); - - /** - * To set a checkpoint complete the frame state must be in the checkpoint state. 
- */ - frameDao.updateFrameState(frame, FrameState.CHECKPOINT); - jdbcTemplate.update( - "UPDATE frame SET ts_started=systimestamp, ts_stopped=systimestamp + INTERVAL '20' second WHERE pk_frame=?", - frame.getFrameId()); - - assertTrue(frameDao.updateFrameCheckpointState(frame, CheckpointState.COMPLETE)); - Map result = jdbcTemplate.queryForMap( - "SELECT int_checkpoint_count FROM frame WHERE pk_frame=?", - frame.getFrameId()); - - BigDecimal checkPointCount = (BigDecimal) result.get("int_checkpoint_count"); - assertEquals(1, checkPointCount.intValue()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsFrameComplete() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - frameDao.updateFrameState(frame, FrameState.EATEN); - assertTrue(frameDao.isFrameComplete(frame)); - - frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - frameDao.updateFrameState(frame, FrameState.SUCCEEDED); - assertTrue(frameDao.isFrameComplete(frame)); - - frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - frameDao.updateFrameState(frame, FrameState.WAITING); - assertFalse(frameDao.isFrameComplete(frame)); - } -} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/GroupDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/GroupDaoTests.java deleted file mode 100644 index 1a83971f3..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/GroupDaoTests.java +++ /dev/null @@ -1,393 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.GroupDetail; -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.dao.GroupDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class GroupDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine 
assumingOracleEngine; - - @Resource - GroupDao groupDao; - - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Before - public void before() { - jobLauncher.testMode = true; - } - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - public GroupDetail createGroup() { - GroupDetail group = new GroupDetail(); - group.name = "Shit"; - group.parentId = groupDao.getRootGroupId(getShow()); - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group, groupDao.getRootGroupDetail(getShow())); - return group; - } - - public GroupDetail createSubGroup(GroupDetail parent) { - GroupDetail group = new GroupDetail(); - group.name = "SubShit"; - group.parentId = parent.id; - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group, groupDao.getGroup(parent.id)); - return group; - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroup() { - GroupDetail group = createGroup(); - GroupInterface g = groupDao.getGroup(group.id); - assertEquals(group.id,g.getGroupId()); - assertEquals(group.id,g.getId()); - assertEquals(group.name, g.getName()); - assertEquals(group.showId, g.getShowId()); - } - - - @Test - @Transactional - @Rollback(true) - public void testGetGroups() { - GroupDetail group = createGroup(); - List l = new ArrayList(); - l.add(group.id); - List g = groupDao.getGroups(l); - assertEquals(1, g.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetRootGroupId() { - groupDao.getRootGroupId(getShow()); - } - - @Test - @Transactional - 
@Rollback(true) - public void testInsertGroup() { - GroupDetail group = createGroup(); - assertFalse(group.isNew()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertGroupAlternateMethod() { - GroupDetail group = new GroupDetail(); - group.name = "Shit"; - group.parentId = groupDao.getRootGroupId(getShow()); - group.showId = getShow().getId(); - group.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(group); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteGroup() { - // Can't delete groups yet, will fail - GroupDetail group = createGroup(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_folder=?", - Integer.class, group.getId())); - - groupDao.deleteGroup(group); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM folder WHERE pk_folder=?", - Integer.class, group.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateGroupParent() { - GroupDetail group = createGroup(); - GroupDetail subgroup = createSubGroup(group); - groupDao.updateGroupParent(subgroup, - groupDao.getGroupDetail( - groupDao.getRootGroupId(getShow()))); - - assertEquals(Integer.valueOf(1),jdbcTemplate.queryForObject( - "SELECT int_level FROM folder_level WHERE pk_folder=?", - Integer.class, subgroup.getId())); - - groupDao.updateGroupParent(subgroup, group); - - assertEquals(Integer.valueOf(2),jdbcTemplate.queryForObject( - "SELECT int_level FROM folder_level WHERE pk_folder=?", - Integer.class, subgroup.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMaxCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxCores(group, 100); - assertEquals(Integer.valueOf(100), 
jdbcTemplate.queryForObject( - "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMaxCores(group, -1); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_max_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobMinCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_min_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobMinCores(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_job_min_cores FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDefaultJobPriority() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_job_priority FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateDefaultJobPriority(group, 1000); - assertEquals(Integer.valueOf(1000), jdbcTemplate.queryForObject( - "SELECT int_job_priority FROM folder WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMinCores() { - GroupDetail group = createGroup(); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMinCores(group, 10); - assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMaxCores() { - GroupDetail group = createGroup(); - 
assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxCores(group, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - groupDao.updateMaxCores(group, -5); - assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM folder_resource WHERE pk_folder=?", - Integer.class, group.getGroupId())); - } - - @Test - @Transactional - @Rollback(true) - public void testIsManaged() { - GroupDetail group = createGroup(); - assertEquals(false, groupDao.isManaged(group)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateName() { - GroupDetail group = createGroup(); - groupDao.updateName(group, "NewName"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateDepartment() { - GroupDetail group = createGroup(); - groupDao.updateDepartment(group, departmentDao.findDepartment("Lighting")); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroupDetail() { - GroupDetail group = createGroup(); - GroupDetail group2 = groupDao.getGroupDetail(group.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetChildrenRecursive() { - boolean is_test2 = false; - boolean is_test3 = false; - - GroupDetail g1 = new GroupDetail(); - g1.name = "Test1"; - g1.showId = getShow().getId(); - g1.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); - - GroupDetail g2 = new GroupDetail(); - g2.name = "Test2"; - g2.showId = getShow().getId(); - g2.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); - - for ( GroupInterface g: 
groupDao.getChildrenRecursive(groupDao.getGroup("A0000000-0000-0000-0000-000000000000"))) { - if (g.getName().equals("Test1")) { - is_test2 = true; - } - if (g.getName().equals("Test2")) { - is_test3 = true; - } - } - assertTrue(is_test2); - assertTrue(is_test3); - } - - @Test - @Transactional - @Rollback(true) - public void testGetChildren() { - boolean is_testuserA = false; - boolean is_testuserB = false; - - GroupDetail g1 = new GroupDetail(); - g1.name = "testuserA"; - g1.showId = getShow().getId(); - g1.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g1, groupDao.getRootGroupDetail(getShow())); - - GroupDetail g2 = new GroupDetail(); - g2.name = "testuserB"; - g2.showId = getShow().getId(); - g2.deptId = departmentDao.getDefaultDepartment().getId(); - groupDao.insertGroup(g2, groupDao.getRootGroupDetail(getShow())); - - List groups = groupDao.getChildren(groupDao.getGroup("A0000000-0000-0000-0000-000000000000")); - for (GroupInterface g : groups) { - if (g.getName().equals("testuserA")) { - is_testuserA = true; - } - if (g.getName().equals("testuserB")) { - is_testuserB = true; - } - } - assertTrue(is_testuserA); - assertTrue(is_testuserB); - } - - @Test - @Transactional - @Rollback(true) - public void testIsOverMinCores() { - - JobDetail job = launchJob(); - assertFalse(groupDao.isOverMinCores(job)); - - String groupid = jdbcTemplate.queryForObject("SELECT pk_folder FROM job WHERE pk_job=?", - String.class, job.getJobId()); - - // Now update some values so it returns true. 
- jdbcTemplate.update("UPDATE folder_resource SET int_cores = int_min_cores + 1 WHERE pk_folder=?", - groupid); - - assertTrue(groupDao.isOverMinCores(job)); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/HistoricalDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/HistoricalDaoTests.java deleted file mode 100644 index 993e03de3..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/HistoricalDaoTests.java +++ /dev/null @@ -1,82 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.HistoricalDao; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class HistoricalDaoTests extends - AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - private JobManager jobManager; - - @Resource - private JobLauncher jobLauncher; - - @Resource - private HistoricalDao historicalDao; - - @Test - @Transactional - @Rollback(true) - public void testGetFinishedJobs() { - historicalDao.getFinishedJobs(24); - } - - @Test - @Transactional - @Rollback(true) - public void testTransferJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail j = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - jobManager.shutdownJob(j); - historicalDao.transferJob(j); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_history WHERE pk_job=?", - Integer.class, j.getJobId())); - } -} - diff --git 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/HostDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/HostDaoTests.java deleted file mode 100644 index 37ffb61ab..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/HostDaoTests.java +++ /dev/null @@ -1,571 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.math.BigDecimal; -import java.sql.Timestamp; -import java.util.Map; -import javax.annotation.Resource; - -import com.google.common.collect.ImmutableList; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.test.context.transaction.AfterTransaction; -import org.springframework.test.context.transaction.BeforeTransaction; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.HostEntity; -import com.imageworks.spcue.HostInterface; -import com.imageworks.spcue.Source; -import 
com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.host.HostTagType; -import com.imageworks.spcue.grpc.host.LockState; -import com.imageworks.spcue.grpc.host.ThreadMode; -import com.imageworks.spcue.grpc.report.HostReport; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class HostDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - private static final String TEST_HOST = "beta"; - - @Resource - protected AllocationDao allocationDao; - - @Resource - protected HostDao hostDao; - - @Resource - protected HostManager hostManager; - - @Resource - protected FacilityDao facilityDao; - - public HostDaoTests() { } - - public static RenderHost buildRenderHost(String name) { - RenderHost host = RenderHost.newBuilder() - .setName(name) - .setBootTime(1192369572) - .setFreeMcp(7602) - .setFreeMem(15290520) - .setFreeSwap((int) CueUtil.MB512) - .setLoad(1) - .setNimbyEnabled(false) - .setTotalMcp(19543) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB2) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(400) - .addAllTags(ImmutableList.of("linux", "64bit")) - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - 
.putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - - return host; - } - - @Test - public void testInit() { } - - @BeforeTransaction - public void clear() { - jdbcTemplate.update( - "DELETE FROM host WHERE str_name=?", TEST_HOST); - } - - @AfterTransaction - public void destroy() { - jdbcTemplate.update( - "DELETE FROM host WHERE str_name=?", TEST_HOST); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(Long.valueOf(CueUtil.GB16 - Dispatcher.MEM_RESERVED_SYSTEM), jdbcTemplate.queryForObject( - "SELECT int_mem FROM host WHERE str_name=?", - Long.class, TEST_HOST)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN1() { - String TEST_HOST_NEW = "ice-ns1.yvr"; - String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), - hostManager.getDefaultAllocationDetail(), - true); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN2() { - String TEST_HOST_NEW = "compile21"; - String FQDN_HOST = TEST_HOST_NEW + ".spimageworks.com"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostFQDN3() { - String TEST_HOST_NEW 
= "hostname"; - String FQDN_HOST = TEST_HOST_NEW + ".fake.co.uk"; - hostDao.insertRenderHost(buildRenderHost(FQDN_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST_NEW); - assertEquals(TEST_HOST_NEW, hostDetail.name); - - HostInterface host = hostDao.findHost(FQDN_HOST); - HostEntity hostDetail2 = hostDao.getHostDetail(host); - assertEquals(TEST_HOST_NEW, hostDetail2.name); - - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostAlternateOS() { - - RenderHost host = buildRenderHost(TEST_HOST).toBuilder() - .putAttributes("SP_OS", "spinux1") - .build(); - - hostDao.insertRenderHost(host, - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals("spinux1",jdbcTemplate.queryForObject( - "SELECT str_os FROM host_stat, host " + - "WHERE host.pk_host = host_stat.pk_host " + - "AND host.str_name=?",String.class, TEST_HOST), "spinux1"); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertHostDesktop() { - - RenderHost host = buildRenderHost(TEST_HOST); - hostDao.insertRenderHost(host, - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(Long.valueOf(CueUtil.GB16 - Dispatcher.MEM_RESERVED_SYSTEM), jdbcTemplate.queryForObject( - "SELECT int_mem FROM host WHERE str_name=?", - Long.class, TEST_HOST)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateThreadMode() { - - RenderHost host = buildRenderHost(TEST_HOST); - host.toBuilder().setNimbyEnabled(true).build(); - hostDao.insertRenderHost(host, - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity d = hostDao.findHostDetail(TEST_HOST); - hostDao.updateThreadMode(d, ThreadMode.AUTO); - - assertEquals(Integer.valueOf(ThreadMode.AUTO_VALUE), jdbcTemplate.queryForObject( - "SELECT int_thread_mode FROM host WHERE pk_host=?", - Integer.class, d.id)); - - hostDao.updateThreadMode(d, ThreadMode.ALL); - - 
assertEquals(Integer.valueOf(ThreadMode.ALL_VALUE), jdbcTemplate.queryForObject( - "SELECT int_thread_mode FROM host WHERE pk_host=?", - Integer.class, d.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHostDetail() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - hostDao.getHostDetail(host); - hostDao.getHostDetail(host.getHostId()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsHostLocked() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(hostDao.isHostLocked(host),false); - - hostDao.updateHostLock(host, LockState.LOCKED, new Source("TEST")); - assertEquals(hostDao.isHostLocked(host),true); - } - - @Test - @Transactional - @Rollback(true) - public void testIsKillMode() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertFalse(hostDao.isKillMode(host)); - - jdbcTemplate.update( - "UPDATE host_stat SET int_swap_free = ?, int_mem_free = ? 
WHERE pk_host = ?", - CueUtil.MB256, CueUtil.MB256, host.getHostId()); - - assertTrue(hostDao.isKillMode(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsHostUp() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - assertTrue(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); - - hostDao.updateHostState(hostDao.findHostDetail(TEST_HOST), - HardwareState.DOWN); - assertFalse(hostDao.isHostUp(hostDao.findHostDetail(TEST_HOST))); - } - - @Test - @Transactional - @Rollback(true) - public void testHostExists() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - assertEquals(hostDao.hostExists(TEST_HOST),true); - assertEquals(hostDao.hostExists("frickjack"),false); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(hostDao.hostExists(TEST_HOST),true); - hostDao.deleteHost(host); - assertEquals(hostDao.hostExists(TEST_HOST),false); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostRebootWhenIdle() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_reboot_idle FROM host WHERE pk_host=?", - Integer.class, host.getHostId())); - hostDao.updateHostRebootWhenIdle(host, true); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_reboot_idle FROM host WHERE pk_host=?", - Integer.class, host.getHostId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateHostStats() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - 
hostManager.getDefaultAllocationDetail(), - false); - - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - hostDao.updateHostStats(dispatchHost, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - CueUtil.GB8, - 1, - 1, - 100, - new Timestamp(1247526000 * 1000l), - "spinux1"); - - Map result = jdbcTemplate.queryForMap( - "SELECT * FROM host_stat WHERE pk_host=?", - dispatchHost.getHostId()); - - assertEquals(CueUtil.GB8, ((BigDecimal) - (result.get("int_mem_total"))).longValue()); - assertEquals(CueUtil.GB8, ((BigDecimal) - (result.get("int_mem_free"))).longValue()); - assertEquals(CueUtil.GB8, ((BigDecimal) - (result.get("int_swap_total"))).longValue()); - assertEquals(CueUtil.GB8, ((BigDecimal) - (result.get("int_swap_free"))).longValue()); - assertEquals(CueUtil.GB8, ((BigDecimal) - (result.get("int_mcp_total"))).longValue()); - assertEquals(CueUtil.GB8, ((BigDecimal) - (result.get("int_mcp_free"))).longValue()); - assertEquals(100, ((BigDecimal) - (result.get("int_load"))).intValue()); - assertEquals(new Timestamp(1247526000 * 1000l), - (Timestamp) result.get("ts_booted")); - - } - - @Test - @Transactional - @Rollback(true) - public void updateHostResources() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - HostReport report = HostReport.newBuilder() - .setHost( - buildRenderHost(TEST_HOST).toBuilder() - .setCoresPerProc(1200) - .setNumProcs(2) - .setTotalMem((int) CueUtil.GB32) - ).build(); - hostDao.updateHostResources(dispatchHost, report); - - // Verify what the original values are - assertEquals(800, dispatchHost.cores); - assertEquals(800, dispatchHost.idleCores); - assertEquals(CueUtil.GB16 - Dispatcher.MEM_RESERVED_SYSTEM, - dispatchHost.idleMemory); - assertEquals(CueUtil.GB16- Dispatcher.MEM_RESERVED_SYSTEM, - dispatchHost.memory); - - dispatchHost = 
hostDao.findDispatchHost(TEST_HOST); - - // Now verify they've changed. - assertEquals(2400, dispatchHost.cores); - assertEquals(2400, dispatchHost.idleCores); - assertEquals(CueUtil.GB32 - Dispatcher.MEM_RESERVED_SYSTEM, - dispatchHost.idleMemory); - assertEquals(CueUtil.GB32- Dispatcher.MEM_RESERVED_SYSTEM, - dispatchHost.memory); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchHost() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); - DispatchHost dispatchHost = hostDao.findDispatchHost(TEST_HOST); - - assertEquals(dispatchHost.name, TEST_HOST); - assertEquals(dispatchHost.allocationId, hostDetail.getAllocationId()); - assertEquals(dispatchHost.id, hostDetail.getHostId()); - assertEquals(dispatchHost.cores, hostDetail.cores); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetAllocation() { - - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity hostDetail = hostDao.findHostDetail(TEST_HOST); - - hostDao.updateHostSetAllocation(hostDetail, - hostManager.getDefaultAllocationDetail()); - - hostDetail = hostDao.findHostDetail(TEST_HOST); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetManualTags() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - hostDao.tagHost(host,"frick", HostTagType.MANUAL); - hostDao.tagHost(host,"jack", HostTagType.MANUAL); - hostDao.recalcuateTags(host.id); - - String tag = jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id); - assertEquals("unassigned beta frick jack", tag); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateHostSetOS() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - hostDao.updateHostOs(host, "foo"); - String tag = 
jdbcTemplate.queryForObject( - "SELECT str_os FROM host_stat WHERE pk_host=?",String.class, host.id); - assertEquals("foo", tag); - } - - @Test - @Transactional - @Rollback(true) - public void testChangeTags() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - String tag = jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id); - assertEquals("unassigned beta", tag); - - hostDao.removeTag(host, "linux"); - hostDao.recalcuateTags(host.id); - - assertEquals("unassigned beta", jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id)); - - hostDao.tagHost(host, "32bit",HostTagType.MANUAL); - hostDao.recalcuateTags(host.id); - - assertEquals("unassigned beta 32bit", jdbcTemplate.queryForObject( - "SELECT str_tags FROM host WHERE pk_host=?",String.class, host.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetStrandedCoreUnits() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - - jdbcTemplate.update( - "UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", - CueUtil.GB, host.getHostId()); - - assertEquals(host.idleCores, hostDao.getStrandedCoreUnits(host)); - - jdbcTemplate.update( - "UPDATE host SET int_mem_idle = ? WHERE pk_host = ?", - CueUtil.GB2, host.getHostId()); - - assertEquals(0, hostDao.getStrandedCoreUnits(host)); - - // Check to see if fractional cores is rounded to the lowest - // whole core properly. - jdbcTemplate.update( - "UPDATE host SET int_cores_idle=150, int_mem_idle = ? 
WHERE pk_host = ?", - CueUtil.GB, host.getHostId()); - - assertEquals(100, hostDao.getStrandedCoreUnits(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsPreferShow() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - assertFalse(hostDao.isPreferShow(host)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsNimby() { - DispatchHost host = hostManager.createHost(buildRenderHost(TEST_HOST)); - assertFalse(hostDao.isNimbyHost(host)); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/JobDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/JobDaoTests.java deleted file mode 100644 index 31f8dca66..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/JobDaoTests.java +++ /dev/null @@ -1,681 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.util.HashMap; -import java.util.Map; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchJob; -import com.imageworks.spcue.ExecutionSummary; -import com.imageworks.spcue.FacilityEntity; -import com.imageworks.spcue.GroupDetail; -import com.imageworks.spcue.GroupInterface; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.PointInterface; -import com.imageworks.spcue.ResourceUsage; -import com.imageworks.spcue.TaskEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.dao.GroupDao; -import com.imageworks.spcue.dao.JobDao; -import com.imageworks.spcue.dao.PointDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.dao.TaskDao; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.service.JobSpec; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.JobLogUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -@Transactional 
-@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class JobDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - JobDao jobDao; - - @Resource - PointDao pointDao; - - @Resource - ShowDao showDao; - - @Resource - TaskDao taskDao; - - @Resource - GroupDao groupDao; - - @Resource - FacilityDao facilityDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - JobLogUtil jobLogUtil; - - private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; - private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; - private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public JobDetail buildJobDetail() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return spec.getJobs().get(0).detail; - } - - public JobDetail insertJob() { - JobDetail job = this.buildJobDetail(); - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - job.state = JobState.PENDING; - jobDao.insertJob(job, jobLogUtil); - return job; - } - - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDispatchJob() { - JobDetail job = insertJob(); - DispatchJob djob = jobDao.getDispatchJob(job.id); - assertEquals(djob.id, job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobComplete() { - JobDetail job = insertJob(); - // 
returns true because there are no dispatchable frames - assertEquals(true,jobDao.isJobComplete(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJob() { - JobDetail job = this.buildJobDetail(); - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId= facilityDao.getDefaultFacility().getId(); - jobDao.insertJob(job, jobLogUtil); - assertNotNull(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindJob() { - JobDetail job = insertJob(); - JobInterface j1 = jobDao.findJob(job.name); - JobDetail j2 = jobDao.findJobDetail(job.name); - assertEquals(job.name, j1.getName()); - assertEquals(job.name, j2.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJob() { - JobDetail job = insertJob(); - jobDao.getJobDetail(job.id); - jobDao.getJob(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobsByTask() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - jobDao.getJobs(t); - } - - @Test - @Transactional - @Rollback(true) - public void testJobExists() { - assertFalse(jobDao.exists(JOB_NAME)); - JobDetail job = insertJob(); - jdbcTemplate.update("UPDATE job SET str_state='PENDING' WHERE pk_job=?", - job.id); - assertTrue(jobDao.exists(JOB_NAME)); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteJob() { - jobDao.deleteJob(insertJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testActivateJob() { - jobDao.activateJob(insertJob(), JobState.PENDING); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobState() { - JobDetail job = insertJob(); - assertEquals(JobState.PENDING, job.state); - jobDao.updateState(job, 
JobState.FINISHED); - assertEquals(JobState.FINISHED.toString(), - jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", - String.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobFinished() { - jobDao.updateJobFinished(insertJob()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobOverMinProc() { - JobDetail job = insertJob(); - assertFalse(jobDao.isOverMinCores(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testHasPendingFrames() { - assertFalse(jobDao.hasPendingFrames(insertJob())); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobOverMaxProc() { - JobDetail job = insertJob(); - assertFalse(jobDao.isOverMaxCores(job)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsJobAtMaxCores() { - JobDetail job = insertJob(); - assertFalse(jobDao.isAtMaxCores(job)); - - jdbcTemplate.update( - "UPDATE job_resource SET int_cores = int_max_cores WHERE pk_job=?", - job.getJobId()); - - assertTrue(jobDao.isAtMaxCores(job)); - - } - - @Test - @Transactional - @Rollback(true) - public void testIsOverMaxCores() { - JobDetail job = insertJob(); - jobDao.updateMaxCores(job, 500); - jdbcTemplate.update( - "UPDATE job_resource SET int_cores = 450 WHERE pk_job=?", - job.getJobId()); - - assertFalse(jobDao.isOverMaxCores(job)); - assertFalse(jobDao.isOverMaxCores(job, 50)); - assertTrue(jobDao.isOverMaxCores(job, 100)); - - jdbcTemplate.update( - "UPDATE job_resource SET int_max_cores = 200 WHERE pk_job=?", - job.getJobId()); - assertTrue(jobDao.isOverMaxCores(job)); - } - - @Test(expected=org.springframework.jdbc.UncategorizedSQLException.class) - @Transactional - @Rollback(true) - public void testMaxCoreTrigger() { - JobDetail job = insertJob(); - int maxCores = jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId()); - - jdbcTemplate.update( - "UPDATE 
job_resource SET int_cores = ? WHERE pk_job=?", - maxCores + 1, job.getJobId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPriority() { - JobDetail job = insertJob(); - jobDao.updatePriority(job, 199); - assertEquals(Integer.valueOf(199), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMinCores() { - JobDetail job = insertJob(); - jobDao.updateMinCores(job, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxCores() { - JobDetail job = insertJob(); - jobDao.updateMaxCores(job, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMinCoresByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updateMinCores(g, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxCoresByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updateMaxCores(g, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPriorityByGroup() { - JobDetail job = insertJob(); - GroupInterface g = groupDao.getGroup(job.groupId); - jobDao.updatePriority(g, 100); - 
assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxRss() { - long maxRss = 100000; - JobDetail job = insertJob(); - jobDao.updateMaxRSS(job, maxRss); - assertEquals(Long.valueOf(maxRss), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM job_mem WHERE pk_job=?", - Long.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobPaused() { - JobDetail job = insertJob(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_paused FROM job WHERE pk_job=?", - Integer.class, job.getJobId())); - - jobDao.updatePaused(job, false); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_paused FROM job WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobAutoEat() { - JobDetail job = insertJob(); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_autoeat FROM job WHERE pk_job=?", - Integer.class, job.getJobId())); - - jobDao.updateAutoEat(job, true); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_autoeat FROM job WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetries() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job,10); - assertEquals(Integer.valueOf(10), jdbcTemplate.queryForObject( - "SELECT int_max_retries FROM job WHERE pk_job=?", - Integer.class, job.getJobId())); - } - - @Test(expected=IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testUpdateJobMaxRetriesTooLow() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job,-1); - } - - @Test(expected=IllegalArgumentException.class) - @Transactional - @Rollback(true) - 
public void testUpdateJobMaxRetriesTooHigh() { - JobDetail job = insertJob(); - jobDao.updateMaxFrameRetries(job,100000); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameStateTotals() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - jobDao.getFrameStateTotals(spec.getJobs().get(0).detail); - } - - @Test - @Transactional - @Rollback(true) - public void testGetExecutionSummary() { - JobDetail job = launchJob(); - ExecutionSummary summary = jobDao.getExecutionSummary(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobEnvironment() { - JobDetail job = launchJob(); - Map map = jobDao.getEnvironment(job); - for (Map.Entry e : map.entrySet()) { - assertEquals("VNP_VCR_SESSION", e.getKey()); - assertEquals( "9000", e.getValue()); - } - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobEnvironment() { - JobDetail job = launchJob(); - jobDao.insertEnvironment(job, "CHAMBERS","123"); - Map map = jobDao.getEnvironment(job); - assertEquals(2,map.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertJobEnvironmentMap() { - JobDetail job = launchJob(); - Map map = new HashMap(); - map.put("CHAMBERS","123"); - map.put("OVER9000","123"); - - jobDao.insertEnvironment(job, map); - Map env = jobDao.getEnvironment(job); - assertEquals(3,env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLastJob() { - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - JobInterface job = spec.getJobs().get(0).detail; - jobDao.getFrameStateTotals(job); - jobManager.shutdownJob(job); - // this might fail - JobDetail oldJob = jobDao.findLastJob(job.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobLogPath() { - JobDetail job = launchJob(); - String newLogDir = "/path/to/nowhere"; - 
jobDao.updateLogPath(job,newLogDir); - assertEquals(newLogDir,jdbcTemplate.queryForObject( - "SELECT str_log_dir FROM job WHERE pk_job=?",String.class, job.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateJobParent() { - JobDetail job = launchJob(); - - // Make a new test group. - GroupDetail root = groupDao.getRootGroupDetail(job); - - GroupDetail testGroup = new GroupDetail(); - testGroup.name = "testGroup"; - testGroup.deptId = departmentDao.getDefaultDepartment().getId(); - testGroup.showId = root.getShowId(); - - groupDao.insertGroup(testGroup, root); - - jdbcTemplate.update( - "UPDATE folder SET int_job_max_cores=-1, int_job_min_cores=-1, int_job_priority=-1 WHERE pk_folder=?", - testGroup.getId()); - - GroupDetail group = groupDao.getGroupDetail(testGroup.getId()); - jobDao.updateParent(job, group); - - assertEquals(-1,group.jobMaxCores); - assertEquals(-1,group.jobMinCores); - assertEquals(-1,group.jobPriority); - - assertEquals(group.getGroupId(),jdbcTemplate.queryForObject( - "SELECT pk_folder FROM job WHERE pk_job=?",String.class, job.id)); - - assertEquals(group.getDepartmentId(),jdbcTemplate.queryForObject( - "SELECT pk_dept FROM job WHERE pk_job=?",String.class, job.id)); - - group.jobMaxCores = 100; - group.jobMinCores = 100; - group.jobPriority = 100; - - jobDao.updateParent(job, group); - - assertEquals(Integer.valueOf(group.jobMaxCores) ,jdbcTemplate.queryForObject( - "SELECT int_max_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.id)); - - assertEquals(Integer.valueOf(group.jobMinCores) ,jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM job_resource WHERE pk_job=?", - Integer.class, job.id)); - - assertEquals(Integer.valueOf(group.jobPriority) ,jdbcTemplate.queryForObject( - "SELECT int_priority FROM job_resource WHERE pk_job=?", - Integer.class, job.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testCueHasPendingJobs() { - jobDao.cueHasPendingJobs(new FacilityEntity("0")); 
- - } - - @Test - @Transactional - @Rollback(true) - public void mapPostJob() { - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - final String pk_job = spec.getJobs().get(0).detail.id; - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM job_post WHERE pk_job=?", - Integer.class, pk_job)); - } - - @Test - @Transactional - @Rollback(true) - public void activatePostJob() { - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/jobspec_postframes.xml")); - jobLauncher.launch(spec); - - jobDao.activatePostJob(spec.getJobs().get(0).detail); - - assertEquals(JobState.PENDING.toString(),jdbcTemplate.queryForObject( - "SELECT str_state FROM job WHERE pk_job=?", String.class, - spec.getJobs().get(0).getPostJob().detail.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateUsage() { - - JobSpec spec = jobLauncher.parse( - new File("src/test/resources/conf/jobspec/jobspec.xml")); - jobLauncher.launch(spec); - - JobInterface job = jobDao.findJob(spec.getJobs().get(0).detail.name); - - /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33); - - assertTrue(usage.getClockTimeSeconds() > 0); - assertTrue(usage.getCoreTimeSeconds() > 0); - - /** - * Successful frame - */ - jobDao.updateUsage(job, usage, 0); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM job_usage WHERE pk_job=?", - Integer.class, job.getId())); - - /** - * Failed frame - */ - jobDao.updateUsage(job, 
usage, 1); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM job_usage WHERE pk_job=?", - Long.class, job.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM job_usage WHERE pk_job=?", - Integer.class, job.getId())); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/LayerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/LayerDaoTests.java deleted file mode 100644 index 61ee6e864..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/LayerDaoTests.java +++ /dev/null @@ -1,742 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import javax.annotation.Resource; - -import org.apache.commons.lang.StringUtils; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.BuildableLayer; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.LayerDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LimitEntity; -import com.imageworks.spcue.LimitInterface; -import com.imageworks.spcue.ResourceUsage; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.dao.JobDao; -import com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.dao.LimitDao; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.grpc.job.JobState; -import com.imageworks.spcue.grpc.job.LayerType; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.service.JobSpec; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; -import com.imageworks.spcue.util.FrameSet; -import com.imageworks.spcue.util.JobLogUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static 
org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class LayerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - JobDao jobDao; - - @Resource - LayerDao layerDao; - - @Resource - LimitDao limitDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - DepartmentDao departmentDao; - - @Resource - FacilityDao facilityDao; - - @Resource - JobLogUtil jobLogUtil; - - private static String ROOT_FOLDER = "A0000000-0000-0000-0000-000000000000"; - private static String ROOT_SHOW = "00000000-0000-0000-0000-000000000000"; - private static String LAYER_NAME = "pass_1"; - private static String JOB_NAME = "pipe-dev.cue-testuser_shell_v1"; - private static String LIMIT_NAME = "test-limit"; - private static String LIMIT_TEST_A = "testlimita"; - private static String LIMIT_TEST_B = "testlimitb"; - private static String LIMIT_TEST_C = "testlimitc"; - private static int LIMIT_MAX_VALUE = 32; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public LayerDetail getLayer() { - - JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = spec.getJobs().get(0).detail; - job.groupId = ROOT_FOLDER; - job.showId = ROOT_SHOW; - job.logDir = jobLogUtil.getJobLogPath(job); - job.deptId = departmentDao.getDefaultDepartment().getId(); - job.facilityId = facilityDao.getDefaultFacility().getId(); - jobDao.insertJob(job, jobLogUtil); - - LayerDetail lastLayer= null; - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - limitDao.createLimit(LIMIT_TEST_A, 1); - limitDao.createLimit(LIMIT_TEST_B, 2); - limitDao.createLimit(LIMIT_TEST_C, 3); - - for (BuildableLayer buildableLayer: spec.getJobs().get(0).getBuildableLayers()) { - - LayerDetail layer = 
buildableLayer.layerDetail; - FrameSet frameSet = new FrameSet(layer.range); - int num_frames = frameSet.size(); - int chunk_size = layer.chunkSize; - - layer.jobId = job.id; - layer.showId = ROOT_SHOW; - layer.totalFrameCount = num_frames / chunk_size; - if (num_frames % chunk_size > 0) { layer.totalFrameCount++; } - - layerDao.insertLayerDetail(layer); - layerDao.insertLayerEnvironment(layer, buildableLayer.env); - layerDao.addLimit(layer, limitId); - lastLayer = layer; - } - - return lastLayer; - } - - public JobDetail getJob() { - return jobDao.findJobDetail(JOB_NAME); - } - - public String getTestLimitId(String name) { - return limitDao.findLimit(name).getLimitId(); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerComplete() { - layerDao.isLayerComplete(getLayer()); - } - - @Test - @Transactional - @Rollback(true) - public void testIsLayerDispatchable() { - layerDao.isLayerDispatchable(getLayer()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerDetail() { - LayerDetail layer = getLayer(); - assertEquals(LAYER_NAME, layer.name); - assertEquals(layer.chunkSize, 1); - assertEquals(layer.dispatchOrder,2); - assertNotNull(layer.id); - assertNotNull(layer.jobId); - assertEquals(layer.showId,ROOT_SHOW); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerDetail() { - LayerDetail layer = getLayer(); - assertEquals(LAYER_NAME, layer.name); - assertEquals(layer.chunkSize, 1); - assertEquals(layer.dispatchOrder,2); - assertNotNull(layer.id); - assertNotNull(layer.jobId); - assertEquals(layer.showId,ROOT_SHOW); - - LayerDetail l2 = layerDao.getLayerDetail(layer); - LayerDetail l3 = layerDao.getLayerDetail(layer.id); - assertEquals(l2, l3); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerDetails() { - LayerDetail layer = getLayer(); - List ld = layerDao.getLayerDetails(getJob()); - assertEquals(ld.get(0).name, LAYER_NAME); - } - - @Test - @Transactional - 
@Rollback(true) - public void testFindLayerDetail() { - LayerDetail layer = getLayer(); - layerDao.findLayer(getJob(), "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayer() { - LayerDetail layer = getLayer(); - layerDao.getLayer(layer.id); - layerDao.getLayerDetail(layer); - layerDao.getLayerDetail(layer.id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayer() { - LayerDetail layer = getLayer(); - layerDao.findLayer(getJob(), "pass_1"); - layerDao.findLayerDetail(getJob(), "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMinCores() { - LayerDetail layer = getLayer(); - layerDao.updateLayerMinCores(layer, 200); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l2.minimumCores,200); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerThreadable() { - LayerDetail layer = getLayer(); - layerDao.updateThreadable(layer, false); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_threadable FROM layer WHERE pk_layer=?", - Integer.class, layer.getLayerId())); - } - - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMinMemory() { - LayerDetail layer = getLayer(); - - /* - * Check to ensure going below Dispatcher.MEM_RESERVED_MIN is - * not allowed. - */ - layerDao.updateLayerMinMemory(layer, 8096); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l2.minimumMemory, Dispatcher.MEM_RESERVED_MIN); - - /* - * Check regular operation. 
- */ - layerDao.updateLayerMinMemory(layer, CueUtil.GB); - LayerDetail l3 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(l3.minimumMemory, CueUtil.GB); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerTags() { - LayerDetail layer = getLayer(); - - HashSet tags = new HashSet(); - tags.add("frickjack"); - tags.add("pancake"); - - layerDao.updateLayerTags(layer, tags); - LayerDetail l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(StringUtils.join(l2.tags," | "), "frickjack | pancake"); - - tags.clear(); - tags.add("frickjack"); - - layerDao.updateLayerTags(layer, tags); - l2 = layerDao.findLayerDetail(getJob(), "pass_1"); - assertEquals(StringUtils.join(l2.tags," | "), "frickjack"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFrameStateTotals() { - LayerDetail layer = getLayer(); - layerDao.getFrameStateTotals(layer); - jobDao.getFrameStateTotals(layer); - } - - @Test - @Transactional - @Rollback(true) - public void testGetExecutionSummary() { - LayerDetail layer = getLayer(); - layerDao.getExecutionSummary(layer); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerEnvironment() { - LayerDetail layer = getLayer(); - Map map = layerDao.getLayerEnvironment(layer); - for (Map.Entry e : map.entrySet()) { - - } - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerEnvironment() { - LayerDetail layer = getLayer(); - layerDao.insertLayerEnvironment(layer, "CHAMBERS","123"); - Map env = layerDao.getLayerEnvironment(layer); - assertEquals(2,env.size()); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerEnvironmentMap() { - LayerDetail layer = getLayer(); - Map map = new HashMap(); - map.put("CHAMBERS","123"); - map.put("OVER9000","123"); - - layerDao.insertLayerEnvironment(layer, map); - Map env = layerDao.getLayerEnvironment(layer); - assertEquals(3,env.size()); - } - - - @Test - @Transactional - @Rollback(true) - 
public void testFindPastSameNameMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastTimeStampMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v1_2011_05_03_16_03"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastNewVersionMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindPastNewVersionTimeStampMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_v2_2011_05_03_16_03"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test(expected=org.springframework.dao.EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindPastNewVersionFailMaxRSS() { - getLayer(); - jobDao.updateState(getJob(), JobState.FINISHED); - assertEquals(JobState.FINISHED, getJob().state); - - JobDetail lastJob = null; - lastJob = jobDao.findLastJob("pipe-dev.cue-testuser_shell_vfail_v2"); - long maxRss = layerDao.findPastMaxRSS(lastJob, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateLayerMaxRSS() { 
- LayerDetail layer = getLayer(); - - layerDao.updateLayerMaxRSS(layer, 1000, true); - assertEquals(Long.valueOf(1000), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getId())); - - layerDao.updateLayerMaxRSS(layer, 999, true); - assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getId())); - - layerDao.updateLayerMaxRSS(layer, 900, false); - assertEquals(Long.valueOf(999), jdbcTemplate.queryForObject( - "SELECT int_max_rss FROM layer_mem WHERE pk_layer=?", - Long.class, layer.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateTags() { - String tag = "dillweed"; - LayerDetail layer = getLayer(); - layerDao.updateTags(layer, tag, LayerType.RENDER); - assertEquals(tag,jdbcTemplate.queryForObject( - "SELECT str_tags FROM layer WHERE pk_layer=?", String.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinMemory() { - long mem = CueUtil.GB; - LayerDetail layer = getLayer(); - layerDao.updateMinMemory(layer, mem, LayerType.RENDER); - assertEquals(Long.valueOf(mem), jdbcTemplate.queryForObject( - "SELECT int_mem_min FROM layer WHERE pk_layer=?", - Long.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinGpu() { - long gpu = CueUtil.GB; - LayerDetail layer = getLayer(); - layerDao.updateMinGpu(layer, gpu, LayerType.RENDER); - assertEquals(Long.valueOf(gpu),jdbcTemplate.queryForObject( - "SELECT int_gpu_min FROM layer WHERE pk_layer=?", - Long.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMinCores() { - int cores = CueUtil.ONE_CORE * 2; - LayerDetail layer = getLayer(); - layerDao.updateMinCores(layer, cores, LayerType.RENDER); - assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( - "SELECT int_cores_min FROM layer WHERE pk_layer=?", - 
Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void updateMaxCores() { - int cores = CueUtil.ONE_CORE * 2; - LayerDetail layer = getLayer(); - layerDao.updateLayerMaxCores(layer, cores); - assertEquals(Integer.valueOf(cores), jdbcTemplate.queryForObject( - "SELECT int_cores_max FROM layer WHERE pk_layer=?", - Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void isOptimizable() { - LayerDetail layer = getLayer(); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * The succeeded count is good but the frames are too long - * Assert False - */ - jdbcTemplate.update("UPDATE layer_stat SET int_succeeded_count = 5 WHERE pk_layer=?", - layer.getLayerId()); - - jdbcTemplate.update( - "UPDATE layer_usage SET layer_usage.int_core_time_success = 3600 * 6" + - "WHERE pk_layer=?", layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Set the frame times lower, so now we meet the criteria - * Assert True - */ - jdbcTemplate.update( - "UPDATE layer_usage SET layer_usage.int_core_time_success = 3500 * 5" + - "WHERE pk_layer=?", layer.getLayerId()); - - assertTrue(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Take the general tag away. If a layer is not a general layer - * it cannot be optmiized. - * Assert False - */ - jdbcTemplate.update( - "UPDATE layer SET str_tags=? WHERE pk_layer=?", - "desktop",layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - - /* - * Layers that are already tagged util should return - * false as well. - * - * Assert False - */ - jdbcTemplate.update( - "UPDATE layer SET str_tags=? 
WHERE pk_layer=?", - "general | util",layer.getLayerId()); - - assertFalse(layerDao.isOptimizable(layer, 5, 3600)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateUsage() { - LayerDetail layer = getLayer(); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - - /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33); - - assertTrue(usage.getClockTimeSeconds() > 0); - assertTrue(usage.getCoreTimeSeconds() > 0); - - /** - * Successful frame - */ - layerDao.updateUsage(layer, usage, 0); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_success FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_success FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM layer_usage WHERE 
pk_layer=?", - Integer.class, layer.getId())); - - /** - * Failed frame - */ - layerDao.updateUsage(layer, usage, 1); - assertEquals(Long.valueOf(usage.getClockTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_clock_time_fail FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Long.valueOf(usage.getCoreTimeSeconds()), jdbcTemplate.queryForObject( - "SELECT int_core_time_fail FROM layer_usage WHERE pk_layer=?", - Long.class, layer.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM layer_usage WHERE pk_layer=?", - Integer.class, layer.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void isLayerThreadable() { - LayerDetail layer = getLayer(); - jdbcTemplate.update( - "UPDATE layer set b_threadable = 0 WHERE pk_layer=?", - layer.getId()); - - assertFalse(layerDao.isThreadable(layer)); - - jdbcTemplate.update( - "UPDATE layer set b_threadable = 1 WHERE pk_layer=?", - layer.getId()); - - assertTrue(layerDao.isThreadable(layer)); - } - - @Test - @Transactional - @Rollback(true) - public void enableMemoryOptimizer() { - LayerDetail layer = getLayer(); - layerDao.enableMemoryOptimizer(layer, false); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_optimize FROM layer WHERE pk_layer=?", - Integer.class, layer.getLayerId())); - - layerDao.enableMemoryOptimizer(layer, true); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_optimize FROM layer WHERE pk_layer=?", - Integer.class, layer.getLayerId())); - } - - @Test - @Transactional - @Rollback(true) - public void testBalanceMemory() { - LayerDetail layer = getLayer(); - assertTrue(layerDao.balanceLayerMinMemory(layer, CueUtil.GB)); - jdbcTemplate.update("UPDATE layer_mem SET int_max_rss=? 
WHERE pk_layer=?", - CueUtil.GB8, layer.getId()); - assertFalse(layerDao.balanceLayerMinMemory(layer, CueUtil.MB512)); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertLayerOutput() { - LayerDetail layer = getLayer(); - layerDao.insertLayerOutput(layer, "filespec1"); - layerDao.insertLayerOutput(layer, "filespec2"); - layerDao.insertLayerOutput(layer, "filespec3"); - assertEquals(3, layerDao.getLayerOutputs(layer).size()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimits() { - LayerDetail layer = getLayer(); - List limits = layerDao.getLimits(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimitNames() { - LayerDetail layer = getLayer(); - List limits = layerDao.getLimitNames(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0), LIMIT_NAME); - } - - @Test - @Transactional - @Rollback(true) - public void testAddLimit() { - LayerDetail layer = getLayer(); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_B)); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_C)); - LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); - List limits = layerDao.getLimits(layerResult); - assertEquals(limits.size(), 4); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); - assertEquals(limits.get(1).id, getTestLimitId(LIMIT_TEST_A)); - assertEquals(limits.get(2).id, getTestLimitId(LIMIT_TEST_B)); - assertEquals(limits.get(3).id, getTestLimitId(LIMIT_TEST_C)); - } - - @Test - @Transactional - @Rollback(true) - public void testDropLimit() { - LayerDetail layer = getLayer(); - layerDao.addLimit(layer, getTestLimitId(LIMIT_TEST_A)); - layerDao.dropLimit(layer, getTestLimitId(LIMIT_NAME)); - LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); - List limits = layerDao.getLimits(layerResult); - 
assertEquals(limits.size(), 1); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_TEST_A)); - layerDao.dropLimit(layer, getTestLimitId(LIMIT_TEST_A)); - LayerInterface layerResultB = layerDao.getLayer(layer.getLayerId()); - List limitsB = layerDao.getLimits(layerResultB); - assertEquals(limitsB.size(), 0); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/LimitDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/LimitDaoTests.java deleted file mode 100644 index ab3ce2a9a..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/LimitDaoTests.java +++ /dev/null @@ -1,137 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.LimitEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.LimitDao; -import com.imageworks.spcue.test.AssumingPostgresEngine; - -import static org.junit.Assert.assertEquals; - - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class LimitDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingPostgresEngine assumingPostgresEngine; - - @Resource - LimitDao limitDao; - - private static String LIMIT_NAME = "test-limit"; - private static int LIMIT_MAX_VALUE = 32; - - @Test - @Transactional - @Rollback(true) - public void testCreateLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", - Integer.class, limitId)); - - limitDao.deleteLimit(limit); - - assertEquals(Integer.valueOf(0), 
jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM limit_record WHERE pk_limit_record=?", - Integer.class, limitId)); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - - LimitEntity limit = limitDao.findLimit(LIMIT_NAME); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimit() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - - LimitEntity limit = limitDao.getLimit(limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testSetLimitName() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - String newName = "heyIChanged"; - - limitDao.setLimitName(limit, newName); - - limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, newName); - assertEquals(limit.maxValue, LIMIT_MAX_VALUE); - } - - @Test - @Transactional - @Rollback(true) - public void testSetMaxValue() { - String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); - LimitEntity limit = limitDao.getLimit(limitId); - int newValue = 600; - - limitDao.setMaxValue(limit, newValue); - - limit = limitDao.getLimit(limitId); - assertEquals(limit.id, limitId); - assertEquals(limit.name, LIMIT_NAME); - assertEquals(limit.maxValue, newValue); - } -} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/MaintenanceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/MaintenanceDaoTests.java deleted file mode 100644 index 4f52e10f0..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/MaintenanceDaoTests.java +++ /dev/null @@ -1,76 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * 
Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.MaintenanceTask; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.MaintenanceDao; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class MaintenanceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - MaintenanceDao maintenanceDao; - - @Test - @Transactional - @Rollback(true) - public void testSetUpHostsToDown() { - maintenanceDao.setUpHostsToDown(); - } - - @Test - @Transactional - @Rollback(true) - public void testLockHistoricalTask() { - 
assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - assertFalse(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnlockHistoricalTask() { - assertTrue(maintenanceDao.lockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER)); - maintenanceDao.unlockTask(MaintenanceTask.LOCK_HISTORICAL_TRANSFER); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/MatcherDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/MatcherDaoTests.java deleted file mode 100644 index e65ac8f2c..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/MatcherDaoTests.java +++ /dev/null @@ -1,140 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.FilterEntity; -import com.imageworks.spcue.MatcherEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.FilterDao; -import com.imageworks.spcue.dao.GroupDao; -import com.imageworks.spcue.dao.MatcherDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.grpc.filter.FilterType; -import com.imageworks.spcue.grpc.filter.MatchSubject; -import com.imageworks.spcue.grpc.filter.MatchType; -import com.imageworks.spcue.test.AssumingOracleEngine; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class MatcherDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - MatcherDao matcherDao; - - @Resource - FilterDao filterDao; - - @Resource - ShowDao showDao; - - @Resource - GroupDao groupDao; - - private static String FILTER_NAME = "test_filter"; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public MatcherEntity createMatcher() { - FilterEntity filter = createFilter(); - MatcherEntity matcher = new MatcherEntity(); - matcher.filterId = filter.id; - matcher.name = null; - matcher.showId = getShow().getId(); - matcher.subject = MatchSubject.JOB_NAME; - 
matcher.type = MatchType.CONTAINS; - matcher.value = "testuser"; - matcherDao.insertMatcher(matcher); - return matcher; - } - - public FilterEntity createFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = FILTER_NAME; - filter.showId = "00000000-0000-0000-0000-000000000000"; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - filterDao.insertFilter(filter); - return filter; - } - - @Test - @Transactional - @Rollback(true) - public void testInsertMatcher() { - createMatcher(); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteMatcher() { - MatcherEntity matcher = createMatcher(); - matcherDao.deleteMatcher(matcher); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateMatcher() { - MatcherEntity matcher = createMatcher(); - matcher.subject = MatchSubject.USER; - matcher.value = "testuser"; - matcher.type = MatchType.IS; - matcherDao.updateMatcher(matcher); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatcher() { - MatcherEntity matcher = createMatcher(); - matcherDao.getMatcher(matcher); - matcherDao.getMatcher(matcher.getMatcherId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatchers() { - MatcherEntity matcher = createMatcher(); - matcherDao.getMatchers(matcher); - } - -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/NestedWhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/NestedWhiteboardDaoTests.java deleted file mode 100644 index f5686e602..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/NestedWhiteboardDaoTests.java +++ /dev/null @@ -1,68 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.NestedWhiteboardDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.test.AssumingOracleEngine; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class NestedWhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - NestedWhiteboardDao nestedWhiteboardDao; - - @Resource - ShowDao showDao; - - public ShowEntity getShow() { - return showDao.findShowDetail("pipe"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetNestedJobWhiteboard() { - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - nestedWhiteboardDao.getJobWhiteboard(getShow()); - 
nestedWhiteboardDao.getJobWhiteboard(getShow()); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/OwnerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/OwnerDaoTests.java deleted file mode 100644 index 7d1951a29..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/OwnerDaoTests.java +++ /dev/null @@ -1,131 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.OwnerEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.OwnerDao; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class OwnerDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - OwnerDao ownerDao; - - @Resource - AdminManager adminManager; - - @Resource - HostManager hostManager; - - @Test - @Transactional - @Rollback(true) - public void testInsertOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - } - - @Test - @Transactional - @Rollback(true) - public void testIsOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - } - - @Test - @Transactional - @Rollback(true) - public void testGetOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = 
"spongebob"; - ownerDao.insertOwner(o, show); - - assertEquals(o, ownerDao.findOwner("spongebob")); - assertEquals(o, ownerDao.getOwner(o.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteOwner() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM owner WHERE pk_owner=?", - Integer.class, o.id)); - - ownerDao.deleteOwner(o); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM owner WHERE pk_owner=?", - Integer.class, o.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShow() { - ShowInterface show = adminManager.findShowEntity("pipe"); - OwnerEntity o = new OwnerEntity(); - o.name = "spongebob"; - ownerDao.insertOwner(o, show); - - ShowInterface newShow = adminManager.findShowEntity("edu"); - - ownerDao.updateShow(o, newShow); - - assertEquals(newShow.getShowId(), jdbcTemplate.queryForObject( - "SELECT pk_show FROM owner WHERE pk_owner=?", - String.class, o.id)); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/PointDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/PointDaoTests.java deleted file mode 100644 index abaa34b23..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/PointDaoTests.java +++ /dev/null @@ -1,171 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.PointInterface; -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.dao.PointDao; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class PointDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - DepartmentDao departmentDao; - - 
@Resource - AdminManager adminManager; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - PointDao pointDao; - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Test - @Transactional - @Rollback(true) - public void insertDepartmentConfig() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - DepartmentInterface dept = departmentDao.findDepartment("Lighting"); - PointInterface d = pointDao.insertPointConf(show, dept); - - assertEquals(show.id, jdbcTemplate.queryForObject( - "SELECT pk_show FROM point WHERE pk_point=?", - String.class, d.getPointId())); - - assertEquals(dept.getDepartmentId(), jdbcTemplate.queryForObject( - "SELECT pk_dept FROM point WHERE pk_point=?", - String.class, d.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void departmentConfigExists() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - - assertTrue(pointDao.pointConfExists(show, - departmentDao.getDefaultDepartment())); - - assertFalse(pointDao.pointConfExists(show, - departmentDao.findDepartment("Lighting"))); - } - - @Test - @Transactional - @Rollback(true) - public void updateEnableTiManaged() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - - PointInterface config = pointDao.getPointConfigDetail(show, - departmentDao.getDefaultDepartment()); - - //pointDao.updateEnableManaged(config, "Lighting", 10); - } - - @Test - @Transactional - @Rollback(true) - public void getDepartmentConfig() { - ShowEntity show = new ShowEntity(); - show.name = "testtest"; - adminManager.createShow(show); - - /* Tests both overlodaed methods */ - PointInterface configA = pointDao.getPointConfigDetail(show, - 
departmentDao.getDefaultDepartment()); - - PointInterface configB = pointDao.getPointConfDetail( - configA.getPointId()); - - assertEquals(configA.getPointId(), configB.getPointId()); - assertEquals(configA.getDepartmentId(), configB.getDepartmentId()); - assertEquals(configA.getShowId(), configB.getShowId()); - } - - - @Test - @Transactional - @Rollback(true) - public void testIsOverMinCores() { - - JobDetail job = launchJob(); - - PointInterface pointConfig = pointDao.getPointConfigDetail(job, - departmentDao.getDepartment(job.getDepartmentId())); - - assertFalse(pointDao.isOverMinCores(job)); - - // Now update some values so it returns true. - jdbcTemplate.update("UPDATE point SET int_cores = int_min_cores + 2000000 WHERE pk_point=?", - pointConfig.getId()); - - logger.info(jdbcTemplate.queryForObject("SELECT int_min_cores from point where pk_point=?", - Integer.class, pointConfig.getId())); - - assertTrue(pointDao.isOverMinCores(job)); - } - -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ProcDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ProcDaoTests.java deleted file mode 100644 index a069234f1..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ProcDaoTests.java +++ /dev/null @@ -1,846 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchFrame; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.FrameDetail; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DispatcherDao; -import com.imageworks.spcue.dao.FrameDao; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.dao.ProcDao; -import com.imageworks.spcue.dao.criteria.Direction; -import com.imageworks.spcue.dao.criteria.FrameSearchFactory; -import com.imageworks.spcue.dao.criteria.ProcSearchFactory; -import com.imageworks.spcue.dao.criteria.ProcSearchInterface; -import com.imageworks.spcue.dao.criteria.Sort; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.dispatcher.ResourceReservationFailureException; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.host.ProcSearchCriteria; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import 
com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ProcDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - ProcDao procDao; - - @Resource - HostDao hostDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - FrameDao frameDao; - - @Resource - LayerDao layerDao; - - @Resource - DispatcherDao dispatcherDao; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - @Resource - Dispatcher dispatcher; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - ProcSearchFactory procSearchFactory; - - private static String PK_ALLOC = "00000000-0000-0000-0000-000000000000"; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("beta") - .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(195430) - .setTotalMem((int) CueUtil.GB32) - .setTotalSwap(20960) - .setNimbyEnabled(false) - .setNumProcs(8) - .setCoresPerProc(100) - .setState(HardwareState.UP) - .setFacility("spi") - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return hostDao.findDispatchHost("beta"); - } - - public JobDetail launchJob() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - @Before - public void setDispatcherTestMode() { - dispatcher.setTestMode(true); - jobLauncher.testMode = true; - } - - @Test - 
@Transactional - @Rollback(true) - public void testDontVerifyRunningProc() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); - VirtualProc proc = VirtualProc.build(host, frame); - dispatcher.dispatch(frame, proc); - - // Confirm was have a running frame. - assertEquals("RUNNING", jdbcTemplate.queryForObject( - "SELECT str_state FROM frame WHERE pk_frame=?", - String.class, frame.id)); - - assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); - jobManager.shutdownJob(job); - - int result = jdbcTemplate.update( - "UPDATE job SET ts_stopped = " + - "systimestamp - interval '10' minute " + - "WHERE pk_job=?", job.id); - - assertEquals(1, result); - assertFalse(procDao.verifyRunningProc(proc.getId(), frame.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertVirtualProc() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteVirtualProc() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - 
procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - procDao.deleteVirtualProc(proc); - } - - @Test - @Transactional - @Rollback(true) - public void testClearVirtualProcAssignment() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - procDao.clearVirtualProcAssignment(proc); - } - - @Test - @Transactional - @Rollback(true) - public void testClearVirtualProcAssignmentByFrame() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - assertTrue(procDao.clearVirtualProcAssignment(frame)); - } - - - @Test - @Transactional - @Rollback(true) - public void testUpdateVirtualProcAssignment() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame1 = frameDao.findFrameDetail(job, "0001-pass_1"); - FrameDetail frame2 = frameDao.findFrameDetail(job, "0002-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame1.id; - proc.layerId = frame1.layerId; - proc.showId = 
frame1.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame1.getId()); - - proc.frameId = frame2.id; - - procDao.updateVirtualProcAssignment(proc); - procDao.verifyRunningProc(proc.getId(), frame2.getId()); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateProcMemoryUsage() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - procDao.verifyRunningProc(proc.getId(), frame.getId()); - - procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetVirtualProc() { - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail fd = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - DispatchFrame frame = frameDao.getDispatchFrame(fd.getId()); - VirtualProc proc = VirtualProc.build(host, frame); - dispatcher.dispatch(frame, proc); - - assertTrue(procDao.verifyRunningProc(proc.getId(), frame.getId())); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM proc WHERE pk_proc=?", - Integer.class, proc.id)); - - - VirtualProc verifyProc = procDao.getVirtualProc(proc.getId()); - assertEquals(host.allocationId, verifyProc.allocationId); - assertEquals(proc.coresReserved, verifyProc.coresReserved); - assertEquals(proc.frameId, verifyProc.frameId); - assertEquals(proc.hostId, verifyProc.hostId); - assertEquals(proc.id, verifyProc.id); - 
assertEquals(proc.jobId, verifyProc.jobId); - assertEquals(proc.layerId, verifyProc.layerId); - assertEquals(proc.showId, verifyProc.showId); - } - - @Test - @Transactional - @Rollback(true) - public void testFindVirtualProc() { - - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.findVirtualProc(frame); - } - - @Test - @Transactional - @Rollback(true) - public void testFindVirtualProcs() { - - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - assertEquals(1, procDao.findVirtualProcs(HardwareState.UP).size()); - assertEquals(1, procDao.findVirtualProcs(host).size()); - assertEquals(1, procDao.findVirtualProcs(job).size()); - assertEquals(1, procDao.findVirtualProcs(frame).size()); - assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create(job)).size()); - assertEquals(1, procDao.findVirtualProcs(frameSearchFactory.create((LayerInterface) frame)).size()); - } - - @Test - @Transactional - 
@Rollback(true) - public void testFindOrphanedVirtualProcs() { - DispatchHost host = createHost(); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM host WHERE pk_host=?", - Integer.class, host.id)); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - assertEquals(0, procDao.findOrphanedVirtualProcs().size()); - - /** - * This is destructive to running jobs - */ - jdbcTemplate.update( - "UPDATE proc SET ts_ping = (systimestamp - interval '30' day)"); - - assertEquals(1, procDao.findOrphanedVirtualProcs().size()); - assertTrue(procDao.isOrphan(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnbookProc() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.unbookProc(proc); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_unbooked FROM proc WHERE pk_proc=?", - Integer.class, proc.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUnbookVirtualProcs() { - - DispatchHost host = createHost(); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = 
host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - - List procs = new ArrayList(); - procs.add(proc); - - procDao.unbookVirtualProcs(procs); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_unbooked FROM proc WHERE pk_proc=?", - Integer.class, proc.id)); - } - - - @Test(expected=ResourceReservationFailureException.class) - @Transactional - @Rollback(true) - public void testIncreaseReservedMemoryFail() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.increaseReservedMemory(proc, 8173264l * 8); - } - - @Test - @Transactional - @Rollback(true) - public void testIncreaseReservedMemory() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = PK_ALLOC; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - procDao.insertVirtualProc(proc); - - procDao.increaseReservedMemory(proc, 3145728); - } - - @Test - @Transactional - @Rollback(true) - public void testFindReservedMemoryOffender() { - DispatchHost host = createHost(); - - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - 
jobManager.setJobPaused(job, false); - - int i = 1; - List frames = dispatcherDao.findNextDispatchFrames(job, host, 6); - assertEquals(6, frames.size()); - - for (DispatchFrame frame: frames) { - - VirtualProc proc = VirtualProc.build(host, frame); - frame.minMemory = Dispatcher.MEM_RESERVED_DEFAULT; - dispatcher.dispatch(frame, proc); - - // Increase the memory usage as frames are added - procDao.updateProcMemoryUsage(frame, - 1000*i, 1000*i, 1000*i, 1000*i); - i++; - } - - // Now compare the last frame which has the highest memory - // usage to the what is returned by getWorstMemoryOffender - VirtualProc offender = procDao.getWorstMemoryOffender(host); - - FrameDetail f = frameDao.getFrameDetail(frames.get(5)); - FrameDetail o = frameDao.getFrameDetail(offender); - - assertEquals(f.getName(), o.getName()); - assertEquals(f.id, o.getFrameId()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetReservedMemory() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(Dispatcher.MEM_RESERVED_DEFAULT), jdbcTemplate.queryForObject( - "SELECT int_mem_reserved FROM proc WHERE pk_proc=?", - Long.class, _proc.id)); - assertEquals(Dispatcher.MEM_RESERVED_DEFAULT, - procDao.getReservedMemory(_proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetReservedGpu() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); 
- - VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(Dispatcher.GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( - "SELECT int_gpu_reserved FROM proc WHERE pk_proc=?", - Long.class, _proc.id)); - assertEquals(Dispatcher.GPU_RESERVED_DEFAULT, - procDao.getReservedGpu(_proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testBalanceUnderUtilizedProcs() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail1 = frameDao.findFrameDetail(job, "0001-pass_1"); - DispatchFrame frame1 = frameDao.getDispatchFrame(frameDetail1.id); - - VirtualProc proc1 = VirtualProc.build(host, frame1); - proc1.frameId = frame1.id; - procDao.insertVirtualProc(proc1); - - procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000); - layerDao.updateLayerMaxRSS(frame1, 250000, true); - - FrameDetail frameDetail2 = frameDao.findFrameDetail(job, "0002-pass_1"); - DispatchFrame frame2 = frameDao.getDispatchFrame(frameDetail2.id); - - VirtualProc proc2 = VirtualProc.build(host, frame2); - proc2.frameId = frame2.id; - procDao.insertVirtualProc(proc2); - - procDao.updateProcMemoryUsage(frame2, 255000, 255000,255000, 255000); - layerDao.updateLayerMaxRSS(frame2, 255000, true); - - FrameDetail frameDetail3 = frameDao.findFrameDetail(job, "0003-pass_1"); - DispatchFrame frame3 = frameDao.getDispatchFrame(frameDetail3.id); - - VirtualProc proc3 = VirtualProc.build(host, frame3); - proc3.frameId = frame3.id; - procDao.insertVirtualProc(proc3); - - procDao.updateProcMemoryUsage(frame3, 3145728, 3145728,3145728, 3145728); - layerDao.updateLayerMaxRSS(frame3,300000, true); - - procDao.balanceUnderUtilizedProcs(proc3, 100000); - procDao.increaseReservedMemory(proc3, Dispatcher.MEM_RESERVED_DEFAULT + 100000); - - // Check the target proc - VirtualProc targetProc = procDao.getVirtualProc(proc3.getId()); - assertEquals( Dispatcher.MEM_RESERVED_DEFAULT+ 100000, targetProc.memoryReserved); - - // Check other 
procs - VirtualProc firstProc = procDao.getVirtualProc(proc1.getId()); - assertEquals( Dispatcher.MEM_RESERVED_DEFAULT - 50000 -1 , firstProc.memoryReserved); - - VirtualProc secondProc = procDao.getVirtualProc(proc2.getId()); - assertEquals(Dispatcher.MEM_RESERVED_DEFAULT - 50000 -1, secondProc.memoryReserved); - - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentShowId() { - - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(job.getShowId(), procDao.getCurrentShowId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentJobId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(job.getJobId(), procDao.getCurrentJobId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentLayerId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(frame.getLayerId(), procDao.getCurrentLayerId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCurrentFrameId() { - DispatchHost host = createHost(); - JobDetail job = launchJob(); - - FrameDetail frameDetail = 
frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - DispatchFrame frame = frameDao.getDispatchFrame(frameDetail.id); - - VirtualProc proc = VirtualProc.build(host, frame); - proc.frameId = frame.id; - procDao.insertVirtualProc(proc); - - assertEquals(frame.getFrameId(), procDao.getCurrentFrameId(proc)); - } - - @Test - @Transactional - @Rollback(true) - public void getProcsBySearch() { - DispatchHost host = createHost(); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - /* - * Book 5 procs. - */ - for (int i=1; i<6; i++) { - FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1",i)); - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = host.id; - proc.hostName = host.name; - proc.jobId = job.id; - proc.frameId = f.id; - proc.layerId = f.layerId; - proc.showId = f.showId; - procDao.insertVirtualProc(proc); - } - - ProcSearchInterface r; - - /* - * Search for all 5 running procs - */ - r = procSearchFactory.create(); - r.addSort(new Sort("proc.ts_booked",Direction.ASC)); - ProcSearchCriteria criteriaA = r.getCriteria(); - r.setCriteria(criteriaA.toBuilder().addShows("pipe").build()); - assertEquals(5, procDao.findVirtualProcs(r).size()); - - /* - * Limit the result to 1 result. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaB = r.getCriteria(); - r.setCriteria(criteriaB.toBuilder().addShows("pipe").addMaxResults(1).build()); - assertEquals(1, procDao.findVirtualProcs(r).size()); - - /* - * Change the first result to 1, which should limt - * the result to 4. 
- */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaC = r.getCriteria(); - r.setCriteria(criteriaC.toBuilder().addShows("pipe").setFirstResult(2).build()); - r.addSort(new Sort("proc.ts_booked",Direction.ASC)); - assertEquals(4, procDao.findVirtualProcs(r).size()); - - /* - * Now try to do the eqivalent of a limit/offset - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaD = r.getCriteria(); - r.setCriteria(criteriaD.toBuilder() - .addShows("pipe") - .setFirstResult(3) - .addMaxResults(2) - .build()); - assertEquals(2, procDao.findVirtualProcs(r).size()); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ServiceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ServiceDaoTests.java deleted file mode 100644 index 5a90c256a..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ServiceDaoTests.java +++ /dev/null @@ -1,210 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import com.google.common.collect.Sets; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.ServiceEntity; -import com.imageworks.spcue.ServiceOverrideEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.ServiceDao; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ServiceDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - ServiceDao serviceDao; - - @Test - @Transactional - @Rollback(true) - public void testGetService() { - ServiceEntity s1 = serviceDao.get("default"); - ServiceEntity s2 = serviceDao.get("AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); - assertEquals(s1, s2); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateService() { - ServiceEntity s = new ServiceEntity(); - 
s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - - s.name = "smacktest"; - s.minCores = 200; - s.minMemory = CueUtil.GB8; - s.minGpu = CueUtil.GB2; - s.threadable = true; - s.tags = Sets.newLinkedHashSet(); - s.tags.add("linux"); - - serviceDao.update(s); - ServiceEntity s1 = serviceDao.get(s.getId()); - - assertEquals(s.name, s1.name); - assertEquals(s.minCores, s1.minCores); - assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.threadable, s1.threadable); - assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteService() { - ServiceEntity s = new ServiceEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - - serviceDao.insert(s); - assertEquals(s, serviceDao.get("dillweed")); - - serviceDao.delete(s); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM service WHERE pk_service=?", - Integer.class, s.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpu = 
CueUtil.GB2; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); - - s.name = "smacktest"; - s.minCores = 200; - s.minMemory = CueUtil.GB8; - s.minGpu = CueUtil.GB4; - s.threadable = true; - s.tags = Sets.newLinkedHashSet(); - s.tags.add("linux"); - - serviceDao.update(s); - ServiceEntity s1 = serviceDao.getOverride(s.getId()); - - assertEquals(s.name, s1.name); - assertEquals(s.minCores, s1.minCores); - assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.minGpu, s1.minGpu); - assertEquals(s.threadable, s1.threadable); - assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteServiceOverride() { - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "dillweed"; - s.minCores = 100; - s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; - s.threadable = false; - s.tags.addAll(Sets.newHashSet(new String[] { "general"})); - s.showId = "00000000-0000-0000-0000-000000000000"; - - serviceDao.insert(s); - assertEquals(s, serviceDao.getOverride("dillweed")); - assertEquals(s, serviceDao.getOverride("dillweed", s.showId)); - serviceDao.delete(s); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(1) FROM show_service WHERE pk_show_service=?", - Integer.class, s.getId())); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ShowDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ShowDaoTests.java deleted file mode 100644 index c05c8baab..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/ShowDaoTests.java +++ /dev/null @@ -1,241 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the 
"License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.dao.EmptyResultDataAccessException; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.AdminManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class ShowDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public 
AssumingOracleEngine assumingOracleEngine; - - @Resource - ShowDao showDao; - - @Resource - HostManager hostManager; - - @Resource - AdminManager adminManager; - - private static String SHOW_ID = "00000000-0000-0000-0000-000000000000"; - private static String SHOW_NAME= "pipe"; - - public DispatchHost createHost() { - - RenderHost host = RenderHost.newBuilder() - .setName("test_host") - .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(53500) - .setFreeSwap(20760) - .setLoad(1) - .setTotalMcp(195430) - .setTotalMem((int) CueUtil.GB16) - .setTotalSwap((int) CueUtil.GB16) - .setNimbyEnabled(false) - .setNumProcs(2) - .setCoresPerProc(100) - .addTags("general") - .setState(HardwareState.UP) - .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - - DispatchHost dh = hostManager.createHost(host); - hostManager.setAllocation(dh, - adminManager.findAllocationDetail("spi", "general")); - - return dh; - } - - @Test - @Transactional - @Rollback(true) - public void testFindShowDetail() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - assertEquals(SHOW_ID, show.id); - assertEquals(SHOW_NAME,show.name); - assertFalse(show.paused); - } - - @Test(expected=EmptyResultDataAccessException.class) - @Transactional - @Rollback(true) - public void testFindShowDetailByHost() { - // TODO: Add code to setup a host and make the sow - // prefer the host, then check result again. 
- showDao.getShowDetail(createHost()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShowDetail() { - ShowEntity show = showDao.getShowDetail(SHOW_ID); - assertEquals(SHOW_ID, show.id); - assertEquals(SHOW_NAME,show.name); - assertFalse(show.paused); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertShow() { - ShowEntity show = new ShowEntity(); - show.name = "uber"; - showDao.insertShow(show); - - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT count(*) FROM show where str_name=?", - Integer.class, show.name)); - - ShowEntity newShow = showDao.findShowDetail(show.name); - assertEquals(newShow.id, show.id); - assertEquals(newShow.name,show.name); - assertFalse(show.paused); - } - - @Test - @Transactional - @Rollback(true) - public void testShowExists() { - assertFalse(showDao.showExists("uber")); - assertTrue(showDao.showExists("pipe")); - assertTrue(showDao.showExists("fx")); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowDefaultMinCores() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowDefaultMinCores(show, 100); - assertTrue(jdbcTemplate.queryForObject( - "SELECT int_default_min_cores FROM show WHERE pk_show=?", - Integer.class, show.id) == 100); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowDefaultMaxCores() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowDefaultMaxCores(show, 1000); - assertTrue(jdbcTemplate.queryForObject( - "SELECT int_default_max_cores FROM show WHERE pk_show=?", - Integer.class, show.id) == 1000); - - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateShowCommentEmail() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateShowCommentEmail(show, new String[] {"test@imageworks.com"}); - String email = jdbcTemplate.queryForObject( - "SELECT str_comment_email FROM show WHERE pk_show=?", - String.class, show.id); - 
assertEquals("test@imageworks.com", email); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateBookingEnabled() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateBookingEnabled(show,false); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_booking_enabled FROM show WHERE pk_show=?", - Integer.class, show.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateActive() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - showDao.updateActive(show, false); - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT b_active FROM show WHERE pk_show=?", - Integer.class, show.id)); - showDao.updateActive(show, true); - assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT b_active FROM show WHERE pk_show=?", - Integer.class, show.id)); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateFrameCounters() { - ShowEntity show = showDao.findShowDetail(SHOW_NAME); - int frameSuccess = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show WHERE pk_show=?", - Integer.class, show.id); - showDao.updateFrameCounters(show, 0); - int frameSucces2 = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show WHERE pk_show=?", - Integer.class, show.id); - assertEquals(frameSuccess + 1,frameSucces2); - - int frameFail= jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show WHERE pk_show=?", - Integer.class, show.id); - showDao.updateFrameCounters(show, 1); - int frameFail2 = jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show WHERE pk_show=?", - Integer.class, show.id); - assertEquals(frameFail+ 1,frameFail2); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/SubscriptionDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/SubscriptionDaoTests.java deleted file mode 100644 index 5f801c475..000000000 --- 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/SubscriptionDaoTests.java +++ /dev/null @@ -1,262 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.AllocationEntity; -import com.imageworks.spcue.AllocationInterface; -import com.imageworks.spcue.FacilityInterface; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.SubscriptionEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.dao.FacilityDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.dao.SubscriptionDao; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static 
org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class SubscriptionDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - AllocationDao allocDao; - - @Resource - SubscriptionDao subscriptionDao; - - @Resource - AllocationDao allocationDao; - - @Resource - ShowDao showDao; - - @Resource - FacilityDao facilityDao; - - public static final String SUB_NAME = "test.pipe"; - public static final String ALLOC_NAME = "test"; - - private AllocationEntity alloc; - - public ShowInterface getShow() { - return showDao.getShowDetail("00000000-0000-0000-0000-000000000000"); - } - - public SubscriptionEntity buildSubscription(ShowInterface t, AllocationInterface a) { - SubscriptionEntity s = new SubscriptionEntity(); - s.allocationId = a.getId(); - s.showId = t.getId(); - s.burst = 500; - s.size = 100; - return s; - } - - public AllocationEntity buildAllocation() { - AllocationEntity a = new AllocationEntity(); - a.tag = "test"; - a.name = ALLOC_NAME; - a.facilityId = facilityDao.getDefaultFacility().getFacilityId(); - return a; - } - - @Before - public void before() { - alloc = new AllocationEntity(); - alloc.name = ALLOC_NAME; - alloc.tag = "test"; - allocationDao.insertAllocation( - facilityDao.getDefaultFacility(), alloc); - } - - @Test - @Transactional - @Rollback(true) - public void testHasRunningProcs() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - assertFalse(subscriptionDao.hasRunningProcs(s)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowOverSize() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - - assertFalse(this.subscriptionDao.isShowOverSize(getShow(), alloc)); - - 
jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 100, sub.getSubscriptionId()); - - assertFalse(subscriptionDao.isShowOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 101, sub.getSubscriptionId()); - - assertEquals(true, subscriptionDao.isShowOverSize(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowAtOrOverSize() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - assertFalse(this.subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 100, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 200, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverSize(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testIsShowOverBurst() { - subscriptionDao.insertSubscription(buildSubscription(getShow(), alloc)); - - // Burst is 500 so 600 would be over burst. - assertTrue(subscriptionDao.isShowOverBurst(getShow(), alloc, 600)); - - // Burst is 500 so 300 would be under burst. - assertFalse(subscriptionDao.isShowOverBurst(getShow(), alloc, 300)); - } - - @Test(expected=org.springframework.jdbc.UncategorizedSQLException.class) - @Transactional - @Rollback(true) - public void testIsShowAtOrOverBurst() { - - SubscriptionEntity sub = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(sub); - assertFalse(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? 
WHERE pk_subscription = ?", - 500, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - - jdbcTemplate.update( - "UPDATE subscription SET int_cores = ? WHERE pk_subscription = ?", - 501, sub.getSubscriptionId()); - - assertTrue(subscriptionDao.isShowAtOrOverBurst(getShow(), alloc)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionDetail() { - - FacilityInterface f = facilityDao.getDefaultFacility(); - - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - assertNotNull(s.id); - assertNotNull(s.getId()); - - SubscriptionEntity s1 = subscriptionDao.getSubscriptionDetail( - s.getSubscriptionId()); - - assertEquals(alloc.getName() + ".pipe", s1.name); - assertEquals(s.burst, s1.burst); - assertEquals(s.id, s1.id); - assertEquals(s.size, s1.size); - assertEquals(s.allocationId, s1.allocationId); - } - - @Test - @Transactional - @Rollback(true) - public void testInsertSubscription() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - } - - @Test - @Transactional - @Rollback(true) - public void testDeleteSubscription() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.deleteSubscription(s); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSubscriptionSize() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - subscriptionDao.updateSubscriptionSize(s, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_size FROM subscription WHERE pk_subscription=?", - Integer.class, s.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testUpdateSubscriptionBurst() { - SubscriptionEntity s = buildSubscription(getShow(), alloc); - subscriptionDao.insertSubscription(s); - 
subscriptionDao.updateSubscriptionBurst(s, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_burst FROM subscription WHERE pk_subscription=?", - Integer.class, s.getId())); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/TaskDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/TaskDaoTests.java deleted file mode 100644 index aac333ad7..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/TaskDaoTests.java +++ /dev/null @@ -1,288 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.PointInterface; -import com.imageworks.spcue.TaskEntity; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.DepartmentDao; -import com.imageworks.spcue.dao.PointDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.dao.TaskDao; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.test.AssumingOracleEngine; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class TaskDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - ShowDao showDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - TaskDao taskDao; - - @Resource - PointDao pointDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - @Test - @Transactional - @Rollback(true) - public void insertTask() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = 
jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - - String dept = jdbcTemplate.queryForObject( - "SELECT pk_dept FROM job WHERE pk_job=?", String.class, job.getJobId()); - - // Add in a new task, the job should switch to using this task. - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDepartment(dept)); - - TaskEntity t = new TaskEntity(p, "dev.foo", 100); - taskDao.insertTask(t); - - t = taskDao.getTaskDetail(t.id); - taskDao.deleteTask(t); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTask() { - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - TaskEntity t = new TaskEntity(p, "dev.cue", 100); - taskDao.insertTask(t); - taskDao.deleteTask(t); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTasksByShowAndDepartment() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - int task_count = jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - - assertEquals(Integer.valueOf(task_count + 1), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId())); - - taskDao.deleteTasks(p); - - assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void deleteTasksByDepartmentConfig() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, - "dev.cue"); - t.minCoreUnits = 100; - taskDao.insertTask(t); - - taskDao.deleteTasks(p); - - /** - * This is always - */ - 
assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task WHERE pk_point=?", - Integer.class, p.getPointId())); - } - - @Test - @Transactional - @Rollback(true) - public void getTaskDetail() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - assertEquals(newTask.id,t.id); - } - - @Test - @Transactional - @Rollback(true) - public void getTaskDetailByDept() { - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(departmentDao.getDefaultDepartment(), "dev.cue"); - assertEquals(newTask.id,t.id); - } - - @Test - @Transactional - @Rollback(true) - public void updateTaskMinProcs() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - t.minCoreUnits = 100; - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - taskDao.updateTaskMinCores(newTask, 100); - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void adjustTaskMinProcs() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p,"dev.cue"); - t.minCoreUnits = 10; - taskDao.insertTask(t); - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - taskDao.updateTaskMinCores(newTask, 100); - assertEquals(Integer.valueOf(100), 
jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - - taskDao.adjustTaskMinCores(t, 105); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - assertEquals(Integer.valueOf(5), jdbcTemplate.queryForObject( - "SELECT int_adjust_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - - taskDao.adjustTaskMinCores(t, 50); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - assertEquals(Integer.valueOf(-50), jdbcTemplate.queryForObject( - "SELECT int_adjust_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - } - - - @Test - @Transactional - @Rollback(true) - public void mergeTask() { - - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p, "dev.cue"); - taskDao.insertTask(t); - - assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, t.getTaskId())); - - TaskEntity newTask = taskDao.getTaskDetail(t.getTaskId()); - newTask.minCoreUnits = 200; - taskDao.mergeTask(newTask); - - assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( - "SELECT int_min_cores FROM task WHERE pk_task=?", - Integer.class, newTask.getTaskId())); - } - - @Test - @Transactional - @Rollback(true) - public void isJobManaged() { - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - assertFalse(taskDao.isManaged(job)); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/TrackitDaoTests.java 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/TrackitDaoTests.java deleted file mode 100644 index 9fcb60e0a..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/TrackitDaoTests.java +++ /dev/null @@ -1,60 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.util.List; -import javax.annotation.Resource; - -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; - -import com.imageworks.spcue.TrackitTaskDetail; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.TrackitDao; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.test.AssumingTrackitEnabled; - -import static org.junit.Assert.assertTrue; - -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class TrackitDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Autowired - @Rule - public AssumingTrackitEnabled assumingTrackitEnabled; - - @Resource 
- TrackitDao trackitDao; - - @Test - public void testGetTasks() { - List result = trackitDao.getTasks("clo","Lighting"); - assertTrue(result.size() > 0); - } -} - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/WhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/WhiteboardDaoTests.java deleted file mode 100644 index 294d22718..000000000 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/oracle/WhiteboardDaoTests.java +++ /dev/null @@ -1,1279 +0,0 @@ - -/* - * Copyright Contributors to the OpenCue Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - - - -package com.imageworks.spcue.test.dao.oracle; - -import java.io.File; -import java.util.ArrayList; -import java.util.List; -import javax.annotation.Resource; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.annotation.Rollback; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; -import org.springframework.test.context.support.AnnotationConfigContextLoader; -import org.springframework.transaction.annotation.Transactional; - -import com.imageworks.spcue.ActionEntity; -import com.imageworks.spcue.AllocationEntity; -import com.imageworks.spcue.CommentDetail; -import com.imageworks.spcue.DeedEntity; -import com.imageworks.spcue.DepartmentInterface; -import com.imageworks.spcue.DispatchFrame; -import com.imageworks.spcue.DispatchHost; -import com.imageworks.spcue.FilterEntity; -import com.imageworks.spcue.FrameDetail; -import com.imageworks.spcue.FrameInterface; -import com.imageworks.spcue.HostEntity; -import com.imageworks.spcue.JobDetail; -import com.imageworks.spcue.JobInterface; -import com.imageworks.spcue.LayerInterface; -import com.imageworks.spcue.LightweightDependency; -import com.imageworks.spcue.LocalHostAssignment; -import com.imageworks.spcue.MatcherEntity; -import com.imageworks.spcue.OwnerEntity; -import com.imageworks.spcue.PointInterface; -import com.imageworks.spcue.ServiceOverrideEntity; -import com.imageworks.spcue.ShowEntity; -import com.imageworks.spcue.ShowInterface; -import com.imageworks.spcue.Source; -import com.imageworks.spcue.TaskEntity; -import com.imageworks.spcue.VirtualProc; -import com.imageworks.spcue.config.TestAppConfig; -import com.imageworks.spcue.dao.ActionDao; -import com.imageworks.spcue.dao.AllocationDao; -import com.imageworks.spcue.dao.DepartmentDao; -import 
com.imageworks.spcue.dao.FilterDao; -import com.imageworks.spcue.dao.FrameDao; -import com.imageworks.spcue.dao.GroupDao; -import com.imageworks.spcue.dao.HostDao; -import com.imageworks.spcue.dao.LayerDao; -import com.imageworks.spcue.dao.LimitDao; -import com.imageworks.spcue.dao.MatcherDao; -import com.imageworks.spcue.dao.PointDao; -import com.imageworks.spcue.dao.ProcDao; -import com.imageworks.spcue.dao.ShowDao; -import com.imageworks.spcue.dao.WhiteboardDao; -import com.imageworks.spcue.dao.criteria.FrameSearchFactory; -import com.imageworks.spcue.dao.criteria.FrameSearchInterface; -import com.imageworks.spcue.dao.criteria.HostSearchFactory; -import com.imageworks.spcue.dao.criteria.HostSearchInterface; -import com.imageworks.spcue.dao.criteria.JobSearchFactory; -import com.imageworks.spcue.dao.criteria.JobSearchInterface; -import com.imageworks.spcue.dao.criteria.ProcSearchFactory; -import com.imageworks.spcue.dao.criteria.ProcSearchInterface; -import com.imageworks.spcue.dispatcher.DispatchSupport; -import com.imageworks.spcue.dispatcher.Dispatcher; -import com.imageworks.spcue.grpc.department.Department; -import com.imageworks.spcue.grpc.filter.ActionType; -import com.imageworks.spcue.grpc.filter.ActionValueType; -import com.imageworks.spcue.grpc.filter.FilterType; -import com.imageworks.spcue.grpc.filter.MatchSubject; -import com.imageworks.spcue.grpc.filter.MatchType; -import com.imageworks.spcue.grpc.host.HardwareState; -import com.imageworks.spcue.grpc.host.Host; -import com.imageworks.spcue.grpc.host.HostSearchCriteria; -import com.imageworks.spcue.grpc.host.LockState; -import com.imageworks.spcue.grpc.host.Owner; -import com.imageworks.spcue.grpc.host.ProcSearchCriteria; -import com.imageworks.spcue.grpc.job.Frame; -import com.imageworks.spcue.grpc.job.FrameSearchCriteria; -import com.imageworks.spcue.grpc.job.FrameState; -import com.imageworks.spcue.grpc.job.Job; -import com.imageworks.spcue.grpc.job.JobSearchCriteria; -import 
com.imageworks.spcue.grpc.job.Layer; -import com.imageworks.spcue.grpc.limit.Limit; -import com.imageworks.spcue.grpc.report.RenderHost; -import com.imageworks.spcue.service.BookingManager; -import com.imageworks.spcue.service.CommentManager; -import com.imageworks.spcue.service.DepartmentManager; -import com.imageworks.spcue.service.DependManager; -import com.imageworks.spcue.service.HostManager; -import com.imageworks.spcue.service.JobLauncher; -import com.imageworks.spcue.service.JobManager; -import com.imageworks.spcue.service.OwnerManager; -import com.imageworks.spcue.service.ServiceManager; -import com.imageworks.spcue.test.AssumingOracleEngine; -import com.imageworks.spcue.util.CueUtil; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - - -@Transactional -@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) -public class WhiteboardDaoTests extends AbstractTransactionalJUnit4SpringContextTests { - - @Autowired - @Rule - public AssumingOracleEngine assumingOracleEngine; - - @Resource - AllocationDao allocationDao; - - @Resource - HostDao hostDao; - - @Resource - WhiteboardDao whiteboardDao; - - @Resource - ShowDao showDao; - - @Resource - FilterDao filterDao; - - @Resource - ProcDao procDao; - - @Resource - MatcherDao matcherDao; - - @Resource - ActionDao actionDao; - - @Resource - JobManager jobManager; - - @Resource - JobLauncher jobLauncher; - - @Resource - GroupDao groupDao; - - @Resource - LayerDao layerDao; - - @Resource - LimitDao limitDao; - - @Resource - DepartmentDao departmentDao; - - @Resource - DependManager dependManager; - - @Resource - FrameDao frameDao; - - @Resource - PointDao pointDao; - - @Resource - HostManager hostManager; - - @Resource - CommentManager commentManager; - - @Resource - DepartmentManager departmentManager; - - @Resource - Dispatcher dispatcher; - - @Resource - DispatchSupport dispatchSupport; - - @Resource - OwnerManager ownerManager; - - 
@Resource - BookingManager bookingManager; - - @Resource - ServiceManager serviceManager; - - @Resource - FrameSearchFactory frameSearchFactory; - - @Resource - HostSearchFactory hostSearchFactory; - - @Resource - JobSearchFactory jobSearchFactory; - - @Resource - ProcSearchFactory procSearchFactory; - - private static final String HOST = "testest"; - private static final String SHOW = "pipe"; - - @Before - public void testMode() { - jobLauncher.testMode = true; - } - - public ShowEntity getShow() { - return showDao.findShowDetail(SHOW); - } - - public FilterEntity createFilter() { - FilterEntity filter = new FilterEntity(); - filter.name = "Default"; - filter.showId = getShow().id; - filter.type = FilterType.MATCH_ANY; - filter.enabled = true; - filterDao.insertFilter(filter); - return filter; - } - - public MatcherEntity createMatcher(FilterEntity f) { - MatcherEntity matcher = new MatcherEntity(); - matcher.filterId = f.id; - matcher.name = null; - matcher.showId = getShow().getId(); - matcher.subject = MatchSubject.JOB_NAME; - matcher.type = MatchType.CONTAINS; - matcher.value = "testuser"; - matcherDao.insertMatcher(matcher); - return matcher; - } - - public ActionEntity createAction(FilterEntity f) { - ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.PAUSE_JOB; - a1.filterId = f.getFilterId(); - a1.booleanValue = true; - a1.name = null; - a1.valueType = ActionValueType.BOOLEAN_TYPE; - actionDao.createAction(a1); - return a1; - } - - public RenderHost getRenderHost() { - - RenderHost host = RenderHost.newBuilder() - .setName(HOST) - .setBootTime(1192369572) - .setFreeMcp(7602) - .setFreeMem((int) Dispatcher.MEM_RESERVED_MIN * 4) - .setFreeSwap(2076) - .setLoad(1) - .setTotalMcp(19543) - .setTotalMem((int) Dispatcher.MEM_RESERVED_MIN * 4) - .setTotalSwap(2096) - .setNimbyEnabled(true) - .setNumProcs(2) - .setCoresPerProc(400) - .setState(HardwareState.DOWN) - .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - 
.putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) - .build(); - return host; - } - - public JobDetail launchJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - public JobDetail launchLimitJob() { - jobLauncher.testMode = true; - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_limit.xml")); - return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); - } - - private void createTestLimits() { - limitDao.createLimit("util", 15); - limitDao.createLimit("arnold", 20); - } - - @Test - @Transactional - @Rollback(true) - public void getService() { - whiteboardDao.getService("arnold"); - } - - @Test - @Transactional - @Rollback(true) - public void getServices() { - whiteboardDao.getDefaultServices(); - } - - @Test - @Transactional - @Rollback(true) - public void getServiceOverride() { - - ShowInterface show = getShow(); - ServiceOverrideEntity s = new ServiceOverrideEntity(); - s.name = "test"; - s.minCores = 100; - s.minMemory = 320000; - s.tags.add("general"); - s.threadable = false; - s.showId = show.getId(); - - serviceManager.createService(s); - whiteboardDao.getServiceOverride(getShow(), "test"); - } - - @Test - @Transactional - @Rollback(true) - public void getServiceOverrides() { - whiteboardDao.getServiceOverrides(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepend() { - - List depends = dependManager.getWhatDependsOn(launchJob()); - for (LightweightDependency depend: depends) { - whiteboardDao.getDepend(depend); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGetDependById() { - - List depends = dependManager.getWhatDependsOn(launchJob()); - for (LightweightDependency depend: depends) { - whiteboardDao.getDepend(depend); - whiteboardDao.getDepend(depend.id); - } - } - - - @Test - @Transactional - @Rollback(true) - public 
void testGetWhatDependsOnThis() { - JobDetail job = launchJob(); - assertEquals(1,whiteboardDao.getWhatDependsOnThis(job).getDependsCount()); - - LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); - assertEquals(0, whiteboardDao.getWhatDependsOnThis(layer1).getDependsCount()); - - LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); - assertEquals(1, whiteboardDao.getWhatDependsOnThis(layer2).getDependsCount()); - - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(0, whiteboardDao.getWhatDependsOnThis(frame).getDependsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetWhatThisDependsOn() { - JobDetail job = launchJob(); - assertEquals(0, whiteboardDao.getWhatThisDependsOn(job).getDependsCount()); - - LayerInterface layer1 = layerDao.findLayer(job, "pass_1"); - assertEquals(1, whiteboardDao.getWhatThisDependsOn(layer1).getDependsCount()); - - LayerInterface layer2 = layerDao.findLayer(job, "pass_1_preprocess"); - assertEquals(0, whiteboardDao.getWhatThisDependsOn(layer2).getDependsCount()); - - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1"); - assertEquals(1, whiteboardDao.getWhatThisDependsOn(frame).getDependsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepends() { - JobDetail job = launchJob(); - assertEquals(1,whiteboardDao.getDepends(job).getDependsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCommentsOnJob() { - JobDetail job = launchJob(); - assertEquals(0,whiteboardDao.getComments(job).getCommentsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetCommentsOnHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); - - CommentDetail c = new CommentDetail(); - c.message = "you suck"; - c.subject = "a useful message"; - c.user = "testuser"; - c.timestamp = null; - 
- commentManager.addComment(hd, c); - assertEquals(1,whiteboardDao.getComments(hd).getCommentsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilter() { - createFilter(); - whiteboardDao.findFilter(getShow(), "Default"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilter() { - whiteboardDao.getFilter(createFilter()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatchers() { - FilterEntity f = createFilter(); - createMatcher(f); - whiteboardDao.getMatchers(f); - } - - @Test - @Transactional - @Rollback(true) - public void testGetMatcher() { - FilterEntity f = createFilter(); - MatcherEntity m = createMatcher(f); - whiteboardDao.getMatcher(m); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActions() { - FilterEntity f = createFilter(); - createAction(f); - whiteboardDao.getActions(f); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAction() { - FilterEntity f = createFilter(); - whiteboardDao.getAction(createAction(f)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFilters() { - createFilter(); - whiteboardDao.getFilters(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetFramesByFrameSearch() { - JobInterface job = launchJob(); - FrameSearchInterface r = frameSearchFactory.create(job); - FrameSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder() - .setPage(1) - .setLimit(5) - .addLayers("pass_1") - .build()); - assertEquals(5, whiteboardDao.getFrames(r).getFramesCount()); - for (Frame f: whiteboardDao.getFrames(r).getFramesList()) { - assertEquals(f.getLayerName(), "pass_1"); - } - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayers() { - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, 
"0001-pass_1_preprocess"); - - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0)); - dispatchSupport.updateUsageCounters(frame, 0); - whiteboardDao.getLayers(job); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimits() { - createTestLimits(); - List limits = whiteboardDao.getLimits(); - assertEquals(limits.size(), 2); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLayerLimits() { - createTestLimits(); - JobDetail job = launchLimitJob(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - List limits = whiteboardDao.getLimits(layer); - assertEquals(limits.size(), 1); - assertEquals(limits.get(0).getName(), "arnold"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetLimit() { - String limitName = "testing"; - int limitMaxValue = 20; - String limitId = limitDao.createLimit(limitName, limitMaxValue); - Limit limit = whiteboardDao.getLimit(limitId); - assertEquals(limit.getName(), limitName); - assertEquals(limit.getMaxValue(), limitMaxValue); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLimit() { - String limitName = "testing"; - int limitMaxValue = 20; - String limitId = limitDao.createLimit(limitName, limitMaxValue); - Limit limit = whiteboardDao.findLimit(limitName); - assertEquals(limit.getName(), limitName); - assertEquals(limit.getMaxValue(), limitMaxValue); - assertEquals(limit.getId(), 
limitId); - } - - @Test - @Transactional - @Rollback(true) - public void testStopFrameUpdatesLayerMaxRSS() { - long max_rss = 123456L; - - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - // Note use of 4-arg stopFrame here to update max rss. - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); - dispatchSupport.updateUsageCounters(frame, 0); - Layer layer = whiteboardDao.getLayer(frame.layerId); - assertEquals(max_rss, layer.getLayerStats().getMaxRss()); - } - - @Test - @Transactional - @Rollback(true) - public void testStopFrameUpdatesJobMaxRSS() { - long max_rss = 123456L; - - JobDetail job = launchJob(); - whiteboardDao.getLayers(job); - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - DispatchFrame dframe = frameDao.getDispatchFrame(frame.getId()); - dispatcher.setTestMode(true); - dispatcher.dispatch(dframe, proc); - - 
try { - Thread.sleep(2000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - - dframe = frameDao.getDispatchFrame(frame.getId()); - - // Note use of 4-arg stopFrame here to update max rss. - assertTrue(dispatchSupport.stopFrame(dframe, FrameState.SUCCEEDED, 0, max_rss)); - dispatchSupport.updateUsageCounters(frame, 0); - Job grpc_job = whiteboardDao.getJob(job.id); - assertEquals(max_rss, grpc_job.getJobStats().getMaxRss()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobs() { - launchJob(); - JobSearchCriteria r = JobSearchInterface.criteriaFactory(); - r = r.toBuilder().addShows("pipe").build(); - whiteboardDao.getJobs(jobSearchFactory.create(r)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJobNames() { - launchJob(); - JobSearchCriteria r = JobSearchInterface.criteriaFactory(); - r = r.toBuilder().addShows("pipe").build(); - whiteboardDao.getJobNames(jobSearchFactory.create(r)); - } - - @Test - @Transactional - @Rollback(true) - public void testGetUpdatedFrames() { - final JobDetail job = launchJob(); - List jobs = new ArrayList(); - - jobs.add(new JobInterface() { - public String getJobId() { return job.getId(); } - public String getShowId() { return null; } - public String getId() { return job.getId(); } - public String getName() { return null; } - public String getFacilityId() { throw new RuntimeException("not implemented"); } - }); - - whiteboardDao.getUpdatedFrames(job, new ArrayList(), - (int) (System.currentTimeMillis() / 1000)); - - } - - @Test(expected=IllegalArgumentException.class) - @Transactional - @Rollback(true) - public void testGetUpdatedFramesFailure() { - final JobDetail job = launchJob(); - List jobs = new ArrayList(); - - jobs.add(new JobInterface() { - public String getJobId() { return job.getId(); } - public String getShowId() { return null; } - public String getId() { return job.getId(); } - public String getName() { return null; } - public String getFacilityId() { throw 
new RuntimeException("not implemented"); } - }); - - // this one should fail - whiteboardDao.getUpdatedFrames(job, new ArrayList(), - (int) (System.currentTimeMillis() / 1000 - 1000000)); - } - - @Test - @Transactional - @Rollback(true) - public void testFindJob() { - JobDetail job = launchJob(); - whiteboardDao.findJob(job.name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetJob() { - JobDetail job = launchJob(); - whiteboardDao.getJob(job.id); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionByID() { - whiteboardDao.getSubscription("00000000-0000-0000-0000-000000000001"); - } - - @Test - @Transactional - @Rollback(true) - public void findFindSubscription() { - whiteboardDao.findSubscription("pipe", "spi.general"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptions() { - whiteboardDao.getSubscriptions(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetSubscriptionsByAllocation() { - whiteboardDao.getSubscriptions( - allocationDao.findAllocationEntity("spi", "general")); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShow() { - whiteboardDao.getShow(getShow().id); - } - - @Test - @Transactional - @Rollback(true) - public void testFindShow() { - whiteboardDao.findShow(getShow().name); - } - - @Test - @Transactional - @Rollback(true) - public void testGetShows() { - whiteboardDao.getShows(); - } - - @Test - @Transactional - @Rollback(true) - public void testGetActiveShows() { - whiteboardDao.getActiveShows(); - } - - @Test - @Transactional - @Rollback(true) - public void testFindHost() { - - try { - HostEntity h = hostManager.findHostDetail(HOST); - hostManager.deleteHost(h); - } catch (Exception e) { } - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - hostDao.updateHostLock(hd, LockState.LOCKED, new Source("TEST")); - Host h = whiteboardDao.findHost(host.getName()); - 
assertEquals(host.getName(), h.getName()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHosts() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - HostSearchCriteria h = HostSearchInterface.criteriaFactory(); - h = h.toBuilder().addHosts(HOST).build(); - assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetHostsByAllocation() { - RenderHost host = getRenderHost(); - AllocationEntity alloc = allocationDao.getAllocationEntity("00000000-0000-0000-0000-000000000006"); - DispatchHost hd = hostManager.createHost(host, alloc); - - HostSearchCriteria h = HostSearchInterface.criteriaFactory(); - h = h.toBuilder().addAllocs(alloc.getName()).build(); - assertEquals(1, whiteboardDao.getHosts(hostSearchFactory.create(h)).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocation() { - whiteboardDao.getAllocation("00000000-0000-0000-0000-000000000000"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindAllocation() { - whiteboardDao.findAllocation("spi.general"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetAllocations() { - whiteboardDao.getAllocations(); - } - - @Test - @Transactional - @Rollback(true) - public void testGetRootGroup() { - whiteboardDao.getRootGroup(getShow()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroup() { - whiteboardDao.getGroup("A0000000-0000-0000-0000-000000000000"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetGroups() { - whiteboardDao.getGroups(getShow()); - whiteboardDao.getGroup(groupDao.getRootGroupId(getShow())); - whiteboardDao.getGroups(groupDao.getRootGroupDetail(getShow())); - } - - @Test - @Transactional - @Rollback(true) - public void testFindGroup() { - whiteboardDao.findGroup("pipe", "pipe"); - } - - @Test - @Transactional - 
@Rollback(true) - public void testFindFrame() { - JobDetail job = launchJob(); - whiteboardDao.findFrame(job.name, "pass_1", 1); - } - - @Test - @Transactional - @Rollback(true) - public void testFindFilterByName() { - createFilter(); - whiteboardDao.findFilter("pipe", "Default"); - } - - @Test - @Transactional - @Rollback(true) - public void testFindLayer() { - JobDetail job = launchJob(); - whiteboardDao.findLayer(job.name, "pass_1"); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartment() { - ShowInterface show = showDao.findShowDetail("pipe"); - DepartmentInterface dept = departmentDao.getDefaultDepartment(); - - Department d = whiteboardDao.getDepartment(show, dept.getName()); - - assertEquals("pipe.Unknown", d.getName()); - assertEquals("Unknown", d.getDept()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetDepartments() { - ShowInterface show = showDao.findShowDetail("pipe"); - whiteboardDao.getDepartments(show); - } - - - @Test - @Transactional - @Rollback(true) - public void testGetDepartmentNames() { - assertTrue(whiteboardDao.getDepartmentNames().size() > 0); - } - - @Test - @Transactional - @Rollback(true) - public void testGetTasks() { - whiteboardDao.getTasks(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - } - - @Test - @Transactional - @Rollback(true) - public void testGetTask() { - PointInterface p = pointDao.getPointConfigDetail( - showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment()); - - TaskEntity t = new TaskEntity(p,"dev.cue"); - departmentManager.createTask(t); - - whiteboardDao.getTask(showDao.findShowDetail("pipe"), - departmentDao.getDefaultDepartment(), "dev.cue"); - } - - @Test - @Transactional - @Rollback(true) - public void getFrame() { - JobDetail job = launchJob(); - FrameInterface frame = frameDao.findFrame(job, "0001-pass_1_preprocess"); - assertEquals(1, whiteboardDao.getFrame(frame.getFrameId()).getNumber()); - } - - @Test - 
@Transactional - @Rollback(true) - public void getLayer() { - JobDetail job = launchJob(); - LayerInterface layer = layerDao.findLayer(job, "pass_1"); - assertEquals(layer.getName(),whiteboardDao.getLayer(layer.getLayerId()).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void getHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - assertEquals(hd.getName(), whiteboardDao.getHost(proc.getHostId()).getName()); - } - - @Test - @Transactional - @Rollback(true) - public void getProcs() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - JobDetail job = launchJob(); - FrameDetail frame = frameDao.findFrameDetail(job, "0001-pass_1_preprocess"); - - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = frame.id; - proc.layerId = frame.layerId; - proc.showId = frame.showId; - - procDao.insertVirtualProc(proc); - assertEquals(1,whiteboardDao.getProcs(proc).getProcsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getProcsBySearch() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - /* - * Book 5 procs. 
- */ - for (int i=1; i<6; i++) { - FrameDetail f = frameDao.findFrameDetail(job, String.format("%04d-pass_1",i)); - VirtualProc proc = new VirtualProc(); - proc.allocationId = null; - proc.coresReserved = 100; - proc.hostId = hd.id; - proc.hostName = host.getName(); - proc.jobId = job.id; - proc.frameId = f.id; - proc.layerId = f.layerId; - proc.showId = f.showId; - procDao.insertVirtualProc(proc); - } - - ProcSearchInterface r; - - /* - * Search for all 5 running procs - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteria = r.getCriteria(); - r.setCriteria(criteria.toBuilder().addShows("pipe").build()); - assertEquals(5, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Limit the result to 1 result. - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaA = r.getCriteria(); - r.setCriteria(criteriaA.toBuilder() - .addShows("pipe") - .addMaxResults(1) - .build()); - assertEquals(1, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Change the first result to 1, which should limit - * the result to 4. 
- */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaB = r.getCriteria(); - r.setCriteria(criteriaB.toBuilder() - .addShows("pipe") - .setFirstResult(2) - .build()); - assertEquals(4, whiteboardDao.getProcs(r).getProcsCount()); - - /* - * Now try to do the equivalent of a limit/offset - */ - r = procSearchFactory.create(); - ProcSearchCriteria criteriaC = r.getCriteria(); - r.setCriteria(criteriaC.toBuilder() - .addShows("pipe") - .setFirstResult(3) - .addMaxResults(2) - .build()); - assertEquals(2, whiteboardDao.getProcs(r).getProcsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwner() { - ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - whiteboardDao.getOwner("spongebob"); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnersByShow() { - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - - assertTrue(whiteboardDao.getOwners( - showDao.findShowDetail("pipe")).size() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getDeedsByShow() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - assertTrue(whiteboardDao.getDeeds( - showDao.findShowDetail("pipe")).getDeedsCount() != 0); - - assertTrue(whiteboardDao.getDeeds( - showDao.findShowDetail("pipe")).getDeedsCount() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getDeedsByOwner() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - - ownerManager.takeOwnership(owner, hd); - 
assertTrue(whiteboardDao.getDeeds( - owner).getDeedsCount() != 0); - } - - @Test - @Transactional - @Rollback(true) - public void getHostsByOwner() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - ownerManager.takeOwnership(owner, hd); - - assertEquals(1, whiteboardDao.getHosts(owner).getHostsCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnerFromDeed() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - DeedEntity deed = ownerManager.takeOwnership(owner, hd); - - Owner o2 = whiteboardDao.getOwner(deed); - - assertEquals(owner.getName(), o2.getName()); - assertEquals(1, o2.getHostCount()); - } - - @Test - @Transactional - @Rollback(true) - public void getOwnerFromHost() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - OwnerEntity owner = ownerManager.createOwner("spongebob", - showDao.findShowDetail("pipe")); - ownerManager.takeOwnership(owner, hd); - - Owner o2 = whiteboardDao.getOwner(hd); - - assertEquals(owner.getName(), o2.getName()); - assertEquals(1, o2.getHostCount()); - } - - - @Test - @Transactional - @Rollback(true) - public void getRenderPartition() { - - RenderHost host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1); - bookingManager.createLocalHostAssignment(hd, job, lba); - - whiteboardDao.getRenderPartition(lba); - } - - @Test - @Transactional - @Rollback(true) - public void getRenderPartitionsByHost() { - - RenderHost 
host = getRenderHost(); - DispatchHost hd = hostManager.createHost(host); - - jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); - JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1); - bookingManager.createLocalHostAssignment(hd, job, lba); - - assertEquals(1, whiteboardDao.getRenderPartitions(hd).getRenderPartitionsCount()); - - } - - @Test - @Transactional - @Rollback(true) - public void getFacility() { - whiteboardDao.getFacilities(); - whiteboardDao.getFacility("spi"); - } -} - - diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java index 669ecb981..bbc6914a4 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/AllocationDaoTests.java @@ -182,6 +182,26 @@ public void testUpdateAllocationBillable() { "SELECT b_billable FROM alloc WHERE pk_alloc=?", Boolean.class, alloc.getId())); } + + @Test + @Transactional + @Rollback(true) + public void testSetDefaultAllocation() { + AllocationEntity newAlloc = new AllocationEntity(); + newAlloc.name = "spi.new_alloc"; + newAlloc.tag = "new_alloc"; + allocDao.insertAllocation( + facilityDao.getFacility("spi"), newAlloc); + + allocDao.setDefaultAllocation(newAlloc); + AllocationEntity defaultAlloc = allocDao.getDefaultAllocationEntity(); + assertEquals(newAlloc.getAllocationId(), defaultAlloc.getAllocationId()); + assertEquals(newAlloc.name, defaultAlloc.name); + assertEquals(newAlloc.tag, defaultAlloc.tag); + assertEquals( + facilityDao.getFacility("spi").getFacilityId(), + defaultAlloc.getFacilityId()); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java index 8c09dfde4..577b53eac 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/BookingDaoTests.java @@ -96,11 +96,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("test_host") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB16) .setNimbyEnabled(false) @@ -109,8 +110,8 @@ public DispatchHost createHost() { .setState(HardwareState.UP) .setFacility("spi") .addTags("general") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); DispatchHost dh = hostManager.createHost(host); hostManager.setAllocation(dh, @@ -138,7 +139,7 @@ public void insertLocalJobAssignment() { LocalHostAssignment lja = new LocalHostAssignment(); lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); lja.setThreads(2); bookingDao.insertLocalHostAssignment(h, j, lja); @@ -149,7 +150,7 @@ public void insertLocalJobAssignment() { Integer.class, j.getJobId())); assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( @@ -161,7 +162,7 @@ public void insertLocalJobAssignment() { Long.class, j.getJobId())); assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM 
host_local WHERE pk_job=?", + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( @@ -185,7 +186,7 @@ public void insertLocalLayerAssignment() { LocalHostAssignment lja = new LocalHostAssignment(); lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); lja.setThreads(2); bookingDao.insertLocalHostAssignment(h, layer, lja); @@ -212,7 +213,7 @@ public void insertLocalLayerAssignment() { Long.class, j.getJobId())); assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( @@ -237,7 +238,7 @@ public void insertLocalFrameAssignment() { LocalHostAssignment lja = new LocalHostAssignment(); lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); lja.setThreads(2); bookingDao.insertLocalHostAssignment(h, frame, lja); @@ -264,7 +265,7 @@ public void insertLocalFrameAssignment() { Long.class, j.getJobId())); assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( - "SELECT int_gpu_max FROM host_local WHERE pk_job=?", + "SELECT int_gpu_mem_max FROM host_local WHERE pk_job=?", Integer.class, j.getJobId())); assertEquals(Integer.valueOf(200), jdbcTemplate.queryForObject( @@ -288,7 +289,7 @@ public void testGetLocalJobAssignment() { lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); lja.setThreads(2); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(h, j, lja); @@ -297,7 +298,7 @@ public void testGetLocalJobAssignment() { assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); - assertEquals(lja.getMaxGpu(), lja2.getMaxGpu()); + assertEquals(lja.getMaxGpuMemory(), 
lja2.getMaxGpuMemory()); assertEquals(lja.getThreads(), lja2.getThreads()); } @@ -314,7 +315,7 @@ public void testGetRenderPartition() { lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); lja.setThreads(2); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(h, j, lja); @@ -324,7 +325,7 @@ public void testGetRenderPartition() { assertEquals(lja.getMaxCoreUnits(), lja2.getMaxCoreUnits()); assertEquals(lja.getMaxMemory(), lja2.getMaxMemory()); assertEquals(lja.getThreads(), lja2.getThreads()); - assertEquals(lja.getMaxGpu(), lja2.getMaxGpu()); + assertEquals(lja.getMaxGpuMemory(), lja2.getMaxGpuMemory()); RenderPartition rp = whiteboard.getRenderPartition(lja2); @@ -332,9 +333,9 @@ public void testGetRenderPartition() { assertEquals(lja2.getMaxMemory(), rp.getMaxMemory()); assertEquals(lja2.getThreads(), rp.getThreads()); logger.info("--------------------"); - logger.info(lja2.getMaxGpu()); - logger.info(rp.getMaxGpu()); - assertEquals(lja2.getMaxGpu(), rp.getMaxGpu()); + logger.info(lja2.getMaxGpuMemory()); + logger.info(rp.getMaxGpuMemory()); + assertEquals(lja2.getMaxGpuMemory(), rp.getMaxGpuMemory()); assertEquals(h.getName(), rp.getHost()); assertEquals(j.getName(), rp.getJob()); } @@ -351,7 +352,7 @@ public void testGetProcs() { lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); lja.setThreads(2); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(h, j, lja); @@ -370,7 +371,7 @@ public void updateMaxCores() { lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); lja.setThreads(2); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(h, j, lja); assertTrue(bookingDao.updateMaxCores(lja, 100)); @@ -403,7 +404,7 @@ public void updateMaxMemory() { lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); lja.setThreads(2); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(h, j, lja); bookingDao.updateMaxMemory(lja, 
CueUtil.GB2); @@ -424,7 +425,7 @@ public void updateMaxMemory() { @Test @Transactional @Rollback(true) - public void updateMaxGpu() { + public void updateMaxGpuMemory() { DispatchHost h = createHost(); JobDetail j = launchJob(); @@ -433,7 +434,7 @@ public void updateMaxGpu() { lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); lja.setThreads(2); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(h, j, lja); bookingDao.updateMaxMemory(lja, CueUtil.GB2); @@ -442,15 +443,15 @@ public void updateMaxGpu() { assertEquals(CueUtil.GB2, lj2.getIdleMemory()); assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(1, lj2.getMaxGpu()); + assertEquals(1, lj2.getMaxGpuMemory()); - bookingDao.updateMaxGpu(lja, 2); + bookingDao.updateMaxGpuMemory(lja, 2); lj2 = bookingDao.getLocalJobAssignment(lja.id); assertEquals(CueUtil.GB2, lj2.getIdleMemory()); assertEquals(CueUtil.GB2, lj2.getMaxMemory()); - assertEquals(2, lj2.getMaxGpu()); + assertEquals(2, lj2.getMaxGpuMemory()); } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java index 886400823..9282d7b79 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/CommentDaoTests.java @@ -140,11 +140,12 @@ public void testInsertCommentOnHost() { RenderHost host = RenderHost.newBuilder() .setName("boo") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(15290520) .setFreeSwap(2076) .setLoad(1) - .setTotalMcp(19543) + .setTotalMcp(CueUtil.GB4) .setTotalMem(15290520) .setTotalSwap(2096) .setNimbyEnabled(false) @@ -153,8 +154,8 @@ public void testInsertCommentOnHost() { .addTags("linux") .setState(HardwareState.UP) .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); CommentDetail d = new CommentDetail(); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java index 3b06eb59c..962b669bb 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DeedDaoTests.java @@ -73,11 +73,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("test_host") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(15290520) .setFreeSwap(2076) .setLoad(1) - .setTotalMcp(19543) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB16) .setNimbyEnabled(false) @@ -86,8 +87,8 @@ public DispatchHost createHost() { .addTags("general") .setState(HardwareState.UP) .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); DispatchHost dh = hostManager.createHost(host); @@ -165,48 +166,5 @@ public void tesGetDeeds() { assertEquals(1, deedDao.getDeeds(o).size()); assertEquals(d, deedDao.getDeeds(o).get(0)); } - - - @Test - @Transactional - @Rollback(true) - public void testEnableDisableBlackoutTime() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - deedDao.updateBlackoutTimeEnabled(d, true); - assertTrue(jdbcTemplate.queryForObject( - "SELECT b_blackout FROM deed WHERE pk_deed=?", - Boolean.class, d.getId())); - - deedDao.updateBlackoutTimeEnabled(d, false); - assertFalse(jdbcTemplate.queryForObject( - "SELECT b_blackout FROM deed WHERE pk_deed=?", - Boolean.class, d.getId())); - } - - @Test - @Transactional - @Rollback(true) - public void testSetBlackOutTimes() { - - DispatchHost host = createHost(); - ShowInterface s = adminManager.findShowEntity("pipe"); - OwnerEntity o = ownerManager.createOwner("squarepants", s); - DeedEntity d = deedDao.insertDeed(o, host); - - deedDao.setBlackoutTime(d, 3600, 7200); - - assertEquals(Integer.valueOf(3600), jdbcTemplate.queryForObject( - "SELECT int_blackout_start FROM deed WHERE pk_deed=?", - Integer.class, d.getId())); - - assertEquals(Integer.valueOf(7200), jdbcTemplate.queryForObject( - "SELECT int_blackout_stop FROM deed WHERE pk_deed=?", 
- Integer.class, d.getId())); - } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java new file mode 100644 index 000000000..4f6db1072 --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoFifoTests.java @@ -0,0 +1,254 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + + +package com.imageworks.spcue.test.dao.postgres; + +import java.io.File; +import java.sql.Timestamp; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import javax.annotation.Resource; + +import org.jdom.Document; +import org.jdom.Element; +import org.jdom.input.SAXBuilder; +import org.jdom.output.XMLOutputter; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.test.annotation.Rollback; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; +import org.springframework.test.context.support.AnnotationConfigContextLoader; +import org.springframework.transaction.annotation.Transactional; + +import com.imageworks.spcue.DispatchHost; +import com.imageworks.spcue.JobDetail; +import com.imageworks.spcue.config.TestAppConfig; +import com.imageworks.spcue.dao.DispatcherDao; +import com.imageworks.spcue.dao.HostDao; +import com.imageworks.spcue.dispatcher.Dispatcher; +import com.imageworks.spcue.grpc.host.HardwareState; +import com.imageworks.spcue.grpc.report.RenderHost; +import com.imageworks.spcue.service.AdminManager; +import com.imageworks.spcue.service.GroupManager; +import com.imageworks.spcue.service.HostManager; +import com.imageworks.spcue.service.JobLauncher; +import com.imageworks.spcue.service.JobManager; +import com.imageworks.spcue.test.AssumingPostgresEngine; +import com.imageworks.spcue.util.CueUtil; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +@Transactional +@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) 
+public class DispatcherDaoFifoTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Autowired + @Rule + public AssumingPostgresEngine assumingPostgresEngine; + + @Resource + DispatcherDao dispatcherDao; + + @Resource + HostDao hostDao; + + @Resource + JobManager jobManager; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + GroupManager groupManager; + + @Resource + Dispatcher dispatcher; + + @Resource + JobLauncher jobLauncher; + + private static final String HOSTNAME="beta"; + + public DispatchHost getHost() { + return hostDao.findDispatchHost(HOSTNAME); + } + + private void launchJobs(int count) throws Exception { + Document docTemplate = new SAXBuilder(true).build( + new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + docTemplate.getDocType().setSystemID("http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); + Element root = docTemplate.getRootElement(); + Element jobTemplate = root.getChild("job"); + Element depends = root.getChild("depends"); + assertEquals(jobTemplate.getAttributeValue("name"), "test"); + root.removeContent(jobTemplate); + root.removeContent(depends); + + long t = System.currentTimeMillis(); + for (int i = 0; i < count; i++) { + Document doc = (Document) docTemplate.clone(); + root = doc.getRootElement(); + Element job = (Element) jobTemplate.clone(); + job.setAttribute("name", "job" + i); + root.addContent(job); + root.addContent((Element) depends.clone()); + jobLauncher.launch(new XMLOutputter().outputString(doc)); + + // Force to set incremental ts_started to the jobs + // because current_timestamp is not updated during test. + jdbcTemplate.update("UPDATE job SET ts_started = ? 
WHERE str_name = ?", + new Timestamp(t + i), "pipe-default-testuser_job" + i); + } + } + + @Before + public void launchJob() { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); + + dispatcher.setTestMode(true); + jobLauncher.testMode = true; + } + + @After + public void resetFifoScheduling() { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder() + .setName(HOSTNAME) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) + .setFreeMem(53500) + .setFreeSwap(20760) + .setLoad(1) + .setTotalMcp(CueUtil.GB4) + .setTotalMem(8173264) + .setTotalSwap(20960) + .setNimbyEnabled(false) + .setNumProcs(2) + .setCoresPerProc(100) + .addTags("test") + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .build(); + + hostManager.createHost(host, + adminManager.findAllocationDetail("spi", "general")); + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingEnabled() { + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.FIFO); + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.FIFO); + } + + @Test + @Transactional + @Rollback(true) + public void testAllSorted() throws Exception { + int count = 10; + launchJobs(count); + + List jobs = dispatcherDao.findDispatchJobs(getHost(), count); + assertEquals(count, jobs.size()); + for (int i = 0; i < count; i++) { + assertEquals("pipe-default-testuser_job" + i, + jobManager.getJob(jobs.get(i)).getName()); + } + } + + @Test + @Transactional + @Rollback(true) + public void 
testPortionSorted() throws Exception { + int count = 100; + launchJobs(count); + + int portion = 19; + List jobs = dispatcherDao.findDispatchJobs(getHost(), (portion + 1) / 10); + assertEquals(portion, jobs.size()); + for (int i = 0; i < portion; i++) { + assertEquals("pipe-default-testuser_job" + i, + jobManager.getJob(jobs.get(i)).getName()); + } + } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingDisabled() throws Exception { + dispatcherDao.setSchedulingMode(DispatcherDao.SchedulingMode.PRIORITY_ONLY); + + int count = 10; + launchJobs(count); + + List jobs = dispatcherDao.findDispatchJobs(getHost(), count); + assertEquals(count, jobs.size()); + + List sortedJobs = new ArrayList(jobs); + Collections.sort(sortedJobs, + Comparator.comparing(jobId -> jobManager.getJob(jobId).getName())); + + for (int i = 0; i < count; i++) { + assertEquals("pipe-default-testuser_job" + i, + jobManager.getJob(sortedJobs.get(i)).getName()); + } + } + + @Test + @Transactional + @Rollback(true) + public void testGroup() throws Exception { + int count = 10; + launchJobs(count); + + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_job0"); + assertNotNull(job); + + List jobs = dispatcherDao.findDispatchJobs(getHost(), + groupManager.getGroupDetail(job)); + assertEquals(count, jobs.size()); + for (int i = 0; i < count; i++) { + assertEquals("pipe-default-testuser_job" + i, + jobManager.getJob(jobs.get(i)).getName()); + } + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java index abf9e34a7..99fe2543a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/DispatcherDaoTests.java @@ -146,11 +146,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) 
.setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) @@ -327,7 +328,7 @@ public void testFindDispatchJobs() { assertTrue(jdbcTemplate.queryForObject( "SELECT COUNT(*) FROM job WHERE str_state='PENDING'", Integer.class) > 0); - Set jobs = dispatcherDao.findDispatchJobs(host, 10); + List jobs = dispatcherDao.findDispatchJobs(host, 10); assertTrue(jobs.size() > 0); } @@ -341,7 +342,7 @@ public void testFindDispatchJobsByGroup() { assertNotNull(job); assertNotNull(job.groupId); - Set jobs = dispatcherDao.findDispatchJobs(host, + List jobs = dispatcherDao.findDispatchJobs(host, groupManager.getGroupDetail(job)); assertTrue(jobs.size() > 0); } @@ -354,7 +355,7 @@ public void testFindDispatchJobsByShow() { final JobDetail job = getJob1(); assertNotNull(job); - Set jobs = dispatcherDao.findDispatchJobs(host, + List jobs = dispatcherDao.findDispatchJobs(host, adminManager.findShowEntity("pipe"), 5); assertTrue(jobs.size() > 0); } @@ -374,7 +375,7 @@ public void testFindDispatchJobsByLocal() { lja.setThreads(1); lja.setMaxMemory(CueUtil.GB16); lja.setMaxCoreUnits(200); - lja.setMaxGpu(1); + lja.setMaxGpuMemory(1); bookingDao.insertLocalHostAssignment(host, job, lja); jobs = dispatcherDao.findLocalDispatchJobs(host); @@ -517,4 +518,11 @@ public void testHigherPriorityJobExistsMaxProcBound() { boolean isHigher = dispatcherDao.higherPriorityJobExists(job1, proc); assertFalse(isHigher); } + + @Test + @Transactional + @Rollback(true) + public void testFifoSchedulingEnabled() { + assertEquals(dispatcherDao.getSchedulingMode(), DispatcherDao.SchedulingMode.PRIORITY_ONLY); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java 
b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java index 602b68b96..6312e6502 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/FrameDaoTests.java @@ -60,6 +60,7 @@ import com.imageworks.spcue.service.JobLauncher; import com.imageworks.spcue.service.JobManager; import com.imageworks.spcue.test.AssumingPostgresEngine; +import com.imageworks.spcue.util.CueUtil; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -113,11 +114,12 @@ public void create() { RenderHost host = RenderHost.newBuilder() .setName(HOST) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) @@ -126,8 +128,8 @@ public void create() { .addAllTags(ImmutableList.of("mcore", "4core", "8g")) .setState(HardwareState.UP) .setFacility("spi") - .putAttributes("freeGpu", "512") - .putAttributes("totalGpu", "512") + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostManager.createHost(host); @@ -145,15 +147,6 @@ public JobDetail launchJob() { return jobManager.findJobDetail("pipe-dev.cue-testuser_shell_v1"); } - @Test - @Transactional - @Rollback(true) - public void testCheckRetries() { - JobDetail job = launchJob(); - frameDao.checkRetries(frameDao.findFrame(job,"0001-pass_1")); - // TODO: check to see if it actually works - } - @Test @Transactional @Rollback(true) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java index aa528be95..8700204d9 100644 --- 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/GroupDaoTests.java @@ -230,6 +230,39 @@ public void testUpdateDefaultJobMinCores() { Integer.class, group.getGroupId())); } + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMaxGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( + "SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxGpus(group, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMaxGpus(group, -1); + assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( + "SELECT int_job_max_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateDefaultJobMinGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( + "SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateDefaultJobMinGpus(group, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_job_min_gpus FROM folder WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + @Test @Transactional @Rollback(true) @@ -276,6 +309,38 @@ public void testUpdateMaxCores() { Integer.class, group.getGroupId())); } + @Test + @Transactional + @Rollback(true) + public void testUpdateMinGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMinGpus(group, 10); + assertEquals(Integer.valueOf(10), 
jdbcTemplate.queryForObject( + "SELECT int_min_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + + @Test + @Transactional + @Rollback(true) + public void testUpdateMaxGpus() { + GroupDetail group = createGroup(); + assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( + "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxGpus(group, 100); + assertEquals(Integer.valueOf(100), jdbcTemplate.queryForObject( + "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + groupDao.updateMaxGpus(group, -5); + assertEquals(Integer.valueOf(-1), jdbcTemplate.queryForObject( + "SELECT int_max_gpus FROM folder_resource WHERE pk_folder=?", + Integer.class, group.getGroupId())); + } + @Test @Transactional @Rollback(true) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java index 9327fc8dd..918b43679 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/HostDaoTests.java @@ -86,12 +86,13 @@ public static RenderHost buildRenderHost(String name) { RenderHost host = RenderHost.newBuilder() .setName(name) .setBootTime(1192369572) - .setFreeMcp(7602) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(15290520) .setFreeSwap((int) CueUtil.MB512) .setLoad(1) .setNimbyEnabled(false) - .setTotalMcp(19543) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB2) .setNimbyEnabled(false) @@ -100,8 +101,8 @@ public static RenderHost buildRenderHost(String name) { .addAllTags(ImmutableList.of("linux", "64bit")) .setState(HardwareState.UP) .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); return host; @@ -330,24 +331,6 @@ public void testIsHostLocked() { assertEquals(hostDao.isHostLocked(host),true); } - @Test - @Transactional - @Rollback(true) - public void testIsKillMode() { - hostDao.insertRenderHost(buildRenderHost(TEST_HOST), - hostManager.getDefaultAllocationDetail(), - false); - - HostEntity host = hostDao.findHostDetail(TEST_HOST); - assertFalse(hostDao.isKillMode(host)); - - jdbcTemplate.update( - "UPDATE host_stat SET int_swap_free = ?, int_mem_free = ? 
WHERE pk_host = ?", - CueUtil.MB256, CueUtil.MB256, host.getHostId()); - - assertTrue(hostDao.isKillMode(host)); - } - @Test @Transactional @Rollback(true) @@ -389,6 +372,30 @@ public void testDeleteHost() { assertEquals(hostDao.hostExists(TEST_HOST),false); } + @Test + @Transactional + @Rollback(true) + public void testDeleteDownHosts() { + for (int i = 0; i < 3; i++) { + String name = TEST_HOST + i; + hostDao.insertRenderHost(buildRenderHost(name), + hostManager.getDefaultAllocationDetail(), + false); + if (i != 1) { + HostEntity host = hostDao.findHostDetail(name); + assertEquals(name, host.name); + hostDao.updateHostState(host, HardwareState.DOWN); + } + } + + hostDao.deleteDownHosts(); + + for (int i = 0; i < 3; i++) { + String name = TEST_HOST + i; + assertEquals(hostDao.hostExists(name), i == 1); + } + } + @Test @Transactional @Rollback(true) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java index 3c51f34c1..3bfff75d6 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/JobDaoTests.java @@ -120,10 +120,14 @@ public JobDetail insertJob() { JobDetail job = this.buildJobDetail(); job.groupId = ROOT_FOLDER; job.showId = ROOT_SHOW; + job.showName = "pipe"; job.logDir = jobLogUtil.getJobLogPath(job); job.deptId = departmentDao.getDefaultDepartment().getId(); + job.deptName = departmentDao.getDefaultDepartment().getName(); job.facilityId = facilityDao.getDefaultFacility().getId(); + job.facilityName = facilityDao.getDefaultFacility().getName(); job.state = JobState.PENDING; + job.maxCoreUnits = 10000; jobDao.insertJob(job, jobLogUtil); return job; } @@ -186,6 +190,46 @@ public void testGetJob() { jobDao.getJob(job.id); } + @Test + @Transactional + @Rollback(true) + public void testGetJobDetail() { + JobDetail src = insertJob(); + JobDetail job = 
jobDao.getJobDetail(src.id); + assertEquals(job.id, src.id); + assertEquals(job.name, src.name); + assertEquals(job.showId, src.showId); + assertEquals(job.facilityId, src.facilityId); + assertEquals(job.groupId, src.groupId); + assertEquals(job.deptId, src.deptId); + assertEquals(job.state, src.state); + assertEquals(job.shot, src.shot); + assertEquals(job.user, src.user); + assertEquals(job.email, src.email); + assertEquals(job.uid, src.uid); + assertEquals(job.logDir, src.logDir); + assertEquals(job.isPaused, src.isPaused); + assertEquals(job.isAutoEat, src.isAutoEat); + assertEquals(job.totalFrames, src.totalFrames); + assertEquals(job.totalLayers, src.totalLayers); + assertEquals(job.startTime, src.startTime); + assertEquals(job.stopTime, src.stopTime); + assertEquals(job.maxRetries, src.maxRetries); + assertEquals(job.os, src.os); + assertEquals(job.facilityName, src.facilityName); + assertEquals(job.deptName, src.deptName); + assertEquals(job.showName, src.showName); + assertEquals(job.priority, src.priority); + assertEquals(job.minCoreUnits, src.minCoreUnits); + assertEquals(job.maxCoreUnits, src.maxCoreUnits); + assertEquals(job.isLocal, src.isLocal); + assertEquals(job.localHostName, src.localHostName); + assertEquals(job.localMaxCores, src.localMaxCores); + assertEquals(job.localMaxMemory, src.localMaxMemory); + assertEquals(job.localThreadNumber, src.localThreadNumber); + assertEquals(job.localMaxGpus, src.localMaxGpus); + } + @Test @Transactional @Rollback(true) @@ -639,7 +683,7 @@ public void testUpdateUsage() { JobInterface job = jobDao.findJob(spec.getJobs().get(0).detail.name); /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33); + ResourceUsage usage = new ResourceUsage(60, 33, 0); assertTrue(usage.getClockTimeSeconds() > 0); assertTrue(usage.getCoreTimeSeconds() > 0); @@ -676,6 +720,16 @@ public void testUpdateUsage() { "SELECT int_frame_fail_count FROM job_usage WHERE pk_job=?", Integer.class, job.getId())); } 
+ + @Test + @Transactional + @Rollback(true) + public void testOverrideMaxCoresAndGpus() { + jobLauncher.launch(new File("src/test/resources/conf/jobspec/override_max_cores_gpus.xml")); + JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_test"); + assertEquals(job.maxCoreUnits, 42000); + assertEquals(job.maxGpuUnits, 42); + } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java index 938107f36..189178689 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/LayerDaoTests.java @@ -20,6 +20,9 @@ package com.imageworks.spcue.test.dao.postgres; import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -42,7 +45,6 @@ import com.imageworks.spcue.LayerDetail; import com.imageworks.spcue.LayerInterface; import com.imageworks.spcue.LimitEntity; -import com.imageworks.spcue.LimitInterface; import com.imageworks.spcue.ResourceUsage; import com.imageworks.spcue.config.TestAppConfig; import com.imageworks.spcue.dao.DepartmentDao; @@ -61,9 +63,11 @@ import com.imageworks.spcue.util.FrameSet; import com.imageworks.spcue.util.JobLogUtil; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; @Transactional @@ -114,7 +118,11 @@ public void testMode() { } public LayerDetail getLayer() { + List layers = getLayers(); + return layers.get(layers.size()-1); + } + public List getLayers() { JobSpec spec = jobLauncher.parse(new File("src/test/resources/conf/jobspec/jobspec.xml")); JobDetail job = 
spec.getJobs().get(0).detail; job.groupId = ROOT_FOLDER; @@ -124,14 +132,13 @@ public LayerDetail getLayer() { job.facilityId = facilityDao.getDefaultFacility().getId(); jobDao.insertJob(job, jobLogUtil); - LayerDetail lastLayer= null; + List result = new ArrayList<>(); String limitId = limitDao.createLimit(LIMIT_NAME, LIMIT_MAX_VALUE); limitDao.createLimit(LIMIT_TEST_A, 1); limitDao.createLimit(LIMIT_TEST_B, 2); limitDao.createLimit(LIMIT_TEST_C, 3); for (BuildableLayer buildableLayer: spec.getJobs().get(0).getBuildableLayers()) { - LayerDetail layer = buildableLayer.layerDetail; FrameSet frameSet = new FrameSet(layer.range); int num_frames = frameSet.size(); @@ -145,10 +152,10 @@ public LayerDetail getLayer() { layerDao.insertLayerDetail(layer); layerDao.insertLayerEnvironment(layer, buildableLayer.env); layerDao.addLimit(layer, limitId); - lastLayer = layer; + result.add(layer); } - return lastLayer; + return result; } public JobDetail getJob() { @@ -200,16 +207,17 @@ public void testGetLayerDetail() { LayerDetail l2 = layerDao.getLayerDetail(layer); LayerDetail l3 = layerDao.getLayerDetail(layer.id); - assertEquals(l2, l3); + assertEquals(layer, l2); + assertEquals(layer, l3); } @Test @Transactional @Rollback(true) public void testGetLayerDetails() { - LayerDetail layer = getLayer(); - List ld = layerDao.getLayerDetails(getJob()); - assertEquals(ld.get(0).name, LAYER_NAME); + List wantLayers = getLayers(); + List gotLayers = layerDao.getLayerDetails(getJob()); + assertThat(gotLayers, containsInAnyOrder(wantLayers.toArray())); } @Test @@ -471,12 +479,12 @@ public void updateMinMemory() { @Test @Transactional @Rollback(true) - public void updateMinGpu() { - long gpu = CueUtil.GB; + public void updateMinGpuMemory() { + long mem = CueUtil.GB; LayerDetail layer = getLayer(); - layerDao.updateMinGpu(layer, gpu, LayerType.RENDER); - assertEquals(Long.valueOf(gpu),jdbcTemplate.queryForObject( - "SELECT int_gpu_min FROM layer WHERE pk_layer=?", + 
layerDao.updateMinGpuMemory(layer, mem, LayerType.RENDER); + assertEquals(Long.valueOf(mem),jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_min FROM layer WHERE pk_layer=?", Long.class, layer.getLayerId())); } @@ -520,7 +528,7 @@ public void isOptimizable() { layer.getLayerId()); jdbcTemplate.update( - "UPDATE layer_usage SET int_core_time_success = 3600 * 6" + + "UPDATE layer_usage SET int_core_time_success = 3600 * 6 " + "WHERE pk_layer=?", layer.getLayerId()); assertFalse(layerDao.isOptimizable(layer, 5, 3600)); @@ -530,7 +538,7 @@ public void isOptimizable() { * Assert True */ jdbcTemplate.update( - "UPDATE layer_usage SET int_core_time_success = 3500 * 5" + + "UPDATE layer_usage SET int_core_time_success = 3500 * 5 " + "WHERE pk_layer=?", layer.getLayerId()); assertTrue(layerDao.isOptimizable(layer, 5, 3600)); @@ -590,7 +598,7 @@ public void testUpdateUsage() { Integer.class, layer.getId())); /** 60 seconds of 100 core units **/ - ResourceUsage usage = new ResourceUsage(60, 33); + ResourceUsage usage = new ResourceUsage(60, 33, 0); assertTrue(usage.getClockTimeSeconds() > 0); assertTrue(usage.getCoreTimeSeconds() > 0); @@ -715,10 +723,21 @@ public void testAddLimit() { LayerInterface layerResult = layerDao.getLayer(layer.getLayerId()); List limits = layerDao.getLimits(layerResult); assertEquals(limits.size(), 4); - assertEquals(limits.get(0).id, getTestLimitId(LIMIT_NAME)); - assertEquals(limits.get(1).id, getTestLimitId(LIMIT_TEST_A)); - assertEquals(limits.get(2).id, getTestLimitId(LIMIT_TEST_B)); - assertEquals(limits.get(3).id, getTestLimitId(LIMIT_TEST_C)); + List sourceIds = Arrays.asList( + getTestLimitId(LIMIT_NAME), + getTestLimitId(LIMIT_TEST_A), + getTestLimitId(LIMIT_TEST_B), + getTestLimitId(LIMIT_TEST_C) + ); + List resultIds = Arrays.asList( + limits.get(0).id, + limits.get(1).id, + limits.get(2).id, + limits.get(3).id + ); + Collections.sort(sourceIds); + Collections.sort(resultIds); + assertEquals(sourceIds, resultIds); } @Test diff --git 
a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java index c43d98180..f6cabc23a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ProcDaoTests.java @@ -118,11 +118,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("beta") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB32) .setTotalSwap(20960) .setNimbyEnabled(false) @@ -325,8 +326,9 @@ public void testUpdateProcMemoryUsage() { procDao.insertVirtualProc(proc); procDao.verifyRunningProc(proc.getId(), frame.getId()); + byte[] children = new byte[100]; - procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000); + procDao.updateProcMemoryUsage(frame, 100, 100, 1000, 1000, 0, 0, children); } @@ -584,16 +586,17 @@ public void testFindReservedMemoryOffender() { int i = 1; List frames = dispatcherDao.findNextDispatchFrames(job, host, 6); assertEquals(6, frames.size()); - + byte[] children = new byte[100]; for (DispatchFrame frame: frames) { VirtualProc proc = VirtualProc.build(host, frame); + proc.childProcesses = children; frame.minMemory = Dispatcher.MEM_RESERVED_DEFAULT; dispatcher.dispatch(frame, proc); // Increase the memory usage as frames are added procDao.updateProcMemoryUsage(frame, - 1000*i, 1000*i, 1000*i, 1000*i); + 1000*i, 1000*i, 1000*i, 1000*i, 0, 0, children); i++; } @@ -633,7 +636,7 @@ public void testGetReservedMemory() { @Test @Transactional @Rollback(true) - public void testGetReservedGpu() { + public void testGetReservedGpuMemory() { DispatchHost host = createHost(); JobDetail job = launchJob(); @@ -645,11 +648,11 @@ 
public void testGetReservedGpu() { procDao.insertVirtualProc(proc); VirtualProc _proc = procDao.findVirtualProc(frame); - assertEquals(Long.valueOf(Dispatcher.GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( - "SELECT int_gpu_reserved FROM proc WHERE pk_proc=?", + assertEquals(Long.valueOf(Dispatcher.MEM_GPU_RESERVED_DEFAULT), jdbcTemplate.queryForObject( + "SELECT int_gpu_mem_reserved FROM proc WHERE pk_proc=?", Long.class, _proc.id)); - assertEquals(Dispatcher.GPU_RESERVED_DEFAULT, - procDao.getReservedGpu(_proc)); + assertEquals(Dispatcher.MEM_GPU_RESERVED_DEFAULT, + procDao.getReservedGpuMemory(_proc)); } @Test @@ -666,7 +669,8 @@ public void testBalanceUnderUtilizedProcs() { proc1.frameId = frame1.id; procDao.insertVirtualProc(proc1); - procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000); + byte[] children = new byte[100]; + procDao.updateProcMemoryUsage(frame1, 250000, 250000, 250000, 250000, 0, 0, children); layerDao.updateLayerMaxRSS(frame1, 250000, true); FrameDetail frameDetail2 = frameDao.findFrameDetail(job, "0002-pass_1"); @@ -676,7 +680,7 @@ public void testBalanceUnderUtilizedProcs() { proc2.frameId = frame2.id; procDao.insertVirtualProc(proc2); - procDao.updateProcMemoryUsage(frame2, 255000, 255000,255000, 255000); + procDao.updateProcMemoryUsage(frame2, 255000, 255000,255000, 255000, 0, 0, children); layerDao.updateLayerMaxRSS(frame2, 255000, true); FrameDetail frameDetail3 = frameDao.findFrameDetail(job, "0003-pass_1"); @@ -686,7 +690,7 @@ public void testBalanceUnderUtilizedProcs() { proc3.frameId = frame3.id; procDao.insertVirtualProc(proc3); - procDao.updateProcMemoryUsage(frame3, 3145728, 3145728,3145728, 3145728); + procDao.updateProcMemoryUsage(frame3, 3145728, 3145728,3145728, 3145728, 0, 0, children); layerDao.updateLayerMaxRSS(frame3,300000, true); procDao.balanceUnderUtilizedProcs(proc3, 100000); @@ -797,6 +801,7 @@ public void getProcsBySearch() { proc.frameId = f.id; proc.layerId = f.layerId; proc.showId = 
f.showId; + proc.childProcesses = "".getBytes(); procDao.insertVirtualProc(proc); } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java index fee824fc1..811cb129d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ServiceDaoTests.java @@ -70,9 +70,10 @@ public void testInsertService() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; + s.minGpuMemory = CueUtil.GB; s.threadable = false; s.tags.addAll(Sets.newHashSet(new String[] { "general"})); + s.minMemoryIncrease = CueUtil.GB4; serviceDao.insert(s); assertEquals(s, serviceDao.get("dillweed")); @@ -88,9 +89,10 @@ public void testUpdateService() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; + s.minGpuMemory = CueUtil.GB; s.threadable = false; s.tags.addAll(Sets.newHashSet(new String[] { "general"})); + s.minMemoryIncrease = CueUtil.GB; serviceDao.insert(s); assertEquals(s, serviceDao.get("dillweed")); @@ -100,10 +102,11 @@ public void testUpdateService() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = CueUtil.GB8; - s.minGpu = CueUtil.GB2; + s.minGpuMemory = CueUtil.GB2; s.threadable = true; s.tags = Sets.newLinkedHashSet(); s.tags.add("linux"); + s.minMemoryIncrease = CueUtil.GB4 + CueUtil.GB2; serviceDao.update(s); ServiceEntity s1 = serviceDao.get(s.getId()); @@ -113,6 +116,7 @@ public void testUpdateService() { assertEquals(s.minMemory, s1.minMemory); assertEquals(s.threadable, s1.threadable); assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); + assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); } @Test @@ -125,9 +129,10 @@ public void testDeleteService() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; + s.minGpuMemory = CueUtil.GB; s.threadable = 
false; s.tags.addAll(Sets.newHashSet(new String[] { "general"})); + s.minMemoryIncrease = CueUtil.GB2; serviceDao.insert(s); assertEquals(s, serviceDao.get("dillweed")); @@ -149,10 +154,11 @@ public void testInsertServiceOverride() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB; + s.minGpuMemory = CueUtil.GB; s.threadable = false; s.tags.addAll(Sets.newHashSet(new String[] { "general"})); s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; serviceDao.insert(s); assertEquals(s, serviceDao.getOverride("dillweed")); @@ -168,10 +174,11 @@ public void testUpdateServiceOverride() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB2; + s.minGpuMemory = CueUtil.GB2; s.threadable = false; s.tags.addAll(Sets.newHashSet(new String[] { "general"})); s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; serviceDao.insert(s); assertEquals(s, serviceDao.getOverride("dillweed")); @@ -182,10 +189,11 @@ public void testUpdateServiceOverride() { s.timeout = 10; s.timeout_llu = 10; s.minMemory = CueUtil.GB8; - s.minGpu = CueUtil.GB4; + s.minGpuMemory = CueUtil.GB4; s.threadable = true; s.tags = Sets.newLinkedHashSet(); s.tags.add("linux"); + s.minMemoryIncrease = CueUtil.GB4; serviceDao.update(s); ServiceEntity s1 = serviceDao.getOverride(s.getId()); @@ -195,9 +203,11 @@ public void testUpdateServiceOverride() { assertEquals(s.timeout, s1.timeout); assertEquals(s.timeout_llu, s1.timeout_llu); assertEquals(s.minMemory, s1.minMemory); - assertEquals(s.minGpu, s1.minGpu); + assertEquals(s.minGpuMemory, s1.minGpuMemory); assertEquals(s.threadable, s1.threadable); assertEquals(s.tags.toArray()[0], s1.tags.toArray()[0]); + assertEquals(s.minMemoryIncrease, s1.minMemoryIncrease); + assertEquals(s1.minMemoryIncrease, CueUtil.GB4); } @Test @@ -210,10 +220,11 @@ public void testDeleteServiceOverride() { s.timeout = 0; s.timeout_llu = 0; s.minMemory = 
CueUtil.GB4; - s.minGpu = CueUtil.GB; + s.minGpuMemory = CueUtil.GB; s.threadable = false; s.tags.addAll(Sets.newHashSet(new String[] { "general"})); s.showId = "00000000-0000-0000-0000-000000000000"; + s.minMemoryIncrease = CueUtil.GB2; serviceDao.insert(s); assertEquals(s, serviceDao.getOverride("dillweed")); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java index b27114554..ea0ed67b8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/ShowDaoTests.java @@ -71,11 +71,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("test_host") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB16) .setNimbyEnabled(false) @@ -84,8 +85,8 @@ public DispatchHost createHost() { .addTags("general") .setState(HardwareState.UP) .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); DispatchHost dh = hostManager.createHost(host); @@ -219,20 +220,20 @@ public void testUpdateActive() { public void testUpdateFrameCounters() { ShowEntity show = showDao.findShowDetail(SHOW_NAME); int frameSuccess = jdbcTemplate.queryForObject( - "SELECT int_frame_success_count FROM show WHERE pk_show=?", + "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); showDao.updateFrameCounters(show, 0); int frameSucces2 = jdbcTemplate.queryForObject( - "SELECT 
int_frame_success_count FROM show WHERE pk_show=?", + "SELECT int_frame_success_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); assertEquals(frameSuccess + 1,frameSucces2); int frameFail= jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show WHERE pk_show=?", + "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); showDao.updateFrameCounters(show, 1); int frameFail2 = jdbcTemplate.queryForObject( - "SELECT int_frame_fail_count FROM show WHERE pk_show=?", + "SELECT int_frame_fail_count FROM show_stats WHERE pk_show=?", Integer.class, show.id); assertEquals(frameFail+ 1,frameFail2); } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java index 99449337b..293359f8d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dao/postgres/WhiteboardDaoTests.java @@ -266,11 +266,12 @@ public RenderHost getRenderHost() { RenderHost host = RenderHost.newBuilder() .setName(HOST) .setBootTime(1192369572) - .setFreeMcp(7602) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem((int) Dispatcher.MEM_RESERVED_MIN * 4) .setFreeSwap(2076) .setLoad(1) - .setTotalMcp(19543) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) Dispatcher.MEM_RESERVED_MIN * 4) .setTotalSwap(2096) .setNimbyEnabled(true) @@ -278,8 +279,8 @@ public RenderHost getRenderHost() { .setCoresPerProc(400) .setState(HardwareState.DOWN) .setFacility("spi") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); return host; } @@ -330,6 +331,7 @@ public void getServiceOverride() { s.tags.add("general"); s.threadable = false; s.showId = show.getId(); + s.minMemoryIncrease = CueUtil.GB4; serviceManager.createService(s); whiteboardDao.getServiceOverride(getShow(), "test"); @@ -1246,7 +1248,7 @@ public void getRenderPartition() { jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); bookingManager.createLocalHostAssignment(hd, job, lba); whiteboardDao.getRenderPartition(lba); @@ -1263,7 +1265,7 @@ public void getRenderPartitionsByHost() { jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_dispatch_test.xml")); JobDetail job = jobManager.findJobDetail("pipe-dev.cue-testuser_shell_dispatch_test_v1"); - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1, 1); bookingManager.createLocalHostAssignment(hd, job, lba); assertEquals(1, whiteboardDao.getRenderPartitions(hd).getRenderPartitionsCount()); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java 
b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java index d99041b65..55bd44463 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuJobTests.java @@ -100,11 +100,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem((int) CueUtil.GB8) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB8) .setTotalSwap((int) CueUtil.GB2) .setNimbyEnabled(false) @@ -114,8 +115,8 @@ public void createHost() { .setState(HardwareState.UP) .setFacility("spi") .putAttributes("SP_OS", "Linux") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostManager.createHost(host, @@ -153,7 +154,7 @@ public void testDispatchGpuRemovedHostToNonGpuJob() { host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); host.idleCores = host.idleCores - Math.min(100, host.idleCores); - host.idleGpu = 0; + host.idleGpuMemory = 0; List procs = dispatcher.dispatchHost(host, job); assertEquals(0, procs.size()); } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java index 9318258ad..c61c9553f 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpuTests.java @@ -100,11 +100,12 @@ public void createHost() { RenderHost host = 
RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem((int) CueUtil.GB8) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB8) .setTotalSwap((int) CueUtil.GB2) .setNimbyEnabled(false) @@ -114,8 +115,8 @@ public void createHost() { .setState(HardwareState.UP) .setFacility("spi") .putAttributes("SP_OS", "Linux") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostManager.createHost(host, @@ -153,7 +154,7 @@ public void testDispatchGpuRemovedHostToNonGpuJob() { host.idleMemory = host.idleMemory - Math.min(CueUtil.GB4, host.idleMemory); host.idleCores = host.idleCores - Math.min(100, host.idleCores); - host.idleGpu = 0; + host.idleGpuMemory = 0; List procs = dispatcher.dispatchHost(host, job); assertEquals(1, procs.size()); } @@ -202,17 +203,20 @@ public void testDispatchHostRemoveRestoreGpu() { long idleMemoryOrig = host.idleMemory; int idleCoresOrig = host.idleCores; - long idleGpuOrig = host.idleGpu; + long idleGpuMemoryOrig = host.idleGpuMemory; + int idleGpusOrig = host.idleGpus; host.removeGpu(); - assertEquals(0, host.idleGpu); + assertEquals(0, host.idleGpuMemory); + assertEquals(0, host.idleGpus); assertEquals(idleMemoryOrig - CueUtil.GB4, host.idleMemory); assertEquals(idleCoresOrig - 100, host.idleCores); host.restoreGpu(); assertEquals(idleMemoryOrig, host.idleMemory); assertEquals(idleCoresOrig, host.idleCores); - assertEquals(idleGpuOrig, host.idleGpu); + assertEquals(idleGpuMemoryOrig, host.idleGpuMemory); + assertEquals(idleGpusOrig, host.idleGpus); } @Test @@ -222,7 +226,7 @@ public void dispatchProcToJob() { DispatchHost host = getHost(); JobDetail job = getJob(); - 
host.idleGpu = 0; + host.idleGpuMemory = 0; List procs = dispatcher.dispatchHost(host, job); VirtualProc proc = procs.get(0); dispatcher.dispatchProcToJob(proc, job); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java new file mode 100644 index 000000000..e2d1cb564 --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherGpusJobTests.java @@ -0,0 +1,278 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + + +package com.imageworks.spcue.test.dispatcher; + +import java.io.File; +import java.util.List; +import javax.annotation.Resource; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.test.annotation.Rollback; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.transaction.annotation.Transactional; + +import com.imageworks.spcue.DispatchFrame; +import com.imageworks.spcue.DispatchHost; +import com.imageworks.spcue.LayerDetail; +import com.imageworks.spcue.JobDetail; +import com.imageworks.spcue.VirtualProc; +import com.imageworks.spcue.dao.criteria.FrameSearchFactory; +import com.imageworks.spcue.dao.FrameDao; +import com.imageworks.spcue.dao.LayerDao; +import com.imageworks.spcue.depend.LayerOnLayer; +import com.imageworks.spcue.dispatcher.DispatchSupport; +import com.imageworks.spcue.dispatcher.Dispatcher; +import com.imageworks.spcue.grpc.host.HardwareState; +import com.imageworks.spcue.grpc.job.FrameState; +import com.imageworks.spcue.grpc.report.RenderHost; +import com.imageworks.spcue.service.AdminManager; +import com.imageworks.spcue.service.DependManager; +import com.imageworks.spcue.service.HostManager; +import com.imageworks.spcue.service.JobLauncher; +import com.imageworks.spcue.service.JobManager; +import com.imageworks.spcue.test.TransactionalTest; +import com.imageworks.spcue.util.CueUtil; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +@ContextConfiguration +public class CoreUnitDispatcherGpusJobTests extends TransactionalTest { + + @Resource + JobManager jobManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + HostManager hostManager; + + @Resource + AdminManager adminManager; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + LayerDao layerDao; + + @Resource + FrameDao frameDao; + + @Resource + FrameSearchFactory frameSearchFactory; + + @Resource + DependManager 
dependManager; + + private static final String HOSTNAME = "beta"; + + private static final String CPU_JOB = "pipe-default-testuser_test_cpu"; + + private static final String GPU_JOB = "pipe-default-testuser_test_gpu"; + + private static final String GPU_OVERBOOK_JOB = "pipe-default-testuser_test_gpu_overbook"; + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch( + new File("src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml")); + } + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder() + .setName(HOSTNAME) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) + .setFreeMem((int) CueUtil.GB8) + .setFreeSwap(20760) + .setLoad(0) + .setTotalMcp(CueUtil.GB4) + .setTotalMem(CueUtil.GB8) + .setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false) + .setNumProcs(40) + .setCoresPerProc(100) + .addTags("test") + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .setNumGpus(8) + .setFreeGpuMem(CueUtil.GB32) + .setTotalGpuMem(CueUtil.GB32) + .build(); + + hostManager.createHost(host, + adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchHost() { + DispatchHost host = getHost(); + + List procs = dispatcher.dispatchHost(host); + // All jobs are paused. procs should be empty. + assertTrue(procs.isEmpty()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchCpuJob() { + JobDetail job = jobManager.findJobDetail(CPU_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + // Cuebot doesn't dispatch non-GPU job to GPU host. procs should be empty. 
+ assertTrue(procs.isEmpty()); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuJob() { + JobDetail job = jobManager.findJobDetail(GPU_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 4 layers. + * - test_gpus_0_layer gpus=0 gpu_memory=1 + * - test_gpu_memory_0_layer gpus=1 gpu_memory=0 + * - test_gpus_1_layer gpus=1 gpu_memory=1 + * - test_gpus_4_kayer gpus=4 gpu_memory=7g + * + * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. + * Also job_frame_dispatch_max is 2, + * the procs should be test_gpus_0_layer and test_gpus_1_layer. + */ + assertEquals(2, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(0, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + + VirtualProc proc1 = procs.get(1); + LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); + assertEquals(layer1.id, proc1.layerId); + assertEquals(100, proc1.coresReserved); + assertEquals(3355443, proc1.memoryReserved); + assertEquals(1, proc1.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuJobWithDependency() { + JobDetail job = jobManager.findJobDetail(GPU_JOB); + LayerDetail dl0 = layerDao.findLayerDetail(job, "test_gpus_0_layer"); + LayerDetail dl1 = layerDao.findLayerDetail(job, "test_gpu_memory_0_layer"); + LayerOnLayer depend = new LayerOnLayer(dl0, dl1); + dependManager.createDepend(depend); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 4 layers. 
+ * - test_gpus_0_layer gpus=0 gpu_memory=1 + * - test_gpu_memory_0_layer gpus=1 gpu_memory=0 + * - test_gpus_1_layer gpus=1 gpu_memory=1 + * - test_gpus_4_kayer gpus=4 gpu_memory=7g + * + * Cuebot doesn't dispatch test_gpu_memory_0_layer because gpu_memory is 0. + * And test_gpus_0_layer depends on test_gpu_memory_0_layer. + * So the procs should be test_gpus_1_layer and test_gpus_4_layer. + */ + assertEquals(2, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_1_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(1, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + + VirtualProc proc1 = procs.get(1); + LayerDetail layer1 = layerDao.findLayerDetail(job, "test_gpus_4_layer"); + assertEquals(layer1.id, proc1.layerId); + assertEquals(100, proc1.coresReserved); + assertEquals(3355443, proc1.memoryReserved); + assertEquals(4, proc1.gpusReserved); + assertEquals(7340032, proc1.gpuMemoryReserved); + } + + @Test + @Transactional + @Rollback(true) + public void testDispatchGpuOverbookJob() { + JobDetail job = jobManager.findJobDetail(GPU_OVERBOOK_JOB); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host, job); + + /* + * The job contains 2 layers. + * - test_gpus_6_layer gpus=6 gpu_memory=1 + * - test_gpus_3_layer gpus=3 gpu_memory=1 + * the procs should be only test_gpus_6_layer since host only has 8 GPUs. 
+ */ + assertEquals(1, procs.size()); + + VirtualProc proc0 = procs.get(0); + LayerDetail layer0 = layerDao.findLayerDetail(job, "test_gpus_6_layer"); + assertEquals(layer0.id, proc0.layerId); + assertEquals(100, proc0.coresReserved); + assertEquals(3355443, proc0.memoryReserved); + assertEquals(6, proc0.gpusReserved); + assertEquals(1048576, proc0.gpuMemoryReserved); + } +} + diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java index adb6c404d..89112dd69 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/CoreUnitDispatcherTests.java @@ -45,6 +45,7 @@ import com.imageworks.spcue.service.JobLauncher; import com.imageworks.spcue.service.JobManager; import com.imageworks.spcue.test.TransactionalTest; +import com.imageworks.spcue.util.CueUtil; import static org.junit.Assert.assertEquals; @@ -99,11 +100,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java index baa2353fb..55a7806c0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/DispatchSupportTests.java @@ -96,11 +96,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(0) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) @@ -110,8 +111,8 @@ public void createHost() { .setState(HardwareState.UP) .setFacility("spi") .putAttributes("SP_OS", "Linux") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostManager.createHost(host, diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java new file mode 100644 index 000000000..1f452e92a --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/FrameCompleteHandlerTests.java @@ -0,0 +1,431 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +package com.imageworks.spcue.test.dispatcher; + +import java.io.File; +import java.util.List; +import javax.annotation.Resource; +import java.util.LinkedHashSet; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.test.annotation.Rollback; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.transaction.annotation.Propagation; +import org.springframework.transaction.annotation.Transactional; + +import com.imageworks.spcue.DispatchFrame; +import com.imageworks.spcue.DispatchHost; +import com.imageworks.spcue.DispatchJob; +import com.imageworks.spcue.FrameDetail; +import com.imageworks.spcue.JobDetail; +import com.imageworks.spcue.LayerDetail; +import com.imageworks.spcue.ServiceOverrideEntity; +import com.imageworks.spcue.VirtualProc; +import com.imageworks.spcue.dao.FrameDao; +import com.imageworks.spcue.dao.LayerDao; +import com.imageworks.spcue.dispatcher.Dispatcher; +import com.imageworks.spcue.dispatcher.DispatchSupport; +import com.imageworks.spcue.dispatcher.FrameCompleteHandler; +import com.imageworks.spcue.grpc.host.HardwareState; +import com.imageworks.spcue.grpc.job.FrameState; +import com.imageworks.spcue.grpc.report.FrameCompleteReport; +import com.imageworks.spcue.grpc.report.RenderHost; +import com.imageworks.spcue.grpc.report.RunningFrameInfo; +import com.imageworks.spcue.service.AdminManager; +import com.imageworks.spcue.service.HostManager; +import com.imageworks.spcue.service.JobLauncher; +import com.imageworks.spcue.service.JobManager; +import 
com.imageworks.spcue.service.ServiceManager; +import com.imageworks.spcue.test.TransactionalTest; +import com.imageworks.spcue.util.CueUtil; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@ContextConfiguration +public class FrameCompleteHandlerTests extends TransactionalTest { + + @Resource + AdminManager adminManager; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + HostManager hostManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + FrameDao frameDao; + + @Resource + LayerDao layerDao; + + @Resource + Dispatcher dispatcher; + + @Resource + DispatchSupport dispatchSupport; + + @Resource + ServiceManager serviceManager; + + private static final String HOSTNAME = "beta"; + private static final String HOSTNAME2 = "zeta"; + + @Before + public void setTestMode() { + + dispatcher.setTestMode(true); + } + + @Before + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch( + new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder() + .setName(HOSTNAME) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) + .setFreeMem((int) CueUtil.GB8) + .setFreeSwap(20760) + .setLoad(0) + .setTotalMcp(CueUtil.GB4) + .setTotalMem(CueUtil.GB8) + .setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false) + .setNumProcs(40) + .setCoresPerProc(100) + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .setNumGpus(8) + .setFreeGpuMem(CueUtil.GB16 * 8) + .setTotalGpuMem(CueUtil.GB16 * 8) + .build(); + + hostManager.createHost(host, + adminManager.findAllocationDetail("spi", "general")); + + RenderHost host2 = RenderHost.newBuilder() + .setName(HOSTNAME2) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) + .setFreeMem((int) CueUtil.GB4) + .setFreeSwap((int) CueUtil.GB4) + .setLoad(0) + .setTotalMcp(CueUtil.GB4) + .setTotalMem((int) CueUtil.GB8) + .setTotalSwap((int) CueUtil.GB8) + .setNimbyEnabled(false) + .setNumProcs(8) + .setCoresPerProc(100) + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .build(); + + hostManager.createHost(host2, + adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost(String hostname) { + return hostManager.findDispatchHost(hostname); + } + + @Test + @Transactional + @Rollback(true) + public void testGpuReport() { + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + assertEquals(7, host.idleGpus); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); + + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .build(); + 
FrameCompleteReport report = FrameCompleteReport.newBuilder() + .setFrame(info) + .setExitStatus(0) + .build(); + frameCompleteHandler.handleFrameCompleteReport(report); + + assertTrue(jobManager.isLayerComplete(layer)); + assertTrue(jobManager.isJobComplete(job)); + } + + @Test + @Transactional + @Rollback(true) + public void testGpuReportMultiple() { + JobDetail job0 = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer0_0 = layerDao.findLayerDetail(job0, "layer0"); + jobManager.setJobPaused(job0, false); + + JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); + LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); + jobManager.setJobPaused(job1, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(2, procs.size()); + + assertEquals(4, host.idleGpus); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB2, host.idleGpuMemory); + + for (VirtualProc proc : procs) { + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder() + .setFrame(info) + .setExitStatus(0) + .build(); + frameCompleteHandler.handleFrameCompleteReport(report); + } + + assertTrue(jobManager.isLayerComplete(layer0_0)); + assertTrue(jobManager.isJobComplete(job0)); + assertTrue(jobManager.isLayerComplete(layer1_0)); + assertTrue(jobManager.isJobComplete(job1)); + } + + @Test + @Transactional + @Rollback(true) + public void testGpuReportOver() { + JobDetail job1 = jobManager.findJobDetail("pipe-default-testuser_test1"); + LayerDetail layer1_0 = layerDao.findLayerDetail(job1, "layer0"); + jobManager.setJobPaused(job1, false); + + JobDetail job2 = jobManager.findJobDetail("pipe-default-testuser_test2"); + LayerDetail layer2_0 = layerDao.findLayerDetail(job2, "layer0"); + 
jobManager.setJobPaused(job2, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + + assertTrue(host.idleGpus == 5 || host.idleGpus == 2); + assertEquals(CueUtil.GB16 * 8 - CueUtil.GB, host.idleGpuMemory); + + for (VirtualProc proc : procs) { + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder() + .setFrame(info) + .setExitStatus(0) + .build(); + frameCompleteHandler.handleFrameCompleteReport(report); + } + + assertEquals(1, + (jobManager.isLayerComplete(layer1_0) ? 1 : 0) + + (jobManager.isLayerComplete(layer2_0) ? 1 : 0)); + assertEquals(1, + (jobManager.isJobComplete(job1) ? 1 : 0) + + (jobManager.isJobComplete(job2) ? 1 : 0)); + } + + private void executeDepend( + FrameState frameState, int exitStatus, int dependCount, FrameState dependState) { + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test_depend"); + LayerDetail layerFirst = layerDao.findLayerDetail(job, "layer_first"); + LayerDetail layerSecond = layerDao.findLayerDetail(job, "layer_second"); + FrameDetail frameFirst = frameDao.findFrameDetail(job, "0000-layer_first"); + FrameDetail frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); + + assertEquals(1, frameSecond.dependCount); + assertEquals(FrameState.DEPEND, frameSecond.state); + + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layerFirst.getId(), proc.getLayerId()); + assertEquals(frameFirst.getId(), proc.getFrameId()); + + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + 
.setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder() + .setFrame(info) + .setExitStatus(exitStatus) + .build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, frameState, report.getExitStatus(), + report.getFrame().getMaxRss()); + frameCompleteHandler.handlePostFrameCompleteOperations(proc, + report, dispatchJob, dispatchFrame, frameState, frameDetail); + + assertTrue(jobManager.isLayerComplete(layerFirst)); + assertFalse(jobManager.isLayerComplete(layerSecond)); + + frameSecond = frameDao.findFrameDetail(job, "0000-layer_second"); + assertEquals(dependCount, frameSecond.dependCount); + assertEquals(dependState, frameSecond.state); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnSuccess() { + assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnFailure() { + assertTrue(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.EATEN, -1, 1, FrameState.DEPEND); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnSuccessSatifyOnAny() { + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); + assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.SUCCEEDED, 0, 0, FrameState.WAITING); + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); + } + + @Test + @Transactional + @Rollback(true) + public void testDependOnFailureSatisfyOnAny() { + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(false); + 
assertFalse(frameCompleteHandler.getSatisfyDependOnlyOnFrameSuccess()); + executeDepend(FrameState.EATEN, -1, 0, FrameState.WAITING); + frameCompleteHandler.setSatisfyDependOnlyOnFrameSuccess(true); + } + + private void executeMinMemIncrease(int expected, boolean override) { + if (override) { + ServiceOverrideEntity soe = new ServiceOverrideEntity(); + soe.showId = "00000000-0000-0000-0000-000000000000"; + soe.name = "apitest"; + soe.threadable = false; + soe.minCores = 10; + soe.minMemory = (int) CueUtil.GB2; + soe.tags = new LinkedHashSet<>(); + soe.tags.add("general"); + soe.minMemoryIncrease = (int) CueUtil.GB8; + + serviceManager.createService(soe); + } + + String jobName = "pipe-default-testuser_min_mem_test"; + JobDetail job = jobManager.findJobDetail(jobName); + LayerDetail layer = layerDao.findLayerDetail(job, "test_layer"); + FrameDetail frame = frameDao.findFrameDetail(job, "0000-test_layer"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(HOSTNAME2); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + assertEquals(job.getId(), proc.getJobId()); + assertEquals(layer.getId(), proc.getLayerId()); + assertEquals(frame.getId(), proc.getFrameId()); + + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder() + .setFrame(info) + .setExitStatus(Dispatcher.EXIT_STATUS_MEMORY_FAILURE) + .build(); + + DispatchJob dispatchJob = jobManager.getDispatchJob(proc.getJobId()); + DispatchFrame dispatchFrame = jobManager.getDispatchFrame(report.getFrame().getFrameId()); + FrameDetail frameDetail = jobManager.getFrameDetail(report.getFrame().getFrameId()); + dispatchSupport.stopFrame(dispatchFrame, FrameState.DEAD, report.getExitStatus(), + report.getFrame().getMaxRss()); + 
frameCompleteHandler.handlePostFrameCompleteOperations(proc, + report, dispatchJob, dispatchFrame, FrameState.WAITING, frameDetail); + + assertFalse(jobManager.isLayerComplete(layer)); + + JobDetail ujob = jobManager.findJobDetail(jobName); + LayerDetail ulayer = layerDao.findLayerDetail(ujob, "test_layer"); + assertEquals(expected, ulayer.getMinimumMemory()); + } + + @Test + @Transactional + @Rollback(true) + public void testMinMemIncrease() { + executeMinMemIncrease(6291456, false); + } + + @Test + @Transactional + @Rollback(true) + public void testMinMemIncreaseShowOverride() { + executeMinMemIncrease(10485760, true); + } +} diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java new file mode 100644 index 000000000..de67ff26a --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HistoryControlTests.java @@ -0,0 +1,199 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + + +package com.imageworks.spcue.test.dispatcher; + +import java.io.File; +import java.util.List; +import javax.annotation.Resource; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.test.annotation.Rollback; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.transaction.annotation.Transactional; + +import com.imageworks.spcue.DispatchHost; +import com.imageworks.spcue.FrameInterface; +import com.imageworks.spcue.JobDetail; +import com.imageworks.spcue.LayerDetail; +import com.imageworks.spcue.VirtualProc; +import com.imageworks.spcue.dao.LayerDao; +import com.imageworks.spcue.dispatcher.Dispatcher; +import com.imageworks.spcue.dispatcher.FrameCompleteHandler; +import com.imageworks.spcue.grpc.host.HardwareState; +import com.imageworks.spcue.grpc.report.FrameCompleteReport; +import com.imageworks.spcue.grpc.report.RenderHost; +import com.imageworks.spcue.grpc.report.RunningFrameInfo; +import com.imageworks.spcue.service.AdminManager; +import com.imageworks.spcue.service.HostManager; +import com.imageworks.spcue.service.JobLauncher; +import com.imageworks.spcue.service.JobManager; +import com.imageworks.spcue.test.TransactionalTest; +import com.imageworks.spcue.util.CueUtil; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +@ContextConfiguration +public class HistoryControlTests extends TransactionalTest { + + @Resource + AdminManager adminManager; + + @Resource + FrameCompleteHandler frameCompleteHandler; + + @Resource + HostManager hostManager; + + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + LayerDao layerDao; + + @Resource + Dispatcher dispatcher; + + private static final String HOSTNAME = "beta"; + private static final String DELETE_HISTORY = + "DELETE FROM frame_history; " + + "DELETE FROM job_history; "; + private static final String 
DISABLE_HISTORY = + "INSERT INTO " + + "config (pk_config,str_key) " + + "VALUES " + + "(uuid_generate_v1(),'DISABLE_HISTORY');"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + public void launchJob() { + jobLauncher.testMode = true; + jobLauncher.launch( + new File("src/test/resources/conf/jobspec/jobspec_gpus_test.xml")); + } + + @Before + public void createHost() { + RenderHost host = RenderHost.newBuilder() + .setName(HOSTNAME) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) + .setFreeMem((int) CueUtil.GB8) + .setFreeSwap(20760) + .setLoad(0) + .setTotalMcp(CueUtil.GB4) + .setTotalMem(CueUtil.GB8) + .setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false) + .setNumProcs(40) + .setCoresPerProc(100) + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .setNumGpus(8) + .setFreeGpuMem(CueUtil.GB16 * 8) + .setTotalGpuMem(CueUtil.GB16 * 8) + .build(); + + hostManager.createHost(host, + adminManager.findAllocationDetail("spi", "general")); + } + + public DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + public void launchAndDeleteJob() { + launchJob(); + + JobDetail job = jobManager.findJobDetail("pipe-default-testuser_test0"); + LayerDetail layer = layerDao.findLayerDetail(job, "layer0"); + jobManager.setJobPaused(job, false); + + DispatchHost host = getHost(); + List procs = dispatcher.dispatchHost(host); + VirtualProc proc = procs.get(0); + + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .build(); + FrameCompleteReport report = FrameCompleteReport.newBuilder() + .setFrame(info) + .setExitStatus(0) + .build(); + frameCompleteHandler.handleFrameCompleteReport(report); + + assertTrue(jobManager.isLayerComplete(layer)); + 
assertTrue(jobManager.isJobComplete(job)); + + jdbcTemplate.update("DELETE FROM job WHERE pk_job=?", job.getId()); + } + + @Test + @Transactional + @Rollback(true) + public void testEnabled() { + jdbcTemplate.update(DELETE_HISTORY); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM frame_history", Integer.class)); + + launchAndDeleteJob(); + + assertEquals(Integer.valueOf(5), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(1), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM frame_history", Integer.class)); + } + + @Test + @Transactional + @Rollback(true) + public void testDisabled() { + jdbcTemplate.update(DELETE_HISTORY); + jdbcTemplate.update(DISABLE_HISTORY); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM frame_history", Integer.class)); + + launchAndDeleteJob(); + + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM job_history", Integer.class)); + assertEquals(Integer.valueOf(0), jdbcTemplate.queryForObject( + "SELECT COUNT(*) FROM frame_history", Integer.class)); + } +} + diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java new file mode 100644 index 000000000..120c620a1 --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerGpuTests.java @@ -0,0 +1,125 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +package com.imageworks.spcue.test.dispatcher; + +import javax.annotation.Resource; + +import org.junit.Before; +import org.junit.Test; +import org.springframework.test.annotation.Rollback; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.transaction.annotation.Transactional; + +import com.imageworks.spcue.DispatchHost; +import com.imageworks.spcue.dispatcher.Dispatcher; +import com.imageworks.spcue.dispatcher.HostReportHandler; +import com.imageworks.spcue.grpc.host.HardwareState; +import com.imageworks.spcue.grpc.host.LockState; +import com.imageworks.spcue.grpc.report.CoreDetail; +import com.imageworks.spcue.grpc.report.HostReport; +import com.imageworks.spcue.grpc.report.RenderHost; +import com.imageworks.spcue.service.AdminManager; +import com.imageworks.spcue.service.HostManager; +import com.imageworks.spcue.test.TransactionalTest; +import com.imageworks.spcue.util.CueUtil; + +import static org.junit.Assert.assertEquals; + +@ContextConfiguration +public class HostReportHandlerGpuTests extends TransactionalTest { + + @Resource + AdminManager adminManager; + + @Resource + HostManager hostManager; + + @Resource + HostReportHandler hostReportHandler; + + @Resource + Dispatcher dispatcher; + + private static final String HOSTNAME = "beta"; + + @Before + public void setTestMode() { + dispatcher.setTestMode(true); + } + + private static CoreDetail getCoreDetail(int total, int idle, int booked, int locked) { + return CoreDetail.newBuilder() + .setTotalCores(total) + .setIdleCores(idle) + 
.setBookedCores(booked) + .setLockedCores(locked) + .build(); + } + + private DispatchHost getHost() { + return hostManager.findDispatchHost(HOSTNAME); + } + + private static RenderHost getRenderHost() { + return RenderHost.newBuilder() + .setName(HOSTNAME) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) + .setFreeMem(CueUtil.GB8) + .setFreeSwap(CueUtil.GB2) + .setLoad(0) + .setTotalMcp(CueUtil.GB4) + .setTotalMem(CueUtil.GB8) + .setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false) + .setNumProcs(2) + .setCoresPerProc(100) + .addTags("test") + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .setNumGpus(64) + .setFreeGpuMem(1048576L * 2000) + .setTotalGpuMem(1048576L * 2048) + .build(); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReport() { + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder() + .setHost(getRenderHost()) + .setCoreInfo(cores) + .build(); + + hostReportHandler.handleHostReport(report, true); + DispatchHost host = getHost(); + assertEquals(host.lockState, LockState.OPEN); + assertEquals(host.memory, CueUtil.GB8 - 524288); + assertEquals(host.gpus, 64); + assertEquals(host.idleGpus, 64); + assertEquals(host.gpuMemory, 1048576L * 2048); + assertEquals(host.idleGpuMemory, 2147483648L); + } +} + diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java index 81fe8fd68..971df8d14 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/HostReportHandlerTests.java @@ -19,28 +19,51 @@ package com.imageworks.spcue.test.dispatcher; +import java.io.File; +import java.sql.Timestamp; +import java.util.Arrays; +import java.util.List; 
+import java.util.concurrent.ThreadPoolExecutor; import javax.annotation.Resource; +import com.imageworks.spcue.dispatcher.DispatchSupport; +import com.imageworks.spcue.dispatcher.HostReportQueue; +import com.imageworks.spcue.dispatcher.FrameCompleteHandler; +import com.imageworks.spcue.grpc.job.FrameState; import org.junit.Before; import org.junit.Test; import org.springframework.test.annotation.Rollback; import org.springframework.test.context.ContextConfiguration; import org.springframework.transaction.annotation.Transactional; +import com.imageworks.spcue.AllocationEntity; +import com.imageworks.spcue.CommentDetail; import com.imageworks.spcue.DispatchHost; import com.imageworks.spcue.dispatcher.Dispatcher; import com.imageworks.spcue.dispatcher.HostReportHandler; +import com.imageworks.spcue.FacilityInterface; +import com.imageworks.spcue.FrameDetail; import com.imageworks.spcue.grpc.host.HardwareState; import com.imageworks.spcue.grpc.host.LockState; import com.imageworks.spcue.grpc.report.CoreDetail; import com.imageworks.spcue.grpc.report.HostReport; import com.imageworks.spcue.grpc.report.RenderHost; +import com.imageworks.spcue.grpc.report.RunningFrameInfo; +import com.imageworks.spcue.grpc.report.FrameCompleteReport; import com.imageworks.spcue.service.AdminManager; +import com.imageworks.spcue.service.CommentManager; import com.imageworks.spcue.service.HostManager; +import com.imageworks.spcue.service.JobLauncher; +import com.imageworks.spcue.service.JobManager; import com.imageworks.spcue.test.TransactionalTest; import com.imageworks.spcue.util.CueUtil; +import com.imageworks.spcue.VirtualProc; +import com.imageworks.spcue.LayerDetail; + +import java.util.UUID; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; @ContextConfiguration public class HostReportHandlerTests extends TransactionalTest { @@ -54,10 +77,28 @@ public class HostReportHandlerTests extends TransactionalTest { @Resource HostReportHandler 
hostReportHandler; + @Resource + FrameCompleteHandler frameCompleteHandler; + @Resource Dispatcher dispatcher; + @Resource + JobLauncher jobLauncher; + + @Resource + JobManager jobManager; + + @Resource + CommentManager commentManager; + private static final String HOSTNAME = "beta"; + private static final String NEW_HOSTNAME = "gamma"; + private String hostname; + private String hostname2; + private static final String SUBJECT_COMMENT_FULL_TEMP_DIR = "Host set to REPAIR for not having enough storage " + + "space on the temporary directory (mcp)"; + private static final String CUEBOT_COMMENT_USER = "cuebot"; @Before public void setTestMode() { @@ -66,7 +107,11 @@ public void setTestMode() { @Before public void createHost() { - hostManager.createHost(getRenderHost(), + hostname = UUID.randomUUID().toString().substring(0, 8); + hostname2 = UUID.randomUUID().toString().substring(0, 8); + hostManager.createHost(getRenderHost(hostname), + adminManager.findAllocationDetail("spi","general")); + hostManager.createHost(getRenderHost(hostname2), adminManager.findAllocationDetail("spi","general")); } @@ -79,25 +124,54 @@ private static CoreDetail getCoreDetail(int total, int idle, int booked, int loc .build(); } - private DispatchHost getHost() { - return hostManager.findDispatchHost(HOSTNAME); + private DispatchHost getHost(String hostname) { + return hostManager.findDispatchHost(hostname); + } + + private static RenderHost.Builder getRenderHostBuilder(String hostname) { + return RenderHost.newBuilder() + .setName(hostname) + .setBootTime(1192369572) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) + .setFreeMem(CueUtil.GB8) + .setFreeSwap(CueUtil.GB2) + .setLoad(0) + .setTotalMcp(CueUtil.GB4) + .setTotalMem(CueUtil.GB8) + .setTotalSwap(CueUtil.GB2) + .setNimbyEnabled(false) + .setNumProcs(16) + .setCoresPerProc(100) + .addTags("test") + .setState(HardwareState.UP) + .setFacility("spi") + .putAttributes("SP_OS", "Linux") + .setNumGpus(0) + .setFreeGpuMem(0) + .setTotalGpuMem(0); + } + + private static RenderHost getRenderHost(String hostname) { + return getRenderHostBuilder(hostname).build(); } - private static RenderHost getRenderHost() { + private static RenderHost getNewRenderHost(String tags) { return RenderHost.newBuilder() - .setName(HOSTNAME) + .setName(NEW_HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) - .setFreeMem(53500) - .setFreeSwap(20760) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) + .setFreeMem(CueUtil.GB8) + .setFreeSwap(CueUtil.GB2) .setLoad(0) .setTotalMcp(195430) - .setTotalMem(8173264) - .setTotalSwap(20960) + .setTotalMem(CueUtil.GB8) + .setTotalSwap(CueUtil.GB2) .setNimbyEnabled(false) .setNumProcs(2) .setCoresPerProc(100) - .addTags("test") + .addTags(tags) .setState(HardwareState.UP) .setFacility("spi") .putAttributes("SP_OS", "Linux") @@ -109,17 +183,439 @@ private static RenderHost getRenderHost() { @Test @Transactional @Rollback(true) - public void testHandleHostReport() { - boolean isBoot = false; + public void testHandleHostReport() throws InterruptedException { + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report1 = HostReport.newBuilder() + .setHost(getRenderHost(hostname)) + .setCoreInfo(cores) + .build(); + HostReport report2 = HostReport.newBuilder() + .setHost(getRenderHost(hostname2)) + .setCoreInfo(cores) + .build(); + HostReport report1_2 = HostReport.newBuilder() + .setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 100, 0)) + .build(); + + hostReportHandler.handleHostReport(report1, 
false); + DispatchHost host = getHost(hostname); + assertEquals(LockState.OPEN, host.lockState); + assertEquals(HardwareState.UP, host.hardwareState); + hostReportHandler.handleHostReport(report1_2, false); + host = getHost(hostname); + assertEquals(HardwareState.UP, host.hardwareState); + + // Test Queue thread handling + ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + // Expecting results from a ThreadPool based class on JUnit is tricky + // A future test will be developed in the future to better address the behavior of + // this feature + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report2); // HOSTNAME2 + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1_2); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithNewAllocation() { + FacilityInterface facility = adminManager.getFacility( + "AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0"); + assertEquals(facility.getName(), "spi"); + + AllocationEntity detail = new AllocationEntity(); + detail.name = "test"; + detail.tag = "test"; + adminManager.createAllocation(facility, detail); + detail = adminManager.findAllocationDetail("spi", "test"); + + boolean isBoot = true; CoreDetail cores = getCoreDetail(200, 200, 0, 0); HostReport report = HostReport.newBuilder() - .setHost(getRenderHost()) + .setHost(getNewRenderHost("test")) .setCoreInfo(cores) .build(); hostReportHandler.handleHostReport(report, isBoot); - DispatchHost host = getHost(); - assertEquals(host.lockState, LockState.OPEN); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), detail.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithExistentAllocation() { + AllocationEntity alloc = 
adminManager.getAllocationDetail( + "00000000-0000-0000-0000-000000000006"); + assertEquals(alloc.getName(), "spi.general"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder() + .setHost(getNewRenderHost("general")) + .setCoreInfo(cores) + .build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), alloc.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithNonExistentTags() { + AllocationEntity alloc = adminManager.getAllocationDetail( + "00000000-0000-0000-0000-000000000002"); + assertEquals(alloc.getName(), "lax.unassigned"); + + boolean isBoot = true; + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + HostReport report = HostReport.newBuilder() + .setHost(getNewRenderHost("nonexistent")) + .setCoreInfo(cores) + .build(); + + hostReportHandler.handleHostReport(report, isBoot); + DispatchHost host = hostManager.findDispatchHost(NEW_HOSTNAME); + assertEquals(host.getAllocationId(), alloc.id); + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithFullTemporaryDirectories() { + // Create CoreDetail + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + + /* + * Test 1: + * Precondition: + * - HardwareState=UP + * Action: + * - Receives a HostReport with freeTempDir < dispatcher.min_bookable_free_temp_dir_kb (opencue.properties) + * Postcondition: + * - Host hardwareState changes to REPAIR + * - A comment is created with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER + * */ + // Create HostReport + HostReport report1 = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(1024L).build()) + .setCoreInfo(cores) + .build(); + // Call handleHostReport() => Create the comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the + // host's hardwareState to REPAIR + 
hostReportHandler.handleHostReport(report1, false); + // Get host + DispatchHost host = getHost(hostname); + // Get list of comments by host, user, and subject + List comments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, + SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is 1 comment + assertEquals(comments.size(), 1); + // Get host comment + CommentDetail comment = comments.get(0); + // Check if the comment has the user = CUEBOT_COMMENT_USER + assertEquals(comment.user, CUEBOT_COMMENT_USER); + // Check if the comment has the subject = SUBJECT_COMMENT_FULL_TEMP_DIR + assertEquals(comment.subject, SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is REPAIR + assertEquals(HardwareState.REPAIR, host.hardwareState); + // Test Queue thread handling + ThreadPoolExecutor queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + + /* + * Test 2: + * Precondition: + * - HardwareState=REPAIR + * - There is a comment for the host with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER + * Action: + * - Receives a HostReport with freeTempDir >= dispatcher.min_bookable_free_temp_dir_kb (opencue.properties) + * Postcondition: + * - Host hardwareState changes to UP + * - Comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER gets deleted + * */ + // Set the host freeTempDir to the minimum size required = 1GB (1048576 KB) + HostReport report2 = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()) + .setCoreInfo(cores) + .build(); + // Call handleHostReport() => Delete the comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and change the + // host's hardwareState to UP + 
hostReportHandler.handleHostReport(report2, false); + // Get host + host = getHost(hostname); + // Get list of comments by host, user, and subject + comments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, + SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is no comment associated with the host + assertEquals(comments.size(), 0); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is UP + assertEquals(HardwareState.UP, host.hardwareState); + // Test Queue thread handling + queue = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report1); // HOSTNAME + hostReportHandler.queueHostReport(report1); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testHandleHostReportWithHardwareStateRepairNotRelatedToFullTempDir() { + // Create CoreDetail + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + + /* + * Test if host.hardwareState == HardwareState.REPAIR + * (Not related to freeMcp < dispatcher.min_bookable_free_temp_dir_kb (opencue.properties)) + * + * - There is no comment with subject=SUBJECT_COMMENT_FULL_TEMP_DIR and user=CUEBOT_COMMENT_USER associated with + * the host + * The host.hardwareState continues as HardwareState.REPAIR + * */ + // Create HostReport + HostReport report = HostReport.newBuilder() + .setHost(getRenderHostBuilder(hostname).setFreeMcp(CueUtil.GB).build()) + .setCoreInfo(cores) + .build(); + // Get host + DispatchHost host = getHost(hostname); + // Host's HardwareState set to REPAIR + hostManager.setHostState(host, HardwareState.REPAIR); + host.hardwareState = HardwareState.REPAIR; + // Get list of comments by host, user, and subject + List hostComments = commentManager.getCommentsByHostUserAndSubject(host, CUEBOT_COMMENT_USER, + SUBJECT_COMMENT_FULL_TEMP_DIR); + // Check if there is no comment + assertEquals(hostComments.size(), 0); + // There is no
comment to delete + boolean commentsDeleted = commentManager.deleteCommentByHostUserAndSubject(host, + CUEBOT_COMMENT_USER, SUBJECT_COMMENT_FULL_TEMP_DIR); + assertFalse(commentsDeleted); + // Call handleHostReport() + hostReportHandler.handleHostReport(report, false); + // Check host lock state + assertEquals(LockState.OPEN, host.lockState); + // Check if host hardware state is REPAIR + assertEquals(HardwareState.REPAIR, host.hardwareState); + // Test Queue thread handling + ThreadPoolExecutor queueThread = hostReportHandler.getReportQueue(); + // Make sure jobs flow normally without any nullpointer exception + hostReportHandler.queueHostReport(report); // HOSTNAME + hostReportHandler.queueHostReport(report); // HOSTNAME + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAndLlu() { + jobLauncher.testMode = true; + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + CoreDetail cores = getCoreDetail(200, 200, 0, 0); + long now = System.currentTimeMillis(); + + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .setLluTime(now / 1000) + .setMaxRss(420000) + .build(); + HostReport report = HostReport.newBuilder() + .setHost(getRenderHost(hostname)) + .setCoreInfo(cores) + .addFrames(info) + .build(); + + hostReportHandler.handleHostReport(report, false); + + FrameDetail frame = jobManager.getFrameDetail(proc.getFrameId()); + assertEquals(frame.dateLLU, new Timestamp(now / 1000 * 1000)); + assertEquals(420000, frame.maxRss); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionRss() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + + jobLauncher.launch(new 
File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + // 1.6 = 1 + dispatcher.oom_frame_overboard_allowed_threshold + long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * 1.6); + + // Test rss overboard + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .setRss(memoryOverboard) + .setMaxRss(memoryOverboard) + .build(); + HostReport report = HostReport.newBuilder() + .setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)) + .addFrames(info) + .build(); + + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionMaxRss() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_simple.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(1, procs.size()); + VirtualProc proc = procs.get(0); + + // 0.6 = dispatcher.oom_frame_overboard_allowed_threshold + long memoryOverboard = (long) Math.ceil((double) proc.memoryReserved * + (1.0 + (2 * 0.6))); + + // Test rss>90% and maxRss overboard + RunningFrameInfo info = RunningFrameInfo.newBuilder() + .setJobId(proc.getJobId()) + .setLayerId(proc.getLayerId()) + .setFrameId(proc.getFrameId()) + .setResourceId(proc.getProcId()) + .setRss((long)Math.ceil(0.95 * proc.memoryReserved)) + .setMaxRss(memoryOverboard) + .build(); + HostReport report = HostReport.newBuilder() + .setHost(getRenderHost(hostname)) + .setCoreInfo(getCoreDetail(200, 200, 0, 
0)) + .addFrames(info) + .build(); + + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + } + + @Test + @Transactional + @Rollback(true) + public void testMemoryAggressionMemoryWarning() { + jobLauncher.testMode = true; + dispatcher.setTestMode(true); + jobLauncher.launch(new File("src/test/resources/conf/jobspec/jobspec_multiple_frames.xml")); + + DispatchHost host = getHost(hostname); + List procs = dispatcher.dispatchHost(host); + assertEquals(3, procs.size()); + VirtualProc proc1 = procs.get(0); + VirtualProc proc2 = procs.get(1); + VirtualProc proc3 = procs.get(2); + + // Ok + RunningFrameInfo info1 = RunningFrameInfo.newBuilder() + .setJobId(proc1.getJobId()) + .setLayerId(proc1.getLayerId()) + .setFrameId(proc1.getFrameId()) + .setResourceId(proc1.getProcId()) + .setRss(CueUtil.GB2) + .setMaxRss(CueUtil.GB2) + .build(); + + // Overboard Rss + RunningFrameInfo info2 = RunningFrameInfo.newBuilder() + .setJobId(proc2.getJobId()) + .setLayerId(proc2.getLayerId()) + .setFrameId(proc2.getFrameId()) + .setResourceId(proc2.getProcId()) + .setRss(CueUtil.GB4) + .setMaxRss(CueUtil.GB4) + .build(); + + // Overboard Rss + long memoryUsedProc3 = CueUtil.GB8; + RunningFrameInfo info3 = RunningFrameInfo.newBuilder() + .setJobId(proc3.getJobId()) + .setLayerId(proc3.getLayerId()) + .setFrameId(proc3.getFrameId()) + .setResourceId(proc3.getProcId()) + .setRss(memoryUsedProc3) + .setMaxRss(memoryUsedProc3) + .build(); + + RenderHost hostAfterUpdate = getRenderHostBuilder(hostname).setFreeMem(0).build(); + + HostReport report = HostReport.newBuilder() + .setHost(hostAfterUpdate) + .setCoreInfo(getCoreDetail(200, 200, 0, 0)) + .addAllFrames(Arrays.asList(info1, info2, info3)) + .build(); + + // Get layer state before report gets sent + LayerDetail layerBeforeIncrease = jobManager.getLayerDetail(proc3.getLayerId()); + + // In this case, 
killing one job should be enough to get the machine to a safe state + long killCount = DispatchSupport.killedOffenderProcs.get(); + hostReportHandler.handleHostReport(report, false); + assertEquals(killCount + 1, DispatchSupport.killedOffenderProcs.get()); + + // Confirm the frame will be set to retry after its completion has been processed + + RunningFrameInfo runningFrame = RunningFrameInfo.newBuilder() + .setFrameId(proc3.getFrameId()) + .setFrameName("frame_name") + .setLayerId(proc3.getLayerId()) + .setRss(memoryUsedProc3) + .setMaxRss(memoryUsedProc3) + .setResourceId(proc3.id) + .build(); + FrameCompleteReport completeReport = FrameCompleteReport.newBuilder() + .setHost(hostAfterUpdate) + .setFrame(runningFrame) + .setExitSignal(9) + .setRunTime(1) + .setExitStatus(1) + .build(); + + frameCompleteHandler.handleFrameCompleteReport(completeReport); + FrameDetail killedFrame = jobManager.getFrameDetail(proc3.getFrameId()); + LayerDetail layer = jobManager.getLayerDetail(proc3.getLayerId()); + assertEquals(FrameState.WAITING, killedFrame.state); + // Memory increases are processed in two different places: one will set the new value to proc.reserved + 2GB + // and the other will set to the maximum reported proc.maxRss; the end value will be whichever is higher.
+ // In this case, proc.maxRss + assertEquals(Math.max(memoryUsedProc3, layerBeforeIncrease.getMinimumMemory() + CueUtil.GB2), + layer.getMinimumMemory()); } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java index 88f195111..a7218b47a 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/LocalDispatcherTests.java @@ -95,11 +95,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(0) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) @@ -109,8 +110,8 @@ public void createHost() { .setFacility("spi") .addTags("test") .putAttributes("SP_OS", "Linux") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostManager.createHost(host, @@ -171,7 +172,7 @@ public void testDispatchHostAutoDetectLayer() { JobDetail job = getJob(); LayerInterface layer = jobManager.getLayers(job).get(0); - LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, layer, lba); List procs = localDispatcher.dispatchHost(host); @@ -205,7 +206,7 @@ public void testDispatchHostAutoDetectFrame() { LayerInterface layer = jobManager.getLayers(job).get(0); FrameInterface frame = jobManager.findFrame(layer, 5); - LocalHostAssignment lba = new 
LocalHostAssignment(200, 1, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, frame, lba); List procs = localDispatcher.dispatchHost(host); @@ -228,7 +229,7 @@ public void testDispatchHostToLocalJob() { DispatchHost host = getHost(); JobDetail job = getJob(); - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, job, lba); List procs = localDispatcher.dispatchHost(host, job); @@ -258,7 +259,7 @@ public void testDispatchHostToLocalLayer() { JobDetail job = getJob(); LayerInterface layer = jobManager.getLayers(job).get(0); - LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(300, 1, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, layer, lba); List procs = localDispatcher.dispatchHost(host, layer); @@ -292,7 +293,7 @@ public void testDispatchHostToLocalFrame() { LayerInterface layer = jobManager.getLayers(job).get(0); FrameInterface frame = jobManager.findFrame(layer, 5); - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, frame, lba); List procs = localDispatcher.dispatchHost(host, frame); @@ -317,7 +318,7 @@ public void testDispatchHostToLocalFrameTwice() { LayerInterface layer = jobManager.getLayers(job).get(0); FrameInterface frame = jobManager.findFrame(layer, 5); - LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(200, 1, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, frame, lba); List procs = localDispatcher.dispatchHost(host, frame); @@ -345,7 +346,7 @@ public void 
testDispatchHostToLocalJobDeficit() { DispatchHost host = getHost(); JobDetail job = getJob(); - LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 1); + LocalHostAssignment lba = new LocalHostAssignment(800, 8, CueUtil.GB8, 0, 0); bookingManager.createLocalHostAssignment(host, job, lba); List procs = localDispatcher.dispatchHost(host, job); @@ -365,7 +366,7 @@ public void testDispatchHostToLocalJobDeficit() { * Now, lower our min cores to create a deficit. */ assertFalse(bookingManager.hasResourceDeficit(host)); - bookingManager.setMaxResources(lba, 700, 0, 1); + bookingManager.setMaxResources(lba, 700, 0, 0, 0); assertTrue(bookingManager.hasResourceDeficit(host)); } } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java index 25ccf69c5..70e3db4af 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/RedirectManagerTests.java @@ -60,6 +60,7 @@ import com.imageworks.spcue.service.RedirectService; import com.imageworks.spcue.service.Whiteboard; import com.imageworks.spcue.util.Convert; +import com.imageworks.spcue.util.CueUtil; import static org.hamcrest.Matchers.emptyString; import static org.hamcrest.Matchers.is; @@ -137,11 +138,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java index 4211c9866..7d02d44e8 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/StrandedCoreTests.java @@ -43,6 +43,7 @@ import com.imageworks.spcue.service.JobLauncher; import com.imageworks.spcue.service.JobManager; import com.imageworks.spcue.test.TransactionalTest; +import com.imageworks.spcue.util.CueUtil; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -98,11 +99,12 @@ public void createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java index a34cc1d3e..7654570a0 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/dispatcher/TestBookingQueue.java @@ -54,6 +54,9 @@ public class TestBookingQueue extends AbstractTransactionalJUnit4SpringContextTe @Resource HostManager hostManager; + @Resource + BookingQueue bookingQueue; + private static final String HOSTNAME = "beta"; @Before @@ -61,11 +64,12 @@ public void create() { RenderHost host = RenderHost.newBuilder() .setName(HOSTNAME) .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem(8173264) .setTotalSwap(20960) .setNimbyEnabled(false) @@ -74,8 +78,8 @@ public void create() { .setState(HardwareState.UP) .setFacility("spi") .addAllTags(ImmutableList.of("mcore", "4core", "8g")) - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostManager.createHost(host); @@ -86,15 +90,21 @@ public void create() { @Rollback(true) public void testBookingQueue() { + int healthThreshold = 10; + int minUnhealthyPeriodMin = 3; + int queueCapacity = 2000; + int corePoolSize = 10; + int maxPoolSize = 14; + DispatchHost host1 = hostDao.findDispatchHost(HOSTNAME); host1.idleCores = 500; DispatchHost host2 = hostDao.findDispatchHost(HOSTNAME); DispatchHost host3 = hostDao.findDispatchHost(HOSTNAME); - BookingQueue queue = new BookingQueue(1000); - - queue.execute(new DispatchBookHost(host2,dispatcher)); - queue.execute(new DispatchBookHost(host3,dispatcher)); - queue.execute(new DispatchBookHost(host1,dispatcher)); + BookingQueue queue = new BookingQueue(healthThreshold, minUnhealthyPeriodMin, queueCapacity, + corePoolSize, maxPoolSize); + bookingQueue.execute(new DispatchBookHost(host2,dispatcher)); + bookingQueue.execute(new DispatchBookHost(host3,dispatcher)); + bookingQueue.execute(new DispatchBookHost(host1,dispatcher)); try { Thread.sleep(10000); } catch (InterruptedException e) { diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java new file mode 100644 index 000000000..3ac9bcc93 --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/FakeStreamObserver.java @@ -0,0 +1,37 @@ + +/* + * Copyright Contributors to the OpenCue 
Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package com.imageworks.spcue.test.servant; + +import io.grpc.stub.StreamObserver; + +public class FakeStreamObserver implements StreamObserver { + + @Override + public void onNext(T value) { + } + + @Override + public void onError(Throwable t) { + } + + @Override + public void onCompleted() { + } +} + diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java new file mode 100644 index 000000000..ed2a83cd6 --- /dev/null +++ b/cuebot/src/test/java/com/imageworks/spcue/test/servant/ManageAllocationTests.java @@ -0,0 +1,154 @@ + +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +package com.imageworks.spcue.test.servant; + +import javax.annotation.Resource; + +import io.grpc.stub.StreamObserver; + +import org.junit.Test; +import org.springframework.dao.EmptyResultDataAccessException; +import org.springframework.test.annotation.Rollback; +import org.springframework.test.context.ContextConfiguration; +import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; +import org.springframework.test.context.support.AnnotationConfigContextLoader; +import org.springframework.transaction.annotation.Transactional; + +import com.imageworks.spcue.AllocationEntity; +import com.imageworks.spcue.config.TestAppConfig; +import com.imageworks.spcue.dao.AllocationDao; +import com.imageworks.spcue.dao.FacilityDao; +import com.imageworks.spcue.grpc.facility.AllocCreateRequest; +import com.imageworks.spcue.grpc.facility.AllocCreateResponse; +import com.imageworks.spcue.grpc.facility.AllocDeleteRequest; +import com.imageworks.spcue.grpc.facility.AllocDeleteResponse; +import com.imageworks.spcue.grpc.facility.AllocSetDefaultRequest; +import com.imageworks.spcue.grpc.facility.AllocSetDefaultResponse; +import com.imageworks.spcue.grpc.facility.Allocation; +import com.imageworks.spcue.grpc.facility.Facility; +import com.imageworks.spcue.servant.ManageAllocation; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; + + +@Transactional +@ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) +public class ManageAllocationTests extends AbstractTransactionalJUnit4SpringContextTests { + + @Resource + AllocationDao allocationDao; + + @Resource + FacilityDao facilityDao; + + @Resource + ManageAllocation manageAllocation; + + @Test + @Transactional + @Rollback(true) + public void testCreate() { + Facility facility = Facility.newBuilder() + .setName(facilityDao.getFacility("spi").getName()) + .build(); + + // Use . 
name + AllocCreateRequest request = AllocCreateRequest.newBuilder() + .setName("spi.test_tag") + .setTag("test_tag") + .setFacility(facility) + .build(); + + FakeStreamObserver responseObserver = + new FakeStreamObserver(); + manageAllocation.create(request, responseObserver); + + allocationDao.findAllocationEntity("spi", "test_tag"); + } + + @Test + @Transactional + @Rollback(true) + public void testDelete() { + Facility facility = Facility.newBuilder() + .setName(facilityDao.getFacility("spi").getName()) + .build(); + + // Non . name should work too. + AllocCreateRequest createRequest = AllocCreateRequest.newBuilder() + .setName("test_tag") + .setTag("test_tag") + .setFacility(facility) + .build(); + + FakeStreamObserver createResponseObserver = + new FakeStreamObserver(); + manageAllocation.create(createRequest, createResponseObserver); + + Allocation allocation = Allocation.newBuilder() + .setName("spi.test_tag") + .setTag("test_tag") + .setFacility("spi") + .build(); + + AllocDeleteRequest deleteRequest = AllocDeleteRequest.newBuilder() + .setAllocation(allocation) + .build(); + + FakeStreamObserver deleteResponseObserver = + new FakeStreamObserver(); + + manageAllocation.delete(deleteRequest, deleteResponseObserver); + + try { + allocationDao.findAllocationEntity("spi", "test_tag"); + fail("Expected exception"); + } catch (EmptyResultDataAccessException e) { + assertEquals(e.getMessage(), + "Incorrect result size: expected 1, actual 0"); + } + } + + @Test + @Transactional + @Rollback(true) + public void testSetDefault() { + AllocationEntity alloc = allocationDao.getDefaultAllocationEntity(); + assertEquals(alloc.getName(), "lax.unassigned"); + + Allocation allocation = Allocation.newBuilder() + .setName("spi.general") + .setTag("general") + .setFacility("spi") + .build(); + AllocSetDefaultRequest request = AllocSetDefaultRequest.newBuilder() + .setAllocation(allocation) + .build(); + + FakeStreamObserver observer = + new FakeStreamObserver(); + 
manageAllocation.setDefault(request, observer); + + alloc = allocationDao.getDefaultAllocationEntity(); + assertEquals(alloc.getName(), "spi.general"); + } +} + + diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java index 8f1d24ffe..c4538f79f 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/AdminManagerTests.java @@ -65,6 +65,34 @@ public void createAllocation() { adminManager.createAllocation(facilityDao.getDefaultFacility(), a); } + @Test + @Transactional + @Rollback(true) + public void deleteAllocation() { + AllocationEntity a = new AllocationEntity(); + a.name = facilityDao.getDefaultFacility().getName() + "." + TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + adminManager.deleteAllocation(a); + } + + @Test + @Transactional + @Rollback(true) + public void setDefaultAllocation() { + AllocationEntity a = adminManager.getDefaultAllocation(); + assertEquals(a.name, facilityDao.getDefaultFacility().getName() + ".unassigned"); + + a = new AllocationEntity(); + a.name = TEST_ALLOC_NAME; + a.tag = "general"; + adminManager.createAllocation(facilityDao.getDefaultFacility(), a); + adminManager.setDefaultAllocation(a); + + a = adminManager.getDefaultAllocation(); + assertEquals(a.name, facilityDao.getDefaultFacility().getName() + "." 
+ TEST_ALLOC_NAME); + } + @Test @Transactional @Rollback(true) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java index e94705898..1e894eb1c 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/BookingManagerTests.java @@ -112,11 +112,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("test_host") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. + .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB16) .setNimbyEnabled(false) @@ -125,8 +126,8 @@ public DispatchHost createHost() { .setState(HardwareState.UP) .setFacility("spi") .addTags("general") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); DispatchHost dh = hostManager.createHost(host); @@ -319,23 +320,23 @@ public void setMaxResources() { /* * Lower the cores. */ - bookingManager.setMaxResources(lja, 100, CueUtil.GB2, CueUtil.MB256); + bookingManager.setMaxResources(lja, 100, CueUtil.GB2, 1, CueUtil.MB256); LocalHostAssignment l2 = bookingManager.getLocalHostAssignment(lja.id); assertEquals(100, l2.getMaxCoreUnits()); assertEquals(CueUtil.GB2, l2.getMaxMemory()); - assertEquals(CueUtil.MB256, l2.getMaxGpu()); + assertEquals(CueUtil.MB256, l2.getMaxGpuMemory()); /* * Raise the values. 
*/ - bookingManager.setMaxResources(lja, 200, CueUtil.GB4, CueUtil.MB512); + bookingManager.setMaxResources(lja, 200, CueUtil.GB4, 1, CueUtil.MB512); l2 = bookingManager.getLocalHostAssignment(lja.id); assertEquals(200, l2.getMaxCoreUnits()); assertEquals(CueUtil.GB4, l2.getMaxMemory()); - assertEquals(CueUtil.MB512, l2.getMaxGpu()); + assertEquals(CueUtil.MB512, l2.getMaxGpuMemory()); } @Test @@ -351,7 +352,7 @@ public void setIllegalMaxResources() { LocalHostAssignment lja = new LocalHostAssignment(); lja.setMaxCoreUnits(200); lja.setMaxMemory(CueUtil.GB4); - lja.setMaxGpu(CueUtil.MB512); + lja.setMaxGpuMemory(CueUtil.MB512); lja.setThreads(2); bookingManager.createLocalHostAssignment(h, j, lja); @@ -359,7 +360,7 @@ public void setIllegalMaxResources() { /* * Raise the cores too high */ - bookingManager.setMaxResources(lja, 800, CueUtil.GB2, 0); + bookingManager.setMaxResources(lja, 800, CueUtil.GB2, 0, 0); } @Test diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java index b9bd5b5ac..4dd2bd13f 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/DepartmentManagerTests.java @@ -38,7 +38,6 @@ import com.imageworks.spcue.dao.ShowDao; import com.imageworks.spcue.service.AdminManager; import com.imageworks.spcue.service.DepartmentManager; -import com.imageworks.spcue.test.AssumingTrackitEnabled; import static org.junit.Assert.assertTrue; @@ -47,10 +46,6 @@ @ContextConfiguration(classes=TestAppConfig.class, loader=AnnotationConfigContextLoader.class) public class DepartmentManagerTests extends AbstractTransactionalJUnit4SpringContextTests { - @Autowired - @Rule - public AssumingTrackitEnabled assumingTrackitEnabled; - @Resource DepartmentManager departmentManager; @@ -78,13 +73,6 @@ public void enableTiManaged() { 
departmentManager.disableTiManaged(rp); departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); - - // TODO(bcipriano) Once this test is enabled this assert should be updated to use - // DAO objects instead of querying the db directly. - assertTrue(0 < jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task,point WHERE point.pk_point = task.pk_point AND " + - "point.pk_dept=? AND point.pk_show=?", - Integer.class, dept.getDepartmentId(), show.getShowId())); } @Test @@ -105,21 +93,8 @@ public void updateTiManagedTasks() { departmentManager.disableTiManaged(rp); departmentManager.enableTiManaged(rp, TEST_TI_TASK_NAME, 1000); - // TODO(bcipriano) Once this test is enabled these asserts should be updated to use - // DAO objects instead of querying the db directly. - - assertTrue(0 < jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task,point WHERE point.pk_point = task.pk_point AND " + - "point.pk_dept=? AND point.pk_show=?", - Integer.class, dept.getDepartmentId(), show.getShowId())); - departmentManager.updateManagedTasks(rp); - assertTrue(0 < jdbcTemplate.queryForObject( - "SELECT COUNT(*) FROM task,point WHERE point.pk_point = task.pk_point AND " + - "point.pk_dept=? 
AND point.pk_show=?", - Integer.class, dept.getDepartmentId(), show.getShowId())); - departmentManager.disableTiManaged(rp); } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java index 4bce07507..13da1c7de 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/FilterManagerTests.java @@ -359,7 +359,7 @@ public void testApplyActionSetRenderCoreLayers() { filterDao.insertFilter(f); ActionEntity a1 = new ActionEntity(); - a1.type = ActionType.SET_ALL_RENDER_LAYER_CORES; + a1.type = ActionType.SET_ALL_RENDER_LAYER_MIN_CORES; a1.filterId = f.getFilterId(); a1.valueType = ActionValueType.FLOAT_TYPE; a1.floatValue = 40f; diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java index ed89219da..29970441d 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/HostManagerTests.java @@ -101,11 +101,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName(HOST_NAME) .setBootTime(1192369572) - .setFreeMcp(7602) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(15290520) .setFreeSwap(2076) .setLoad(1) - .setTotalMcp(19543) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap(2076) .setNimbyEnabled(true) @@ -114,8 +115,8 @@ public DispatchHost createHost() { .setState(HardwareState.UP) .setFacility("spi") .addAllTags(ImmutableList.of("linux", "64bit")) - .putAttributes("freeGpu", "512") - .putAttributes("totalGpu", "512") + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); hostDao.insertRenderHost(host, diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java index b2446fe20..2ea9b5dde 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobManagerTests.java @@ -129,11 +129,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("test_host") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB16) .setNimbyEnabled(false) @@ -463,7 +464,7 @@ public void optimizeLayer() { .stream() .limit(5) .forEach(frame -> frameDao.updateFrameState(frame, FrameState.SUCCEEDED)); - layerDao.updateUsage(layer, new ResourceUsage(100, 3500 * 5), 0); + layerDao.updateUsage(layer, new ResourceUsage(100, 3500 * 5, 0), 0); // Test to make sure our optimization jobManager.optimizeLayer(layer, 100, CueUtil.MB512, 120); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java index e68daa551..119be6160 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/JobSpecTests.java @@ -29,6 +29,8 @@ import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests; import org.springframework.test.context.support.AnnotationConfigContextLoader; +import com.imageworks.spcue.BuildableJob; +import com.imageworks.spcue.LayerDetail; import com.imageworks.spcue.SpecBuilderException; import com.imageworks.spcue.config.TestAppConfig; import com.imageworks.spcue.service.JobLauncher; @@ -95,4 +97,35 @@ public void testParseInvalidShot() { "Shot names must be alpha numeric, no dashes or punctuation."); } } + + @Test + public void testParseGpuSuccess() { + String xml = readJobSpec("jobspec_1_12.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), + "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.12.dtd"); + assertEquals(spec.getJobs().size(), 1); + BuildableJob job = spec.getJobs().get(0); + assertEquals(job.detail.name, "testing-default-testuser_test"); + 
LayerDetail layer = job.getBuildableLayers().get(0).layerDetail; + assertEquals(layer.getMinimumGpus(), 1); + assertEquals(layer.getMinimumGpuMemory(), 1048576); + } + + @Test + public void testParseMaxCoresAndMaxGpus() { + String xml = readJobSpec("jobspec_1_13.xml"); + JobSpec spec = jobLauncher.parse(xml); + assertEquals(spec.getDoc().getDocType().getPublicID(), + "SPI Cue Specification Language"); + assertEquals(spec.getDoc().getDocType().getSystemID(), + "http://localhost:8080/spcue/dtd/cjsl-1.13.dtd"); + assertEquals(spec.getJobs().size(), 1); + BuildableJob job = spec.getJobs().get(0); + assertEquals(job.maxCoresOverride, Integer.valueOf(420)); + assertEquals(job.maxGpusOverride, Integer.valueOf(42)); + } + } diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java index 38d178444..51dcafec4 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/service/OwnerManagerTests.java @@ -69,11 +69,12 @@ public DispatchHost createHost() { RenderHost host = RenderHost.newBuilder() .setName("test_host") .setBootTime(1192369572) - .setFreeMcp(76020) + // The minimum amount of free space in the temporary directory to book a host. 
+ .setFreeMcp(CueUtil.GB) .setFreeMem(53500) .setFreeSwap(20760) .setLoad(1) - .setTotalMcp(195430) + .setTotalMcp(CueUtil.GB4) .setTotalMem((int) CueUtil.GB16) .setTotalSwap((int) CueUtil.GB16) .setNimbyEnabled(true) @@ -82,8 +83,8 @@ public DispatchHost createHost() { .setState(HardwareState.UP) .setFacility("spi") .addTags("general") - .putAttributes("freeGpu", String.format("%d", CueUtil.MB512)) - .putAttributes("totalGpu", String.format("%d", CueUtil.MB512)) + .setFreeGpuMem((int) CueUtil.MB512) + .setTotalGpuMem((int) CueUtil.MB512) .build(); DispatchHost dh = hostManager.createHost(host); @@ -175,41 +176,6 @@ public void testGetDeed() { assertEquals(d, ownerManager.getDeed(d.id)); } - @Test - @Transactional - @Rollback(true) - public void testSetBlackoutTimes() { - OwnerEntity o = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - DeedEntity d = ownerManager.takeOwnership(o, host); - - ownerManager.setBlackoutTime(d, 0, 3600); - - assertEquals(0, deedDao.getDeed(d.id).blackoutStart); - assertEquals(3600, deedDao.getDeed(d.id).blackoutStop); - } - - @Test - @Transactional - @Rollback(true) - public void testEnableDisableBlackout() { - OwnerEntity o = ownerManager.createOwner("spongebob", - adminManager.findShowEntity("pipe")); - - DispatchHost host = createHost(); - DeedEntity d = ownerManager.takeOwnership(o, host); - - ownerManager.setBlackoutTimeEnabled(d, true); - - assertTrue(deedDao.getDeed(d.id).isBlackoutEnabled); - - ownerManager.setBlackoutTimeEnabled(d, false); - - assertFalse(deedDao.getDeed(d.id).isBlackoutEnabled); - } - @Test @Transactional @Rollback(true) diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java index 3573cbe59..5354d763e 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java +++ 
b/cuebot/src/test/java/com/imageworks/spcue/test/service/ServiceManagerTests.java @@ -84,7 +84,7 @@ public void testCreateService() { s.name = "dillweed"; s.minCores = 100; s.minMemory = CueUtil.GB4; - s.minGpu = CueUtil.GB2; + s.minGpuMemory = CueUtil.GB2; s.threadable = false; s.timeout = 0; s.timeout_llu = 0; @@ -105,7 +105,7 @@ public void testOverrideExistingService() { s.timeout = 10; s.timeout_llu = 10; s.minMemory = CueUtil.GB8; - s.minGpu = CueUtil.GB2; + s.minGpuMemory = CueUtil.GB2; s.threadable = false; s.tags.addAll(Sets.newHashSet("general")); s.showId = "00000000-0000-0000-0000-000000000000"; @@ -118,7 +118,7 @@ public void testOverrideExistingService() { assertEquals(10, newService.timeout); assertEquals(10, newService.timeout_llu); assertEquals(CueUtil.GB8, newService.minMemory); - assertEquals(CueUtil.GB2, newService.minGpu); + assertEquals(CueUtil.GB2, newService.minGpuMemory); assertFalse(newService.threadable); assertTrue(s.tags.contains("general")); @@ -127,7 +127,7 @@ public void testOverrideExistingService() { // now check the original is back. 
newService = serviceManager.getService("arnold", s.showId); assertEquals(100, newService.minCores); - assertEquals(0, newService.minGpu); + assertEquals(0, newService.minGpuMemory); } @Test @@ -151,7 +151,7 @@ public void testJobLaunch() { assertEquals(shell.minCores, shellLayer.minimumCores); assertEquals(shell.minMemory, shellLayer.minimumMemory); - assertEquals(shell.minGpu, shellLayer.minimumGpu); + assertEquals(shell.minGpuMemory, shellLayer.minimumGpuMemory); assertFalse(shellLayer.isThreadable); assertEquals(shell.tags, shellLayer.tags); assertThat(shellLayer.services, contains("shell", "katana", "unknown")); @@ -164,7 +164,7 @@ public void testJobLaunch() { assertEquals(cuda.minCores, cudaLayer.minimumCores); assertEquals(cuda.minMemory, cudaLayer.minimumMemory); - assertEquals(cuda.minGpu, cudaLayer.minimumGpu); + assertEquals(cuda.minGpuMemory, cudaLayer.minimumGpuMemory); assertFalse(cudaLayer.isThreadable); assertEquals(cuda.tags, cudaLayer.tags); assertThat(cudaLayer.services, contains("cuda")); diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java index 9bfc19e41..d3a4abe76 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/CueUtilTester.java @@ -154,9 +154,9 @@ public void testCoreUnitsToCoresWithScale() { @Test public void testBuildProcName() { - assertEquals("drack100/1.00", CueUtil.buildProcName("drack100",100)); - assertEquals("drack100/1.40", CueUtil.buildProcName("drack100",140)); - assertEquals("drack100/2.01", CueUtil.buildProcName("drack100",201)); + assertEquals("drack100/1.00/1", CueUtil.buildProcName("drack100",100,1)); + assertEquals("drack100/1.40/0", CueUtil.buildProcName("drack100",140,0)); + assertEquals("drack100/2.01/2", CueUtil.buildProcName("drack100",201,2)); } @Test diff --git 
a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java index a49bf88c3..aa9cfcc07 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameRangeTests.java @@ -103,14 +103,14 @@ public void testNegativeInvertedStep() { public void testInterleave() { FrameRange result = new FrameRange("1-10:5"); - assertThat(result.getAll()).containsExactly(1, 6, 2, 4, 8, 10, 3, 5, 7, 9); + assertThat(result.getAll()).containsExactly(1, 6, 3, 5, 7, 9, 2, 4, 8, 10); } @Test public void testNegativeInterleave() { FrameRange result = new FrameRange("10-1:-5"); - assertThat(result.getAll()).containsExactly(10, 5, 9, 7, 3, 1, 8, 6, 4, 2); + assertThat(result.getAll()).containsExactly(10, 5, 8, 6, 4, 2, 9, 7, 3, 1); } @Test diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java index 223813f46..404d53b0f 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/FrameSetTests.java @@ -12,7 +12,7 @@ public void shouldSplitListAndMaintainOrder() { FrameSet result = new FrameSet("57,1-3,4-2,12-15x2,76-70x-3,5-12y3,1-7:5"); assertThat(result.getAll()).containsExactly( - 57, 1, 2, 3, 4, 3, 2, 12, 14, 76, 73, 70, 6, 7, 9, 10, 12, 1, 6, 2, 4, 3, 5, 7); + 57, 1, 2, 3, 4, 3, 2, 12, 14, 76, 73, 70, 6, 7, 9, 10, 12, 1, 6, 3, 5, 7, 2, 4); } @Test diff --git a/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java b/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java index bbb550ec2..417924b9b 100644 --- a/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java +++ b/cuebot/src/test/java/com/imageworks/spcue/test/util/JobLogUtilTests.java @@ -36,32 +36,55 @@ public class 
JobLogUtilTests extends AbstractTransactionalJUnit4SpringContextTes @Resource private JobLogUtil jobLogUtil; - private String logRoot; + private String logRootDefault; + private String logRootSomeOs; @Before public void setUp() { - // This value should match what's defined in test/resources/opencue.properties. - logRoot = "/arbitraryLogDirectory"; + // The values should match what's defined in test/resources/opencue.properties. + logRootDefault = "/arbitraryLogDirectory"; + logRootSomeOs = "/arbitrarySomeOsLogDirectory"; } @Test - public void testGetJobLogRootDir() { - assertEquals(logRoot, jobLogUtil.getJobLogRootDir()); + public void testGetJobLogRootDirDefault() { + assertEquals(logRootDefault, jobLogUtil.getJobLogRootDir("someUndefinedOs")); } @Test - public void testGetJobLogDir() { - assertEquals(logRoot + "/show/shot/logs", jobLogUtil.getJobLogDir("show", "shot")); + public void testGetJobLogRootSomeOs() { + assertEquals(logRootSomeOs, jobLogUtil.getJobLogRootDir("some_os")); } @Test - public void testGetJobLogPath() { + public void testGetJobLogDirDefault() { + assertEquals(logRootDefault + "/show/shot/logs", jobLogUtil.getJobLogDir("show", "shot", "someUndefinedOs")); + } + + @Test + public void testGetJobLogDirSomeOs() { + assertEquals(logRootSomeOs + "/show/shot/logs", jobLogUtil.getJobLogDir("show", "shot", "some_os")); + } + + @Test + public void testGetJobLogPathDefault() { JobDetail jobDetail = new JobDetail(); jobDetail.id = "id"; jobDetail.name = "name"; jobDetail.showName = "show"; jobDetail.shot = "shot"; - assertEquals(logRoot + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); + jobDetail.os = "someUndefinedOs"; + assertEquals(logRootDefault + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); } -} + @Test + public void testGetJobLogPathSomeOs() { + JobDetail jobDetail = new JobDetail(); + jobDetail.id = "id"; + jobDetail.name = "name"; + jobDetail.showName = "show"; + jobDetail.shot = "shot"; + jobDetail.os = 
"some_os"; + assertEquals(logRootSomeOs + "/show/shot/logs/name--id", jobLogUtil.getJobLogPath(jobDetail)); + } +} diff --git a/cuebot/src/test/resources/conf/ddl/oracle/test_data.sql b/cuebot/src/test/resources/conf/ddl/oracle/test_data.sql deleted file mode 100644 index bdc0e4c8f..000000000 --- a/cuebot/src/test/resources/conf/ddl/oracle/test_data.sql +++ /dev/null @@ -1,118 +0,0 @@ -Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000000','pipe',20000,100,0,0,0,0,1,1,1) --- SPLIT HERE! -Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000001','edu',20000,100,0,0,0,0,1,1,1) --- SPLIT HERE! - -Insert into SHOW_ALIAS (PK_SHOW_ALIAS,PK_SHOW,STR_NAME) values ('00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000','fx') --- SPLIT HERE! - -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0','Lighting',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1','Animation',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2','Hair',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA3','Cloth',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA4','Layout',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA5','FX',0) --- SPLIT HERE! 
-Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA6','Pipeline',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA7','S3D',0) --- SPLIT HERE! -Insert into DEPT (PK_DEPT,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8','Unknown',1) --- SPLIT HERE! - -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1','lax',1) --- SPLIT HERE! -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0','spi',0) --- SPLIT HERE! -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2','maa',0) --- SPLIT HERE! -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA3','abq',0) --- SPLIT HERE! -Insert into FACILITY (PK_FACILITY,STR_NAME,B_DEFAULT) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA4','brs',0) --- SPLIT HERE! - -Insert into FOLDER (PK_FOLDER,PK_PARENT_FOLDER,PK_SHOW,STR_NAME,B_DEFAULT,PK_DEPT,INT_JOB_MIN_CORES,INT_JOB_MAX_CORES,INT_JOB_PRIORITY,F_ORDER,B_EXCLUDE_MANAGED) values ('A0000000-0000-0000-0000-000000000000',null,'00000000-0000-0000-0000-000000000000','pipe',1,'AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8',-1,-1,-1,1,0) --- SPLIT HERE! -Insert into FOLDER (PK_FOLDER,PK_PARENT_FOLDER,PK_SHOW,STR_NAME,B_DEFAULT,PK_DEPT,INT_JOB_MIN_CORES,INT_JOB_MAX_CORES,INT_JOB_PRIORITY,F_ORDER,B_EXCLUDE_MANAGED) values ('B0000000-0000-0000-0000-000000000000',null,'00000000-0000-0000-0000-000000000001','edu',1,'AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8',-1,-1,-1,1,0) --- SPLIT HERE! - -Insert into POINT (PK_POINT,PK_DEPT,PK_SHOW,STR_TI_TASK,INT_CORES,B_MANAGED,INT_MIN_CORES,FLOAT_TIER) values ('FFEEDDCC-AAAA-AAAA-AAAA-AAAAAAAAAAA0','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8','00000000-0000-0000-0000-000000000000',null,0,0,0,0) --- SPLIT HERE! 
-Insert into POINT (PK_POINT,PK_DEPT,PK_SHOW,STR_TI_TASK,INT_CORES,B_MANAGED,INT_MIN_CORES,FLOAT_TIER) values ('FFEEDDCC-AAAA-AAAA-AAAA-AAAAAAAAAAA1','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8','00000000-0000-0000-0000-000000000001',null,0,0,0,0) --- SPLIT HERE! - -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000000','lax.general',0,0,'general','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000001','lax.desktop',0,0,'desktop','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000002','lax.unassigned',0,1,'unassigned','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000003','maa.general',0,0,'general','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000004','maa.desktop',0,0,'desktop','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000005','maa.unassigned',0,0,'unassigned','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2',0,1) --- SPLIT HERE! -Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000006','spi.general',1,0,'general','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0',0,1) --- SPLIT HERE! 
-Insert into ALLOC (PK_ALLOC,STR_NAME,B_ALLOW_EDIT,B_DEFAULT,STR_TAG,PK_FACILITY,B_BILLABLE,B_ENABLED) values ('00000000-0000-0000-0000-000000000007','spi.desktop',1,0,'desktop','AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0',0,1) --- SPLIT HERE! - -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000','00000000-0000-0000-0000-000000000000',1000,1000,0,0) --- SPLIT HERE! -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000002','00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000',1000,1000,0,0) --- SPLIT HERE! -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000003','00000000-0000-0000-0000-000000000007','00000000-0000-0000-0000-000000000000',1000,1000,0,0) --- SPLIT HERE! -Insert into SUBSCRIPTION (PK_SUBSCRIPTION,PK_ALLOC,PK_SHOW,INT_SIZE,INT_BURST,INT_CORES,FLOAT_TIER) values ('00000000-0000-0000-0000-000000000004','00000000-0000-0000-0000-000000000006','00000000-0000-0000-0000-000000000000',1000,1000,0,0) --- SPLIT HERE! - -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA0','default',0,100,3355443,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA1','prman',0,100,3355443,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA2','arnold',1,100,3355443,'general | desktop') --- SPLIT HERE! 
-Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA3','shell',0,100,3355443,'general | util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA4','maya',0,100,2097152,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA5','houdini',0,100,3355443,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA6','svea',1,100,3355443,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA7','katana',1,100,2097152,'general | desktop | util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA8','shake',0,100,2097152,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAA9','nuke',0,100,2097152,'general | desktop') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA10','ginsu',0,50,524288,'general | desktop | util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA11','preprocess',0,10,393216,'util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA12','postprocess',0,10,524288,'util') --- SPLIT HERE! 
-Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA14','refcollect',0,50,1048576,'general | util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA15','makemovie',0,50,1048576,'util') --- SPLIT HERE! -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS,INT_CORES_MAX,INT_GPU_MIN) values ('488c75f0-eae4-4dd0-83e0-29b982adbbff','cuda',1,100,3354624,'cuda',0,262144) --- SPLIT HERE! - --- SPLIT HERE! -Insert into CONFIG (PK_CONFIG,STR_KEY,INT_VALUE,LONG_VALUE,STR_VALUE,B_VALUE) values ('00000000-0000-0000-0000-000000000005','MAX_FRAME_RETRIES',16,0,null,0) - --- SPLIT HERE! -Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000002','LOCK_HARDWARE_STATE_CHECK',0,30) --- SPLIT HERE! -Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000001','LOCK_HISTORICAL_TRANSFER',0,3600) --- SPLIT HERE! -Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000003','LOCK_ORPHANED_PROC_CHECK',0,30) --- SPLIT HERE! 
-Insert into TASK_LOCK (PK_TASK_LOCK,STR_NAME,INT_LOCK,INT_TIMEOUT) values ('00000000-0000-0000-0000-000000000005','LOCK_TASK_UPDATE',1240618998852,3600) diff --git a/cuebot/src/test/resources/conf/ddl/postgres/test_data.sql b/cuebot/src/test/resources/conf/ddl/postgres/test_data.sql index b5596b91d..4f9c2a0a0 100644 --- a/cuebot/src/test/resources/conf/ddl/postgres/test_data.sql +++ b/cuebot/src/test/resources/conf/ddl/postgres/test_data.sql @@ -1,6 +1,10 @@ -Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000000','pipe',20000,100,0,0,0,0,true,true,true) +Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000000','pipe',20000,100,true,true,true) -Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000001','edu',20000,100,0,0,0,0,true,true,true) +Insert into SHOW (PK_SHOW,STR_NAME,INT_DEFAULT_MAX_CORES,INT_DEFAULT_MIN_CORES,B_BOOKING_ENABLED,B_DISPATCH_ENABLED,B_ACTIVE) values ('00000000-0000-0000-0000-000000000001','edu',20000,100,true,true,true) + +Insert into SHOW_STATS (PK_SHOW,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT) values ('00000000-0000-0000-0000-000000000000',0,0,0,0) + +Insert into SHOW_STATS (PK_SHOW,INT_FRAME_INSERT_COUNT,INT_JOB_INSERT_COUNT,INT_FRAME_SUCCESS_COUNT,INT_FRAME_FAIL_COUNT) values ('00000000-0000-0000-0000-000000000001',0,0,0,0) Insert into SHOW_ALIAS (PK_SHOW_ALIAS,PK_SHOW,STR_NAME) values ('00000000-0000-0000-0000-000000000001','00000000-0000-0000-0000-000000000000','fx') @@ 
-102,7 +106,9 @@ Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN, Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS) values ('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAA15','makemovie',false,50,1048576,'util') -Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS,INT_CORES_MAX,INT_GPU_MIN) values ('488c75f0-eae4-4dd0-83e0-29b982adbbff','cuda',true,100,3354624,'cuda',0,262144) +Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS,INT_CORES_MAX,INT_GPU_MEM_MIN) values ('488c75f0-eae4-4dd0-83e0-29b982adbbff','cuda',true,100,3354624,'cuda',0,262144) + +Insert into SERVICE (PK_SERVICE,STR_NAME,B_THREADABLE,INT_CORES_MIN,INT_MEM_MIN,STR_TAGS,INT_MIN_MEMORY_INCREASE) values ('123c75f0-eie4-4cc0-84e0-46b982abcdef','apitest',false,10,2097152,'general',4194304) Insert into CONFIG (PK_CONFIG,STR_KEY,INT_VALUE,LONG_VALUE,STR_VALUE,B_VALUE) values ('00000000-0000-0000-0000-000000000005','MAX_FRAME_RETRIES',16,0,null,false) diff --git a/cuebot/src/test/resources/conf/dtd/cjsl-1.12.dtd b/cuebot/src/test/resources/conf/dtd/cjsl-1.12.dtd new file mode 100644 index 000000000..222e04cfc --- /dev/null +++ b/cuebot/src/test/resources/conf/dtd/cjsl-1.12.dtd @@ -0,0 +1,97 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cuebot/src/test/resources/conf/dtd/cjsl-1.13.dtd b/cuebot/src/test/resources/conf/dtd/cjsl-1.13.dtd new file mode 100644 index 000000000..ff7ad71e3 --- /dev/null +++ b/cuebot/src/test/resources/conf/dtd/cjsl-1.13.dtd @@ -0,0 +1,99 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/jobspec_1_12.xml 
b/cuebot/src/test/resources/conf/jobspec/jobspec_1_12.xml new file mode 100644 index 000000000..65036370b --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/jobspec_1_12.xml @@ -0,0 +1,49 @@ + + + + + + + + + local + testing + default + testuser + 9860 + + + False + 2 + False + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 1 + 1 + + + shell + + + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/jobspec_1_13.xml b/cuebot/src/test/resources/conf/jobspec/jobspec_1_13.xml new file mode 100644 index 000000000..36d40c95d --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/jobspec_1_13.xml @@ -0,0 +1,51 @@ + + + + + + + + + local + testing + default + testuser + 9860 + + + False + 2 + 420 + 42 + False + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 1 + 1 + + + shell + + + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml b/cuebot/src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml new file mode 100644 index 000000000..dd3ce459b --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/jobspec_dispatch_gpus_test.xml @@ -0,0 +1,133 @@ + + + + + + + + + spi + pipe + default + testuser + 9860 + + + True + 2 + False + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 1 + + + shell + + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 1 + + + shell + + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 1 + 1 + + + shell + + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 4 + 7g + + + shell + + + + + + + True + 2 + False + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 6 + 1 + + + shell + + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 3 + 7g + + + shell + + + + + + + True + 2 + False + + + + echo CPU + 1-10 + 1 + + + shell + + + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/jobspec_gpus_test.xml b/cuebot/src/test/resources/conf/jobspec/jobspec_gpus_test.xml new file mode 100644 index 000000000..cfa6e694c --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/jobspec_gpus_test.xml @@ -0,0 +1,128 @@ + + + + + + + + + spi + pipe + default + testuser + 9860 + + + 
True + + + true + 0 + 1 + 1 + 1 + + shell + + + + + + + True + + + true + 0 + 1 + 3 + 1 + + shell + + + + + + + True + + + true + 0 + 1 + 6 + 1 + + shell + + + + + + + true + 3 + + + /shots/pipe/usr_testuser/logs/help.py + 0 + 1 + false + + apitest + + + + + + + True + + + true + 0 + 1 + 1 + 1 + + shell + + + + true + 0 + 1 + 1 + 1 + + shell + + + + + + + + test_depend + layer_second + test_depend + layer_first + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/jobspec_multiple_frames.xml b/cuebot/src/test/resources/conf/jobspec/jobspec_multiple_frames.xml new file mode 100644 index 000000000..3baa0b22b --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/jobspec_multiple_frames.xml @@ -0,0 +1,48 @@ + + + + + + + + + spi + pipe + default + testuser + 9860 + + + False + 2 + False + + + + echo hello + 1-3 + 1 + 2gb + + + shell + + + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/jobspec_simple.xml b/cuebot/src/test/resources/conf/jobspec/jobspec_simple.xml new file mode 100644 index 000000000..d717374d5 --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/jobspec_simple.xml @@ -0,0 +1,47 @@ + + + + + + + + + spi + pipe + default + testuser + 9860 + + + False + 2 + False + + + + echo hello + 0 + 1 + + + shell + + + + + + diff --git a/cuebot/src/test/resources/conf/jobspec/override_max_cores_gpus.xml b/cuebot/src/test/resources/conf/jobspec/override_max_cores_gpus.xml new file mode 100644 index 000000000..273fcb0ee --- /dev/null +++ b/cuebot/src/test/resources/conf/jobspec/override_max_cores_gpus.xml @@ -0,0 +1,50 @@ + + + + + + + + + pipe + dev.cue + testuser + 9860 + + + False + 2 + 420 + 42 + False + + + + echo $CUE_GPU_CORES + 1-10 + 1 + 1 + 1 + + + shell + + + + + + diff --git a/cuebot/src/test/resources/conf/spring/applicationContext-assumptions.xml b/cuebot/src/test/resources/conf/spring/applicationContext-assumptions.xml index bd4b161af..3cf18976f 100644 --- 
a/cuebot/src/test/resources/conf/spring/applicationContext-assumptions.xml +++ b/cuebot/src/test/resources/conf/spring/applicationContext-assumptions.xml @@ -21,14 +21,8 @@ xsi:schemaLocation=" http://www.springframework.org/schema/beans http://www.springframework.org/schema/beans/spring-beans-2.5.xsd"> - - - - - - diff --git a/cuebot/src/test/resources/conf/spring/applicationContext-oracle-datasource.xml b/cuebot/src/test/resources/conf/spring/applicationContext-oracle-datasource.xml deleted file mode 100644 index 67dddc41b..000000000 --- a/cuebot/src/test/resources/conf/spring/applicationContext-oracle-datasource.xml +++ /dev/null @@ -1,59 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/cuebot/src/test/resources/log4j.properties b/cuebot/src/test/resources/log4j.properties index 712cdc4b3..71a45df96 100644 --- a/cuebot/src/test/resources/log4j.properties +++ b/cuebot/src/test/resources/log4j.properties @@ -29,7 +29,6 @@ log4j.logger.com.imageworks.spcue=DEBUG log4j.logger.com.imageworks.spcue.dispatcher.RqdReportManagerService=DEBUG log4j.logger.com.imageworks.spcue.service.HostManagerService=TRACE log4j.logger.com.imageworks.spcue.dispatcher=TRACE -log4j.logger.com.imageworks.spcue.dao.oracle.DispatcherDaoJdbc=DEBUG #log4j.logger.org.springframework=DEBUG diff --git a/cuebot/src/test/resources/opencue.properties b/cuebot/src/test/resources/opencue.properties index 0a59548ce..10516f881 100644 --- a/cuebot/src/test/resources/opencue.properties +++ b/cuebot/src/test/resources/opencue.properties @@ -1,16 +1,6 @@ cue.proxy = tcp -h cuetest01-vm -p 9019 -t 10000:tcp -h cuetest02-vm -p 9019 -t 10000:tcp -h cuetest03-vm -p 9019 -t 10000 spring.velocity.checkTemplateLocation=false -cue.trackit.enabled=false -# If using Oracle trackit, ensure the drivers are installed and use the following config value: -# datasource.trackit-data-source.driver-class-name=oracle.jdbc.OracleDriver -datasource.trackit-data-source.jdbc-url=jdbc:oracle:oci:@dbname 
-datasource.trackit-data-source.username=element_ro -datasource.trackit-data-source.password=password -# Discard connections after 6 hours, this allows for gradual -# connection rebalancing. -datasource.trackit-data-source.max-age=21600000 - grpc.cue_port=8453 grpc.rqd_server_port=${CUEBOT_GRPC_RQD_SERVER_PORT:50051} grpc.max_message_bytes=104857600 @@ -18,12 +8,64 @@ grpc.max_message_bytes=104857600 grpc.rqd_cache_size=500 # RQD Channel Cache Expiration in Minutes grpc.rqd_cache_expiration=30 +# RQD Channel Cache expected concurrency +grpc.rqd_cache_concurrency=20 +# RQD Channel task deadline in seconds +grpc.rqd_task_deadline=10 + +# Healthy Threadpool Executor +booking_queue.threadpool.health_threshold=10 +booking_queue.threadpool.core_pool_size=10 +booking_queue.threadpool.max_pool_size=14 +booking_queue.threadpool.queue_capacity=2000 +dispatch.threadpool.core_pool_size=6 +dispatch.threadpool.max_pool_size=8 +dispatch.threadpool.queue_capacity=2000 +healthy_threadpool.health_threshold=6 +healthy_threadpool.min_unhealthy_period_min=3 +report_queue.threadPoolSizeInitial=6 +report_queue.threadPoolSizeMax=12 +# The queue size should be bigger then the expected amount of hosts +report_queue.queueSize=5000 +kill_queue.threadPoolSizeInitial=2 +kill_queue.threadPoolSizeMax=6 +kill_queue.queueSize=1000 -log.frame-log-root=/arbitraryLogDirectory +log.frame-log-root.default_os=/arbitraryLogDirectory +log.frame-log-root.some_os=/arbitrarySomeOsLogDirectory dispatcher.job_query_max=20 dispatcher.job_lock_expire_seconds=2 dispatcher.job_lock_concurrency_level=3 dispatcher.frame_query_max=10 -dispatcher.job_frame_dispatch_max=2 +dispatcher.job_frame_dispatch_max=3 dispatcher.host_frame_dispatch_max=12 + +dispatcher.launch_queue.core_pool_size=1 +dispatcher.launch_queue.max_pool_size=1 +dispatcher.launch_queue.queue_capacity=100 + +dispatcher.dispatch_pool.core_pool_size=4 +dispatcher.dispatch_pool.max_pool_size=4 +dispatcher.dispatch_pool.queue_capacity=500 + 
+dispatcher.manage_pool.core_pool_size=8 +dispatcher.manage_pool.max_pool_size=8 +dispatcher.manage_pool.queue_capacity=250 + +dispatcher.report_queue.core_pool_size=6 +dispatcher.report_queue.max_pool_size=8 +dispatcher.report_queue.queue_capacity=1000 + +dispatcher.kill_queue.core_pool_size=6 +dispatcher.kill_queue.max_pool_size=8 +dispatcher.kill_queue.queue_capacity=1000 + +dispatcher.booking_queue.core_pool_size=6 +dispatcher.booking_queue.max_pool_size=6 +dispatcher.booking_queue.queue_capacity=1000 +dispatcher.min_bookable_free_temp_dir_kb=1048576 +dispatcher.min_bookable_free_mcp_kb=1048576 +dispatcher.oom_max_safe_used_memory_threshold=0.95 +dispatcher.oom_frame_overboard_allowed_threshold=0.6 +dispatcher.frame_kill_retry_limit=3 \ No newline at end of file diff --git a/cuegui/Dockerfile b/cuegui/Dockerfile index 4804478f6..8b0b189f9 100644 --- a/cuegui/Dockerfile +++ b/cuegui/Dockerfile @@ -1,10 +1,4 @@ -# ----------------- -# BUILD -# ----------------- -FROM centos:7 as build - -# First line after FROM should be unique to avoid --cache-from weirdness. 
-RUN echo "CueGUI build stage" +FROM --platform=linux/x86_64 centos:7 WORKDIR /src @@ -15,21 +9,22 @@ RUN yum -y install \ gcc \ libXi \ libXrender \ + libxkbcommon-x11.x86_64 \ mesa-libGL \ python-devel \ which \ - Xvfb + Xvfb \ + xcb-util-image.x86_64 \ + xcb-util-keysyms.x86_64 \ + xcb-util-renderutil.x86_64 \ + xcb-util-wm.x86_64 RUN yum -y install \ - python-pip \ python36 \ python36-devel \ python36-pip -RUN python -m pip install --upgrade 'pip<21' RUN python3.6 -m pip install --upgrade pip - -RUN python -m pip install --upgrade 'setuptools<45' RUN python3.6 -m pip install --upgrade setuptools RUN dbus-uuidgen > /etc/machine-id @@ -38,7 +33,6 @@ COPY LICENSE ./ COPY requirements.txt ./ COPY requirements_gui.txt ./ -RUN python -m pip install -r requirements.txt -r requirements_gui.txt RUN python3.6 -m pip install -r requirements.txt -r requirements_gui.txt COPY proto/ ./proto @@ -47,7 +41,7 @@ COPY pycue/setup.py ./pycue/ COPY pycue/FileSequence ./pycue/FileSequence COPY pycue/opencue ./pycue/opencue -RUN python -m grpc_tools.protoc \ +RUN python3.6 -m grpc_tools.protoc \ -I=./proto \ --python_out=./pycue/opencue/compiled_proto \ --grpc_python_out=./pycue/opencue/compiled_proto \ @@ -65,32 +59,14 @@ COPY cuegui/cuegui ./cuegui/cuegui COPY VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION -RUN cd pycue && python setup.py install - RUN cd pycue && python3.6 setup.py install -# TODO(bcipriano) Lint the code here. (Issue #78) - -RUN cd cuegui && xvfb-run -d python setup.py test - RUN cd cuegui && xvfb-run -d python3.6 setup.py test RUN cp LICENSE requirements.txt requirements_gui.txt VERSION cuegui/ RUN versioned_name="cuegui-$(cat ./VERSION)-all" \ - && mv cuegui $versioned_name \ - && tar -cvzf $versioned_name.tar.gz $versioned_name/* - - -# ----------------- -# RUN -# ----------------- -FROM centos:7 - -# First line after FROM should be unique to avoid --cache-from weirdness. 
-RUN echo "CueGUI runtime stage" - -WORKDIR /opt/opencue - -COPY --from=build /src/cuegui-*-all.tar.gz ./ - + && mv cuegui "${versioned_name}" \ + && tar -cvzf "${versioned_name}.tar.gz" ${versioned_name}/* \ + && mkdir -p /opt/opencue \ + && cp "${versioned_name}.tar.gz" /opt/opencue/ diff --git a/cuegui/cuegui/AbstractDialog.py b/cuegui/cuegui/AbstractDialog.py index 613b3d83c..2474cf14b 100644 --- a/cuegui/cuegui/AbstractDialog.py +++ b/cuegui/cuegui/AbstractDialog.py @@ -21,8 +21,8 @@ from __future__ import absolute_import from builtins import str -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets class AbstractDialog(QtWidgets.QDialog): @@ -37,8 +37,10 @@ def _newCheckBoxSelectionMatrix(title, allowedOptions, checkedOptions, parent=No def _newDialogButtonBox(self, buttons, orientation=QtCore.Qt.Horizontal): buttonBox = QtWidgets.QDialogButtonBox(buttons, orientation, self) + # pylint: disable=no-member buttonBox.accepted.connect(self.accept) buttonBox.rejected.connect(self.reject) + # pylint: enable=no-member return buttonBox def _addWidgetRow(self, *widgets): diff --git a/cuegui/cuegui/AbstractDockWidget.py b/cuegui/cuegui/AbstractDockWidget.py index 15857d5a2..0697662e3 100644 --- a/cuegui/cuegui/AbstractDockWidget.py +++ b/cuegui/cuegui/AbstractDockWidget.py @@ -22,8 +22,8 @@ from __future__ import print_function from __future__ import division -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Plugins diff --git a/cuegui/cuegui/AbstractTreeWidget.py b/cuegui/cuegui/AbstractTreeWidget.py index cf74590ef..0f4e9500a 100644 --- a/cuegui/cuegui/AbstractTreeWidget.py +++ b/cuegui/cuegui/AbstractTreeWidget.py @@ -26,9 +26,9 @@ from builtins import range import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets 
import cuegui.AbstractWidgetItem import cuegui.Constants @@ -75,6 +75,7 @@ def __init__(self, parent): @type parent: QWidget @param parent: The widget to set as the parent""" QtWidgets.QTreeWidget.__init__(self, parent) + self.app = cuegui.app() self._items = {} self._lastUpdate = 0 @@ -100,12 +101,12 @@ def __init__(self, parent): self.__setupColumnMenu() + # pylint: disable=no-member self.itemClicked.connect(self.__itemSingleClickedEmitToApp) self.itemDoubleClicked.connect(self.__itemDoubleClickedEmitToApp) self._timer.timeout.connect(self.updateRequest) - # pylint: disable=no-member - QtGui.qApp.request_update.connect(self.updateRequest) # pylint: enable=no-member + self.app.request_update.connect(self.updateRequest) self.updateRequest() self.setUpdateInterval(10) @@ -215,7 +216,7 @@ def startTicksUpdate(self, updateInterval, self.ticksLock = QtCore.QMutex() self.__ticksTimer = QtCore.QTimer(self) - self.__ticksTimer.timeout.connect(self.__tick) + self.__ticksTimer.timeout.connect(self.__tick) # pylint: disable=no-member self.__ticksTimer.start(1000) self.ticksWithoutUpdate = 999 @@ -279,9 +280,7 @@ def __itemSingleClickedEmitToApp(item, col): @type col: int @param col: Column number single clicked on""" del col - # pylint: disable=no-member - QtGui.qApp.single_click.emit(item.rpcObject) - # pylint: enable=no-member + cuegui.app().single_click.emit(item.rpcObject) @staticmethod def __itemDoubleClickedEmitToApp(item, col): @@ -293,10 +292,8 @@ def __itemDoubleClickedEmitToApp(item, col): @type col: int @param col: Column number double clicked on""" del col - # pylint: disable=no-member - QtGui.qApp.view_object.emit(item.rpcObject) - QtGui.qApp.double_click.emit(item.rpcObject) - # pylint: enable=no-member + cuegui.app().view_object.emit(item.rpcObject) + cuegui.app().double_click.emit(item.rpcObject) def addObject(self, rpcObject): """Adds or updates an rpcObject in the list using the _createItem function @@ -385,11 +382,9 @@ def _update(self): """Updates the 
items in the TreeWidget without checking when it was last updated""" self._lastUpdate = time.time() - if hasattr(QtGui.qApp, "threadpool"): - # pylint: disable=no-member - QtGui.qApp.threadpool.queue( + if self.app.threadpool is not None: + self.app.threadpool.queue( self._getUpdate, self._processUpdate, "getting data for %s" % self.__class__) - # pylint: enable=no-member else: logger.warning("threadpool not found, doing work in gui thread") self._processUpdate(None, self._getUpdate()) @@ -509,7 +504,7 @@ def __setupColumnMenu(self): self.__dropdown.setFixedHeight(self.header().height() - 10) self.__dropdown.setToolTip("Click to select columns to display") self.__dropdown.setIcon(QtGui.QIcon(":column_popdown.png")) - self.__dropdown.clicked.connect(self.__displayColumnMenu) + self.__dropdown.clicked.connect(self.__displayColumnMenu) # pylint: disable=no-member layout = QtWidgets.QHBoxLayout(self.header()) layout.setContentsMargins(0, 0, 0, 0) @@ -521,7 +516,7 @@ def __displayColumnMenu(self): self.__dropdown.height())) menu = QtWidgets.QMenu(self) - menu.triggered.connect(self.__handleColumnMenu) + menu.triggered.connect(self.__handleColumnMenu) # pylint: disable=no-member for col in range(self.columnCount()): if self.columnWidth(col) or self.isColumnHidden(col): name = self.__columnInfoByType[self.__columnPrimaryType][col][COLUMN_NAME] diff --git a/cuegui/cuegui/AbstractWidgetItem.py b/cuegui/cuegui/AbstractWidgetItem.py index fec1dc5fe..8a05699e8 100644 --- a/cuegui/cuegui/AbstractWidgetItem.py +++ b/cuegui/cuegui/AbstractWidgetItem.py @@ -24,11 +24,8 @@ from builtins import str -from PySide2 import QtCore -from PySide2 import QtWidgets - -import opencue -import opencue.wrappers.job +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Constants import cuegui.Logger @@ -50,6 +47,7 @@ class AbstractWidgetItem(QtWidgets.QTreeWidgetItem): def __init__(self, itemType, rpcObject, parent, source=None): QtWidgets.QTreeWidgetItem.__init__(self, parent, 
itemType) + self.app = cuegui.app() self.column_info = self.treeWidget().getColumnInfo(itemType) self._cache = {} self._source = source @@ -92,14 +90,15 @@ def data(self, col, role): return cuegui.Constants.QVARIANT_NULL def __lt__(self, other): - """Custom sorting for columns that have a function defined for sorting""" + """Custom sorting for columns that have a function defined for sorting + (uses the sort lambda function defined in the subclasses' addColumn definition).""" sortLambda = self.column_info[self.treeWidget().sortColumn()][SORT_LAMBDA] column = self.treeWidget().sortColumn() - if sortLambda and isinstance(other.rpcObject, opencue.wrappers.job.Job): + if sortLambda: # pylint: disable=broad-except try: return sortLambda(self.rpcObject) < sortLambda(other.rpcObject) except Exception: - logger.warning("Sort failed on column %s, using text sort.", column) + logger.info("Sort failed on column %s, using text sort.", column) return str(self.text(column)) < str(other.text(column)) diff --git a/cuegui/cuegui/Action.py b/cuegui/cuegui/Action.py index 853e6a087..0f24c4957 100644 --- a/cuegui/cuegui/Action.py +++ b/cuegui/cuegui/Action.py @@ -20,8 +20,8 @@ from __future__ import print_function from __future__ import division -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtWidgets import cuegui.Constants @@ -103,4 +103,4 @@ def __init__(self,callback=None, parent=None): self.setText("Refresh") self.setIcon(QtGui.QIcon(":/images/stock-refresh.png")) if callback: - self.triggered.connect(callback) + self.triggered.connect(callback) # pylint: disable=no-member diff --git a/cuegui/cuegui/App.py b/cuegui/cuegui/App.py new file mode 100644 index 000000000..8e5409520 --- /dev/null +++ b/cuegui/cuegui/App.py @@ -0,0 +1,78 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Module for CueGUI's custom QApplication and associated helper functions.""" + +from qtpy import QtCore +from qtpy import QtWidgets + +import cuegui.Exception + +__QAPPLICATION_SINGLETON = None + + +class CueGuiApplication(QtWidgets.QApplication): + """The CueGUI application.""" + + # Settings + settings = None + + # Global signals + display_log_file_content = QtCore.Signal(object) + double_click = QtCore.Signal(object) + facility_changed = QtCore.Signal() + single_click = QtCore.Signal(object) + unmonitor = QtCore.Signal(object) + view_hosts = QtCore.Signal(object) + view_object = QtCore.Signal(object) + view_procs = QtCore.Signal(object) + request_update = QtCore.Signal() + status = QtCore.Signal() + quit = QtCore.Signal() + + # Thread pool + threadpool = None + threads = [] + + # Shutdown signal + closingApp = False + + +def create_app(argv): + """ + Create an instance of the CueGUI application. + + :param argv: user-provided commandline arguments + :type argv: list + :return: the application instance + :rtype: CueGuiApplication + """ + # pylint: disable=global-statement + global __QAPPLICATION_SINGLETON + if __QAPPLICATION_SINGLETON is None: + __QAPPLICATION_SINGLETON = CueGuiApplication(argv) + return __QAPPLICATION_SINGLETON + + +def app(): + """Returns the current application instance. 
+ + :return: the current application instance + :rtype: CueGuiApplication + :raises: opencue.exception.ApplicationNotRunningException: the application has not been + initialized yet + """ + if __QAPPLICATION_SINGLETON is None: + raise cuegui.Exception.ApplicationNotRunningException() + return __QAPPLICATION_SINGLETON diff --git a/cuegui/cuegui/Comments.py b/cuegui/cuegui/Comments.py index dc493433f..9cccdf576 100644 --- a/cuegui/cuegui/Comments.py +++ b/cuegui/cuegui/Comments.py @@ -23,9 +23,8 @@ from builtins import str import pickle -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Utils @@ -50,6 +49,8 @@ def __init__(self, source, parent=None): @type parent: QWidget @param parent: The dialog's parent""" QtWidgets.QDialog.__init__(self, parent) + self.app = cuegui.app() + self.__source = source self.__labelTitle = QtWidgets.QLabel(self.__source.data.name, self) @@ -94,6 +95,7 @@ def __init__(self, source, parent=None): btnLayout.addWidget(self.__btnClose) layout.addLayout(btnLayout) + # pylint: disable=no-member self.__treeSubjects.itemSelectionChanged.connect(self.__itemChanged) self.__comboMacro.currentTextChanged.connect(self.__macroHandle) self.__btnSave.pressed.connect(self.__saveComment) @@ -102,6 +104,7 @@ def __init__(self, source, parent=None): self.__btnClose.pressed.connect(self.__close) self.__textSubject.textEdited.connect(self.__textEdited) self.__textMessage.textChanged.connect(self.__textEdited) + # pylint: enable=no-member self.refreshComments() self.__macroLoad() @@ -109,6 +112,8 @@ def __init__(self, source, parent=None): def __textEdited(self, text=None): """Called when the text boxes are modified, enables the save button""" del text + self.__textSubject.setReadOnly(False) + self.__textMessage.setReadOnly(False) self.__btnSave.setEnabled(True) def __close(self): @@ -204,10 +209,11 @@ def refreshComments(self): def __macroLoad(self): """Loads 
the defined comment macros from settings""" - # pylint: disable=no-member - self.__macroList = pickle.loads( - str(QtGui.qApp.settings.value("Comments", pickle.dumps({})))) - # pylint: enable=no-member + try: + self.__macroList = pickle.loads(self.app.settings.value("Comments", pickle.dumps({}))) + except TypeError: + self.__macroList = pickle.loads( + str(self.app.settings.value("Comments", pickle.dumps({})))) self.__macroRefresh() def __macroRefresh(self): @@ -221,9 +227,7 @@ def __macroRefresh(self): def __macroSave(self): """Saves the current comment macros to settings""" - # pylint: disable=no-member - QtGui.qApp.settings.setValue("Comments", pickle.dumps(self.__macroList)) - # pylint: enable=no-member + self.app.settings.setValue("Comments", pickle.dumps(self.__macroList)) def __macroHandle(self, selection): """Called when the comment macro combo box is selected @@ -331,8 +335,10 @@ def __init__(self, name="", subject="", message="", parent=None): layout.addWidget(self.__textMessage) layout.addLayout(btnLayout) + # pylint: disable=no-member self.__btnSave.pressed.connect(self.__save) self.__btnCancel.pressed.connect(self.reject) + # pylint: enable=no-member def __save(self): """Validates and then exits from the dialog in success""" diff --git a/cuegui/cuegui/ConfirmationDialog.py b/cuegui/cuegui/ConfirmationDialog.py index 0c71e88e3..919a19563 100644 --- a/cuegui/cuegui/ConfirmationDialog.py +++ b/cuegui/cuegui/ConfirmationDialog.py @@ -20,9 +20,9 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets class ConfirmationDialog(QtWidgets.QDialog): @@ -74,5 +74,7 @@ def __init__(self, title, text, items=[], parent=None): self.setMaximumSize(400,300) self.setWindowTitle(title) + # pylint: disable=no-member __btn_accept.clicked.connect(self.accept) 
__btn_cancel.clicked.connect(self.reject) + # pylint: enable=no-member diff --git a/cuegui/cuegui/Constants.py b/cuegui/cuegui/Constants.py index bb38e9495..38f5dbafe 100644 --- a/cuegui/cuegui/Constants.py +++ b/cuegui/cuegui/Constants.py @@ -17,91 +17,165 @@ Application constants. """ - from __future__ import print_function from __future__ import division from __future__ import absolute_import +import logging import os import platform -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtWidgets +import yaml import opencue +import opencue.config -possible_version_path = os.path.join( - os.path.abspath(os.path.join(__file__ , "../../..")), 'VERSION.in') -if os.path.exists(possible_version_path): - with open(possible_version_path) as fp: - VERSION = fp.read().strip() -else: - VERSION = "1.3.0" +__CONFIG_FILE_ENV_VAR = 'CUEGUI_CONFIG_FILE' +__DEFAULT_INI_PATH_ENV_VAR = 'CUEGUI_DEFAULT_INI_PATH' +__DEFAULT_CONFIG_FILE_NAME = 'cuegui.yaml' +__DEFAULT_CONFIG_FILE = os.path.join( + os.path.dirname(__file__), 'config', __DEFAULT_CONFIG_FILE_NAME) + + +def __getLogger(): + """Other code should use cuegui.Logger to get a logger; we avoid using that module here + to avoid creating a circular dependency.""" + logger_format = logging.Formatter("%(levelname)-9s %(module)-10s %(message)s") + logger_stream = logging.StreamHandler() + logger_stream.setLevel(logging.INFO) + logger_stream.setFormatter(logger_format) + logger = logging.getLogger(__file__) + logger.addHandler(logger_stream) + return logger + + +def __loadConfigFromFile(): + logger = __getLogger() + with open(__DEFAULT_CONFIG_FILE) as fp: + config = yaml.load(fp, Loader=yaml.SafeLoader) + + user_config_file = None + + logger.debug('Checking for cuegui config file path in %s', __CONFIG_FILE_ENV_VAR) + config_file_from_env = os.environ.get(__CONFIG_FILE_ENV_VAR) + if config_file_from_env and os.path.exists(config_file_from_env): + 
user_config_file = config_file_from_env + + if not user_config_file: + config_file_from_user_profile = os.path.join( + opencue.config.config_base_directory(), __DEFAULT_CONFIG_FILE_NAME) + logger.debug('Checking for cuegui config at %s', config_file_from_user_profile) + if os.path.exists(config_file_from_user_profile): + user_config_file = config_file_from_user_profile + + if user_config_file: + logger.info('Loading cuegui config from %s', user_config_file) + with open(user_config_file, 'r') as fp: + config.update(yaml.load(fp, Loader=yaml.SafeLoader)) + + return config + + +def __packaged_version(): + possible_version_path = os.path.join( + os.path.abspath(os.path.join(__file__, "../../..")), 'VERSION.in') + if os.path.exists(possible_version_path): + with open(possible_version_path) as fp: + default_version = fp.read().strip() + return default_version + return "1.3.0" -STARTUP_NOTICE_DATE = 0 -STARTUP_NOTICE_MSG = "" -JOB_UPDATE_DELAY = 10000 # msec -LAYER_UPDATE_DELAY = 10000 # msec -FRAME_UPDATE_DELAY = 10000 # msec -HOST_UPDATE_DELAY = 20000 # msec -AFTER_ACTION_UPDATE_DELAY = 1000 # msec +__config = __loadConfigFromFile() -MAX_LOG_POPUPS = 5 -MINIMUM_UPDATE_INTERVAL = 5 # sec +VERSION = __config.get('version', __packaged_version()) -FONT_SIZE = 10 # 8 -STANDARD_FONT = QtGui.QFont("Luxi Sans", FONT_SIZE) -STANDARD_ROW_HEIGHT = 16 # 14 +STARTUP_NOTICE_DATE = __config.get('startup_notice.date') +STARTUP_NOTICE_MSG = __config.get('startup_notice.msg') -MEMORY_WARNING_LEVEL = 5242880 +JOB_UPDATE_DELAY = __config.get('refresh.job_update_delay') +LAYER_UPDATE_DELAY = __config.get('refresh.layer_update_delay') +FRAME_UPDATE_DELAY = __config.get('refresh.frame_update_delay') +HOST_UPDATE_DELAY = __config.get('refresh.host_update_delay') +AFTER_ACTION_UPDATE_DELAY = __config.get('refresh.after_action_update_delay') +MINIMUM_UPDATE_INTERVAL = __config.get('refresh.min_update_interval') // 1000 -RESOURCE_PATH = os.path.dirname(__file__) + "/images" -DEFAULT_INI_PATH = 
os.getenv('CUEGUI_DEFAULT_INI_PATH', os.path.dirname(__file__) + '/config') +FONT_FAMILY = __config.get('style.font.family') +FONT_SIZE = __config.get('style.font.size') +STANDARD_FONT = QtGui.QFont(FONT_FAMILY, FONT_SIZE) -DEFAULT_PLUGIN_PATHS = [os.path.dirname(__file__) + "/plugins"] +RESOURCE_PATH = __config.get('paths.resources') +if not os.path.isabs(RESOURCE_PATH): + RESOURCE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), RESOURCE_PATH)) -LOGGER_FORMAT = "%(levelname)-9s %(module)-10s %(message)s" -LOGGER_LEVEL = "WARNING" +CONFIG_PATH = __config.get('paths.config') +if not os.path.isabs(CONFIG_PATH): + CONFIG_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), CONFIG_PATH)) -EMAIL_SUBJECT_PREFIX = "cuemail: please check " -EMAIL_BODY_PREFIX = "Your PSTs request that you check " -EMAIL_BODY_SUFFIX = "\n\n" -EMAIL_DOMAIN = "" +DEFAULT_INI_PATH = os.getenv('CUEGUI_DEFAULT_INI_PATH', __config.get('paths.default_ini_path')) +if not os.path.isabs(DEFAULT_INI_PATH): + DEFAULT_INI_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), DEFAULT_INI_PATH)) -GITHUB_CREATE_ISSUE_URL = 'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new' -URL_USERGUIDE = "https://www.opencue.io/docs/" -URL_SUGGESTION = "%s?labels=enhancement&template=enhancement.md" % GITHUB_CREATE_ISSUE_URL -URL_BUG = "%s?labels=bug&template=bug_report.md" % GITHUB_CREATE_ISSUE_URL +DEFAULT_PLUGIN_PATHS = __config.get('paths.plugins') +for i, path in enumerate(DEFAULT_PLUGIN_PATHS): + if not os.path.isabs(path): + DEFAULT_PLUGIN_PATHS[i] = os.path.abspath(os.path.join(os.path.dirname(__file__), path)) -if platform.system() == "Windows": - DEFAULT_EDITOR = "notepad" +LOGGER_FORMAT = __config.get('logger.format') +LOGGER_LEVEL = __config.get('logger.level') + +EMAIL_SUBJECT_PREFIX = __config.get('email.subject_prefix') +EMAIL_BODY_PREFIX = __config.get('email.body_prefix') +EMAIL_BODY_SUFFIX = __config.get('email.body_suffix') +EMAIL_DOMAIN = 
__config.get('email.domain') + +GITHUB_CREATE_ISSUE_URL = __config.get('links.issue.create') +URL_USERGUIDE = __config.get('links.user_guide') +URL_SUGGESTION = GITHUB_CREATE_ISSUE_URL + __config.get('links.issue.suggestion') +URL_BUG = GITHUB_CREATE_ISSUE_URL + __config.get('links.issue.bug') + +if platform.system() == 'Windows': + DEFAULT_EDITOR = __config.get('editor.windows') +elif platform.system() == 'Darwin': + DEFAULT_EDITOR = __config.get('editor.mac') else: - DEFAULT_EDITOR = "gview -R -m -M -U %s/gvimrc +" % DEFAULT_INI_PATH + DEFAULT_EDITOR = __config.get('editor.linux') +DEFAULT_EDITOR = DEFAULT_EDITOR.format(config_path=CONFIG_PATH) -EMPTY_INDEX = QtCore.QModelIndex() +LOG_ROOT_OS = __config.get('render_logs.root') -QVARIANT_CENTER = QtCore.Qt.AlignCenter -QVARIANT_RIGHT = QtCore.Qt.AlignRight -QVARIANT_NULL = None -QVARIANT_BLACK = QtGui.QColor(QtCore.Qt.black) -QVARIANT_GREY = QtGui.QColor(QtCore.Qt.gray) - -ALLOWED_TAGS = ("general", "desktop", "playblast", "util", "preprocess", "wan", "cuda", "splathw", - 'naiad', 'massive') - -RGB_FRAME_STATE = {opencue.api.job_pb2.DEAD: QtGui.QColor(255, 0, 0), - opencue.api.job_pb2.DEPEND: QtGui.QColor(160, 32, 240), - opencue.api.job_pb2.EATEN: QtGui.QColor(150, 0, 0), - opencue.api.job_pb2.RUNNING: QtGui.QColor(200, 200, 55), - opencue.api.job_pb2.SETUP: QtGui.QColor(160, 32, 240), - opencue.api.job_pb2.SUCCEEDED: QtGui.QColor(55, 200, 55), - opencue.api.job_pb2.WAITING: QtGui.QColor(135, 207, 235), - opencue.api.job_pb2.CHECKPOINT: QtGui.QColor(61, 98, 247)} -QVARIANT_FRAME_STATE = \ - dict((key, RGB_FRAME_STATE[key]) for key in list(RGB_FRAME_STATE.keys())) +ALLOWED_TAGS = tuple(__config.get('allowed_tags')) + +DARK_STYLE_SHEET = os.path.join(CONFIG_PATH, __config.get('style.style_sheet')) +COLOR_THEME = __config.get('style.color_theme') +__bg_colors = __config.get('style.colors.background') +COLOR_USER_1 = QtGui.QColor(*__bg_colors[0]) +COLOR_USER_2 = QtGui.QColor(*__bg_colors[1]) +COLOR_USER_3 = 
QtGui.QColor(*__bg_colors[2]) +COLOR_USER_4 = QtGui.QColor(*__bg_colors[3]) + +__frame_colors = __config.get('style.colors.frame_state') +RGB_FRAME_STATE = { + opencue.api.job_pb2.DEAD: QtGui.QColor(*__frame_colors.get('DEAD')), + opencue.api.job_pb2.DEPEND: QtGui.QColor(*__frame_colors.get('DEPEND')), + opencue.api.job_pb2.EATEN: QtGui.QColor(*__frame_colors.get('EATEN')), + opencue.api.job_pb2.RUNNING: QtGui.QColor(*__frame_colors.get('RUNNING')), + opencue.api.job_pb2.SETUP: QtGui.QColor(*__frame_colors.get('SETUP')), + opencue.api.job_pb2.SUCCEEDED: QtGui.QColor(*__frame_colors.get('SUCCEEDED')), + opencue.api.job_pb2.WAITING: QtGui.QColor(*__frame_colors.get('WAITING')), + opencue.api.job_pb2.CHECKPOINT: QtGui.QColor(*__frame_colors.get('CHECKPOINT')), +} + +MEMORY_WARNING_LEVEL = __config.get('memory_warning_level') + +LOG_HIGHLIGHT_ERROR = __config.get('render_logs.highlight.error') +LOG_HIGHLIGHT_WARN = __config.get('render_logs.highlight.warning') +LOG_HIGHLIGHT_INFO = __config.get('render_logs.highlight.info') + +RESOURCE_LIMITS = __config.get('resources') TYPE_JOB = QtWidgets.QTreeWidgetItem.UserType + 1 TYPE_LAYER = QtWidgets.QTreeWidgetItem.UserType + 2 @@ -120,19 +194,7 @@ TYPE_TASK = QtWidgets.QTreeWidgetItem.UserType + 15 TYPE_LIMIT = QtWidgets.QTreeWidgetItem.UserType + 16 -COLUMN_INFO_DISPLAY = 2 - -DARK_STYLE_SHEET = os.path.join(DEFAULT_INI_PATH, "darkpalette.qss") -COLOR_THEME = "plastique" -COLOR_USER_1 = QtGui.QColor(50, 50, 100) -COLOR_USER_2 = QtGui.QColor(100, 100, 50) -COLOR_USER_3 = QtGui.QColor(0, 50, 0) -COLOR_USER_4 = QtGui.QColor(50, 30, 0) - +QVARIANT_NULL = None QT_MAX_INT = 2147483647 -LOG_HIGHLIGHT_ERROR = [ - 'error', 'aborted', 'fatal', 'failed', 'killed', 'command not found', - 'no licenses could be found', 'killMessage'] -LOG_HIGHLIGHT_WARN = ['warning', 'not found'] -LOG_HIGHLIGHT_INFO = ['info:', 'rqd cmd:'] +COLUMN_INFO_DISPLAY = 2 diff --git a/cuegui/cuegui/CreateShowDialog.py b/cuegui/cuegui/CreateShowDialog.py new file 
mode 100644 index 000000000..36e66fb49 --- /dev/null +++ b/cuegui/cuegui/CreateShowDialog.py @@ -0,0 +1,208 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""A dialog for creating new shows.""" + + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + + +from qtpy import QtCore +from qtpy import QtWidgets + +import opencue + + +class CreateShowDialog(QtWidgets.QDialog): + """A dialog for creating new shows. 
+ _________________________________________________ + | Show name |__Enter show name here__| | + | | + | subscriptions_______________________________ | + | |_| local.general size |_____| burst |____| | + | |_| local.desktop size |_____| burst |____| | + | |_| cloud.general size |_____| burst |____| | + | | + | |_create_| |_cancel_| | + |_______________________________________________| + """ + + def __init__(self, parent=None): + QtWidgets.QDialog.__init__(self, parent) + self.subscription_fields = [] + + self.setWindowTitle("Create New Show") + self.setAttribute(QtCore.Qt.WA_DeleteOnClose) + self.setSizeGripEnabled(True) + + self.__create_btn = QtWidgets.QPushButton("Create", self) + self.__cancel_btn = QtWidgets.QPushButton("Close", self) + + self.__name_label = QtWidgets.QLabel("Show name") + self.__name_field = QtWidgets.QLineEdit() + self.__name_field.setPlaceholderText("Enter show name here") + + self.__subscription_grpbox = self.__createSubscriptionWidget() + + QtWidgets.QGridLayout(self) + self.layout().addWidget(self.__name_label, 0, 0, 1, 1) + self.layout().addWidget(self.__name_field, 0, 1, 1, 2) + self.layout().addWidget(self.__subscription_grpbox, 1, 0, 4, 3) + self.layout().addWidget(self.__create_btn, 5, 1) + self.layout().addWidget(self.__cancel_btn, 5, 2) + + # pylint: disable=no-member + self.__create_btn.clicked.connect(self.__createShow) + self.__cancel_btn.clicked.connect(self.__cancelDialog) + # pylint: enable=no-member + self.adjustSize() + + def __createSubscriptionWidget(self): + """Create the groupbox widget containing subscription fields""" + widget = QtWidgets.QGroupBox("Subscriptions") + layout = QtWidgets.QGridLayout() + + layout.addWidget(QtWidgets.QLabel("Allocation"), 0, 0 , 1, 1) + layout.addWidget(QtWidgets.QLabel("Size"), 0, 1 , 1, 1) + layout.addWidget(QtWidgets.QLabel("Burst"), 0, 2 , 1, 1) + + row = 1 + for allocation in opencue.api.getAllocations(): + alloc_checkbox = QtWidgets.QCheckBox(allocation.name()) + 
layout.addWidget(alloc_checkbox, row, 0 , 1, 1) + + size_spinbox = QtWidgets.QDoubleSpinBox(self) + size_spinbox.setMaximum(1000000) + size_spinbox.setValue(100) + layout.addWidget(size_spinbox, row, 1 , 1, 1) + + burst_spinbox = QtWidgets.QDoubleSpinBox(self) + burst_spinbox.setMaximum(1000000) + burst_spinbox.setValue(100) + layout.addWidget(burst_spinbox, row, 2 , 1, 1) + + self.subscription_fields.append({ + "allocation": allocation, + "enabled": alloc_checkbox, + "size": size_spinbox, + "burst": burst_spinbox + }) + + row += 1 + + widget.setLayout(layout) + return widget + + def __createShow(self): + """Create the show and specified subscriptions""" + if not self.__validate(): + return + + show = self.tryCreateShow() + if not show: + return + + for subscription in self.subscription_fields: + self.tryCreateSubscription(show, subscription) + + self.accept() + + def __cancelDialog(self): + """Abort creating a new show""" + self.reject() + + def __validate(self): + """Validate fields before creating a show""" + if not self.__validateName(): + return False + + if not self.__validateNoDuplicateShow(): + return False + + return True + + def __validateName(self): + """Validate the name field""" + show_name = self.__name_field.text() + if not show_name: + QtWidgets.QMessageBox.critical( + self, + "Invalid Show Name", + "Please enter a valid show name.", + QtWidgets.QMessageBox.Ok + ) + return False + return True + + def __validateNoDuplicateShow(self): + """Validate an existing show with the same name doesn't exist""" + show_name = self.__name_field.text() + try: + opencue.api.findShow(show_name) + except opencue.EntityNotFoundException: + return True + + QtWidgets.QMessageBox.critical( + self, + "Show Already Exists", + "A show with that name already exists, please enter a unique show name.", + QtWidgets.QMessageBox.Ok + ) + + return False + + def tryCreateShow(self): + """Try to create the show in OpenCue + + @return: An opencue.wrappers.show.Show if successful + """ 
+ try: + show = opencue.api.createShow(self.__name_field.text()) + return show + except opencue.exception.CueException as e: + QtWidgets.QMessageBox.critical( + self, + "Failed To Create Show", + str(e), + QtWidgets.QMessageBox.Ok + ) + + def tryCreateSubscription(self, show, subscription): + """Try to create a subscription for the show in OpenCue + + @type show: opencue.wrappers.show.Show + @param show: The show to create a subscription for. + @type subscription: dict + @param subscription: A dictionary containing the Allocation instance + along with the other subscription field widgets. + """ + if not subscription["enabled"].isChecked(): + return + + try: + show.createSubscription( + subscription["allocation"], + float(subscription["size"].value()), + float(subscription["burst"].value()) + ) + except opencue.exception.CueException as e: + QtWidgets.QMessageBox.critical( + self, + "Failed To Create Subscription", + str(e), + QtWidgets.QMessageBox.Ok + ) diff --git a/cuegui/cuegui/CreatorDialog.py b/cuegui/cuegui/CreatorDialog.py index 0e436b201..5d2e5e450 100644 --- a/cuegui/cuegui/CreatorDialog.py +++ b/cuegui/cuegui/CreatorDialog.py @@ -23,7 +23,7 @@ from builtins import str from builtins import zip -from PySide2 import QtWidgets +from qtpy import QtWidgets import opencue @@ -97,8 +97,10 @@ def __init__(self, show=None, parent=None): layout.addWidget(self.__buttons) self.resize(400, 0) + # pylint: disable=no-member self.__buttons.accepted.connect(self.create) self.__buttons.rejected.connect(self.close) + # pylint: enable=no-member def create(self): self.__creator.create() diff --git a/cuegui/cuegui/CueJobMonitorTree.py b/cuegui/cuegui/CueJobMonitorTree.py index 8d1307771..2305835b1 100644 --- a/cuegui/cuegui/CueJobMonitorTree.py +++ b/cuegui/cuegui/CueJobMonitorTree.py @@ -22,11 +22,12 @@ from builtins import str from builtins import map +from collections import namedtuple import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import 
QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue import opencue.compiled_proto.job_pb2 @@ -43,6 +44,7 @@ logger = cuegui.Logger.getLogger(__file__) +Body = namedtuple("Body", "group_names, group_ids, job_names, job_ids") COLUMN_COMMENT = 1 COLUMN_EAT = 2 @@ -98,53 +100,72 @@ def __init__(self, parent): data=lambda job: "%.02f" % job.data.job_stats.reserved_cores, sort=lambda job: job.data.job_stats.reserved_cores, tip="The number of reserved cores.") - self.addColumn("Wait", 45, id=6, + self.addColumn("Gpus", 55, id=6, + data=lambda job: "%d" % job.data.job_stats.reserved_gpus, + sort=lambda job: job.data.job_stats.reserved_gpus, + tip="The number of reserved gpus.") + self.addColumn("Wait", 45, id=7, data=lambda job: job.data.job_stats.waiting_frames, sort=lambda job: job.data.job_stats.waiting_frames, tip="The number of waiting frames.") - self.addColumn("Depend", 55, id=7, + self.addColumn("Depend", 55, id=8, data=lambda job: job.data.job_stats.depend_frames, sort=lambda job: job.data.job_stats.depend_frames, tip="The number of dependent frames.") - self.addColumn("Total", 50, id=8, + self.addColumn("Total", 50, id=9, data=lambda job: job.data.job_stats.total_frames, sort=lambda job: job.data.job_stats.total_frames, tip="The total number of frames.") - self.addColumn("_Booking Bar", 150, id=9, + self.addColumn("_Booking Bar", 150, id=10, delegate=cuegui.ItemDelegate.JobBookingBarDelegate) - self.addColumn("Min", 38, id=10, + self.addColumn("Min", 38, id=11, data=lambda job: "%.0f" % job.data.min_cores, sort=lambda job: job.data.min_cores, tip="The minimum number of running cores that the cuebot\n" "will try to maintain.") - self.addColumn("Max", 38, id=11, + self.addColumn("Max", 38, id=12, data=lambda job: "%.0f" % job.data.max_cores, sort=lambda job: job.data.max_cores, tip="The maximum number of running cores that the cuebot\n" "will allow.") + self.addColumn("Min Gpus", 38, id=13, + data=lambda job: "%d" % 
job.data.min_gpus, + sort=lambda job: job.data.min_gpus, + tip="The minimum number of running gpus that the cuebot\n" + "will try to maintain.") + self.addColumn("Max Gpus", 38, id=14, + data=lambda job: "%d" % job.data.max_gpus, + sort=lambda job: job.data.max_gpus, + tip="The maximum number of running gpus that the cuebot\n" + "will allow.") self.addColumn( - "Age", 50, id=12, + "Age", 50, id=15, data=lambda job: cuegui.Utils.secondsToHHHMM(self.currtime - job.data.start_time), sort=lambda job: self.currtime - job.data.start_time, tip="The HOURS:MINUTES since the job was launched.") - self.addColumn("Pri", 30, id=13, + self.addColumn("Pri", 30, id=16, data=lambda job: job.data.priority, sort=lambda job: job.data.priority, tip="The job priority. The cuebot uses this as a suggestion\n" "to determine what job needs the next available matching\n" "resource.") - self.addColumn("ETA", 65, id=14, + self.addColumn("ETA", 65, id=17, data=lambda job: "", tip="(Inacurate and disabled until a better solution exists)\n" "A very rough estimate of the number of HOURS:MINUTES\n" "it will be before the entire job is done.") - self.addColumn("MaxRss", 60, id=15, + self.addColumn("MaxRss", 60, id=18, data=lambda job: cuegui.Utils.memoryToString(job.data.job_stats.max_rss), sort=lambda job: job.data.job_stats.max_rss, tip="The most memory used at one time by any single frame.") - self.addColumn("_Blank", 20, id=16, + self.addColumn("MaxGpuMem", 60, id=19, + data=lambda job: cuegui.Utils.memoryToString( + job.data.job_stats.max_gpu_memory), + sort=lambda job: job.data.job_stats.max_gpu_memory, + tip="The most gpu memory used at one time by any single frame.") + self.addColumn("_Blank", 20, id=20, tip="Spacer") - self.addColumn("Progress", 0, id=17, + self.addColumn("Progress", 0, id=21, delegate=cuegui.ItemDelegate.JobThinProgressBarDelegate, tip="A visual overview of the job progress.\n" "Green \t is succeeded\n" @@ -164,23 +185,31 @@ def __init__(self, parent): self.addColumn("", 
0, id=5, data=lambda group: "%.2f" % group.data.stats.reserved_cores) self.addColumn("", 0, id=6, + data=lambda group: "%d" % group.data.stats.reserved_gpus) + self.addColumn("", 0, id=7, data=lambda group: group.data.stats.waiting_frames) - self.addColumn("", 0, id=7) self.addColumn("", 0, id=8) - self.addColumn("", 0, id=9, - data=lambda group: (group.data.min_cores or "")) + self.addColumn("", 0, id=9) self.addColumn("", 0, id=10, + data=lambda group: (group.data.min_cores or "")) + self.addColumn("", 0, id=11, data=lambda group: ( group.data.max_cores > 0 and group.data.max_cores or "")) - self.addColumn("", 0, id=11) - self.addColumn("", 0, id=12) - self.addColumn("", 0, id=13) + self.addColumn("", 0, id=12, + data=lambda group: (group.data.min_gpus or "")) + self.addColumn("", 0, id=13, + data=lambda group: ( + group.data.max_gpus > 0 and group.data.max_gpus or "")) self.addColumn("", 0, id=14) self.addColumn("", 0, id=15) - self.addColumn("", 0, id=16, + self.addColumn("", 0, id=16) + self.addColumn("", 0, id=17) + self.addColumn("", 0, id=18) + self.addColumn("", 0, id=19) + self.addColumn("", 0, id=20, data=lambda group: (group.data.department != "Unknown" and group.data.department or "")) - self.addColumn("", 0, id=17) + self.addColumn("", 0, id=21) cuegui.AbstractTreeWidget.AbstractTreeWidget.__init__(self, parent) @@ -194,11 +223,11 @@ def __init__(self, parent): self.__menuActions = cuegui.MenuActions.MenuActions( self, self.updateSoon, self.selectedObjects) + self.app.facility_changed.connect(self.removeAllShows) # pylint: disable=no-member - QtGui.qApp.facility_changed.connect(self.removeAllShows) - # pylint: enable=no-member self.itemClicked.connect(self.__itemSingleClickedCopy) self.itemClicked.connect(self.__itemSingleClickedComment) + # pylint: enable=no-member # Skip updates if the user is scrolling self._limitUpdatesDuringScrollSetup() @@ -264,35 +293,23 @@ def dropEvent(self, event): if item and item.type() in (cuegui.Constants.TYPE_ROOTGROUP, 
cuegui.Constants.TYPE_GROUP): job_ids = cuegui.Utils.dropEvent(event, "application/x-job-ids") group_ids = cuegui.Utils.dropEvent(event, "application/x-group-ids") + job_names = cuegui.Utils.dropEvent(event, "application/x-job-names") + group_names = cuegui.Utils.dropEvent(event, "application/x-group-names") if job_ids or group_ids: - body = "" - if group_ids: - body += "Groups:\n" + "\n".join( - cuegui.Utils.dropEvent(event, "application/x-group-names")) - if group_ids and job_ids: - body += "\n\n" - if job_ids: - body += "Jobs:\n" + "\n".join( - cuegui.Utils.dropEvent(event, "application/x-job-names")) - - result = QtWidgets.QMessageBox.question( - self, - "Move groups/jobs?", - "Move the following into the group: " + - "\"%s\"?\n\n%s" % ( - item.rpcObject.data.name, body), - QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No) - - if result == QtWidgets.QMessageBox.Yes: - if job_ids: - jobs = [opencue.api.getJob(id_) for id_ in job_ids] - item.rpcObject.asGroup().reparentJobs(jobs) - - if group_ids: - item.rpcObject.asGroup().reparentGroupIds(group_ids) - - self.updateRequest() + body_content = Body(group_names=group_names, + group_ids=group_ids, + job_names=job_names, + job_ids=job_ids) + + dialog = MoveDialog(title="Move Groups/Jobs", + text="Move the following into the group: %s?" 
\ + % item.rpcObject.data.name, + event_item=item, + items=body_content, + dist_groups={}, + parent=self) + dialog.exec_() def addShow(self, show, update=True): """Adds a show to the list of monitored shows @@ -342,7 +359,7 @@ def getShowNames(self): return list(self.__shows.keys()) def __getCollapsed(self): - return [item.rpcObject for item in list(self._items.values()) if not item.isExpanded()] + return [item.rpcObject.id() for item in list(self._items.values()) if not item.isExpanded()] def __setCollapsed(self, collapsed): self.expandAll() @@ -356,13 +373,19 @@ def _getUpdate(self): @return: List that contains updated nested groups and a set of all updated item ideas""" self.currtime = time.time() + allIds = [] try: groups = [show.getJobWhiteboard() for show in self.getShows()] nestedGroups = [] allIds = [] for group in groups: + # add jobs and parent group to match self._items + allIds.append(group.id) + allIds.extend(group.jobs) nestedGroups.append(opencue.wrappers.group.NestedGroup(group)) - allIds.extend(self.__getNestedIds(group)) + # pylint: disable=no-value-for-parameter + allIds.extend(self.__getNestedIds(group, updated=[])) + # pylint: enable=no-value-for-parameter except opencue.exception.CueException as e: list(map(logger.warning, cuegui.Utils.exceptionOutput(e))) return None @@ -383,8 +406,9 @@ def _processUpdate(self, work, rpcObjects): try: current = set(self._items.keys()) if current == set(rpcObjects[1]): - # Only updates - self.__processUpdateHandleNested(self.invisibleRootItem(), rpcObjects[0]) + # Only updates if return rpcObjects doesn't equal current _items + collapsed = self.__getCollapsed() + self.__setCollapsed(collapsed) self.redraw() else: # (Something removed) or (Something added) @@ -403,25 +427,20 @@ def _processUpdate(self, work, rpcObjects): finally: self._itemsLock.unlock() - def __getNestedIds(self, group): - """Returns all the ids founds in the nested list + def __getNestedIds(self, group, updated): + """Returns all the ids 
founds in the nested list including + group and job ids. @type group: job_pb2.Group - @param group: A group that can contain groups and/or jobs + @param group: A group that can contain groups and their associated jobs @rtype: list @return: The list of all child ids""" - updated = [] - for innerGroup in group.groups.nested_groups: - updated.append(innerGroup.id) - - # If group has groups, recursively call this function - for g in innerGroup.groups.nested_groups: - updated_g = self.__getNestedIds(g) - if updated_g: - updated.extend(updated_g) - - # If group has jobs, update them - for jobId in innerGroup.jobs: - updated.append(jobId) + updated = updated if updated else [] + if group.groups.nested_groups: + for g in group.groups.nested_groups: + updated.append(g.id) + if g.jobs: + updated.extend(g.jobs) + self.__getNestedIds(g, updated) return updated @@ -449,16 +468,18 @@ def __processUpdateHandleNested(self, parent, groups): for nestedGroup in group.data.groups.nested_groups] self.__processUpdateHandleNested(groupItem, nestedGroups) - for jobId in group.data.jobs: - job = opencue.api.getJob(jobId) - try: - if job.id() in self._items: - self._items[job.id()].update(job, groupItem) - else: - self._items[job.id()] = JobWidgetItem(job, groupItem) - except RuntimeError: - logger.warning( - "Failed to create tree item. RootView might be closed", exc_info=True) + if group.data.jobs: + jobsObject = opencue.api.getJobs(id=list(group.data.jobs)) + + for job in jobsObject: + try: + if job.id() in self._items: + self._items[job.id()].update(job, groupItem) + else: + self._items[job.id()] = JobWidgetItem(job, groupItem) + except RuntimeError: + logger.warning( + "Failed to create tree item. 
RootView might be closed", exc_info=True) def mouseDoubleClickEvent(self, event): del event @@ -528,6 +549,8 @@ def contextMenuEvent(self, e): menu.addSeparator() self.__menuActions.jobs().addAction(menu, "setMinCores") self.__menuActions.jobs().addAction(menu, "setMaxCores") + self.__menuActions.jobs().addAction(menu, "setMinGpus") + self.__menuActions.jobs().addAction(menu, "setMaxGpus") self.__menuActions.jobs().addAction(menu, "setPriority") self.__menuActions.jobs().addAction(menu, "setMaxRetries") if counts["job"] == 1: @@ -703,9 +726,7 @@ def __init__(self, rpcObject, parent): self.__class__.__initialized = True self.__class__.__commentIcon = QtGui.QIcon(":comment.png") self.__class__.__eatIcon = QtGui.QIcon(":eat.png") - # pylint: disable=no-member - self.__class__.__backgroundColor = QtGui.qApp.palette().color(QtGui.QPalette.Base) - # pylint: enable=no-member + self.__class__.__backgroundColor = cuegui.app().palette().color(QtGui.QPalette.Base) self.__class__.__foregroundColor = cuegui.Style.ColorTheme.COLOR_JOB_FOREGROUND self.__class__.__pausedColor = cuegui.Style.ColorTheme.COLOR_JOB_PAUSED_BACKGROUND self.__class__.__finishedColor = cuegui.Style.ColorTheme.COLOR_JOB_FINISHED_BACKGROUND @@ -779,3 +800,84 @@ def data(self, col, role): return self._cache.get("FST", cuegui.Constants.QVARIANT_NULL) return cuegui.Constants.QVARIANT_NULL + + +class MoveDialog(QtWidgets.QDialog): + """ + A dialog for moving selected Jobs/Groups into another Group + """ + def __init__(self, title, text, event_item, items, dst_groups, + send_to_groups=False, parent=None): + """ + Initializes the list of jobs/groups to move + @type title: str + @param title: Window Title + @type text: str + @param text: Confirmation question to the user + @type event_item: rpcObject + @param event_item: the rpcObject to act on + @type items: namedtuple + @param items: object that holds job_ids, group_ids, group_names, job_names to act on + @type dst_groups: dict + @param dst_groups: dict of 
destination groups to move jobs/groups to + @type parent: AbstractTreeWidget + @param parent: The dialog's parent + """ + QtWidgets.QDialog.__init__(self, parent) + self.parent = parent + self.items = items + self.event_item = event_item + self.send_to_groups = send_to_groups + self.dst_groups = dst_groups + _btn_accept = QtWidgets.QPushButton("Ok", self) + _btn_cancel = QtWidgets.QPushButton("Cancel", self) + _label_text = QtWidgets.QLabel(text, self) + _label_text.setWordWrap(True) + + _vlayout = QtWidgets.QVBoxLayout(self) + _vlayout.addWidget(_label_text) + + self._listView = QtWidgets.QListView(self) + _vlayout.addWidget(self._listView) + _model = QtGui.QStandardItemModel(self._listView) + self.setWindowTitle(title) + for item in self.items.job_names: + standard_item = QtGui.QStandardItem(item) + _model.appendRow(standard_item) + for item in self.items.group_names: + _standard_item = QtGui.QStandardItem(item) + _model.appendRow(_standard_item) + self._listView.setModel(_model) + + if self.send_to_groups: + self.combo = QtWidgets.QComboBox(self) + self.combo.addItems(sorted(self.dst_groups.keys())) + self.layout().addWidget(self.combo) + + _hlayout = QtWidgets.QHBoxLayout() + _hlayout.addWidget(_btn_accept) + _hlayout.addWidget(_btn_cancel) + _vlayout.addLayout(_hlayout) + + self.connect(_btn_accept, + QtCore.SIGNAL("clicked()"), + self.move_items) + self.connect(_btn_cancel, + QtCore.SIGNAL("clicked()"), + self.reject) + + def move_items(self): + """Reparent jobs to new group""" + + if not self.send_to_groups: + if self.items.job_ids: + jobs = [opencue.api.getJob(id_) for id_ in self.items.job_ids] + self.event_item.rpcObject.asGroup().reparentJobs(jobs) + + if self.items.group_ids: + self.event_item.rpcObject.asGroup().reparentGroupIds(self.items.group_ids) + self.parent.updateRequest() + else: + selected_group = self.combo.currentText() + self.dst_groups[str(selected_group)].reparentJobs(self.items.job_ids) + self.accept() diff --git 
a/cuegui/cuegui/CueStateBarWidget.py b/cuegui/cuegui/CueStateBarWidget.py index 3bf8be62d..26b7feb6f 100644 --- a/cuegui/cuegui/CueStateBarWidget.py +++ b/cuegui/cuegui/CueStateBarWidget.py @@ -24,9 +24,9 @@ import time import weakref -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import cuegui.Logger @@ -47,6 +47,7 @@ def __init__(self, sourceTree, parent=None): @type parent: QWidget @param parent: The parent widget""" QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() self.__background = None @@ -55,14 +56,12 @@ def __init__(self, sourceTree, parent=None): self.__sourceTree = weakref.proxy(sourceTree) self.__colors = [] - # pylint: disable=no-member - self.__baseColor = QtGui.qApp.palette().color(QtGui.QPalette.Base) - # pylint: enable=no-member + self.__baseColor = self.app.palette().color(QtGui.QPalette.Base) self.__colorsLock = QtCore.QReadWriteLock() self.__timer = QtCore.QTimer(self) self.__lastUpdate = 0 - self.__timer.timeout.connect(self.updateColors) + self.__timer.timeout.connect(self.updateColors) # pylint: disable=no-member self.__sourceTree.verticalScrollBar().valueChanged.connect(self.update) self.__sourceTree.verticalScrollBar().rangeChanged.connect(self.__updateColors) diff --git a/cuegui/cuegui/Cuedepend.py b/cuegui/cuegui/Cuedepend.py index 2997513e5..7335f6d59 100644 --- a/cuegui/cuegui/Cuedepend.py +++ b/cuegui/cuegui/Cuedepend.py @@ -22,7 +22,7 @@ JobOnJob / joj JobOnLayer / jol JobOnFrame / jof LayerOnJob / loj LayerOnLayer / lol LayerOnFrame / lof FrameOnJob / foj FrameOnLayer / fol FrameOnFrame / fof - FrameByFRame / fbf HardDepend / hd + FrameByFrame / fbf HardDepend / hd """ @@ -44,13 +44,13 @@ ERR_INVALID_ON_LAYER = ( "Error, a dependency of this type requires a valid layer name to depend on. 
See -on-layer.") ERR_INVALID_ON_FRAME = ( - "Error, a dependency of this type requries a valid frame name to depend on. See -on-frame.") + "Error, a dependency of this type requires a valid frame name to depend on. See -on-frame.") ERR_INVALID_ER_JOB = ( "Error, a dependency of this type requires a valid job name to depend on. See -job.") ERR_INVALID_ER_LAYER = ( "Error, a dependency of this type requires a valid layer name to depend on. See -layer.") ERR_INVALID_ER_FRAME = ( - "Error, a dependency of this type requries a valid frame name to depend on. See -frame.") + "Error, a dependency of this type requires a valid frame name to depend on. See -frame.") def __is_valid(value, error): diff --git a/cuegui/cuegui/DarkPalette.py b/cuegui/cuegui/DarkPalette.py index 2b15bd66a..fed62996b 100644 --- a/cuegui/cuegui/DarkPalette.py +++ b/cuegui/cuegui/DarkPalette.py @@ -22,8 +22,8 @@ import platform -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtWidgets import cuegui.Constants @@ -32,20 +32,19 @@ def init(): """Convenience function that takes the QApplication object for the application and configures the palette and style for the Plastique color scheme""" - # pylint: disable=no-member - QtGui.qApp.setPalette(DarkPalette()) + app = cuegui.app() + app.setPalette(DarkPalette()) if platform.system() in ['Darwin', 'Linux']: setDarkStyleSheet() elif platform.system() == 'Windows': - QtGui.qApp.setStyle('Fusion') + app.setStyle('Fusion') else: - QtGui.qApp.setStyle(QtWidgets.QStyleFactory.create(cuegui.Constants.COLOR_THEME)) + app.setStyle(QtWidgets.QStyleFactory.create(cuegui.Constants.COLOR_THEME)) def setDarkStyleSheet(): """Sets the stylesheet.""" - # pylint: disable=no-member - QtGui.qApp.setStyleSheet(open(cuegui.Constants.DARK_STYLE_SHEET).read()) + cuegui.app().setStyleSheet(open(cuegui.Constants.DARK_STYLE_SHEET).read()) def DarkPalette(): diff --git a/cuegui/cuegui/DependDialog.py b/cuegui/cuegui/DependDialog.py 
index ce94f526f..613e3da39 100644 --- a/cuegui/cuegui/DependDialog.py +++ b/cuegui/cuegui/DependDialog.py @@ -20,8 +20,8 @@ from __future__ import print_function from __future__ import division -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.DependMonitorTree import cuegui.Logger diff --git a/cuegui/cuegui/DependMonitorTree.py b/cuegui/cuegui/DependMonitorTree.py index fed246a36..8f778280b 100644 --- a/cuegui/cuegui/DependMonitorTree.py +++ b/cuegui/cuegui/DependMonitorTree.py @@ -22,7 +22,7 @@ from builtins import map -from PySide2 import QtWidgets +from qtpy import QtWidgets from opencue.compiled_proto import depend_pb2 import opencue.exception diff --git a/cuegui/cuegui/DependWizard.py b/cuegui/cuegui/DependWizard.py index 6910ee685..b07a5ee46 100644 --- a/cuegui/cuegui/DependWizard.py +++ b/cuegui/cuegui/DependWizard.py @@ -25,8 +25,8 @@ from builtins import range import re -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import FileSequence import opencue @@ -521,7 +521,7 @@ def __init__(self, parent): self._addLabel("Depend on Job:", 0, 0) self.__jobFilterLineEdit = self._addLineEdit(2, 0, "") - self.__jobFilterLineEdit.textChanged.connect(self.filterJobs) + self.__jobFilterLineEdit.textChanged.connect(self.filterJobs) # pylint: disable=no-member self.__jobList = self._addListWidget(3, 0) diff --git a/cuegui/cuegui/EmailDialog.py b/cuegui/cuegui/EmailDialog.py index 4775553d9..227b12627 100644 --- a/cuegui/cuegui/EmailDialog.py +++ b/cuegui/cuegui/EmailDialog.py @@ -38,9 +38,9 @@ pass import smtplib -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue @@ -130,8 +130,10 @@ def __init__(self, job, frames, parent=None): ly.addWidget(self.__txt_find) ly.addWidget(self.__txt_log) + # pylint: 
disable=no-member self.__sel_frames.activated.connect(self.switchLogEvent) self.__txt_find.returnPressed.connect(self.findEvent) + # pylint: enable=no-member # pylint: disable=inconsistent-return-statements def __getFrame(self, name): @@ -247,8 +249,10 @@ def __init__(self, job, parent=None): hlayout.addWidget(self.__btnCancel) vlayout.addLayout(hlayout) + # pylint: disable=no-member self.__btnSend.clicked.connect(self.sendEmail) self.__btnCancel.clicked.connect(self.cancel.emit) + # pylint: enable=no-member def giveFocus(self): """Initializes widget state when the widget gains focus.""" @@ -281,7 +285,7 @@ def email_subject(self): def email_body(self): """Get the email body text.""" - return "%s" % self.__email_body.toPlainText().toAscii() + return "%s" % self.__email_body.toPlainText() def appendToBody(self, txt): """Appends text to the email body.""" @@ -295,8 +299,8 @@ def sendEmail(self): """Sends the email.""" self.send.emit() - msg = MIMEText(self.email_body()) - msg["Subject"] = Header(self.email_subject(), continuation_ws=' ') + msg = MIMEText(self.email_body(), 'plain', 'utf-8') + msg["Subject"] = Header(self.email_subject(), 'utf-8', continuation_ws=' ') msg["To"] = self.email_to() msg["From"] = self.email_from() msg["Cc"] = self.email_cc() diff --git a/cuegui/cuegui/Exception.py b/cuegui/cuegui/Exception.py new file mode 100644 index 000000000..151386b32 --- /dev/null +++ b/cuegui/cuegui/Exception.py @@ -0,0 +1,35 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Custom exception classes for CueGUI application errors.""" + + +class CueGuiException(Exception): + """Base class for all CueGUI exceptions. + + Note that this class does NOT inherit from opencue.exception.CueException, so that error + handling code can easily distinguish between API errors and CueGUI errors. + """ + + +class ApplicationNotRunningException(CueGuiException): + """Raised when the CueGUI application has not been instantiated but is required to be.""" + + default_message = ( + 'attempted to access the CueGUI application before cuegui.create_app() was called') + + def __init__(self, message=None): + if message is None: + message = self.default_message + super().__init__(message) diff --git a/cuegui/cuegui/FilterDialog.py b/cuegui/cuegui/FilterDialog.py index f130eb77e..479f1c6e5 100644 --- a/cuegui/cuegui/FilterDialog.py +++ b/cuegui/cuegui/FilterDialog.py @@ -23,9 +23,9 @@ from builtins import map from builtins import str -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue import opencue.compiled_proto.filter_pb2 @@ -63,7 +63,7 @@ def __init__(self, show, parent=None): :type show: opencue.wrappers.show.Show :param show: the show to manage filters for - :type parent: PySide2.QtWidgets.QWidget.QWidget + :type parent: qtpy.QtWidgets.QWidget.QWidget :param parent: the parent widget """ QtWidgets.QDialog.__init__(self, parent) @@ -110,6 +110,7 @@ def __init__(self, show, parent=None): glayout.addWidget(self.__btnAddAction, 7, 7, 1, 1) glayout.addWidget(self.__btnDone, 8, 7, 1, 1) + # pylint: disable=no-member self.__filters.itemClicked.connect(self.__itemSingleClicked) self.__btnRefresh.clicked.connect(self.__refresh) self.__btnAddFilter.clicked.connect(self.__createFilter) @@ -120,6 +121,7 @@ def __init__(self, show, 
parent=None): self.__btnDeleteAllActions.clicked.connect(self.__actions.deleteAllActions) self.__btnAddAction.clicked.connect(self.__actions.createAction) self.__btnDone.clicked.connect(self.accept) + # pylint: enable=no-member def __createFilter(self): """Prompts the user to create a new filter""" @@ -478,11 +480,22 @@ def createAction(self): 2) value = int(value * 1048576) - elif actionType in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_CORES,): + elif actionType in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MIN_CORES,): (value, choice) = QtWidgets.QInputDialog.getDouble( self, "Create Action", - "How many cores should every render layer require?", + "How many min cores should every render layer require?", + 1, + 0.1, + 100, + 2) + value = float(value) + + elif actionType in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MAX_CORES,): + (value, choice) = QtWidgets.QInputDialog.getDouble( + self, + "Create Action", + "How many max cores should every render layer require?", 1, 0.1, 100, @@ -584,13 +597,13 @@ def updateWidgets(self): combo = QtWidgets.QCheckBox(self.parent()) combo.setFocusPolicy(QtCore.Qt.NoFocus) self.treeWidget().setItemWidget(self, 1, combo) - combo.stateChanged.connect(self.setEnabled) + combo.stateChanged.connect(self.setEnabled) # pylint: disable=no-member self.__widgets["enabled"] = combo combo = NoWheelComboBox(self.parent()) combo.addItems(FILTERTYPE) self.treeWidget().setItemWidget(self, 3, combo) - combo.currentIndexChanged.connect(self.setType) + combo.currentIndexChanged.connect(self.setType) # pylint: disable=no-member self.__widgets["type"] = combo self.__widgets["type"].setCurrentIndex(self.rpcObject.type()) @@ -653,23 +666,23 @@ def updateWidgets(self): combo = NoWheelComboBox(parent) combo.addItems(MATCHSUBJECT) treeWidget.setItemWidget(self, 0, combo) - combo.currentIndexChanged.connect(self.setSubject) + combo.currentIndexChanged.connect(self.setSubject) # pylint: disable=no-member self.__widgets["subject"] = combo combo = 
NoWheelComboBox(parent) combo.addItems(MATCHTYPE) treeWidget.setItemWidget(self, 1, combo) - combo.currentIndexChanged.connect(self.setType) + combo.currentIndexChanged.connect(self.setType) # pylint: disable=no-member self.__widgets["type"] = combo edit = QtWidgets.QLineEdit("", parent) treeWidget.setItemWidget(self, 2, edit) - edit.editingFinished.connect(self.setInput) + edit.editingFinished.connect(self.setInput) # pylint: disable=no-member self.__widgets["input"] = edit btn = QtWidgets.QPushButton(QtGui.QIcon(":kill.png"), "", parent) treeWidget.setItemWidget(self, 3, btn) - btn.clicked.connect(self.delete) + btn.clicked.connect(self.delete) # pylint: disable=no-member self.__widgets["delete"] = btn self.__widgets["subject"].setCurrentIndex(self.rpcObject.subject()) @@ -726,7 +739,8 @@ def __setValue(self, value=None): elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_JOB_MAX_CORES, opencue.api.filter_pb2.SET_JOB_MIN_CORES, - opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_CORES): + opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MIN_CORES, + opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MAX_CORES): value = float(widget.value()) elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_TAGS,): @@ -755,20 +769,21 @@ def updateWidgets(self): if self.rpcObject.type() in (opencue.api.filter_pb2.PAUSE_JOB,): widget = NoWheelComboBox(self.parent()) widget.addItems(PAUSETYPE) - widget.currentIndexChanged.connect(self.__setValue) + widget.currentIndexChanged.connect(self.__setValue) # pylint: disable=no-member elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_JOB_PRIORITY,): widget = NoWheelSpinBox(self.parent()) widget.setMaximum(99999) - widget.editingFinished.connect(self.__setValue) + widget.editingFinished.connect(self.__setValue) # pylint: disable=no-member elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MEMORY, - opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_CORES): + 
opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MIN_CORES, + opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MAX_CORES): widget = NoWheelDoubleSpinBox(self.parent()) widget.setDecimals(2) widget.setSingleStep(.10) widget.setMaximum(MAX_RENDER_MEM) - widget.editingFinished.connect(self.__setValue) + widget.editingFinished.connect(self.__setValue) # pylint: disable=no-member elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_JOB_MAX_CORES, opencue.api.filter_pb2.SET_JOB_MIN_CORES): @@ -776,21 +791,21 @@ def updateWidgets(self): widget.setDecimals(0) widget.setSingleStep(1) widget.setMaximum(1000) - widget.editingFinished.connect(self.__setValue) + widget.editingFinished.connect(self.__setValue) # pylint: disable=no-member elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_TAGS,): widget = QtWidgets.QLineEdit("", self.parent()) - widget.editingFinished.connect(self.__setValue) + widget.editingFinished.connect(self.__setValue) # pylint: disable=no-member elif self.rpcObject.type() in (opencue.api.filter_pb2.MOVE_JOB_TO_GROUP,): widget = NoWheelComboBox(self.parent()) widget.addItems(list(self.treeWidget().groupNames.keys())) - widget.currentIndexChanged.connect(self.__setValue) + widget.currentIndexChanged.connect(self.__setValue) # pylint: disable=no-member elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_MEMORY_OPTIMIZER,): widget = NoWheelComboBox(self.parent()) widget.addItems(MEMOPTTYPE) - widget.currentIndexChanged.connect(self.__setValue) + widget.currentIndexChanged.connect(self.__setValue) # pylint: disable=no-member if widget: self.treeWidget().setItemWidget(self, 1, widget) @@ -798,7 +813,7 @@ def updateWidgets(self): btn = QtWidgets.QPushButton(QtGui.QIcon(":kill.png"), "", self.parent()) self.treeWidget().setItemWidget(self, 2, btn) - btn.clicked.connect(self.delete) + btn.clicked.connect(self.delete) # pylint: disable=no-member self.__widgets["delete"] = btn # Update the widget with the current value @@ -815,7 +830,8 @@ def 
updateWidgets(self): elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_TAGS,): self.__widgets["ActionValue"].setText(self.rpcObject.value()) - elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_CORES, + elif self.rpcObject.type() in (opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MIN_CORES, + opencue.api.filter_pb2.SET_ALL_RENDER_LAYER_MAX_CORES, opencue.api.filter_pb2.SET_JOB_MAX_CORES, opencue.api.filter_pb2.SET_JOB_MIN_CORES): self.__widgets["ActionValue"].setValue(float(str(self.rpcObject.value()))) diff --git a/cuegui/cuegui/FrameMonitor.py b/cuegui/cuegui/FrameMonitor.py index fc83fe1a4..56c586cd9 100644 --- a/cuegui/cuegui/FrameMonitor.py +++ b/cuegui/cuegui/FrameMonitor.py @@ -24,8 +24,9 @@ from copy import deepcopy import math -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import FileSequence from opencue.compiled_proto import job_pb2 @@ -168,7 +169,7 @@ def _refreshButtonSetup(self, layout): self.btn_refresh = QtWidgets.QPushButton("Refresh") self.btn_refresh.setFocusPolicy(QtCore.Qt.NoFocus) layout.addWidget(self.btn_refresh) - self.btn_refresh.clicked.connect(self.frameMonitorTree.updateRequest) + self.btn_refresh.clicked.connect(self.frameMonitorTree.updateRequest) # pylint: disable=no-member self.frameMonitorTree.updated.connect(self._refreshButtonDisableHandle) def _refreshButtonEnableHandle(self): @@ -191,7 +192,7 @@ def _clearButtonSetup(self, layout): btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setContentsMargins(0,0,0,0) layout.addWidget(btn) - btn.clicked.connect(self._clearButtonHandle) + btn.clicked.connect(self._clearButtonHandle) # pylint: disable=no-member def _clearButtonHandle(self): """Called when the clear button is clicked""" @@ -213,14 +214,14 @@ def _pageButtonSetup(self, layout): self.prev_page_btn.setFocusPolicy(QtCore.Qt.NoFocus) self.prev_page_btn.setContentsMargins(0, 0, 0, 0) 
layout.addWidget(self.prev_page_btn) - self.prev_page_btn.clicked.connect(lambda: self._pageButtonsHandle(-1)) + self.prev_page_btn.clicked.connect(lambda: self._pageButtonsHandle(-1)) # pylint: disable=no-member # Next page button self.next_page_btn = QtWidgets.QPushButton(">") self.next_page_btn.setFocusPolicy(QtCore.Qt.NoFocus) self.next_page_btn.setContentsMargins(0, 0, 0, 0) layout.addWidget(self.next_page_btn) - self.next_page_btn.clicked.connect(lambda: self._pageButtonsHandle(1)) + self.next_page_btn.clicked.connect(lambda: self._pageButtonsHandle(1)) # pylint: disable=no-member self.frameMonitorTree.job_changed.connect(self._updatePageButtonState) # Page number label @@ -299,7 +300,7 @@ def _selectStatusSetup(self, layout): menu = QtWidgets.QMenu(self) btn.setMenu(menu) - menu.triggered.connect(self._selectStatusHandle) + menu.triggered.connect(self._selectStatusHandle) # pylint: disable=no-member for item in ["Clear", None, "Succeeded", "Running", "Waiting", "Depend", "Dead", "Eaten"]: if item: @@ -347,7 +348,7 @@ def _filterLayersUpdate(self): else: menu = QtWidgets.QMenu(self) btn.setMenu(menu) - menu.triggered[QtWidgets.QAction].connect(self._filterLayersHandle) + menu.triggered[QtWidgets.QAction].connect(self._filterLayersHandle) # pylint: disable=unsubscriptable-object if self.frameMonitorTree.getJob(): layers = [x.data.name for x in self.frameMonitorTree.getJob().getLayers()] @@ -425,7 +426,7 @@ def _filterStatusSetup(self, layout): menu = QtWidgets.QMenu(self) btn.setMenu(menu) - menu.triggered.connect(self._filterStatusHandle) + menu.triggered.connect(self._filterStatusHandle) # pylint: disable=no-member for item in [("Clear", QtCore.Qt.ALT + QtCore.Qt.Key_QuoteLeft), None, @@ -440,7 +441,7 @@ def _filterStatusSetup(self, layout): if item[0] != "Clear": a.setCheckable(True) if item[1]: - a.setShortcut(item[1]) + a.setShortcut(QtGui.QKeySequence(item[1])) menu.addAction(a) else: menu.addSeparator() diff --git a/cuegui/cuegui/FrameMonitorTree.py 
b/cuegui/cuegui/FrameMonitorTree.py index 33ccccad9..e40be0f4d 100644 --- a/cuegui/cuegui/FrameMonitorTree.py +++ b/cuegui/cuegui/FrameMonitorTree.py @@ -29,9 +29,9 @@ import re import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue from opencue.compiled_proto import job_pb2 @@ -100,25 +100,29 @@ def __init__(self, parent): data=lambda job, frame: (self.getCores(frame, format_as_string=True) or ""), sort=lambda job, frame: (self.getCores(frame)), tip="The number of cores a frame is using") - self.addColumn("Host", 120, id=6, + self.addColumn("GPUs", 55, id=6, + data=lambda job, frame: (self.getGpus(frame, format_as_string=True) or ""), + sort=lambda job, frame: (self.getGpus(frame)), + tip="The number of gpus a frame is using") + self.addColumn("Host", 120, id=7, data=lambda job, frame: frame.data.last_resource, sort=lambda job, frame: frame.data.last_resource, tip="The last or current resource that the frame used or is using.") - self.addColumn("Retries", 55, id=7, + self.addColumn("Retries", 55, id=8, data=lambda job, frame: frame.data.retry_count, sort=lambda job, frame: frame.data.retry_count, tip="The number of times that each frame has had to retry.") - self.addColumn("_CheckpointEnabled", 20, id=8, + self.addColumn("_CheckpointEnabled", 20, id=9, data=lambda job, frame: "", sort=lambda job, frame: ( frame.data.checkpoint_state == opencue.api.job_pb2.ENABLED), tip="A green check mark here indicates the frame has written out at least " "1 checkpoint segment.") - self.addColumn("CheckP", 55, id=9, + self.addColumn("CheckP", 55, id=10, data=lambda job, frame: frame.data.checkpoint_count, sort=lambda job, frame: frame.data.checkpoint_count, tip="The number of times a frame has been checkpointed.") - self.addColumn("Runtime", 70, id=10, + self.addColumn("Runtime", 70, id=11, data=lambda job, frame: (cuegui.Utils.secondsToHMMSS( 
frame.data.start_time and frame.data.stop_time and @@ -138,7 +142,7 @@ def __init__(self, parent): tip="The amount of HOURS:MINUTES:SECONDS that the frame\n" "has run for or last ran for.\n") - self.addColumn("LLU", 70, id=11, + self.addColumn("LLU", 70, id=12, data=lambda job, frame: (frame.data.state == opencue.api.job_pb2.RUNNING and self.frameLogDataBuffer.getLastLineData( job, frame)[FrameLogDataBuffer.LLU] or ""), @@ -150,7 +154,7 @@ def __init__(self, parent): "time without an update is an indication of a stuck\n" "frame for most types of jobs") - self.addColumn("Memory", 60, id=12, + self.addColumn("Memory", 60, id=13, data=lambda job, frame: ( frame.data.state == opencue.api.job_pb2.RUNNING and cuegui.Utils.memoryToString(frame.data.used_memory) or @@ -162,7 +166,20 @@ def __init__(self, parent): "If a frame is not running:\n" "\t The most memory this frame has used at one time.") - self.addColumn("Remain", 70, id=13, + self.addColumn("GPU Memory", 60, id=14, + data=lambda job, frame: ( + frame.data.state == opencue.api.job_pb2.RUNNING and + cuegui.Utils.memoryToString(frame.data.used_gpu_memory) or + cuegui.Utils.memoryToString(frame.data.max_gpu_memory)), + sort=lambda job, frame: (frame.data.state == opencue.api.job_pb2.RUNNING and + frame.data.used_gpu_memory or + frame.data.max_gpu_memory), + tip="If a frame is running:\n" + "\t The amount of GPU memory currently used by the frame.\n" + "If a frame is not running:\n" + "\t The most GPU memory this frame has used at one time.") + + self.addColumn("Remain", 70, id=15, data=lambda job, frame: (frame.data.state == opencue.api.job_pb2.RUNNING and self.frameEtaDataBuffer.getEtaFormatted(job, frame) or ""), @@ -170,16 +187,16 @@ def __init__(self, parent): self.frameEtaDataBuffer.getEta(job, frame) or -1), tip="Hours:Minutes:Seconds remaining.") - self.addColumn("Start Time", 100, id=14, + self.addColumn("Start Time", 100, id=16, data=lambda job, frame: (self.getTimeString(frame.data.start_time) or ""), 
sort=lambda job, frame: (self.getTimeString(frame.data.start_time) or ""), tip="The time the frame was started or retried.") - self.addColumn("Stop Time", 100, id=15, + self.addColumn("Stop Time", 100, id=17, data=lambda job, frame: (self.getTimeString(frame.data.stop_time) or ""), sort=lambda job, frame: (self.getTimeString(frame.data.stop_time) or ""), tip="The time that the frame finished or died.") - self.addColumn("Last Line", 0, id=16, + self.addColumn("Last Line", 0, id=18, data=lambda job, frame: (frame.data.state == opencue.api.job_pb2.RUNNING and self.frameLogDataBuffer.getLastLineData( job, frame)[FrameLogDataBuffer.LASTLINE] or ""), @@ -195,12 +212,15 @@ def __init__(self, parent): cuegui.AbstractTreeWidget.AbstractTreeWidget.__init__(self, parent) + # Used to build right click context menus + self.__menuActions = cuegui.MenuActions.MenuActions( + self, self.updateSoon, self.selectedObjects, self.getJob) self.__sortByColumnCache = {} self.ticksWithoutUpdate = 999 self.__lastUpdateTime = None - self.itemClicked.connect(self.__itemSingleClickedCopy) - self.itemClicked.connect(self.__itemSingleClickedViewLog) + self.itemClicked.connect(self.__itemSingleClickedCopy) # pylint: disable=no-member + self.itemClicked.connect(self.__itemSingleClickedViewLog) # pylint: disable=no-member self.itemDoubleClicked.connect(self.__itemDoubleClickedViewLog) self.header().sortIndicatorChanged.connect(self.__sortByColumnSave) @@ -240,7 +260,7 @@ def getCores(frame, format_as_string=False): """Gets the number of cores a frame is using.""" cores = None - m = re.search(r".*\/(\d+\.?\d*)", frame.data.last_resource) + m = re.search(r".*\/(\d+\.?\d*)\/.*", frame.data.last_resource) if m: cores = float(m.group(1)) @@ -249,6 +269,20 @@ def getCores(frame, format_as_string=False): return cores + @staticmethod + def getGpus(frame, format_as_string=False): + """Gets the number of gpus a frame is using.""" + gpus = None + + m = re.search(r".*\/.*\/(\d+)", frame.data.last_resource) + if 
m: + gpus = m.group(1) + + if not format_as_string: + gpus = int(gpus) + + return gpus + @staticmethod def getTimeString(timestamp): """Gets a timestamp formatted as a string.""" @@ -299,7 +333,8 @@ def __itemSingleClickedCopy(self, item, col): selected = [ frame.data.name for frame in self.selectedObjects() if cuegui.Utils.isFrame(frame)] if selected: - QtWidgets.QApplication.clipboard().setText(" ".join(selected)) + QtWidgets.QApplication.clipboard().setText(" ".join(selected), + QtGui.QClipboard.Selection) def __itemSingleClickedViewLog(self, item, col): """Called when an item is clicked on. Views the log file contents @@ -314,10 +349,9 @@ def __itemSingleClickedViewLog(self, item, col): key=lambda l: int(l.split('rqlog.')[-1]), reverse=True) except ValueError: - pass - # pylint: disable=no-member - QtGui.qApp.display_log_file_content.emit([current_log_file] + old_log_files) - # pylint: enable=no-member + old_log_files = [] + + self.app.display_log_file_content.emit([current_log_file] + old_log_files) def __itemDoubleClickedViewLog(self, item, col): """Called when a frame is double clicked, views the frame log in a popup @@ -411,11 +445,9 @@ def _update(self): updated""" logger.info("_update") self._lastUpdate = time.time() - if hasattr(QtGui.qApp, "threadpool"): - # pylint: disable=no-member - QtGui.qApp.threadpool.queue( + if self.app.threadpool is not None: + self.app.threadpool.queue( self._getUpdate, self._processUpdate, "getting data for %s" % self.__class__) - # pylint: enable=no-member else: logger.warning("threadpool not found, doing work in gui thread") self._processUpdate(None, self._getUpdate()) @@ -425,12 +457,10 @@ def _updateChanged(self): updated""" logger.info("_updateChanged") self._lastUpdate = time.time() - if hasattr(QtGui.qApp, "threadpool"): - # pylint: disable=no-member - QtGui.qApp.threadpool.queue( + if self.app.threadpool is not None: + self.app.threadpool.queue( self._getUpdateChanged, self._processUpdateChanged, "getting data for %s" 
% self.__class__) - # pylint: enable=no-member else: logger.warning("threadpool not found, doing work in gui thread") self._processUpdateChanged(None, self._getUpdateChanged()) @@ -560,9 +590,7 @@ class FrameWidgetItem(cuegui.AbstractWidgetItem.AbstractWidgetItem): def __init__(self, rpcObject, parent, job): if not self.__initialized: self.__class__.__initialized = True - # pylint: disable=no-member - self.__class__.__backgroundColor = QtGui.qApp.palette().color(QtGui.QPalette.Base) - # pylint: enable=no-member + self.__class__.__backgroundColor = cuegui.app().palette().color(QtGui.QPalette.Base) self.__class__.__foregroundColor = cuegui.Style.ColorTheme.COLOR_JOB_FOREGROUND self.__class__.__foregroundColorBlack = QCOLOR_BLACK self.__class__.__foregroundColorGreen = QCOLOR_GREEN @@ -826,6 +854,7 @@ class FrameContextMenu(QtWidgets.QMenu): def __init__(self, widget, filterSelectedLayersCallback): super(FrameContextMenu, self).__init__() + self.app = cuegui.app() self.__menuActions = cuegui.MenuActions.MenuActions( widget, widget.updateSoon, widget.selectedObjects, widget.getJob) @@ -844,12 +873,11 @@ def __init__(self, widget, filterSelectedLayersCallback): elif count == 2: self.__menuActions.frames().addAction(self, "xdiff2") - self.__menuActions.frames().addAction(self, "useLocalCores") + if bool(int(self.app.settings.value("AllowDeeding", 0))): + self.__menuActions.frames().addAction(self, "useLocalCores") - # pylint: disable=no-member - if QtGui.qApp.applicationName() == "CueCommander": + if self.app.applicationName() == "CueCommander": self.__menuActions.frames().addAction(self, "viewHost") - # pylint: enable=no-member depend_menu = QtWidgets.QMenu("&Dependencies", self) self.__menuActions.frames().addAction(depend_menu, "viewDepends") @@ -875,3 +903,4 @@ def __init__(self, widget, filterSelectedLayersCallback): self.__menuActions.frames().addAction(self, "eat") self.__menuActions.frames().addAction(self, "kill") self.__menuActions.frames().addAction(self, 
"eatandmarkdone") + self.__menuActions.frames().addAction(self, "viewProcesses") diff --git a/cuegui/cuegui/FrameRangeSelection.py b/cuegui/cuegui/FrameRangeSelection.py index 851e2aab2..003c4bc22 100644 --- a/cuegui/cuegui/FrameRangeSelection.py +++ b/cuegui/cuegui/FrameRangeSelection.py @@ -25,9 +25,9 @@ from builtins import range import math -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets class FrameRangeSelectionWidget(QtWidgets.QWidget): @@ -303,7 +303,7 @@ def __paintLabels(self, painter): oldPen = painter.pen() # draw hatches for labelled frames - painter.setPen(self.palette().color(QtGui.QPalette.Foreground)) + painter.setPen(self.palette().color(QtGui.QPalette.WindowText)) for frame in frames: xPos = self.__getTickArea(frame).left() painter.drawLine(xPos, -labelHeight, xPos, 0) @@ -313,7 +313,7 @@ def __paintLabels(self, painter): metric = QtGui.QFontMetrics(painter.font()) yPos = metric.ascent() + 1 rightEdge = -10000 - width = metric.width(str(frames[-1])) + width = metric.horizontalAdvance(str(frames[-1])) farEdge = self.__getTickArea(frames[-1]).right() - width // 2 farEdge -= 4 @@ -321,7 +321,7 @@ def __paintLabels(self, painter): for frame in frames: xPos = self.__getTickArea(frame).left() frameString = str(frame) - width = metric.width(frameString) + width = metric.horizontalAdvance(frameString) xPos = xPos - width // 2 if (xPos > rightEdge and xPos + width < farEdge) or frame is frames[-1]: painter.drawText(xPos, yPos, frameString) @@ -337,7 +337,7 @@ def __paintStartTime(self, painter): metric = QtGui.QFontMetrics(painter.font()) frameString = str(int(startFrame)) - xPos = timeExtent.left() - metric.width(frameString) // 2 + xPos = timeExtent.left() - metric.horizontalAdvance(frameString) // 2 yPos = metric.ascent() + 1 painter.drawText(xPos, yPos, frameString) painter.setPen(oldPen) @@ -351,7 +351,7 @@ def __paintEndTime(self, 
painter): metric = QtGui.QFontMetrics(painter.font()) frameString = str(int(endFrame)) - xPos = timeExtent.left() - metric.width(frameString) // 2 + xPos = timeExtent.left() - metric.horizontalAdvance(frameString) // 2 yPos = metric.ascent() + 1 painter.drawText(xPos, yPos, frameString) painter.setPen(oldPen) @@ -372,7 +372,7 @@ def __paintFloatTime(self, painter): painter.setPen(QtGui.QColor(128, 128, 128)) metric = QtGui.QFontMetrics(painter.font()) frameString = str(self.__floatTime) - xPos = timeExtent.left() - metric.width(frameString) // 2 + xPos = timeExtent.left() - metric.horizontalAdvance(frameString) // 2 yPos = timeExtent.top() + metric.ascent() painter.drawText(xPos, yPos, frameString) painter.setPen(oldPen) diff --git a/cuegui/cuegui/GarbageCollector.py b/cuegui/cuegui/GarbageCollector.py index ec0587dbd..7f999d98f 100644 --- a/cuegui/cuegui/GarbageCollector.py +++ b/cuegui/cuegui/GarbageCollector.py @@ -23,7 +23,7 @@ import gc -from PySide2 import QtCore +from qtpy import QtCore class GarbageCollector(QtCore.QObject): @@ -41,7 +41,7 @@ def __init__(self, parent, debug=False): self.debug = debug self.timer = QtCore.QTimer(self) - self.timer.timeout.connect(self.check) + self.timer.timeout.connect(self.check) # pylint: disable=no-member self.threshold = gc.get_threshold() gc.disable() diff --git a/cuegui/cuegui/GroupDialog.py b/cuegui/cuegui/GroupDialog.py index 2c59f405c..d5f26ac18 100644 --- a/cuegui/cuegui/GroupDialog.py +++ b/cuegui/cuegui/GroupDialog.py @@ -23,8 +23,8 @@ from builtins import str -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -56,6 +56,11 @@ def __init__(self, parentGroup, modifyGroup, defaults, parent): __minCores = defaults["minCores"] __maxCores = defaults["maxCores"] + __defaultJobMinGpus = defaults["defaultJobMinGpus"] + __defaultJobMaxGpus = defaults["defaultJobMaxGpus"] + __minGpus = defaults["minGpus"] + __maxGpus = defaults["maxGpus"] + 
self.setWindowTitle(__title) layout.addWidget(QtWidgets.QLabel(__message, self), 0, 1, 1, 3) @@ -90,8 +95,25 @@ def __init__(self, parentGroup, modifyGroup, defaults, parent): __modify and __maxCores != -1.0, __maxCores, 1) + (self._defaultJobMinGpusCheck, self._defaultJobMinGpusValue) = \ + self.__createToggleSpinBox("Job Default Minimum Gpus", 8, + __modify and __defaultJobMinGpus != -1, + __defaultJobMinGpus, 1) + (self._defaultJobMaxGpusCheck, self._defaultJobMaxGpusValue) = \ + self.__createToggleSpinBox("Job Default Maximum Gpus", 9, + __modify and __defaultJobMaxGpus != -1, + __defaultJobMaxGpus, 1) + (self._minGpusCheck, self._minGpusValue) = \ + self.__createToggleSpinBox("Group Minimum Gpus", 10, + __modify and __minGpus != 0, + __minGpus) + (self._maxGpusCheck, self._maxGpusValue) = \ + self.__createToggleSpinBox("Group Maximum Gpus", 11, + __modify and __maxGpus != -1, + __maxGpus, 1) + self.__createButtons( - QtWidgets.QDialogButtonBox.Save | QtWidgets.QDialogButtonBox.Cancel, 8, 3) + QtWidgets.QDialogButtonBox.Save | QtWidgets.QDialogButtonBox.Cancel, 12, 3) def __createToggleDoubleSpinBox( self, text, row, startEnabled = False, currentValue = 0, minValue = 0): @@ -118,15 +140,19 @@ def __createToggleInput(self, text, row, inputWidget, startEnabled): self.layout().addWidget(check, row, 0) self.layout().addWidget(label, row, 1) self.layout().addWidget(inputWidget, row, 2) + # pylint: disable=no-member check.stateChanged.connect(inputWidget.setEnabled) check.stateChanged.connect(label.setEnabled) + # pylint: enable=no-member return (check, inputWidget) def __createButtons(self, buttons, row, width): self.__buttons = QtWidgets.QDialogButtonBox(buttons, QtCore.Qt.Horizontal, self) self.layout().addWidget(self.__buttons, row, 1, 1, width) + # pylint: disable=no-member self.__buttons.accepted.connect(self.accept) self.__buttons.rejected.connect(self.reject) + # pylint: enable=no-member def accept(self): __name = str(self._nameValue.text()) @@ -169,6 +195,26 
@@ def accept(self): float(self._maxCoresValue.value()), __group.data.max_cores, float(-1)) + self.__setValue(self._defaultJobMinGpusCheck, + __group.setDefaultJobMinGpus, + float(self._defaultJobMinGpusValue.value()), + __group.data.default_job_min_gpus, -1) + + self.__setValue(self._defaultJobMaxGpusCheck, + __group.setDefaultJobMaxGpus, + float(self._defaultJobMaxGpusValue.value()), + __group.data.default_job_max_gpus, -1) + + self.__setValue(self._minGpusCheck, + __group.setMinGpus, + float(self._minGpusValue.value()), + __group.data.min_gpus, 0) + + self.__setValue(self._maxGpusCheck, + __group.setMaxGpus, + float(self._maxGpusValue.value()), + __group.data.max_gpus, -1) + self.close() @staticmethod @@ -195,7 +241,11 @@ def __init__(self, modifyGroup, parent=None): "defaultJobMinCores": modifyGroup.data.default_job_min_cores, "defaultJobMaxCores": modifyGroup.data.default_job_max_cores, "minCores": modifyGroup.data.min_cores, - "maxCores": modifyGroup.data.max_cores} + "maxCores": modifyGroup.data.max_cores, + "defaultJobMinGpus": modifyGroup.data.default_job_min_gpus, + "defaultJobMaxGpus": modifyGroup.data.default_job_max_gpus, + "minGpus": modifyGroup.data.min_gpus, + "maxGpus": modifyGroup.data.max_gpus} GroupDialog.__init__(self, None, modifyGroup, defaults, parent) @@ -212,5 +262,9 @@ def __init__(self, parentGroup, parent=None): "defaultJobMinCores": 1.0, "defaultJobMaxCores": 1.0, "minCores": 0.0, - "maxCores": 1.0} + "maxCores": 1.0, + "defaultJobMinGpus": 0, + "defaultJobMaxGpus": 0, + "minGpus": 0, + "maxGpus": 0} GroupDialog.__init__(self, parentGroup, None, defaults, parent) diff --git a/cuegui/cuegui/HostMonitor.py b/cuegui/cuegui/HostMonitor.py index 0c467d004..66ee94287 100644 --- a/cuegui/cuegui/HostMonitor.py +++ b/cuegui/cuegui/HostMonitor.py @@ -22,9 +22,8 @@ from builtins import str -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ 
-43,6 +42,7 @@ class HostMonitor(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() self.__filterByHostNameLastInput = None self.hostMonitorTree = cuegui.HostMonitorTree.HostMonitorTree(self) @@ -70,10 +70,8 @@ def __init__(self, parent): self.__viewHostsSetup() - # pylint: disable=no-member - if bool(int(QtGui.qApp.settings.value("AutoRefreshMonitorHost", 1))): + if bool(int(self.app.settings.value("AutoRefreshMonitorHost", 1))): self.updateRequest() - # pylint: enable=no-member def updateRequest(self): """Requests an update of the displayed information.""" @@ -108,7 +106,7 @@ def __filterByHostNameSetup(self, layout): self.__filterByHostNameLastInput = None - self.__filterByHostName.editingFinished.connect(self.__filterByHostNameHandle) + self.__filterByHostName.editingFinished.connect(self.__filterByHostNameHandle) # pylint: disable=no-member btn = QtWidgets.QPushButton("Clr") btn.setMaximumHeight(FILTER_HEIGHT) @@ -279,9 +277,7 @@ def __refreshToggleCheckBoxSetup(self, layout): def __refreshToggleCheckBoxHandle(self, state): self.hostMonitorTree.enableRefresh = bool(state) - # pylint: disable=no-member - QtGui.qApp.settings.setValue("AutoRefreshMonitorHost", int(bool(state))) - # pylint: enable=no-member + self.app.settings.setValue("AutoRefreshMonitorHost", int(bool(state))) # ============================================================================== # Button to refresh @@ -294,7 +290,7 @@ def __refreshButtonSetup(self, layout): self.btn_refresh.setMaximumHeight(FILTER_HEIGHT) self.btn_refresh.setFocusPolicy(QtCore.Qt.NoFocus) layout.addWidget(self.btn_refresh) - self.btn_refresh.clicked.connect(self.hostMonitorTree.updateRequest) + self.btn_refresh.clicked.connect(self.hostMonitorTree.updateRequest) # pylint: disable=no-member self.hostMonitorTree.updated.connect(self.__refreshButtonDisableHandle) def __refreshButtonEnableHandle(self): @@ -318,7 +314,7 @@ def __clearButtonSetup(self, layout): 
btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setContentsMargins(0,0,0,0) layout.addWidget(btn) - btn.clicked.connect(self.__clearButtonHandle) + btn.clicked.connect(self.__clearButtonHandle) # pylint: disable=no-member def __clearButtonHandle(self): """Called when the clear button is clicked""" @@ -332,9 +328,7 @@ def __clearButtonHandle(self): # Monitors and handles the view_hosts signal # ============================================================================== def __viewHostsSetup(self): - # pylint: disable=no-member - QtGui.qApp.view_hosts.connect(self.__viewHostsHandle) - # pylint: enable=no-member + self.app.view_hosts.connect(self.__viewHostsHandle) def __viewHostsHandle(self, hosts): self.__clearButtonHandle() diff --git a/cuegui/cuegui/HostMonitorTree.py b/cuegui/cuegui/HostMonitorTree.py index f9e5a7c90..bff3d6ab2 100644 --- a/cuegui/cuegui/HostMonitorTree.py +++ b/cuegui/cuegui/HostMonitorTree.py @@ -23,9 +23,9 @@ from builtins import map import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue from opencue.compiled_proto.host_pb2 import HardwareState @@ -78,9 +78,9 @@ def __init__(self, parent): data=lambda host: cuegui.Utils.memoryToString(host.data.free_memory), sort=lambda host: host.data.free_memory, tip="The amount of used memory (red) vs available gpu memory (green)") - self.addColumn("GPU", 60, id=6, - data=lambda host: cuegui.Utils.memoryToString(host.data.free_gpu), - sort=lambda host: host.data.free_gpu, + self.addColumn("GPU Memory", 60, id=6, + data=lambda host: cuegui.Utils.memoryToString(host.data.free_gpu_memory), + sort=lambda host: host.data.free_gpu_memory, delegate=cuegui.ItemDelegate.HostGpuBarDelegate, tip="The amount of used gpu memory (red) vs available gpu memory (green)") self.addColumn("freeMcp", 60, id=7, @@ -105,27 +105,36 @@ def __init__(self, parent): data=lambda host: 
cuegui.Utils.memoryToString(host.data.idle_memory), sort=lambda host: host.data.idle_memory, tip="The amount of unreserved memory.") - self.addColumn("GPU", 50, id=12, - data=lambda host: cuegui.Utils.memoryToString(host.data.gpu), - sort=lambda host: host.data.gpu, + self.addColumn("GPUs", 50, id=12, + data=lambda host: "%d" % host.data.gpus, + sort=lambda host: host.data.gpus, + tip="The total number of gpus.\n\n" + "On a frame it is the number of gpus reserved.") + self.addColumn("Idle GPUs", 40, id=13, + data=lambda host: "%d" % host.data.idle_gpus, + sort=lambda host: host.data.idle_gpus, + tip="The number of gpus that are not reserved.") + self.addColumn("GPU Mem", 50, id=14, + data=lambda host: cuegui.Utils.memoryToString(host.data.gpu_memory), + sort=lambda host: host.data.gpu_memory, tip="The total amount of reservable gpu memory.\n\n" "On a frame it is the amount of gpu memory reserved.") - self.addColumn("Idle", 50, id=13, - data=lambda host: cuegui.Utils.memoryToString(host.data.idle_gpu), - sort=lambda host: host.data.idle_gpu, + self.addColumn("Gpu Mem Idle", 50, id=15, + data=lambda host: cuegui.Utils.memoryToString(host.data.idle_gpu_memory), + sort=lambda host: host.data.idle_gpu_memory, tip="The amount of unreserved gpu memory.") - self.addColumn("Ping", 50, id=14, + self.addColumn("Ping", 50, id=16, data=lambda host: int(time.time() - host.data.ping_time), sort=lambda host: host.data.ping_time, tip="The number of seconds since the cuebot last received\n" "a report from the host. 
A host is configured to report\n" "in every 60 seconds so a number larger than this\n" "indicates a problem") - self.addColumn("Hardware", 70, id=15, + self.addColumn("Hardware", 70, id=17, data=lambda host: HardwareState.Name(host.data.state), tip="The state of the hardware as Up or Down.\n\n" "On a frame it is the amount of memory used.") - self.addColumn("Locked", 90, id=16, + self.addColumn("Locked", 90, id=18, data=lambda host: LockState.Name(host.data.lock_state), tip="A host can be:\n" "Locked \t\t It was manually locked to prevent booking\n" @@ -133,12 +142,12 @@ def __init__(self, parent): "NimbyLocked \t It is a desktop machine and there is\n" "\t\t someone actively using it or not enough \n" "\t\t resources are available on a desktop.") - self.addColumn("ThreadMode", 80, id=17, + self.addColumn("ThreadMode", 80, id=19, data=lambda host: ThreadMode.Name(host.data.thread_mode), tip="A frame that runs on this host will:\n" "All: Use all cores.\n" "Auto: Use the number of cores as decided by the cuebot.\n") - self.addColumn("Tags/Job", 50, id=18, + self.addColumn("Tags/Job", 50, id=20, data=lambda host: ",".join(host.data.tags), tip="The tags applied to the host.\n\n" "On a frame it is the name of the job.") @@ -154,21 +163,19 @@ def __init__(self, parent): self.setDropIndicatorShown(True) self.setDragEnabled(True) + # pylint: disable=no-member self.itemClicked.connect(self.__itemSingleClickedCopy) self.itemClicked.connect(self.__itemSingleClickedComment) + # pylint: enable=no-member # Don't use the standard space bar to refresh - # pylint: disable=no-member - QtGui.qApp.request_update.connect(self.updateRequest) - # pylint: enable=no-member + self.app.request_update.connect(self.updateRequest) self.startTicksUpdate(40) # Don't start refreshing until the user sets a filter or hits refresh self.ticksWithoutUpdate = -1 - # pylint: disable=no-member - self.enableRefresh = bool(int(QtGui.qApp.settings.value("AutoRefreshMonitorHost", 1))) - # pylint: 
enable=no-member + self.enableRefresh = bool(int(self.app.settings.value("AutoRefreshMonitorHost", 1))) def tick(self): if self.ticksWithoutUpdate >= self.updateInterval and \ @@ -290,9 +297,7 @@ def __init__(self, rpcObject, parent): cuegui.Style.init() self.__class__.__initialized = True self.__class__.__commentIcon = QtGui.QIcon(":comment.png") - # pylint: disable=no-member - self.__class__.__backgroundColor = QtGui.qApp.palette().color(QtGui.QPalette.Base) - # pylint: enable=no-member + self.__class__.__backgroundColor = cuegui.app().palette().color(QtGui.QPalette.Base) self.__class__.__foregroundColor = cuegui.Style.ColorTheme.COLOR_JOB_FOREGROUND self.__class__.__pausedColor = cuegui.Style.ColorTheme.COLOR_JOB_PAUSED_BACKGROUND self.__class__.__dyingColor = cuegui.Style.ColorTheme.COLOR_JOB_DYING_BACKGROUND @@ -340,7 +345,8 @@ def data(self, col, role): self.rpcObject.data.total_memory] if role == QtCore.Qt.UserRole + 3: - return [self.rpcObject.data.total_gpu - self.rpcObject.data.free_gpu, - self.rpcObject.data.total_gpu] + return [self.rpcObject.data.total_gpu_memory - + self.rpcObject.data.free_gpu_memory, + self.rpcObject.data.total_gpu_memory] return cuegui.Constants.QVARIANT_NULL diff --git a/cuegui/cuegui/ItemDelegate.py b/cuegui/cuegui/ItemDelegate.py index 27aff67ec..e4de3df02 100644 --- a/cuegui/cuegui/ItemDelegate.py +++ b/cuegui/cuegui/ItemDelegate.py @@ -24,9 +24,9 @@ from builtins import range from math import ceil -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue diff --git a/cuegui/cuegui/JobMonitorTree.py b/cuegui/cuegui/JobMonitorTree.py index 58773c0ac..5cb49f3d0 100644 --- a/cuegui/cuegui/JobMonitorTree.py +++ b/cuegui/cuegui/JobMonitorTree.py @@ -20,13 +20,13 @@ from __future__ import print_function from __future__ import division - +from future.utils import iteritems from builtins import map import time -from 
PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue @@ -71,6 +71,7 @@ class JobMonitorTree(cuegui.AbstractTreeWidget.AbstractTreeWidget): """Tree widget to display a list of monitored jobs.""" __loadMine = True + __groupDependent = True view_object = QtCore.Signal(object) def __init__(self, parent): @@ -151,7 +152,9 @@ def __init__(self, parent): self.__jobTimeLoaded = {} self.__userColors = {} - + self.__dependentJobs = {} + self._dependent_items = {} + self.__reverseDependents = {} # Used to build right click context menus self.__menuActions = cuegui.MenuActions.MenuActions( self, self.updateSoon, self.selectedObjects) @@ -161,8 +164,10 @@ def __init__(self, parent): self.setDragEnabled(True) self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop) + # pylint: disable=no-member self.itemClicked.connect(self.__itemSingleClickedCopy) self.itemClicked.connect(self.__itemSingleClickedComment) + # pylint: enable=no-member self.__load = {} self.startTicksUpdate(20, False, 60) @@ -178,8 +183,21 @@ def tick(self): self._update() return + self.updateJobCount() self.ticksWithoutUpdate += 1 + def updateJobCount(self): + """Called at every tick. The total number of monitored + jobs is added to the column header + """ + count = 0 + iterator = QtWidgets.QTreeWidgetItemIterator(self) + while iterator.value(): + count += 1 + iterator += 1 + + self.headerItem().setText(0, "Job [Total Count: {}]".format(count)) + def __itemSingleClickedCopy(self, item, col): """Called when an item is clicked on. Copies selected object names to the middle click selection clip board. 
@@ -224,43 +242,127 @@ def setLoadMine(self, value): @type value: boolean or QtCore.Qt.Checked or QtCore.Qt.Unchecked""" self.__loadMine = (value is True or value == QtCore.Qt.Checked) - def addJob(self, job): + def setGroupDependent(self, value): + """Enables or disables the auto grouping of the dependent jobs + @param value: New groupDependent state + @type value: boolean or QtCore.Qt.Checked or QtCore.Qt.Unchecked""" + self.__groupDependent = (value is True or value == QtCore.Qt.Checked) + self.updateRequest() + + def addJob(self, job, timestamp=None, loading_from_config=False): """Adds a job to the list. With locking" @param job: Job can be None, a job object, or a job name. - @type job: job, string, None""" + @type job: job, string, None + @param loading_from_config: Whether or not this method is being called + for loading jobs found in user config + @type loading_from_config: bool + """ newJobObj = cuegui.Utils.findJob(job) self.ticksLock.lock() try: if newJobObj: - objectKey = cuegui.Utils.getObjectKey(newJobObj) - self.__load[objectKey] = newJobObj - self.__jobTimeLoaded[objectKey] = time.time() + jobKey = cuegui.Utils.getObjectKey(newJobObj) + if not self.__groupDependent: + self.__load[jobKey] = newJobObj + self.__jobTimeLoaded[jobKey] = timestamp if timestamp else time.time() + else: + # We'll only add the new job if it's not already listed + # as a dependent on another job + if jobKey not in self.__reverseDependents.keys(): + self.__load[jobKey] = newJobObj + + # when we are adding jobs manually, we want to calculate + # all dependencies (active or not), so the user can see + # all the dependent jobs, even after the main/parent job + # has finished. + # When we're loading jobs from user config, we want to + # only include the active dependents. 
This is because + # the dependencies have already been calculated and + # listed in the config as a flat list, so attempting + # to re-add them will result in duplicates that will + # throw off the cleanup loop at the end of this method + active_only = not loading_from_config + dep = self.__menuActions.jobs( + ).getRecursiveDependentJobs([newJobObj], + active_only=active_only) + + # Remove dependent if it has the same name as the job + # - This avoids missing jobs on MonitorJobs + # - Remove the parent job is necessary to avoid remove + # the parent job and all the dependents + # in the step 2 below + dep = [j for j in dep if j.data.name != newJobObj.data.name] + + self.__dependentJobs[jobKey] = dep + # we'll also store a reversed dictionary for + # dependencies with the dependent as key and the main + # job as the value, this will be used in step 2 + # below to remove jobs that are added here + # as dependents + for j in dep: + depKey = cuegui.Utils.getObjectKey(j) + self.__reverseDependents[depKey] = newJobObj + self.__jobTimeLoaded[depKey] = time.time() + self.__jobTimeLoaded[jobKey] = time.time() + + for j in self.__reverseDependents: + if j in self.__load: + del self.__load[j] finally: self.ticksLock.unlock() def getJobProxies(self): - """Gets a list of IDs of monitored jobs.""" - return list(self._items.keys()) + """Get a list of the JobProxies that are being monitored in the session + which will be saved to the config file + + Returning a sorted list based on the most recent timestamp - restoring jobs is capped + by LOAD_LIMIT, so restore the most recent jobs the user added to their session + + :return: list of tuples of the JobId and timestamp + """ + jobIdsTimeLoaded = [] + + for jobProxy, _ in self._items.items(): + try: + jobIdsTimeLoaded.append((jobProxy, self.__jobTimeLoaded[jobProxy])) + except KeyError: + # set timestamp to epoch time if timestamp not found + jobIdsTimeLoaded.append((jobProxy, 0)) + + # sort list on recent timestamps, only restoring 
the first n jobs (defined by LOAD_LIMIT) + return list(sorted(jobIdsTimeLoaded, key=lambda x: x[1], reverse=True)) def _removeItem(self, item): """Removes an item from the TreeWidget without locking @param item: A tree widget item @type item: AbstractTreeWidgetItem""" - # pylint: disable=no-member - QtGui.qApp.unmonitor.emit(item.rpcObject) - # pylint: enable=no-member + self.app.unmonitor.emit(item.rpcObject) cuegui.AbstractTreeWidget.AbstractTreeWidget._removeItem(self, item) self.__jobTimeLoaded.pop(item.rpcObject, "") + try: + jobKey = cuegui.Utils.getObjectKey(item) + # Remove the item from the main _items dictionary as well as the + # __dependentJobs and the reverseDependent dictionaries + cuegui.AbstractTreeWidget.AbstractTreeWidget._removeItem(self, item) + dependent_jobs = self.__dependentJobs.get(jobKey, []) + for djob in dependent_jobs: + del self.__reverseDependents[djob] + del self.__reverseDependents[jobKey] + except KeyError: + # Dependent jobs are not stored in as keys the main self._items + # dictionary, trying to remove dependent jobs from self._items + # raises a KeyError, which we can safely ignore + pass def removeAllItems(self): """Notifies the other widgets of each item being unmonitored, then calls the the AbstractTreeWidget.removeAllItems like normal""" for proxy in list(self._items.keys()): - # pylint: disable=no-member - QtGui.qApp.unmonitor.emit(proxy) - # pylint: enable=no-member + self.app.unmonitor.emit(proxy) if proxy in self.__jobTimeLoaded: del self.__jobTimeLoaded[proxy] + self.__dependentJobs.clear() + self.__reverseDependents.clear() cuegui.AbstractTreeWidget.AbstractTreeWidget.removeAllItems(self) def removeFinishedItems(self): @@ -273,6 +375,7 @@ def contextMenuEvent(self, e): @param e: Right click QEvent @type e: QEvent""" menu = QtWidgets.QMenu() + menu.setToolTipsVisible(True) __selectedObjects = self.selectedObjects() __count = len(__selectedObjects) @@ -282,7 +385,9 @@ def contextMenuEvent(self, e): 
self.__menuActions.jobs().addAction(menu, "view") self.__menuActions.jobs().addAction(menu, "emailArtist") self.__menuActions.jobs().addAction(menu, "viewComments") - self.__menuActions.jobs().addAction(menu, "useLocalCores") + + if bool(int(self.app.settings.value("AllowDeeding", 0))): + self.__menuActions.jobs().addAction(menu, "useLocalCores") depend_menu = QtWidgets.QMenu("&Dependencies",self) self.__menuActions.jobs().addAction(depend_menu, "viewDepends") @@ -381,11 +486,21 @@ def _getUpdate(self): # Gather list of all other jobs to update monitored_proxies.append(objectKey) + # Refresh the dependent proxies for the next update + for job, dependents in iteritems(self.__dependentJobs): + ids = [d.id() for d in dependents] + # If the job has no dependents, then ids is an empty list, + # The getJobs call returns every job on the cue when called + # an empty list for the id argument! + if not ids: + continue + tmp = opencue.api.getJobs(id=ids, include_finished=True) + self.__dependentJobs[job] = tmp + if self.__loadMine: # This auto-loads all the users jobs for job in opencue.api.getJobs(user=[cuegui.Utils.getUsername()]): - objectKey = cuegui.Utils.getObjectKey(job) - jobs[objectKey] = job + self.addJob(job) # Prune the users jobs from the remaining proxies to update for proxy, job in list(jobs.items()): @@ -411,34 +526,50 @@ def _processUpdate(self, work, rpcObjects): self._itemsLock.lockForWrite() - # include rpcObjects from self._items that are not in jobObjects + # include rpcObjects from self._items that are not in rpcObjects for proxy, item in list(self._items.items()): if not proxy in rpcObjects: rpcObjects[proxy] = item.rpcObject - + # pylint: disable=too-many-nested-blocks try: selectedKeys = [ cuegui.Utils.getObjectKey(item.rpcObject) for item in self.selectedItems()] scrolled = self.verticalScrollBar().value() + expanded = [cuegui.Utils.getObjectKey(item.rpcObject) + for item in self._items.values() if item.isExpanded()] # Store the creation time for 
the current item for item in list(self._items.values()): self.__jobTimeLoaded[cuegui.Utils.getObjectKey(item.rpcObject)] = item.created + # Store the creation time for the dependent jobs + for item in self._dependent_items.values(): + self.__jobTimeLoaded[cuegui.Utils.getObjectKey(item.rpcObject)] = item.created self._items = {} self.clear() - for proxy, job in list(rpcObjects.items()): + for proxy, job in iteritems(rpcObjects): self._items[proxy] = JobWidgetItem(job, self.invisibleRootItem(), self.__jobTimeLoaded.get(proxy, None)) if proxy in self.__userColors: self._items[proxy].setUserColor(self.__userColors[proxy]) - - self.verticalScrollBar().setValue(scrolled) + if self.__groupDependent: + dependent_jobs = self.__dependentJobs.get(proxy, []) + for djob in dependent_jobs: + item = JobWidgetItem(djob, + self._items[proxy], + self.__jobTimeLoaded.get(proxy, None)) + dkey = cuegui.Utils.getObjectKey(djob) + self._dependent_items[dkey] = item + if dkey in self.__userColors: + self._dependent_items[dkey].setUserColor( + self.__userColors[dkey]) + + self.verticalScrollBar().setRange(scrolled, len(rpcObjects.keys()) - scrolled) list(map(lambda key: self._items[key].setSelected(True), [key for key in selectedKeys if key in self._items])) - + list(self._items[key].setExpanded(True) for key in expanded if key in self._items) except opencue.exception.CueException as e: list(map(logger.warning, cuegui.Utils.exceptionOutput(e))) finally: @@ -470,9 +601,7 @@ def __init__(self, rpcObject, parent, created): self.__class__.__initialized = True self.__class__.__commentIcon = QtGui.QIcon(":comment.png") self.__class__.__eatIcon = QtGui.QIcon(":eat.png") - # pylint: disable=no-member - self.__class__.__backgroundColor = QtGui.qApp.palette().color(QtGui.QPalette.Base) - # pylint: enable=no-member + self.__class__.__backgroundColor = cuegui.app().palette().color(QtGui.QPalette.Base) self.__class__.__foregroundColor = cuegui.Style.ColorTheme.COLOR_JOB_FOREGROUND 
self.__class__.__pausedColor = cuegui.Style.ColorTheme.COLOR_JOB_PAUSED_BACKGROUND self.__class__.__dyingColor = cuegui.Style.ColorTheme.COLOR_JOB_DYING_BACKGROUND diff --git a/cuegui/cuegui/LayerDialog.py b/cuegui/cuegui/LayerDialog.py index c337dda80..8c9f0e57f 100644 --- a/cuegui/cuegui/LayerDialog.py +++ b/cuegui/cuegui/LayerDialog.py @@ -20,9 +20,8 @@ from __future__ import division from __future__ import print_function -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -51,7 +50,7 @@ def __init__(self, widget, enable, parent=None): if enable: self.__widget.setDisabled(True) layout.addWidget(self.__checkbox) - self.__checkbox.toggled.connect(self.enable) + self.__checkbox.toggled.connect(self.enable) # pylint: disable=no-member layout.addWidget(self.__widget) def getWidget(self): @@ -106,6 +105,7 @@ class LayerPropertiesDialog(QtWidgets.QDialog): def __init__(self, layers, parent=None): QtWidgets.QDialog.__init__(self, parent) + self.app = cuegui.app() self.__layers = [opencue.api.getLayer(opencue.id(layer)) for layer in layers] self.setWindowTitle("Layer Properties") @@ -117,12 +117,12 @@ def __init__(self, layers, parent=None): self.mem_max_kb = int(self.mem_max_gb * 1024 * 1024) self.mem_min_kb = int(self.mem_min_gb * 1024 * 1024) - self.gpu_max_kb = 2 * 1024 * 1024 - self.gpu_min_kb = 0 - self.gpu_tick_kb = 256 * 1024 - self.gpu_max_gb = 2.0 - self.gpu_min_gb = 0.0 - self.gpu_tick_gb = .25 + self.gpu_mem_max_kb = 256 * 1024 * 1024 + self.gpu_mem_min_kb = 0 + self.gpu_mem_tick_kb = 256 * 1024 + self.gpu_mem_max_gb = 256.0 + self.gpu_mem_min_gb = 0.0 + self.gpu_mem_tick_gb = .25 self.__group = QtWidgets.QGroupBox("Resource Options", self) @@ -147,10 +147,8 @@ def __init__(self, layers, parent=None): self.__max_cores.setSingleStep(1) # Disable this for everything except commander. 
- # pylint: disable=no-member - if QtGui.qApp.applicationName() != "CueCommander": + if self.app.applicationName() != "CueCommander": self.__core.setDisabled(True) - # pylint: enable=no-member # Threads self.__thread = QtWidgets.QCheckBox(self) @@ -180,16 +178,28 @@ def __init__(self, layers, parent=None): # Limits self.__limits = LayerLimitsWidget(self.__layers, self) + # Min gpus + self.__min_gpus = QtWidgets.QSpinBox(self) + self.__min_gpus.setValue(0) + self.__min_gpus.setRange(0, int(self._cfg().get('max_gpus', 16))) + self.__min_gpus.setSingleStep(1) + + # Max gpus + self.__max_gpus = QtWidgets.QSpinBox(self) + self.__max_gpus.setRange(0, int(self._cfg().get('max_gpus', 16))) + self.__max_gpus.setSingleStep(1) + # GPU Memory - self.__gpu = SlideSpinner(self) - self.__gpu.slider.setMinimumWidth(200) - self.__gpu.slider.setRange(self.gpu_min_kb, self.gpu_max_kb // self.gpu_tick_kb) - self.__gpu.slider.setTickInterval(1) - self.__gpu.slider.setSingleStep(1) - self.__gpu.slider.setPageStep(1) - self.__gpu.spinner.setSuffix(' GB') - self.__gpu.spinner.setRange(self.gpu_min_gb, self.gpu_max_gb) - self.__gpu.spinner.setSingleStep(self.gpu_tick_gb) + self.__gpu_mem = SlideSpinner(self) + self.__gpu_mem.slider.setMinimumWidth(200) + self.__gpu_mem.slider.setRange(self.gpu_mem_min_kb, + self.gpu_mem_max_kb // self.gpu_mem_tick_kb) + self.__gpu_mem.slider.setTickInterval(1) + self.__gpu_mem.slider.setSingleStep(1) + self.__gpu_mem.slider.setPageStep(1) + self.__gpu_mem.spinner.setSuffix(' GB') + self.__gpu_mem.spinner.setRange(self.gpu_mem_min_gb, self.gpu_mem_max_gb) + self.__gpu_mem.spinner.setSingleStep(self.gpu_mem_tick_gb) # Our dialog buttons. 
self.__buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Save | @@ -198,22 +208,33 @@ def __init__(self, layers, parent=None): self) # Setup signals + # pylint: disable=no-member self.__mem.slider.valueChanged.connect(self.__translateToMemSpinbox) self.__mem.spinner.valueChanged.connect(self.__translateToMemSlider) - self.__gpu.slider.valueChanged.connect(self.__translateToGpuSpinbox) - self.__gpu.spinner.valueChanged.connect(self.__translateToGpuSlider) + self.__gpu_mem.slider.valueChanged.connect(self.__translateToGpuMemSpinbox) + self.__gpu_mem.spinner.valueChanged.connect(self.__translateToGpuMemSlider) self.__buttons.accepted.connect(self.verify) self.__buttons.rejected.connect(self.reject) + # pylint: enable=no-member # Set actual values once signals are setup self.__mem.slider.setValue(self.getMaxMemory()) - self.__gpu.slider.setValue(self.getMaxGpu()) + self.__gpu_mem.slider.setValue(self.getMaxGpuMemory()) self.__core.setValue(self.getMinCores()) self.__max_cores.setValue(self.getMaxCores()) + self.__min_gpus.setValue(self.getMinGpus()) + self.__max_gpus.setValue(self.getMaxGpus()) self.__timeout.setValue(self.getTimeout()) self.__timeout_llu.setValue(self.getTimeoutLLU()) + topLayout = QtWidgets.QVBoxLayout() + topWidget = QtWidgets.QWidget() + topWidget.setLayout(topLayout) + scrollArea = QtWidgets.QScrollArea(widgetResizable=True) + scrollArea.setWidget(topWidget) + QtWidgets.QVBoxLayout(self) + self.layout().addWidget(scrollArea) layout = QtWidgets.QVBoxLayout() layout.addWidget(EnableableItem(LayerPropertiesItem("Minimum Memory:", @@ -236,8 +257,16 @@ def __init__(self, layers, parent=None): self.__thread, True), multiSelect)) + layout.addWidget(EnableableItem(LayerPropertiesItem("Min GPUs:", + self.__min_gpus, + False), + multiSelect)) + layout.addWidget(EnableableItem(LayerPropertiesItem("Max GPUs:", + self.__max_gpus, + False), + multiSelect)) layout.addWidget(EnableableItem(LayerPropertiesItem("Minimum Gpu Memory:", - self.__gpu, + 
self.__gpu_mem, False), multiSelect)) layout.addWidget(EnableableItem(LayerPropertiesItem("Timeout:", @@ -251,10 +280,10 @@ def __init__(self, layers, parent=None): layout.addStretch() self.__group.setLayout(layout) - self.layout().addWidget(EnableableItem(self.__tags, multiSelect)) - self.layout().addWidget(EnableableItem(self.__limits, multiSelect)) - self.layout().addWidget(self.__group) - self.layout().addWidget(self.__buttons) + topLayout.addWidget(EnableableItem(self.__tags, multiSelect)) + topLayout.addWidget(EnableableItem(self.__limits, multiSelect)) + topLayout.addWidget(self.__group) + topLayout.addWidget(self.__buttons) def _cfg(self): """ @@ -280,8 +309,8 @@ def verify(self): if mem_value < self.mem_min_kb or mem_value > self.mem_max_kb: warning("The memory setting is too high.") return False - gpu_value = self.__gpu.slider.value() - if gpu_value < self.gpu_min_kb or gpu_value > self.gpu_max_kb: + gpu_mem_value = self.__gpu_mem.slider.value() + if gpu_mem_value < self.gpu_mem_min_kb or gpu_mem_value > self.gpu_mem_max_kb: warning("The gpu memory setting is too high.") return False @@ -302,8 +331,12 @@ def apply(self): layer.setMaxCores(self.__max_cores.value() * 100.0) if self.__thread.isEnabled(): layer.setThreadable(self.__thread.isChecked()) - if self.__gpu.isEnabled(): - layer.setMinGpu(self.__gpu.slider.value() * self.gpu_tick_kb) + if self.__min_gpus.isEnabled(): + layer.setMinGpus(self.__min_gpus.value()) + if self.__max_gpus.isEnabled(): + layer.setMaxGpus(self.__max_cores.value()) + if self.__gpu_mem.isEnabled(): + layer.setMinGpuMemory(self.__gpu_mem.slider.value() * self.gpu_mem_tick_kb) if self.__timeout.isEnabled(): layer.setTimeout(self.__timeout.value()) if self.__timeout_llu.isEnabled(): @@ -322,9 +355,9 @@ def getMaxMemory(self): result = layer.data.min_memory return result - def getMaxGpu(self): - """Gets the layer max GPU.""" - return max([layer.data.min_gpu // self.gpu_tick_kb for layer in self.__layers]) + def 
getMaxGpuMemory(self): + """Gets the layer max GPU memory.""" + return max([layer.data.min_gpu_memory // self.gpu_mem_tick_kb for layer in self.__layers]) def getMinCores(self): """Gets the layer min cores.""" @@ -342,6 +375,22 @@ def getMaxCores(self): result = layer.data.max_cores return result + def getMinGpus(self): + """Gets the layer min gpus.""" + result = 0 + for layer in self.__layers: + if layer.data.min_gpus > result: + result = layer.data.min_gpus + return result + + def getMaxGpus(self): + """Gets the layer max gpus.""" + result = 0 + for layer in self.__layers: + if layer.data.max_gpus > result: + result = layer.data.max_gpus + return result + def getThreading(self): """Gets whether the layer is threadable.""" result = False @@ -382,12 +431,11 @@ def __translateToMemSpinbox(self, value): def __translateToMemSlider(self, value): self.__mem.slider.setValue(int(value * 1048576.0)) - def __translateToGpuSpinbox(self, value): - self.__gpu.spinner.setValue(float(value * self.gpu_tick_kb) / 1024.0 / 1024.0) - - def __translateToGpuSlider(self, value): - self.__gpu.slider.setValue(int(value * 1024.0 * 1024.0) // self.gpu_tick_kb) + def __translateToGpuMemSpinbox(self, value): + self.__gpu_mem.spinner.setValue(float(value * self.gpu_mem_tick_kb) / 1024.0 / 1024.0) + def __translateToGpuMemSlider(self, value): + self.__gpu_mem.slider.setValue(int(value * 1024.0 * 1024.0) // self.gpu_mem_tick_kb) class LayerTagsWidget(QtWidgets.QWidget): """ @@ -483,8 +531,10 @@ def __init__(self, layers, parent=None): QtCore.Qt.Horizontal, self) + # pylint: disable=no-member self.__buttons.accepted.connect(self.accept) self.__buttons.rejected.connect(self.reject) + # pylint: enable=no-member def accept(self): self._tags_widget.apply() diff --git a/cuegui/cuegui/LayerMonitorTree.py b/cuegui/cuegui/LayerMonitorTree.py index fdf0c249e..25916e16f 100644 --- a/cuegui/cuegui/LayerMonitorTree.py +++ b/cuegui/cuegui/LayerMonitorTree.py @@ -20,8 +20,8 @@ from __future__ import 
print_function from __future__ import division -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets from opencue.exception import EntityNotFoundException @@ -74,65 +74,70 @@ def __init__(self, parent): "will reserve for its use. If the frame begins to use\n" "more memory than this, the cuebot will increase this\n" "number.") - self.addColumn("Gpu", 40, id=8, - data=lambda layer: cuegui.Utils.memoryToString(layer.data.min_gpu), - sort=lambda layer: layer.data.min_gpu, + self.addColumn("Gpus", 45, id=8, + data=lambda layer: "%d" % layer.data.min_gpus, + sort=lambda layer: layer.data.min_gpus, + tip="The number of gpus that the frames in this layer\n" + "will reserve as a minimum.") + self.addColumn("Gpu Memory", 40, id=9, + data=lambda layer: cuegui.Utils.memoryToString(layer.data.min_gpu_memory), + sort=lambda layer: layer.data.min_gpu_memory, tip="The amount of gpu memory each frame in this layer\n" "will reserve for its use. Note that we may not have\n" "machines as much gpu memory as you request.") self.addColumn( - "MaxRss", 60, id=9, + "MaxRss", 60, id=10, data=lambda layer: cuegui.Utils.memoryToString(layer.data.layer_stats.max_rss), sort=lambda layer: layer.data.layer_stats.max_rss, tip="Maximum amount of memory used by any frame in\n" "this layer at any time since the job was launched.") - self.addColumn("Total", 40, id=10, + self.addColumn("Total", 40, id=11, data=lambda layer: layer.data.layer_stats.total_frames, sort=lambda layer: layer.data.layer_stats.total_frames, tip="Total number of frames in this layer.") - self.addColumn("Done", 40, id=11, + self.addColumn("Done", 40, id=12, data=lambda layer: layer.data.layer_stats.succeeded_frames, sort=lambda layer: layer.data.layer_stats.succeeded_frames, tip="Total number of done frames in this layer.") - self.addColumn("Run", 40, id=12, + self.addColumn("Run", 40, id=13, data=lambda layer: layer.data.layer_stats.running_frames, sort=lambda layer: 
layer.data.layer_stats.running_frames, tip="Total number or running frames in this layer.") - self.addColumn("Depend", 53, id=13, + self.addColumn("Depend", 53, id=14, data=lambda layer: layer.data.layer_stats.depend_frames, sort=lambda layer: layer.data.layer_stats.depend_frames, tip="Total number of dependent frames in this layer.") - self.addColumn("Wait", 40, id=14, + self.addColumn("Wait", 40, id=15, data=lambda layer: layer.data.layer_stats.waiting_frames, sort=lambda layer: layer.data.layer_stats.waiting_frames, tip="Total number of waiting frames in this layer.") - self.addColumn("Eaten", 40, id=15, + self.addColumn("Eaten", 40, id=16, data=lambda layer: layer.data.layer_stats.eaten_frames, sort=lambda layer: layer.data.layer_stats.eaten_frames, tip="Total number of eaten frames in this layer.") - self.addColumn("Dead", 40, id=16, + self.addColumn("Dead", 40, id=17, data=lambda layer: layer.data.layer_stats.dead_frames, sort=lambda layer: layer.data.layer_stats.dead_frames, tip="Total number of dead frames in this layer.") self.addColumn( - "Avg", 65, id=17, + "Avg", 65, id=18, data=lambda layer: cuegui.Utils.secondsToHHMMSS(layer.data.layer_stats.avg_frame_sec), sort=lambda layer: layer.data.layer_stats.avg_frame_sec, tip="Average number of HOURS:MINUTES:SECONDS per frame\nin this layer.") - self.addColumn("Tags", 100, id=18, + self.addColumn("Tags", 100, id=19, data=lambda layer: " | ".join(layer.data.tags), tip="The tags define what resources may be booked on\n" "frames in this layer.") - self.addColumn("Progress", 100, id=19, + self.addColumn("Progress", 100, id=20, delegate=cuegui.ItemDelegate.ProgressDelegate, data=lambda layer: layer.percentCompleted(), sort=lambda layer: layer.percentCompleted(), tip="Progress for the Layer") - self.addColumn("Timeout", 45, id=20, + self.addColumn("Timeout", 45, id=21, data=lambda layer: cuegui.Utils.secondsToHHHMM(layer.data.timeout*60), sort=lambda layer: layer.data.timeout, tip="Timeout for the frames, 
Hours:Minutes") - self.addColumn("Timeout LLU", 45, id=21, + self.addColumn("Timeout LLU", 45, id=22, data=lambda layer: cuegui.Utils.secondsToHHHMM(layer.data.timeout_llu*60), sort=lambda layer: layer.data.timeout_llu, tip="Timeout for a frames\' LLU, Hours:Minutes") @@ -226,7 +231,8 @@ def contextMenuEvent(self, e): if len(__selectedObjects) == 1: menu.addSeparator() - self.__menuActions.layers().addAction(menu, "useLocalCores") + if bool(int(self.app.settings.value("AllowDeeding", 0))): + self.__menuActions.layers().addAction(menu, "useLocalCores") if len({layer.data.range for layer in __selectedObjects}) == 1: self.__menuActions.layers().addAction(menu, "reorder") self.__menuActions.layers().addAction(menu, "stagger") diff --git a/cuegui/cuegui/Layout.py b/cuegui/cuegui/Layout.py new file mode 100644 index 000000000..9b26d23c8 --- /dev/null +++ b/cuegui/cuegui/Layout.py @@ -0,0 +1,62 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Functions for loading application layout and other state from disk.""" + +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +import os +import shutil + +from qtpy import QtCore + +import cuegui.Constants +import cuegui.Logger + +logger = cuegui.Logger.getLogger(__file__) + + +def startup(app_name): + """ + Reads config from disk, restoring default config if necessary. 
+ + :param app_name: application window name + :type app_name: str + :return: settings object containing the loaded settings + :rtype: QtCore.QSettings + """ + # E.g. ~/.config/.cuecommander/config.ini + config_path = "/.%s/config" % app_name.lower() + settings = QtCore.QSettings(QtCore.QSettings.IniFormat, QtCore.QSettings.UserScope, config_path) + logger.info('Reading config file from %s', settings.fileName()) + local = settings.fileName() + + # If the user has chose to revert the layout. delete the file and copy the default back. + if settings.value('RevertLayout'): + logger.warning('Found RevertLayout flag, will restore default config') + os.remove(local) + + # If the config file does not exist, copy over the default + if not os.path.exists(local): + default = os.path.join(cuegui.Constants.DEFAULT_INI_PATH, "%s.ini" % app_name.lower()) + logger.warning('Local config file not found at %s', local) + logger.warning('Copying %s to %s', default, local) + os.makedirs(os.path.dirname(local), exist_ok=True) + shutil.copy2(default, local) + settings.sync() + + return settings diff --git a/cuegui/cuegui/LimitSelectionWidget.py b/cuegui/cuegui/LimitSelectionWidget.py index 8a37e370e..a760d1b79 100644 --- a/cuegui/cuegui/LimitSelectionWidget.py +++ b/cuegui/cuegui/LimitSelectionWidget.py @@ -22,7 +22,7 @@ from builtins import str -from PySide2 import QtWidgets +from qtpy import QtWidgets import cuegui.AbstractDialog diff --git a/cuegui/cuegui/LimitsWidget.py b/cuegui/cuegui/LimitsWidget.py index b7eaccd3b..25c962492 100644 --- a/cuegui/cuegui/LimitsWidget.py +++ b/cuegui/cuegui/LimitsWidget.py @@ -20,9 +20,8 @@ from __future__ import print_function from __future__ import division -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -57,8 +56,10 @@ def __init__(self, parent): layout.addWidget(self.__btnRefresh, 0, 2) layout.addWidget(self.__monitorLimits, 2, 0, 3, 4) + # 
pylint: disable=no-member self.__btnAddLimit.clicked.connect(self.__addLimit) self.__btnRefresh.clicked.connect(self.updateSoon) + # pylint: enable=no-member self.__menuActions = cuegui.MenuActions.MenuActions(self, self.updateSoon, list) @@ -108,10 +109,8 @@ def __init__(self, parent): self.__menuActions = cuegui.MenuActions.MenuActions( self, self.updateSoon, self.selectedObjects) - self.itemClicked.connect(self.__itemSingleClickedToDouble) - # pylint: disable=no-member - QtGui.qApp.facility_changed.connect(self.__facilityChanged) - # pylint: enable=no-member + self.itemClicked.connect(self.__itemSingleClickedToDouble) # pylint: disable=no-member + self.app.facility_changed.connect(self.__facilityChanged) self.setUpdateInterval(60) diff --git a/cuegui/cuegui/LocalBooking.py b/cuegui/cuegui/LocalBooking.py index 475f0536f..23220e113 100644 --- a/cuegui/cuegui/LocalBooking.py +++ b/cuegui/cuegui/LocalBooking.py @@ -27,8 +27,8 @@ import os from socket import gethostname -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -76,7 +76,7 @@ def __init__(self, target, parent=None): self.__msg_widget = QtWidgets.QLabel(msg, self) self.layout().addWidget(self.__msg_widget) self.layout().addWidget(self.__deed_button) - self.__deed_button.pressed.connect(self.deedLocalhost) + self.__deed_button.pressed.connect(self.deedLocalhost) # pylint: disable=no-member self.__lba_group.setDisabled(True) self.__text_target = QtWidgets.QLabel(self.__target.data.name, self) @@ -124,9 +124,8 @@ def __init__(self, target, parent=None): self.__btn_clear = QtWidgets.QPushButton("Clear", self) - # # Setup the signals. 
- # + # pylint: disable=no-member self.__btn_clear.pressed.connect(self.clearCurrentHost) self.__select_host.activated.connect(self.__host_changed) self.__num_mem.valueChanged.connect(self.__text_num_mem.setValue) @@ -135,6 +134,7 @@ def __init__(self, target, parent=None): self.__num_frames.valueChanged.connect(self.__calculateCores) self.__run_mem.valueChanged.connect(self.__text_run_mem.setValue) self.__text_run_mem.valueChanged.connect(self.__run_mem.setValue) + # pylint: enable=no-member self.layout().addWidget(QtWidgets.QLabel("Target Host:")) self.layout().addWidget(self.__select_host) @@ -367,9 +367,11 @@ def __init__(self, target, parent=None): self.layout().addWidget(self.__booking) self.layout().addLayout(btn_layout) + # pylint: disable=no-member self.__booking.hosts_changed.connect(self.__updateOkButtion) self.__btn_ok.pressed.connect(self.doLocalBooking) self.__btn_cancel.pressed.connect(self.close) + # pylint: enable=no-member def __updateOkButtion(self): self.__btn_ok.setDisabled(not self.__booking.hostAvailable()) diff --git a/cuegui/cuegui/Main.py b/cuegui/cuegui/Main.py index b2c540c74..e46102b56 100644 --- a/cuegui/cuegui/Main.py +++ b/cuegui/cuegui/Main.py @@ -20,14 +20,12 @@ from __future__ import print_function from __future__ import division -import os -import shutil import signal -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtGui +import cuegui +import cuegui.Layout import cuegui.Constants import cuegui.Logger import cuegui.MainWindow @@ -41,23 +39,6 @@ logger = cuegui.Logger.getLogger(__file__) -class CueGuiApplication(QtWidgets.QApplication): - """The CueGUI application.""" - - # Global signals - display_log_file_content = QtCore.Signal(object) - double_click = QtCore.Signal(object) - facility_changed = QtCore.Signal() - single_click = QtCore.Signal(object) - unmonitor = QtCore.Signal(object) - view_hosts = QtCore.Signal(object) - view_object = QtCore.Signal(object) - view_procs = 
QtCore.Signal(object) - request_update = QtCore.Signal() - status = QtCore.Signal() - quit = QtCore.Signal() - - def cuetopia(argv): """Starts the Cuetopia window.""" startup("Cuetopia", cuegui.Constants.VERSION, argv) @@ -71,7 +52,7 @@ def cuecommander(argv): def startup(app_name, app_version, argv): """Starts an application window.""" - app = CueGuiApplication(argv) + app = cuegui.create_app(argv) # Start splash screen splash = cuegui.SplashWindow.SplashWindow( @@ -84,38 +65,15 @@ def startup(app_name, app_version, argv): app.setWindowIcon(QtGui.QIcon('%s/windowIcon.png' % cuegui.Constants.RESOURCE_PATH)) app.setApplicationName(app_name) - app.lastWindowClosed.connect(app.quit) + app.lastWindowClosed.connect(app.quit) # pylint: disable=no-member - QtGui.qApp.threadpool = cuegui.ThreadPool.ThreadPool(3, parent=app) - QtGui.qApp.threads = [] + app.threadpool = cuegui.ThreadPool.ThreadPool(3, parent=app) - config_path = "/.%s/config" % app_name.lower() - settings = QtCore.QSettings(QtCore.QSettings.IniFormat, QtCore.QSettings.UserScope, config_path) - local = settings.fileName() - # If the user has chose to revert the layout. delete the file and copy the default back. 
- if settings.value('RevertLayout'): - os.remove(local) - - QtGui.qApp.settings = settings + settings = cuegui.Layout.startup(app_name) + app.settings = settings cuegui.Style.init() - # If the config file does not exist, copy over the default - # pylint: disable=broad-except - if not os.path.exists(local): - default = os.path.join(cuegui.Constants.DEFAULT_INI_PATH, "%s.ini" % app_name.lower()) - logger.warning('Not found: %s\nCopying: %s', local, default) - try: - os.mkdir(os.path.dirname(local)) - except Exception as e: - logger.debug(e) - try: - shutil.copy2(default, local) - except Exception as e: - logger.debug(e) - settings.sync() - # pylint: enable=broad-except - mainWindow = cuegui.MainWindow.MainWindow(app_name, app_version, None) mainWindow.displayStartupNotice() mainWindow.show() @@ -130,18 +88,13 @@ def startup(app_name, app_version, argv): # TODO(#609) Refactor the CueGUI classes to make this garbage collector # replacement unnecessary. - # pylint: disable=unused-variable - gc = cuegui.GarbageCollector.GarbageCollector(parent=app, debug=False) - # pylint: enable=unused-variable - app.aboutToQuit.connect(closingTime) + gc = cuegui.GarbageCollector.GarbageCollector(parent=app, debug=False) # pylint: disable=unused-variable + app.aboutToQuit.connect(closingTime) # pylint: disable=no-member app.exec_() - def closingTime(): """Window close callback.""" logger.info("Closing all threads...") - # pylint: disable=no-member - threads = QtGui.qApp.threads - # pylint: enable=no-member + threads = cuegui.app().threads for thread in threads: cuegui.Utils.shutdownThread(thread) diff --git a/cuegui/cuegui/MainWindow.py b/cuegui/cuegui/MainWindow.py index 35891e2fd..942a9a717 100644 --- a/cuegui/cuegui/MainWindow.py +++ b/cuegui/cuegui/MainWindow.py @@ -28,9 +28,9 @@ import sys import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue @@ 
-53,16 +53,14 @@ class MainWindow(QtWidgets.QMainWindow): def __init__(self, app_name, app_version, window_name, parent = None): QtWidgets.QMainWindow.__init__(self, parent) + self.app = cuegui.app() self.__actions_facility = {} self.facility_default = None self.facility_dict = None self.windowMenu = None - self.qApp = QtGui.qApp - # pylint: disable=no-member - self.settings = QtGui.qApp.settings - # pylint: enable=no-member + self.settings = self.app.settings self.windows_names = [app_name] + ["%s_%s" % (app_name, num) for num in range(2, 5)] self.app_name = app_name self.app_version = app_version @@ -96,10 +94,7 @@ def __init__(self, app_name, app_version, window_name, parent = None): # Restore saved settings self.__restoreSettings() - # pylint: disable=no-member - QtGui.qApp.status.connect(self.showStatusBarMessage) - # pylint: enable=no-member - + self.app.status.connect(self.showStatusBarMessage) self.showStatusBarMessage("Ready") def displayStartupNotice(self): @@ -184,9 +179,7 @@ def __facilityMenuHandle(self, action): for facility in list(self.__actions_facility.values()): if facility.isChecked(): opencue.Cuebot.setFacility(str(facility.text())) - # pylint: disable=no-member - QtGui.qApp.facility_changed.emit() - # pylint: enable=no-member + self.app.facility_changed.emit() return ################################################################################ @@ -205,14 +198,14 @@ def __createMenus(self): # Menu Bar: File -> Close Window close = QtWidgets.QAction(QtGui.QIcon('icons/exit.png'), '&Close Window', self) close.setStatusTip('Close Window') - close.triggered.connect(self.__windowCloseWindow) + close.triggered.connect(self.__windowCloseWindow) # pylint: disable=no-member self.fileMenu.addAction(close) # Menu Bar: File -> Exit Application exitAction = QtWidgets.QAction(QtGui.QIcon('icons/exit.png'), 'E&xit Application', self) exitAction.setShortcut('Ctrl+Q') exitAction.setStatusTip('Exit application') - 
exitAction.triggered.connect(self.__windowCloseApplication) + exitAction.triggered.connect(self.__windowCloseApplication) # pylint: disable=no-member self.fileMenu.addAction(exitAction) self.__windowMenuSetup(self.windowMenu) @@ -223,17 +216,17 @@ def __createMenus(self): # Menu Bar: Help -> Online User Guide. action = QtWidgets.QAction('Online User Guide', self) - action.triggered.connect(self.openUserGuide) + action.triggered.connect(self.openUserGuide) # pylint: disable=no-member self.helpMenu.addAction(action) # Menu Bar: Help -> Make a Suggestion action = QtWidgets.QAction('Make a Suggestion', self) - action.triggered.connect(self.openSuggestionPage) + action.triggered.connect(self.openSuggestionPage) # pylint: disable=no-member self.helpMenu.addAction(action) # Menu Bar: Help -> Report a Bug action = QtWidgets.QAction('Report a Bug', self) - action.triggered.connect(self.openBugPage) + action.triggered.connect(self.openBugPage) # pylint: disable=no-member self.helpMenu.addAction(action) self.helpMenu.addSeparator() @@ -242,7 +235,7 @@ def __createMenus(self): about = QtWidgets.QAction(QtGui.QIcon('icons/about.png'), 'About', self) about.setShortcut('F1') about.setStatusTip('About') - about.triggered.connect(self.displayAbout) + about.triggered.connect(self.displayAbout) # pylint: disable=no-member self.helpMenu.addAction(about) ################################################################################ @@ -255,17 +248,17 @@ def __windowMenuSetup(self, menu): # Menu Bar: Window -> Change Window Title changeTitle = QtWidgets.QAction("Change Window Title", self) - changeTitle.triggered.connect(self.__windowMenuHandleChangeTitle) + changeTitle.triggered.connect(self.__windowMenuHandleChangeTitle) # pylint: disable=no-member menu.addAction(changeTitle) # Menu Bar: Window -> Save Window Settings saveWindowSettings = QtWidgets.QAction("Save Window Settings", self) - saveWindowSettings.triggered.connect(self.__saveSettings) + 
saveWindowSettings.triggered.connect(self.__saveSettings) # pylint: disable=no-member menu.addAction(saveWindowSettings) # Menu Bar: Window -> Revert To Default Window Layout revertWindowSettings = QtWidgets.QAction("Revert To Default Window Layout", self) - revertWindowSettings.triggered.connect(self.__revertLayout) + revertWindowSettings.triggered.connect(self.__revertLayout) # pylint: disable=no-member menu.addAction(revertWindowSettings) menu.addSeparator() @@ -359,24 +352,18 @@ def windowMenuOpenWindow(self, name): def __windowOpened(self): """Called from __init__ on window creation""" - # pylint: disable=no-member - self.qApp.quit.connect(self.close) + self.app.quit.connect(self.close) self.windows.append(self) - self.qApp.closingApp = False - # pylint: enable=no-member + self.app.closingApp = False def __windowClosed(self): """Called from closeEvent on window close""" # Disconnect to avoid multiple attempts to close a window - # pylint: disable=no-member - self.qApp.quit.connect(self.close) - # pylint: enable=no-member + self.app.quit.connect(self.close) # Save the fact that this window is open or not when the app closed - # pylint: disable=no-member - self.settings.setValue("%s/Open" % self.name, self.qApp.closingApp) - # pylint: enable=no-member + self.settings.setValue("%s/Open" % self.name, self.app.closingApp) # pylint: disable=bare-except try: @@ -392,10 +379,8 @@ def __windowCloseWindow(self): def __windowCloseApplication(self): """Called when the entire application should exit. 
Signals other windows to exit.""" - # pylint: disable=no-member - self.qApp.closingApp = True - self.qApp.quit.emit() - # pylint: enable=no-member + self.app.closingApp = True + self.app.quit.emit() ################################################################################ @@ -405,7 +390,7 @@ def __toggleFullscreenSetup(self, menu): QtGui.QIcon('icons/fullscreen.png'), 'Toggle Full-Screen', self) fullscreen.setShortcut('Ctrl+F') fullscreen.setStatusTip('Toggle Full-Screen') - fullscreen.triggered.connect(self.__toggleFullscreen) + fullscreen.triggered.connect(self.__toggleFullscreen) # pylint: disable=no-member menu.addAction(fullscreen) def __toggleFullscreen(self): @@ -420,9 +405,7 @@ def __toggleFullscreen(self): def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_Space: - # pylint: disable=no-member - QtGui.qApp.request_update.emit() - # pylint: enable=no-member + self.app.request_update.emit() event.accept() def closeEvent(self, event): diff --git a/cuegui/cuegui/MenuActions.py b/cuegui/cuegui/MenuActions.py index 5c9b63139..8dc89577f 100644 --- a/cuegui/cuegui/MenuActions.py +++ b/cuegui/cuegui/MenuActions.py @@ -28,19 +28,21 @@ import subprocess import time -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtWidgets import six import FileSequence import opencue import opencue.compiled_proto.job_pb2 +import opencue.wrappers.depend # pylint: disable=cyclic-import import cuegui.Action import cuegui.Comments import cuegui.Constants import cuegui.CreatorDialog +import cuegui.CueJobMonitorTree import cuegui.DependDialog import cuegui.DependWizard import cuegui.EmailDialog @@ -50,6 +52,7 @@ import cuegui.LocalBooking import cuegui.Logger import cuegui.PreviewWidget +import cuegui.ProcChildren import cuegui.ServiceDialog import cuegui.ShowDialog import cuegui.TasksDialog @@ -77,6 +80,7 @@ def __init__(self, caller, updateCallable, selectedRpcObjectsCallable, sourceCal self.__selectedRpcObjects = 
selectedRpcObjectsCallable self._getSource = sourceCallable self._update = updateCallable + self.app = cuegui.app() self.__actionCache = {} @@ -159,7 +163,7 @@ def addAction(self, menu, actionName, callback = None): if isinstance(callback, six.string_types): callback = getattr(self, callback) - action.triggered.connect(callback) + action.triggered.connect(callback) # pylint: disable=no-member self.__actionCache[key] = action menu.addAction(self.__actionCache[key]) @@ -217,9 +221,7 @@ def unmonitor(self, rpcObjects=None): def view(self, rpcObjects=None): for job in self._getOnlyJobObjects(rpcObjects): - # pylint: disable=no-member - QtGui.qApp.view_object.emit(job) - # pylint: enable=no-member + self.app.view_object.emit(job) viewDepends_info = ["&View Dependencies...", None, "log"] @@ -269,6 +271,38 @@ def setMaxCores(self, rpcObjects=None): job.setMaxCores(float(value)) self._update() + setMinGpus_info = ["Set Minimum Gpus...", "Set Job(s) Minimum Gpus", "configure"] + def setMinGpus(self, rpcObjects=None): + jobs = self._getOnlyJobObjects(rpcObjects) + if jobs: + current = max([job.data.min_gpus for job in jobs]) + title = "Set Minimum Gpus" + body = "Please enter the new minimum gpus value:" + (value, choice) = QtWidgets.QInputDialog.getInt(self._caller, + title, body, + current, + 0, 500, 0) + if choice: + for job in jobs: + job.setMinGpus(int(value)) + self._update() + + setMaxGpus_info = ["Set Maximum Gpus...", "Set Job(s) Maximum Gpus", "configure"] + def setMaxGpus(self, rpcObjects=None): + jobs = self._getOnlyJobObjects(rpcObjects) + if jobs: + current = max([job.data.max_gpus for job in jobs]) + title = "Set Maximum Gpus" + body = "Please enter the new maximum gpus value:" + (value, choice) = QtWidgets.QInputDialog.getInt(self._caller, + title, body, + current, + 0, 500, 0) + if choice: + for job in jobs: + job.setMaxGpus(int(value)) + self._update() + setPriority_info = ["Set Priority...", None, "configure"] def setPriority(self, rpcObjects=None): @@ 
-327,13 +361,76 @@ def resume(self, rpcObjects=None): def kill(self, rpcObjects=None): jobs = self._getOnlyJobObjects(rpcObjects) if jobs: - if cuegui.Utils.questionBoxYesNo(self._caller, "Kill jobs?", - "Are you sure you want to kill these jobs?", + msg = ("Are you sure you want to kill these jobs?\n\n" + "** Note: This will stop all running frames and " + "permanently remove the jobs from the cue. " + "The jobs will NOT be able to return once killed.") + if cuegui.Utils.questionBoxYesNo(self._caller, "Kill jobs?", msg, [job.data.name for job in jobs]): for job in jobs: job.kill() + self.killDependents(jobs) self._update() + def killDependents(self, jobs): + dependents = self.getRecursiveDependentJobs(jobs) + if not dependents: + return + if cuegui.Utils.questionBoxYesNo(self._caller, + "Kill depending jobs?", + "The jobs have been killed. " + "Do you want to kill %s jobs that depend on it?" % + len(dependents), + sorted([dep.name() for dep in dependents])): + for depJob in dependents: + try: + depJob.kill() + except opencue.exception.CueException as e: + errMsg = "Failed to kill depending job: %s - %s" % (depJob.name(), e) + logger.warning(errMsg) + else: + # Drop only direct dependents. 
+ for job in dependents: + try: + self.dropJobsDependingOnThis(job) + except opencue.exception.CueException as e: + logger.warning("Failed to drop dependencies: %s", e) + + def getRecursiveDependentJobs(self, jobs, seen=None, active_only=True): + seen = set() if seen is None else seen + dependents = [] + if not jobs: + return dependents + for job in jobs: + for dep in self.getExternalDependentNames(job, active_only): + if dep.data.name not in seen: + dependents.append(dep) + seen.add(dep.data.name) + return dependents + self.getRecursiveDependentJobs(dependents, + seen, + active_only) + + def getExternalDependentNames(self, job, active_only=True): + # pylint: disable=consider-using-set-comprehension + job_names = set([dep.dependErJob() + for dep in job.getWhatDependsOnThis() + if (not dep.isInternal()) + and (dep.isActive() if active_only else True)]) + + return [self.getJobByName(job_name) for job_name in job_names] + + def getJobByName(self, job_name): + jobs = opencue.api.getJobs(substr=[job_name], include_finished=True) + if not jobs: + raise Exception("Job %s not found" % job_name) + return jobs[0] + + def dropJobsDependingOnThis(self, job): + for dep in job.getWhatDependsOnThis(): + if not dep.isInternal(): + job = self.getJobByName(dep.dependOnJob()) + job.dropDepends(opencue.wrappers.depend.Depend.DependTarget.EXTERNAL) + eatDead_info = ["Eat dead frames", None, "eat"] def eatDead(self, rpcObjects=None): @@ -496,17 +593,21 @@ def sendToGroup(self, rpcObjects=None): return title = "Send jobs to group" - groups = { - group.data.name: group for group in opencue.api.findShow(jobs[0].data.show).getGroups()} - body = "What group should these jobs move to?\n" + \ - "\n".join([job.data.name for job in jobs]) - - (group, choice) = QtWidgets.QInputDialog.getItem( - self._caller, title, body, sorted(groups.keys()), 0, False) - if not choice: - return - - groups[str(group)].reparentJobs(jobs) + groups = {group.data.name: group for group in opencue.api.findShow( + 
jobs[0].data.show).getGroups()} + + body_content = cuegui.CueJobMonitorTree.Body(group_names=[], + group_ids=[], + job_names=[job.name() for job in jobs], + job_ids=jobs) + + dialog = cuegui.CueJobMonitorTree.MoveDialog(title=title, + text="What group should these jobs move to?", + event_item=None, + items=body_content, + dst_groups=groups, + send_to_groups=True) + dialog.exec_() self._update() useLocalCores_info = [ @@ -613,6 +714,24 @@ def setMinMemoryKb(self, rpcObjects=None): layer.setMinMemory(int(value * 1048576)) self._update() + setMinGpuMemoryKb_info = [ + "Set Minimum Gpu Memory", + "Set the amount of Gpu memory required for this layer", "configure"] + + def setMinGpuMemoryKb(self, rpcObjects=None): + layers = self._getOnlyLayerObjects(rpcObjects) + if layers: + current = max([layer.data.min_gpu_memory / 1048576 for layer in layers]) + title = "Set minimum amount of Gpu memory required" + body = ('Please enter the new minimum amount of Gpu memory in GB that frames ' + 'in the selected layer(s) should require:') + (value, choice) = QtWidgets.QInputDialog.getDouble( + self._caller, title, body, current, 0.01, 64.0, 1) + if choice: + for layer in layers: + layer.setMinGpuMemory(int(value * 1048576)) + self._update() + useLocalCores_info = [ "Use local cores...", "Set a single layer to use the local desktop cores.", "configure"] @@ -821,6 +940,28 @@ def viewLastLog(self, rpcObjects=None): else: cuegui.Utils.popupView(path) + viewProcesses_info = ["View Processes", None, "viewProcesses"] + + def viewProcesses(self, rpcObjects=None): + """ Display a Proc's child processes Host statistics.""" + frames = self._getOnlyFrameObjects(rpcObjects) + hosts = list({frame.data.last_resource.split("/")[0] + for frame in frames if frame.data.last_resource}) + if hosts: + layers = self._getSource().getLayers() + layer = [l for l in layers if l.data.name == frames[0].data.layer_name] + + if len(layer) > 0: + job = self._getSource() + text = "Displaying host stats for each 
child process for job:\n%s" % job.name() + title = "View Running Child Proc Host Stats" + procDialog = cuegui.ProcChildren.ProcChildrenDialog(job=job, + layer=layer[0], + hosts=hosts, + text=text, + title=title) + procDialog.exec_() + useLocalCores_info = ["Use local cores...", "Set a single frame to use the local desktop cores.", "configure"] @@ -853,10 +994,8 @@ def viewHost(self, rpcObjects=None): hosts = list({frame.data.last_resource.split("/")[0] for frame in frames if frame.data.last_resource}) if hosts: - # pylint: disable=no-member - QtGui.qApp.view_hosts.emit(hosts) - QtGui.qApp.single_click.emit(opencue.api.findHost(hosts[0])) - # pylint: enable=no-member + self.app.view_hosts.emit(hosts) + self.app.single_click.emit(opencue.api.findHost(hosts[0])) getWhatThisDependsOn_info = ["print getWhatThisDependsOn", None, "log"] @@ -1310,9 +1449,7 @@ def viewProc(self, rpcObjects=None): hosts = self._getOnlyHostObjects(rpcObjects) hosts = list({host.data.name for host in hosts}) if hosts: - # pylint: disable=no-member - QtGui.qApp.view_procs.emit(hosts) - # pylint: enable=no-member + self.app.view_procs.emit(hosts) lock_info = ["Lock Host", None, "lock"] @@ -1341,7 +1478,6 @@ def delete(self, rpcObjects=None): body, [host.data.name for host in hosts]): for host in hosts: - # Delete current render partitions to avoid oracle exception for rp in host.getRenderPartitions(): rp.delete() @@ -1434,10 +1570,28 @@ def changeAllocation(self, rpcObjects=None): self._caller, title, body, sorted(allocations.keys()), 0, False) if choice: allocation = allocations[str(allocationName)] + error_hosts = [] for host in hosts: - self.cuebotCall(host.setAllocation, - "Set Allocation on %s Failed" % host.data.name, - allocation) + # pylint: disable=broad-except + try: + self.cuebotCall(host.setAllocation, + "Set Allocation on %s Failed" % host.data.name, + allocation) + except Exception as e: + # Handle allocation modification errors separately + # pylint: disable=no-member + if 
(hasattr(e, "details") and + "EntityModificationError" in str(e.details())): + error_hosts.append(host.name()) + else: + raise + if error_hosts: + error_msg = "{hosts} not moved.\nHosts with reserved cores " \ + "cannot be moved between allocations." + QtWidgets.QMessageBox.warning(self._caller, + "Warning", + error_msg.format(hosts=", ".join(error_hosts)), + QtWidgets.QMessageBox.Ok) self._update() setRepair_info = ["Set Repair State", None, "configure"] @@ -1461,6 +1615,24 @@ def clearRepair(self, rpcObjects=None): host.setHardwareState(down) self._update() + setThreadModeAuto_info = ["Thread Mode Auto", None, "configure"] + def setThreadModeAuto(self, rpcObjects=None): + for host in self._getOnlyHostObjects(rpcObjects): + host.setThreadMode("AUTO") + self._update() + + setThreadModeAll_info = ["Thread Mode All", None, "configure"] + def setThreadModeAll(self, rpcObjects=None): + for host in self._getOnlyHostObjects(rpcObjects): + host.setThreadMode("ALL") + self._update() + + setThreadModeVariable_info = ["Thread Mode Variable", None, "configure"] + def setThreadModeVariable(self, rpcObjects=None): + for host in self._getOnlyHostObjects(rpcObjects): + host.setThreadMode("VARIABLE") + self._update() + class ProcActions(AbstractActions): """Actions for procs.""" @@ -1473,9 +1645,7 @@ def __init__(self, *args): def view(self, rpcObjects=None): for job in list({proc.data.job_name for proc in self._getOnlyProcObjects(rpcObjects)}): try: - # pylint: disable=no-member - QtGui.qApp.view_object.emit(opencue.api.findJob(job)) - # pylint: enable=no-member + self.app.view_object.emit(opencue.api.findJob(job)) except opencue.exception.CueException: logger.warning("Unable to load: %s", job) diff --git a/cuegui/cuegui/MiscDialog.py b/cuegui/cuegui/MiscDialog.py index c3b1185f8..d2712af93 100644 --- a/cuegui/cuegui/MiscDialog.py +++ b/cuegui/cuegui/MiscDialog.py @@ -20,7 +20,7 @@ from __future__ import print_function from __future__ import division -from PySide2 import 
QtWidgets +from qtpy import QtWidgets import cuegui.AbstractDialog diff --git a/cuegui/cuegui/Plugins.py b/cuegui/cuegui/Plugins.py index 2b67ebd7e..a4d48563c 100644 --- a/cuegui/cuegui/Plugins.py +++ b/cuegui/cuegui/Plugins.py @@ -57,9 +57,8 @@ import traceback import pickle -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Constants import cuegui.Logger @@ -98,13 +97,12 @@ def __init__(self, mainWindow, name): self.__running = [] self.name = name self.mainWindow = mainWindow + self.app = cuegui.app() self.__menu_separator = " \t-> " # Load plugin paths from the config file - # pylint: disable=no-member - __pluginPaths = QtGui.qApp.settings.value("Plugin_Paths", []) - # pylint: enable=no-member + __pluginPaths = self.app.settings.value("Plugin_Paths", []) for path in cuegui.Constants.DEFAULT_PLUGIN_PATHS + __pluginPaths: self.loadPluginPath(str(path)) @@ -123,9 +121,7 @@ def loadConfigFilePlugins(self, configGroup): The imported module must have an init function and a QMainWindow will be passed to it. """ - # pylint: disable=no-member - __plugins = QtGui.qApp.settings.value("%s/Plugins" % configGroup, []) - # pylint: enable=no-member + __plugins = self.app.settings.value("%s/Plugins" % configGroup, []) for plugin in __plugins: path = os.path.dirname(str(plugin)) @@ -181,26 +177,20 @@ def saveState(self): opened.append("%s::%s" % (plugin[0], json.dumps(plugin[1].pluginSaveState()))) except Exception as e: logger.warning("Error saving plugin state for: %s\n%s", plugin[0], e) - # pylint: disable=no-member - QtGui.qApp.settings.setValue("%s/Plugins_Opened" % self.name, opened) - # pylint: enable=no-member + self.app.settings.setValue("%s/Plugins_Opened" % self.name, opened) def restoreState(self): """Loads any user defined plugin directories and restores all open plugins. 
Calls .restoreSettings (if available) on all plugins.""" # Loads any user defined plugin directories - # pylint: disable=no-member - pluginPaths = QtGui.qApp.settings.value("Plugins/Paths", []) - # pylint: enable=no-member + pluginPaths = self.app.settings.value("Plugins/Paths", []) for path in pluginPaths: self.loadPluginPath(str(path)) # Runs any plugins that were saved to the settings - # pylint: disable=no-member - openPlugins = QtGui.qApp.settings.value("%s/Plugins_Opened" % self.name) or [] - # pylint: enable=no-member + openPlugins = self.app.settings.value("%s/Plugins_Opened" % self.name) or [] for plugin in openPlugins: if '::' in plugin: plugin_name, plugin_state = str(plugin).split("::") @@ -357,6 +347,7 @@ class Plugin(object): def __init__(self): self.__settings = [] + self.app = cuegui.app() def pluginRestoreState(self, saved_settings): """Called on plugin start with any previously saved state. diff --git a/cuegui/cuegui/PreviewWidget.py b/cuegui/cuegui/PreviewWidget.py index 6e60a0a6a..5dfea8658 100644 --- a/cuegui/cuegui/PreviewWidget.py +++ b/cuegui/cuegui/PreviewWidget.py @@ -33,9 +33,8 @@ import urllib.request import xml.etree.ElementTree as Et -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Logger import cuegui.Utils @@ -55,10 +54,12 @@ def __init__(self, job, frame, aovs=False, parent=None): :param frame: frame to display :type aovs: bool :param aovs: whether to display AOVs or just the main image - :type parent: PySide2.QtWidgets.QWidget + :type parent: qtpy.QtWidgets.QWidget :param parent: the parent widget """ QtWidgets.QDialog.__init__(self, parent) + self.app = cuegui.app() + self.__job = job self.__frame = frame self.__aovs = aovs @@ -93,9 +94,7 @@ def process(self): self.__itvFile = self.__writePlaylist(playlist) self.__previewThread = PreviewProcessorWatchThread(items, self) - # pylint: disable=no-member - 
QtGui.qApp.threads.append(self.__previewThread) - # pylint: enable=no-member + self.app.threads.append(self.__previewThread) self.__previewThread.start() self.__progbar.setRange(0, len(items)) diff --git a/cuegui/cuegui/ProcChildren.py b/cuegui/cuegui/ProcChildren.py new file mode 100644 index 000000000..c281f7970 --- /dev/null +++ b/cuegui/cuegui/ProcChildren.py @@ -0,0 +1,174 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +"""An interface for redirecting resources from one job to another job. + +The concept here is that there is a target job that needs procs. The user would choose the job. +The highest core/memory value would be detected and would populate 2 text boxes for cores and +memory. The user could then adjust these and hit search. 
The search will find all hosts that have +frames running that can be redirected to the target job.""" + + +from __future__ import absolute_import +from __future__ import print_function +from __future__ import division + +from builtins import str + +from qtpy import QtGui +from qtpy import QtWidgets + +import opencue + +import cuegui.Utils + +class ProcChildren(QtWidgets.QWidget): + """Widget for displaying Host statistics for a Proc's child processes.""" + + HEADERS = ["PID", "Name", "Start Time", "Rss (KB)", "VSize (KB)", + "Statm Rss (KB)", "Statm Size (KB)", "Cmd line"] + + def __init__(self, job, layer, hosts, parent=None): + """ + Initializes the list of procs for a given job to display + + :param job: job Object for this item (opencue.wrappers.job.Job) + :param layer: job Object for this item (opencue.wrappers.layer.Layer) + :param hosts: list of host Object for this item (List[opencue.wrappers.host.Host]) + :param parent: Optional parent for this item + """ + QtWidgets.QWidget.__init__(self, parent) + self._data = {} + + self._job = job + self._layer = layer + self._hosts = hosts + self._model = QtGui.QStandardItemModel(self) + self._model.setColumnCount(5) + self._model.setHorizontalHeaderLabels(ProcChildren.HEADERS) + + self._tree = QtWidgets.QTreeView(self) + self._tree.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) + self._tree.setModel(self._model) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(self._tree) + + def update(self): + """ Updates visual representation with latest data""" + self._model.clear() + self._model.setHorizontalHeaderLabels(ProcChildren.HEADERS) + childrenProc = opencue.compiled_proto.report_pb2.ChildrenProcStats() + data = { } + + try: + procs = opencue.api.getProcs(job=[self._job.name()], + layer=[x.name() for x in self._job.getLayers()], + host=self._hosts) + for proc in procs: + data['children_processes'] =\ + childrenProc.FromString(proc.data.child_processes).children + + name = 
proc.data.name.split("/")[0] + if name not in data: + cue_host = opencue.api.findHost(name) + data['host'] = cue_host + self._addProc(data) + + self._data = data + + except opencue.exception.CueException: + msg = ('No Proc Data available: \n%s ' + % (self._job.name())) + cuegui.Utils.showErrorMessageBox(msg) + + def _addProc(self, entry): + host = entry["host"] + + checkbox = QtGui.QStandardItem(host.data.name) + + self._model.appendRow([checkbox]) + + for proc in entry['children_processes']: + checkbox.appendRow([QtGui.QStandardItem(proc.stat.pid), + QtGui.QStandardItem(proc.stat.name), + QtGui.QStandardItem(proc.start_time), + QtGui.QStandardItem(str(proc.stat.rss)), + QtGui.QStandardItem(str(proc.stat.vsize)), + QtGui.QStandardItem(str(proc.statm.rss)), + QtGui.QStandardItem(str(proc.statm.size)), + QtGui.QStandardItem(str(proc.cmdline))]) + + self._tree.setExpanded(self._model.indexFromItem(checkbox), True) + self._tree.resizeColumnToContents(0) + + +class ProcChildrenDialog(QtWidgets.QDialog): + """ + Dialog for displaying Host statistics for a Proc's child processes + """ + def __init__(self, job, layer, hosts, text, title, parent=None): + """ + Initializes the data to be displayed + :ptype job: opencue.wrappers.job.Job + :param job: job Object for this item + :ptype layer: opencue.wrappers.layer.Layer + :param layer: layer Object for this item + :ptype hosts: List[opencue.wrappers.host.Host] + :param hosts: list of hosts Object for this item + :ptype text: str + :param text: Description of what is being displayed + :ptype title: str + :param title: Window Title + :param parent: AbstractActions + :param parent: The dialog's parent + """ + + QtWidgets.QDialog.__init__(self, parent) + self.parent = parent + self.job = job + self.text = text + self.title = title + self.setWindowTitle(self.title) + self._childProcStats = ProcChildren(job, layer, hosts, parent=parent) + self.resize(920, 420) + + _labelText = QtWidgets.QLabel(text, self) + 
_labelText.setWordWrap(True) + _btnUpdate = QtWidgets.QPushButton("Refresh", self) + _btnClose = QtWidgets.QPushButton("Close", self) + + _vlayout = QtWidgets.QVBoxLayout(self) + _vlayout.addWidget(_labelText) + _vlayout.addWidget(self._childProcStats) + + _hlayout = QtWidgets.QHBoxLayout() + _hlayout.addWidget(_btnUpdate) + _hlayout.addWidget(_btnClose) + _vlayout.addLayout(_hlayout) + + self._childProcStats.update() + # pylint: disable=no-member + _btnClose.clicked.connect(self.accept) + _btnUpdate.clicked.connect(self.refresh) + # pylint: enable=no-member + + def refresh(self): + """Update host report statistics""" + self._childProcStats.update() + + def accept(self): + """Exit""" + self.close() diff --git a/cuegui/cuegui/ProcMonitor.py b/cuegui/cuegui/ProcMonitor.py index 63035d7da..b4ae4f5d4 100644 --- a/cuegui/cuegui/ProcMonitor.py +++ b/cuegui/cuegui/ProcMonitor.py @@ -22,9 +22,8 @@ from builtins import str -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Logger import cuegui.ProcMonitorTree @@ -42,6 +41,7 @@ class ProcMonitor(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() self.__filterByHostNameLastInput = None @@ -71,10 +71,8 @@ def __init__(self, parent): self.__viewHostsSetup() - # pylint: disable=no-member - if bool(int(QtGui.qApp.settings.value("AutoRefreshMonitorProc", 1))): + if bool(int(self.app.settings.value("AutoRefreshMonitorProc", 1))): self.updateRequest() - # pylint: enable=no-member def updateRequest(self): """Requests an update to the widget's contents.""" @@ -112,11 +110,11 @@ def __filterByHostNameSetup(self, layout): btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setFixedWidth(24) layout.addWidget(btn) - btn.clicked.connect(self.__filterByHostNameClear) + btn.clicked.connect(self.__filterByHostNameClear) # pylint: disable=no-member self.__filterByHostNameClearBtn = btn 
self.__filterByHostNameLastInput = None - self.__filterByHostName.editingFinished.connect(self.__filterByHostNameHandle) + self.__filterByHostName.editingFinished.connect(self.__filterByHostNameHandle) # pylint: disable=no-member def __filterByHostNameHandle(self): hosts = str(self.__filterByHostName.text()).split() @@ -138,14 +136,12 @@ def __refreshToggleCheckBoxSetup(self, layout): layout.addWidget(checkBox) if self.procMonitorTree.enableRefresh: checkBox.setCheckState(QtCore.Qt.Checked) - checkBox.stateChanged.connect(self.__refreshToggleCheckBoxHandle) + checkBox.stateChanged.connect(self.__refreshToggleCheckBoxHandle) # pylint: disable=no-member __refreshToggleCheckBoxCheckBox = checkBox def __refreshToggleCheckBoxHandle(self, state): self.procMonitorTree.enableRefresh = bool(state) - # pylint: disable=no-member - QtGui.qApp.settings.setValue("AutoRefreshMonitorProc", int(bool(state))) - # pylint: enable=no-member + self.app.settings.setValue("AutoRefreshMonitorProc", int(bool(state))) # ============================================================================== # Button to refresh @@ -158,7 +154,7 @@ def __refreshButtonSetup(self, layout): self.btn_refresh.setMaximumHeight(FILTER_HEIGHT) self.btn_refresh.setFocusPolicy(QtCore.Qt.NoFocus) layout.addWidget(self.btn_refresh) - self.btn_refresh.clicked.connect(self.procMonitorTree.updateRequest) + self.btn_refresh.clicked.connect(self.procMonitorTree.updateRequest) # pylint: disable=no-member self.procMonitorTree.updated.connect(self.__refreshButtonDisableHandle) def __refreshButtonEnableHandle(self): @@ -182,7 +178,7 @@ def __clearButtonSetup(self, layout): btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setContentsMargins(0,0,0,0) layout.addWidget(btn) - btn.clicked.connect(self.__clearButtonHandle) + btn.clicked.connect(self.__clearButtonHandle) # pylint: disable=no-member def __clearButtonHandle(self): """Called when the clear button is clicked""" @@ -194,9 +190,7 @@ def __clearButtonHandle(self): # Monitors 
and handles the view_procs signal # ============================================================================== def __viewProcsSetup(self): - # pylint: disable=no-member - QtGui.qApp.view_procs.connect(self.__viewProcsHandle) - # pylint: enable=no-member + self.app.view_procs.connect(self.__viewProcsHandle) def __viewProcsHandle(self, hosts): self.procMonitorTree.procSearch.options['host'] = hosts @@ -206,9 +200,7 @@ def __viewProcsHandle(self, hosts): # Views procs when a host is double clicked # ============================================================================== def __hostDoubleClickedSetup(self): - # pylint: disable=no-member - QtGui.qApp.view_object.connect(self.__hostDoubleClickedHandle) - # pylint: enable=no-member + self.app.view_object.connect(self.__hostDoubleClickedHandle) def __hostDoubleClickedHandle(self, rpcObject): if cuegui.Utils.isHost(rpcObject): @@ -219,9 +211,7 @@ def __hostDoubleClickedHandle(self, rpcObject): # Monitors and handles the view_hosts signal # ============================================================================== def __viewHostsSetup(self): - # pylint: disable=no-member - QtGui.qApp.view_hosts.connect(self.__viewHostsHandle) - # pylint: enable=no-member + self.app.view_hosts.connect(self.__viewHostsHandle) def __viewHostsHandle(self, hosts): if hosts: diff --git a/cuegui/cuegui/ProcMonitorTree.py b/cuegui/cuegui/ProcMonitorTree.py index efbf0c86f..0f4710b53 100644 --- a/cuegui/cuegui/ProcMonitorTree.py +++ b/cuegui/cuegui/ProcMonitorTree.py @@ -23,9 +23,8 @@ from builtins import map import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -60,7 +59,7 @@ def __init__(self, parent): tip="The amount of memory used.") self.addColumn( "GPU Used", 100, id=5, - data=lambda proc: cuegui.Utils.memoryToString(proc.data.reserved_gpu), + data=lambda proc: 
cuegui.Utils.memoryToString(proc.data.reserved_gpu_memory), tip="The amount of gpu memory used.") self.addColumn( "Age", 60, id=6, @@ -85,21 +84,17 @@ def __init__(self, parent): self.__menuActions = cuegui.MenuActions.MenuActions( self, self.updateSoon, self.selectedObjects) - self.itemClicked.connect(self.__itemSingleClickedCopy) + self.itemClicked.connect(self.__itemSingleClickedCopy) # pylint: disable=no-member self.itemDoubleClicked.connect(self.__itemDoubleClickedViewLog) # Don't use the standard space bar to refresh - # pylint: disable=no-member - QtGui.qApp.request_update.connect(self.updateRequest) - # pylint: enable=no-member + self.app.request_update.connect(self.updateRequest) self.startTicksUpdate(40) # Don't start refreshing until the user sets a filter or hits refresh self.ticksWithoutUpdate = -1 - # pylint: disable=no-member - self.enableRefresh = bool(int(QtGui.qApp.settings.value("AutoRefreshMonitorProc", 1))) - # pylint: enable=no-member + self.enableRefresh = bool(int(self.app.settings.value("AutoRefreshMonitorProc", 1))) def tick(self): if self.ticksWithoutUpdate >= self.updateInterval and \ @@ -140,9 +135,7 @@ def __itemDoubleClickedViewLog(self, item, col): @param col: Column number double clicked on""" del col job_name = item.rpcObject.data.job_name - # pylint: disable=no-member - QtGui.qApp.view_object.emit(opencue.api.findJob(job_name)) - # pylint: enable=no-member + self.app.view_object.emit(opencue.api.findJob(job_name)) def clearFilters(self): """Removes all sorting and filtering to restore default state.""" diff --git a/cuegui/cuegui/ProgressDialog.py b/cuegui/cuegui/ProgressDialog.py index 399a1c64a..f9b49c453 100644 --- a/cuegui/cuegui/ProgressDialog.py +++ b/cuegui/cuegui/ProgressDialog.py @@ -23,9 +23,8 @@ from builtins import map from builtins import range -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Logger import 
cuegui.Utils @@ -57,6 +56,7 @@ def __init__(self, title, function, work, concurrent, cancelTitle, @type parent: QObject @param parent: The parent for this object""" QtWidgets.QDialog.__init__(self, parent) + self.app = cuegui.app() self.__work = work self.__function = function @@ -83,7 +83,7 @@ def __init__(self, title, function, work, concurrent, cancelTitle, self.setFixedSize(300, 100) self.setWindowTitle(title) - self.__btn_cancel.clicked.connect(self.cancel) + self.__btn_cancel.clicked.connect(self.cancel) # pylint: disable=no-member self.show() @@ -161,12 +161,9 @@ def _submitWork(self): """Submits a new unit of work to threadpool""" self.__count += 1 - if hasattr(QtGui.qApp, "threadpool"): - # pylint: disable=no-member - QtGui.qApp.threadpool.queue(self.__doWork, - self.__doneWork, - "getting data for %s" % self.__class__) - # pylint: enable=no-member + if self.app.threadpool is not None: + self.app.threadpool.queue( + self.__doWork, self.__doneWork, "getting data for %s" % self.__class__) else: logger.warning("threadpool not found, doing work in gui thread") self.__doneWork(None, self.__doWork()) diff --git a/cuegui/cuegui/Redirect.py b/cuegui/cuegui/Redirect.py index cdbaa5b35..191bfb6ad 100644 --- a/cuegui/cuegui/Redirect.py +++ b/cuegui/cuegui/Redirect.py @@ -31,14 +31,20 @@ import re import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import opencue +import cuegui.Logger import cuegui.Utils +logger = cuegui.Logger.getLogger(__file__) + +MEMORY_PATTERN = re.compile("[0-9]+(?:TB|GB|MB|KB)") +MEMORY_BTYPE = "TB|GB|MB|KB" + class ShowCombo(QtWidgets.QComboBox): """ @@ -80,7 +86,7 @@ def __init__(self, parent=None): # This is used to provide the number of allocations selected # on the button title. 
- self.__menu.triggered.connect(self.__afterClicked) + self.__menu.triggered.connect(self.__afterClicked) # pylint: disable=no-member def refresh(self): """Refreshes the full list of allocations.""" @@ -165,7 +171,7 @@ def __init__(self, show, name, parent=None): self.setMenu(self.__menu) - self.__menu.aboutToShow.connect(self.__populate_menu) + self.__menu.aboutToShow.connect(self.__populate_menu) # pylint: disable=no-member # pylint: disable=inconsistent-return-statements def __loadShow(self, show): @@ -216,8 +222,12 @@ def __init__(self, parent=None): self.__cores_spin.setRange(1, self._cfg().get('max_cores', 32)) self.__cores_spin.setValue(1) + self.__max_cores_spin = QtWidgets.QSpinBox(self) + self.__max_cores_spin.setRange(1, self._cfg().get('max_cores', 32)) + self.__max_cores_spin.setValue(32) + self.__mem_spin = QtWidgets.QDoubleSpinBox(self) - self.__mem_spin.setRange(1, self._cfg().get('max_memory', 200)) + self.__mem_spin.setRange(1, self._cfg().get('max_memory', 250)) self.__mem_spin.setDecimals(1) self.__mem_spin.setValue(4) self.__mem_spin.setSuffix("GB") @@ -227,9 +237,10 @@ def __init__(self, parent=None): self.__limit_spin.setValue(10) self.__prh_spin = QtWidgets.QDoubleSpinBox(self) - self.__prh_spin.setRange(1, self._cfg().get('max_proc_hour_cutoff', 30)) + # increase Proc Hour upper bound limit + self.__prh_spin.setRange(1, 500) self.__prh_spin.setDecimals(1) - self.__prh_spin.setValue(10) + self.__prh_spin.setValue(20) self.__prh_spin.setSuffix("PrcHrs") # Job Filters @@ -243,7 +254,7 @@ def __init__(self, parent=None): self.__clear_btn = QtWidgets.QPushButton("Clr", self) self.__group = QtWidgets.QGroupBox("Resource Filters") - self.__group_filter = QtWidgets.QGroupBox("Job Filters") + self.__groupFilter = QtWidgets.QGroupBox("Job Filters") layout1 = QtWidgets.QHBoxLayout() layout1.addWidget(self.__update_btn) @@ -257,6 +268,8 @@ def __init__(self, parent=None): layout2.addWidget(self.__alloc_filter) 
layout2.addWidget(QtWidgets.QLabel("Minimum Cores:", self)) layout2.addWidget(self.__cores_spin) + layout2.addWidget(QtWidgets.QLabel("Max Cores:", self)) + layout2.addWidget(self.__max_cores_spin) layout2.addWidget(QtWidgets.QLabel("Minimum Memory:", self)) layout2.addWidget(self.__mem_spin) layout2.addWidget(QtWidgets.QLabel("Result Limit:", self)) @@ -274,15 +287,17 @@ def __init__(self, parent=None): layout3.addWidget(self.__exclude_regex) self.__group.setLayout(layout2) - self.__group_filter.setLayout(layout3) + self.__groupFilter.setLayout(layout3) layout = QtWidgets.QVBoxLayout(self) - layout.addWidget(self.__group_filter) + layout.addWidget(self.__groupFilter) layout.addWidget(self.__group) layout.addLayout(layout1) + # pylint: disable=no-member self.__job_box.textChanged.connect(self.detect) self.__show_combo.currentIndexChanged.connect(self.showChanged) + # pylint: enable=no-member def _cfg(self): ''' @@ -333,6 +348,10 @@ def getCores(self): """Gets the core count.""" return int(self.__cores_spin.value()) + def getMaxCores(self): + """Gets the max core count.""" + return int(self.__max_cores_spin.value()) + def getMemory(self): """Gets the memory amount.""" return int(self.__mem_spin.value() * 1048576.0) @@ -391,52 +410,88 @@ class RedirectWidget(QtWidgets.QWidget): Displays a table of procs that can be selected for redirect. 
""" - HEADERS = ["Name", "Cores", "Memory", "PrcTime", "Group", "Service"] + HEADERS = ["Name", "Cores", "Memory", "PrcTime", "Group", "Service", + "Job Cores", "Pending", "LLU", "Log"] def __init__(self, parent=None): QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() self.__hosts = {} self.__controls = RedirectControls(self) self.__model = QtGui.QStandardItemModel(self) - self.__model.setColumnCount(5) + self.__model.setColumnCount(7) self.__model.setHorizontalHeaderLabels(RedirectWidget.HEADERS) + self.__proxyModel = ProxyModel(self) + self.__proxyModel.setSourceModel(self.__model) + self.__tree = QtWidgets.QTreeView(self) self.__tree.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers) - self.__tree.setModel(self.__model) + self.__tree.setSortingEnabled(True) + self.__tree.setModel(self.__proxyModel) layout = QtWidgets.QVBoxLayout(self) layout.addWidget(self.__controls) layout.addWidget(self.__tree) + # pylint: disable=no-member self.__controls.getUpdateButton().pressed.connect(self.update) self.__controls.getRedirectButton().pressed.connect(self.redirect) self.__controls.getSelectAllButton().pressed.connect(self.selectAll) self.__controls.getClearButton().pressed.connect(self.clearTarget) - def __get_selected_procs_by_alloc(self, selected_items): + self.__tree.doubleClicked.connect(self.mouseDoubleClickEvent) + self.__tree.clicked.connect(self.mousePressEvent) + # pylint: disable=no-member + + @QtCore.Slot("QModelIndex") + def mousePressEvent(self, item): + """Called when an item is clicked on. Copies selected object names to + the middle click selection clip board. 
+ """ + try: + QtWidgets.QApplication.clipboard().setText(item.data(), QtGui.QClipboard.Selection) + except AttributeError as e: + logger.info("Error item no longer available %s", e) + + @QtCore.Slot("QModelIndex") + def mouseDoubleClickEvent(self, index): + """ emit proc to Job Monitor Tree """ + attr = getattr(index, 'data', None) + if attr is not None: + try: + jobObject = opencue.api.getJobs(job=[index.data()]) + if jobObject: + if cuegui.Utils.isJob(jobObject[0]): + self.app.view_object.emit(jobObject[0]) + except opencue.exception.CueException as e: + text = ('Not able to add job to Job Monitor Tree. ' + 'Error Message:\n %s' % e) + self.__warn(text) + + def __getSelectedProcsByAlloc(self, selectedItems): """ Gathers and returns the selected procs, grouped by allocation their allocation names - @param selected_items: The selected rows to analyze - @type selected_items: list> + @param selectedItems: The selected rows to analyze + @type selectedItems: list> @return: A dictionary with the allocation names are the keys and the selected procs are the values. 
@rtype: dict """ - procs_by_alloc = {} - for item in selected_items: + procByAlloc = {} + for item in selectedItems: entry = self.__hosts.get(str(item.text())) alloc = entry.get('alloc') - alloc_procs = procs_by_alloc.get(alloc, []) - alloc_procs.extend(list(entry["procs"])) - procs_by_alloc[alloc] = alloc_procs - return procs_by_alloc + allocProcs = procByAlloc.get(alloc, []) + allocProcs.extend(list(entry["procs"])) + procByAlloc[alloc] = allocProcs + return procByAlloc def __warn(self, msg): """ @@ -450,7 +505,7 @@ def __warn(self, msg): message.setText(msg) message.exec_() - def __is_cross_show_safe(self, procs, target_show): + def __isCrossShowSafe(self, procs, targetShow): """ Determines whether or not it's safe to redirect cores from a show to another, based on user response to the warning message @@ -458,36 +513,37 @@ def __is_cross_show_safe(self, procs, target_show): @param procs: The procs to redirect @type procs: L{opencue.wrappers.proc.Proc} - @param target_show: The name of the target show - @type target_show: str + @param targetShow: The name of the target show + @type targetShow: str @return: Whether or not it's safe to redirect the given procs to the target show @rtype: bool """ - xshow_jobs = [proc.getJob() for proc in procs if not - proc.getJob().show() == target_show] - if not xshow_jobs: + xshowJobs = [proc.getJob() for proc in procs if + proc.getJob().show() != targetShow] + + if not xshowJobs: return True # No cross-show procs msg = ('Redirecting the selected procs to the target will result ' 'in killing frames on other show/s.\nDo you have approval ' 'from (%s) to redirect cores from the following jobs?' 
- % ', '.join([j.show().upper() for j in xshow_jobs])) + % ', '.join([j.show().upper() for j in xshowJobs])) return cuegui.Utils.questionBoxYesNo(parent=self, title="Cross-show Redirect!", text=msg, items=[j.name() for j - in xshow_jobs]) + in xshowJobs]) - def __is_burst_safe(self, alloc, procs, show): + def __isBurstSafe(self, alloc, procs, show): """ Determines whether or not it's safe to redirect cores by checking the burst target show burst and the number of cores being redirected. If there's a number of cores that may not be possible to pick up by the target show, that number should be lower than the threshold set in the - cue_resources config. + cuegui.yaml `resources` config. @param alloc: The name of the allocation for the cores @type alloc: str @@ -507,33 +563,34 @@ def __is_burst_safe(self, alloc, procs, show): # pylint: disable=protected-access cfg = self.__controls._cfg() # pylint: enable=protected-access - wc_ok = cfg.get('wasted_cores_threshold', 100) - if wc_ok < 0: + wcThreshold = cfg.get('redirect_wasted_cores_threshold', 100) + if wcThreshold < 0: return True - show_obj = opencue.api.findShow(show) - show_subs = dict((s.data.name.rstrip('.%s' % show), s) - for s in show_obj.getSubscriptions() + showObj = opencue.api.findShow(show) + stripShowRegex = '\\.%s' % show + showSubs = dict((re.sub(stripShowRegex, "", s.data.name), s) + for s in showObj.getSubscriptions() if s.data.allocation_name in alloc) try: - procs_to_burst = (show_subs.get(alloc).data.burst - - show_subs.get(alloc).data.reserved_cores) - procs_to_redirect = int(sum([p.data.reserved_cores + procsBurst = (showSubs.get(alloc).data.burst - + showSubs.get(alloc).data.reserved_cores) + procsRedirect = int(sum([p.data.reserved_cores for p in procs])) - wasted_cores = int(procs_to_redirect - procs_to_burst) - if wasted_cores <= wc_ok: + wastedCores = int(procsRedirect - procsBurst) + if wastedCores <= wcThreshold: return True # wasted cores won't exceed threshold - status = ('at burst' if 
procs_to_burst == 0 else + status = ('at burst' if procsBurst == 0 else '%d cores %s burst' - % (procs_to_burst, - 'below' if procs_to_burst > 0 else 'above')) + % (abs(procsBurst), + 'below' if procsBurst > 0 else 'above')) msg = ('Target show\'s %s subscription is %s. Redirecting ' 'the selected procs will kill frames to free up %d ' 'cores. You will be killing %d cores ' 'that the target show will not be able to use. ' 'Do you want to redirect anyway?' - % (alloc, status, int(procs_to_redirect), wasted_cores)) + % (alloc, status, int(procsRedirect), wastedCores)) return cuegui.Utils.questionBoxYesNo(parent=self, title=status.title(), text=msg) @@ -543,6 +600,67 @@ def __is_burst_safe(self, alloc, procs, show): % (alloc, show, show, alloc)) return False + @classmethod + def __isAllowed(cls, procs, targetJob): + """Checks if the follow criteria are met to allow redirect to target job: + - if source/target job have pending frames + - if target job hasn't reached maximum cores + - check if adding frames will push target job over it's max cores + + @param procs: The (source) procs to be redirected + @type procs: L{opencue.wrappers.proc.Proc} + @param targetJob: target job to move procs to + @return: true/false of whether criteria are met + and error message if any + @rtype: tuple(boolean, string) + """ + errMsg = "" + allowed = False + + # Case 1: Check if target job hasn't reached it's max cores + if targetJob.coresReserved() < targetJob.maxCores(): + allowed = True + errMsg = "Target job %s cores reserved %s \ + reached max cores %s " %(targetJob.name(), + targetJob.coresReserved(), + targetJob.maxCores()) + + # Case 2: 1. Check target job for pending frames + # 2. 
Check source procs for pending frames + if allowed and targetJob.waitingFrames() <= 0: + allowed = False + errMsg = "Target job %s has no pending (waiting) frames" % targetJob.name() + + if allowed: + for proc in procs: + job = proc.getJob() + if job.waitingFrames() <= 0: + allowed = False + errMsg = "Source job %s has no pending (waiting) frames" % job.name() + break + + # Case 3: Check if each proc or summed up procs will + # push targetJob over it's max cores + if allowed: + totalProcCores = 0 + for proc in procs: + totalProcCores += proc.coresReserved() + msg = ('proc cores reserved of %s will push %s ' + 'over it\'s max cores limit of %s') + if (proc.coresReserved() + targetJob.coresReserved()) > targetJob.maxCores() or \ + (totalProcCores + targetJob.coresReserved()) > targetJob.maxCores(): + errMsg = msg % (str(proc.coresReserved() + targetJob.coresReserved()), + targetJob.name(), str(targetJob.maxCores())) + allowed = False + break + + if totalProcCores > targetJob.maxCores(): + errMsg = msg % (totalProcCores, targetJob.name(), + str(targetJob.maxCores())) + allowed = False + + return allowed, errMsg + def redirect(self): """ Redirect the selected procs to the target job, after running a few @@ -554,37 +672,58 @@ def redirect(self): # Get selected items items = [self.__model.item(row) for row in range(0, self.__model.rowCount())] - selected_items = [item for item in items + selectedItems = [item for item in items if item.checkState() == QtCore.Qt.Checked] - if not selected_items: # Nothing selected, exit + if not selectedItems: # Nothing selected, exit self.__warn('You have not selected anything to redirect.') return # Get the Target Job - job_name = self.__controls.getJob() - if not job_name: # No target job, exit + jobName = self.__controls.getJob() + if not jobName: # No target job, exit self.__warn('You must have a job name selected.') return job = None try: - job = opencue.api.findJob(job_name) + job = opencue.api.findJob(jobName) except 
opencue.EntityNotFoundException: # Target job finished, exit - self.__warn_and_stop('The job you\'re trying to redirect to ' - 'appears to be no longer in the cue!') + text = ('The job you\'re trying to redirect to ' + 'appears to be no longer in the cue!') + cuegui.Utils.showErrorMessageBox(text, title="ERROR!") + return # Gather Selected Procs - procs_by_alloc = self.__get_selected_procs_by_alloc(selected_items) - show_name = job.show() - for alloc, procs in list(procs_by_alloc.items()): - if not self.__is_cross_show_safe(procs, show_name): # Cross-show - return - if not self.__is_burst_safe(alloc, procs, show_name): # At burst - return + procByAlloc = self.__getSelectedProcsByAlloc(selectedItems) + showName = job.show() + # Check if safe to redirect + # 1. don't redirect if target job's reserved cores reached job max + # 2. at burst + # 3. cross-show safe + warning = "" + try: + for alloc, procs in list(procByAlloc.items()): + if not self.__isCrossShowSafe(procs, showName): + warning = "Is not cross show safe" + break + if not self.__isBurstSafe(alloc, procs, showName): + warning = "Is not burst safe" + break + allowed, errMsg = self.__isAllowed(procs, targetJob=job) + if not allowed: + warning = errMsg + break + except opencue.exception.CueException as e: + warning = str(e) + + if warning: + warning = "Failed to Redirect:\n" + warning + self.__warn(warning) + return # Redirect errors = [] - for item in selected_items: + for item in selectedItems: entry = self.__hosts.get(str(item.text())) procs = entry["procs"] # pylint: disable=broad-except @@ -597,7 +736,12 @@ def redirect(self): item.setEnabled(False) if errors: # Something went wrong! 
- self.__warn('Some procs failed to redirect.') + stackTrace = "\n".join(errors) + text = 'Some procs failed to redirect with errors:\n' + stackTrace + self.__warn(text) + else: + text = 'Redirect To Job Request sent for:\n' + job.name() + self.__warn(text) def selectAll(self): """ @@ -614,15 +758,16 @@ def clearTarget(self): self.__controls.getJobBox().clear() def update(self): + """ Update the model """ self.__model.clear() self.__model.setHorizontalHeaderLabels(RedirectWidget.HEADERS) hosts = { } ok = 0 - service_filter = self.__controls.getRequiredService() - group_filter = self.__controls.getIncludedGroups() - job_regex = self.__controls.getJobNameExcludeRegex() + serviceFilter = self.__controls.getRequiredService() + groupFilter = self.__controls.getIncludedGroups() + jobRegexFilter = self.__controls.getJobNameExcludeRegex() show = self.__controls.getShow() alloc = self.__controls.getAllocFilter() @@ -650,19 +795,23 @@ def update(self): if ok >= self.__controls.getLimit(): break - if job_regex: - if re.match(job_regex, proc.data.job_name): + if jobRegexFilter: + if re.match(jobRegexFilter, proc.data.job_name): continue - if service_filter: - if service_filter not in proc.data.services: + if serviceFilter: + if serviceFilter not in proc.data.services: continue - if group_filter: - if proc.data.group_name not in group_filter: + if groupFilter: + if proc.data.group_name not in groupFilter: continue name = proc.data.name.split("/")[0] + lluTime = cuegui.Utils.getLLU(proc) + job = proc.getJob() + logLines = cuegui.Utils.getLastLine(proc.data.log_path) or "" + if name not in hosts: cue_host = opencue.api.findHost(name) hosts[name] = { @@ -680,10 +829,15 @@ def update(self): host["procs"].append(proc) host["mem"] = host["mem"] + proc.data.reserved_memory - host["cores"] = host["cores"] + proc.data.reserved_cores + host["cores"] = int(host["cores"]) + int(proc.data.reserved_cores) host["time"] = host["time"] + (int(time.time()) - proc.data.dispatch_time) + 
host["llu"] = cuegui.Utils.numFormat(lluTime, "t") + host["log"] = logLines + host['job_cores'] = job.data.job_stats.reserved_cores + host['waiting'] = job.pendingFrames() or 0 if host["cores"] >= self.__controls.getCores() and \ + host["cores"] <= self.__controls.getMaxCores() and \ host["mem"] >= self.__controls.getMemory() and \ host["time"] < self.__controls.getCutoffTime(): self.__addHost(host) @@ -696,6 +850,7 @@ def update(self): self.__hosts = hosts def __addHost(self, entry): + """ Add Host to ProxyModel """ host = entry["host"] procs = entry["procs"] rtime = entry["time"] @@ -703,10 +858,10 @@ def __addHost(self, entry): checkbox = QtGui.QStandardItem(host.data.name) checkbox.setCheckable(True) - self.__model.appendRow([checkbox, - QtGui.QStandardItem(str(entry["cores"])), - QtGui.QStandardItem("%0.2fGB" % (entry["mem"] / 1048576.0)), - QtGui.QStandardItem(cuegui.Utils.secondsToHHMMSS(rtime))]) + self.__proxyModel.sourceModel().appendRow([checkbox, + QtGui.QStandardItem(str(entry["cores"])), + QtGui.QStandardItem("%0.2fGB" % (entry["mem"] / 1048576.0)), + QtGui.QStandardItem(cuegui.Utils.secondsToHHMMSS(rtime))]) for proc in procs: checkbox.appendRow([QtGui.QStandardItem(proc.data.job_name), @@ -714,9 +869,47 @@ def __addHost(self, entry): QtGui.QStandardItem( "%0.2fGB" % (proc.data.reserved_memory / 1048576.0)), QtGui.QStandardItem(cuegui.Utils.secondsToHHMMSS(time.time() - - proc.data.dispatch_time)), + proc.data.dispatch_time)), QtGui.QStandardItem(proc.data.group_name), - QtGui.QStandardItem(",".join(proc.data.services))]) + QtGui.QStandardItem(",".join(proc.data.services)), + QtGui.QStandardItem(str(entry["job_cores"])), + QtGui.QStandardItem(str(entry["waiting"])), + QtGui.QStandardItem(str(entry["llu"])), + QtGui.QStandardItem(str(entry["log"])) + ]) + + proxy = self.__tree.model() + model = proxy.sourceModel() + for row in range(model.rowCount()): + index = model.index(row, 0) + self.__tree.expand(proxy.mapFromSource(index)) + 
self.__tree.resizeColumnToContents(0) + self.__tree.setWordWrap(True) + + +class ProxyModel(QtCore.QSortFilterProxyModel): + """Provides support for sorting data passed between the model and the tree view""" - self.__tree.setExpanded(self.__model.indexFromItem(checkbox), True) - self.__tree.resizeColumnToContents(0) + def lessThan(self, left, right): + + leftData = self.sourceModel().data(left) + rightData = self.sourceModel().data(right) + + try: + return int(leftData) < int(rightData) + except ValueError: + if re.search(MEMORY_PATTERN, leftData): + # strip memory type to compare + leftDataBtype = re.search(MEMORY_BTYPE, leftData).group() + leftDataMem = re.sub(MEMORY_BTYPE, "", leftData) + leftBtyes = cuegui.Utils.byteConversion(float(leftDataMem), leftDataBtype) + + rightDataBtype = re.search(MEMORY_BTYPE, rightData).group() + rightDataMem = re.sub(MEMORY_BTYPE, "", rightData) + rightBytes = cuegui.Utils.byteConversion(float(rightDataMem), rightDataBtype) + return float(leftBtyes) < float(rightBytes) + + return leftData < rightData + + except TypeError: + return leftData < rightData diff --git a/cuegui/cuegui/ServiceDialog.py b/cuegui/cuegui/ServiceDialog.py index 04f1b1a89..1fac432af 100644 --- a/cuegui/cuegui/ServiceDialog.py +++ b/cuegui/cuegui/ServiceDialog.py @@ -23,14 +23,15 @@ from builtins import str from builtins import range -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue import cuegui.Constants import cuegui.TagsWidget import cuegui.Utils +from opencue.wrappers.service import ServiceOverride class ServiceForm(QtWidgets.QWidget): @@ -42,7 +43,9 @@ def __init__(self, parent=None): QtWidgets.QWidget.__init__(self, parent) self.__service = None - self.gpu_max_mb = 2 * 1024 + # NOTE: As min_gpu value will be passed on later in KB, its max value in + # *KiloBytes* should not be higher than Int32(2147483647). 
+ self.gpu_max_mb = int(2147483647 / 1024) self.gpu_min_mb = 0 self.gpu_tick_mb = 256 @@ -59,17 +62,20 @@ def __init__(self, parent=None): self.min_memory = QtWidgets.QSpinBox(self) self.min_memory.setRange(512, int(self._cfg().get('max_memory', 48)) * 1024) self.min_memory.setValue(3276) - self.min_gpu = QtWidgets.QSpinBox(self) - self.min_gpu.setRange(self.gpu_min_mb, self.gpu_max_mb) - self.min_gpu.setValue(0) - self.min_gpu.setSingleStep(self.gpu_tick_mb) - self.min_gpu.setSuffix(" MB") + self.min_gpu_memory = QtWidgets.QSpinBox(self) + self.min_gpu_memory.setRange(self.gpu_min_mb, self.gpu_max_mb) + self.min_gpu_memory.setValue(self.gpu_min_mb) + self.min_gpu_memory.setSingleStep(self.gpu_tick_mb) + self.min_gpu_memory.setSuffix(" MB") self.timeout = QtWidgets.QSpinBox(self) self.timeout.setRange(0, 4320) self.timeout.setValue(0) self.timeout_llu = QtWidgets.QSpinBox(self) self.timeout_llu.setRange(0, 4320) self.timeout_llu.setValue(0) + self.min_memory_increase = QtWidgets.QSpinBox(self) + self.min_memory_increase.setRange(0, int(self._cfg().get('max_memory', 48)) * 1024) + self.min_memory_increase.setValue(0) layout = QtWidgets.QGridLayout(self) layout.addWidget(QtWidgets.QLabel("Name:", self), 0, 0) layout.addWidget(self.name, 0, 1) @@ -82,22 +88,24 @@ def __init__(self, parent=None): layout.addWidget(QtWidgets.QLabel("Min Memory MB:", self), 4, 0) layout.addWidget(self.min_memory, 4, 1) layout.addWidget(QtWidgets.QLabel("Min Gpu Memory MB:", self), 5, 0) - layout.addWidget(self.min_gpu, 5, 1) + layout.addWidget(self.min_gpu_memory, 5, 1) layout.addWidget(QtWidgets.QLabel("Timeout (in minutes):", self), 6, 0) layout.addWidget(self.timeout, 6, 1) layout.addWidget(QtWidgets.QLabel("Timeout LLU (in minutes):", self), 7, 0) layout.addWidget(self.timeout_llu, 7, 1) + layout.addWidget(QtWidgets.QLabel("OOM Increase MB:", self), 8, 0) + layout.addWidget(self.min_memory_increase, 8, 1) self._tags_w = 
cuegui.TagsWidget.TagsWidget(allowed_tags=cuegui.Constants.ALLOWED_TAGS) - layout.addWidget(self._tags_w, 8, 0, 1, 2) + layout.addWidget(self._tags_w, 9, 0, 1, 2) self.__buttons = QtWidgets.QDialogButtonBox(QtWidgets.QDialogButtonBox.Save, QtCore.Qt.Horizontal, self) self.__buttons.setDisabled(True) - layout.addWidget(self.__buttons, 9, 1) + layout.addWidget(self.__buttons, 10, 1) - self.__buttons.accepted.connect(self.save) + self.__buttons.accepted.connect(self.save) # pylint: disable=no-member def _cfg(self): """ @@ -115,16 +123,19 @@ def setService(self, service): """ Update the form with data from the given service. """ + self.__service = service self.__buttons.setDisabled(False) - self.name.setText(service.data.name) - self.threadable.setChecked(service.data.threadable) - self.min_cores.setValue(service.data.min_cores) - self.max_cores.setValue(service.data.max_cores) - self.min_memory.setValue(service.data.min_memory // 1024) - self.min_gpu.setValue(service.data.min_gpu // 1024) - self._tags_w.set_tags(service.data.tags) + self.name.setText(service.name()) + self.threadable.setChecked(service.threadable()) + self.min_cores.setValue(service.minCores()) + self.max_cores.setValue(service.maxCores()) + self.min_gpu_memory.setValue(service.data.min_gpu_memory // 1024) + self.min_memory.setValue(service.minMemory() // 1024) + self._tags_w.set_tags(service.tags()) self.timeout.setValue(service.data.timeout) self.timeout_llu.setValue(service.data.timeout_llu) + self.min_memory_increase.setValue(service.data.min_memory_increase // 1024) + self.__service = service.data def new(self): """ @@ -138,9 +149,10 @@ def new(self): self.min_cores.setValue(100) self.max_cores.setValue(100) self.min_memory.setValue(3276) - self.min_gpu.setValue(0) + self.min_gpu_memory.setValue(self.gpu_min_mb) self.timeout.setValue(0) self.timeout_llu.setValue(0) + self.min_memory_increase.setValue(2048) self._tags_w.set_tags(['general']) def save(self): @@ -157,17 +169,23 @@ def save(self): 
QtWidgets.QMessageBox.critical(self, "Error", "The service name must alphanumeric.") return + if self.min_memory_increase.value() <= 0: + QtWidgets.QMessageBox.critical(self, "Error", + "The minimum memory increase must be more than 0 MB") + return + service = opencue.wrappers.service.Service() if self.__service: - service.data.id = self.__service.id + service.data.id = self.__service.data.id service.setName(str(self.name.text())) service.setThreadable(self.threadable.isChecked()) service.setMinCores(self.min_cores.value()) service.setMaxCores(self.max_cores.value()) service.setMinMemory(self.min_memory.value() * 1024) - service.setMinGpu(self.min_gpu.value() * 1024) + service.setMinGpuMemory(self.min_gpu_memory.value() * 1024) service.setTimeout(self.timeout.value()) service.setTimeoutLLU(self.timeout_llu.value()) + service.setMinMemoryIncrease(self.min_memory_increase.value() * 1024) service.setTags(self._tags_w.get_tags()) self.saved.emit(service) @@ -204,10 +222,12 @@ def __init__(self, show, parent=None): self.__btn_layout.addStretch() layout.addLayout(self.__btn_layout) + # pylint: disable=no-member self.__btn_new.clicked.connect(self.newService) self.__btn_del.clicked.connect(self.delService) self.__form.saved.connect(self.saved) self.__service_list.currentItemChanged.connect(self.selected) + # pylint: enable=no-member self.refresh() self.__service_list.setCurrentRow(0, QtCore.QItemSelectionModel.Select) @@ -244,11 +264,16 @@ def saved(self, service): if self.__new_service: if self.__show: - self.__show.createServiceOverride(service.data) + serviceOverride = self.__show.createServiceOverride(service.data) else: opencue.api.createService(service.data) else: - service.update() + if self.__show: + serviceOverride = ServiceOverride(service) + serviceOverride.id = service.id() + serviceOverride.update() + else: + service.update() self.refresh() self.__new_service = False @@ -325,4 +350,4 @@ def __init__(self, show, parent=None): self.setWindowTitle("Services") 
self.setAttribute(QtCore.Qt.WA_DeleteOnClose) self.setSizeGripEnabled(True) - self.resize(620, 420) + self.resize(700, 700) diff --git a/cuegui/cuegui/ShowDialog.py b/cuegui/cuegui/ShowDialog.py index 865e75aa6..f0376fcf5 100644 --- a/cuegui/cuegui/ShowDialog.py +++ b/cuegui/cuegui/ShowDialog.py @@ -22,8 +22,8 @@ from builtins import str -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Utils @@ -56,8 +56,10 @@ def __init__(self, show, parent=None): self.layout().addWidget(self.__btnSave, 5, 1) self.layout().addWidget(self.__btnClose, 5, 2) + # pylint: disable=no-member self.__btnSave.clicked.connect(self.__saveChanges) self.__btnClose.clicked.connect(self.__closeDialog) + # pylint: enable=no-member def __createSettingsPage(self): """Settings Page""" @@ -72,7 +74,7 @@ def __createSettingsPage(self): ctrl.setValue(self.__show.data.default_max_cores) page.layout().addWidget(ctrl, 0, 0) page.layout().addWidget(label, 0, 1, 1, 4) - ctrl.valueChanged.connect(self.__valueChanged) + ctrl.valueChanged.connect(self.__valueChanged) # pylint: disable=no-member self.__defaultMaxCores = ctrl label = QtWidgets.QLabel("Default minimum cores", self) @@ -82,7 +84,7 @@ def __createSettingsPage(self): ctrl.setValue(self.__show.data.default_min_cores) page.layout().addWidget(ctrl, 1, 0) page.layout().addWidget(label, 1, 1, 1, 4) - ctrl.valueChanged.connect(self.__valueChanged) + ctrl.valueChanged.connect(self.__valueChanged) # pylint: disable=no-member self.__defaultMinCores = ctrl label = QtWidgets.QLabel("Comment Notification Email", self) @@ -90,7 +92,7 @@ def __createSettingsPage(self): text.setText(self.__show.data.comment_email) page.layout().addWidget(text, 2, 0) page.layout().addWidget(label, 2, 1, 1, 4) - text.textChanged.connect(self.__valueChanged) + text.textChanged.connect(self.__valueChanged) # pylint: disable=no-member self.__show_email = text return page @@ -105,7 +107,7 @@ def 
__createBookingPage(self): ctrl.setChecked(self.__show.data.booking_enabled) page.layout().addWidget(ctrl, 0, 0) page.layout().addWidget(label, 0, 1, 1, 4) - ctrl.stateChanged.connect(self.__valueChanged) + ctrl.stateChanged.connect(self.__valueChanged) # pylint: disable=no-member self.__bookingEnabled = ctrl label = QtWidgets.QLabel("Enable dispatch", self) @@ -113,7 +115,7 @@ def __createBookingPage(self): ctrl.setChecked(self.__show.data.dispatch_enabled) page.layout().addWidget(ctrl, 1, 0) page.layout().addWidget(label, 1, 1, 1, 4) - ctrl.stateChanged.connect(self.__valueChanged) + ctrl.stateChanged.connect(self.__valueChanged) # pylint: disable=no-member self.__dispatchEnabled = ctrl return page diff --git a/cuegui/cuegui/ShowsWidget.py b/cuegui/cuegui/ShowsWidget.py index b0343834f..20b62ae73 100644 --- a/cuegui/cuegui/ShowsWidget.py +++ b/cuegui/cuegui/ShowsWidget.py @@ -20,9 +20,8 @@ from __future__ import print_function from __future__ import division -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -62,10 +61,8 @@ def __init__(self, parent): self.__menuActions = cuegui.MenuActions.MenuActions( self, self.updateSoon, self.selectedObjects) - self.itemClicked.connect(self.__itemSingleClickedToDouble) - # pylint: disable=no-member - QtGui.qApp.facility_changed.connect(self.__facilityChanged) - # pylint: enable=no-member + self.itemClicked.connect(self.__itemSingleClickedToDouble) # pylint: disable=no-member + self.app.facility_changed.connect(self.__facilityChanged) self.setUpdateInterval(60) diff --git a/cuegui/cuegui/SplashWindow.py b/cuegui/cuegui/SplashWindow.py index 29c268c7a..57075a261 100644 --- a/cuegui/cuegui/SplashWindow.py +++ b/cuegui/cuegui/SplashWindow.py @@ -24,9 +24,9 @@ import os import time -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy 
import QtWidgets __all__ = ["SplashWindow"] diff --git a/cuegui/cuegui/Style.py b/cuegui/cuegui/Style.py index 3f25bbed0..95acea64f 100644 --- a/cuegui/cuegui/Style.py +++ b/cuegui/cuegui/Style.py @@ -22,7 +22,9 @@ import importlib -from PySide2 import QtGui +from qtpy import QtGui + +import cuegui DEFAULT_COLOR = "DarkPalette" @@ -56,16 +58,12 @@ def setFont(font): """Sets the application font.""" global Font Font = font - # pylint: disable=no-member - QtGui.qApp.setFont(font) - # pylint: enable=no-member + cuegui.app().setFont(font) def init(): """Initializes the global style settings.""" - # pylint: disable=no-member - settings = QtGui.qApp.settings - # pylint: enable=no-member + settings = cuegui.app().settings loadColorTheme(settings.value("Style/colorTheme", DEFAULT_COLOR)) setIconTheme(settings.value("Style/iconTheme", DEFAULT_ICON)) diff --git a/cuegui/cuegui/SubscriptionGraphWidget.py b/cuegui/cuegui/SubscriptionGraphWidget.py index 2e122328c..b17004d11 100644 --- a/cuegui/cuegui/SubscriptionGraphWidget.py +++ b/cuegui/cuegui/SubscriptionGraphWidget.py @@ -20,8 +20,8 @@ import opencue -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.AbstractTreeWidget import cuegui.AbstractWidgetItem @@ -43,7 +43,7 @@ def __init__(self, parent): self.__showMenuActions = {} self.__subBars = [] self.__timer = QtCore.QTimer(self) - self.__timer.timeout.connect(self.update_data) + self.__timer.timeout.connect(self.update_data) # pylint: disable=no-member self.__timer.setInterval(1000 * 5) widget = QtWidgets.QWidget() @@ -57,7 +57,7 @@ def __init__(self, parent): showMenuBtn = QtWidgets.QPushButton(" Shows") showMenuBtn.setFixedWidth(100) - showMenuBtn.pressed.connect(self.__showMenuCheck) + showMenuBtn.pressed.connect(self.__showMenuCheck) # pylint: disable=no-member self.__showMenu = QtWidgets.QMenu(self) showMenuBtn.setMenu(self.__showMenu) @@ -65,7 +65,7 @@ def __init__(self, parent): 
showMenuBtn.setFocusPolicy(QtCore.Qt.NoFocus) self.__showMenu.setFont(cuegui.Constants.STANDARD_FONT) - self.__showMenu.triggered.connect(self.__showMenuHandle) + self.__showMenu.triggered.connect(self.__showMenuHandle) # pylint: disable=no-member layout = QtWidgets.QVBoxLayout(self) layout.addWidget(showMenuBtn) @@ -228,7 +228,7 @@ def contextMenuEvent(self, e): menu.addSeparator() if self.__show: new_action = QtWidgets.QAction('Add new subscription', self) - new_action.triggered.connect(self.createSubscription) + new_action.triggered.connect(self.createSubscription) # pylint: disable=no-member menu.addAction(new_action) menu.exec_(QtCore.QPoint(e.globalX(),e.globalY())) diff --git a/cuegui/cuegui/SubscriptionsWidget.py b/cuegui/cuegui/SubscriptionsWidget.py index 3acb97921..eb474e074 100644 --- a/cuegui/cuegui/SubscriptionsWidget.py +++ b/cuegui/cuegui/SubscriptionsWidget.py @@ -24,9 +24,8 @@ import opencue -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.AbstractTreeWidget import cuegui.AbstractWidgetItem @@ -41,6 +40,7 @@ class SubscriptionsWidget(QtWidgets.QWidget): def __init__(self, parent): QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() self.__show = None self.__shows = None @@ -64,13 +64,13 @@ def __init__(self, parent): layout.addWidget(self.__btnAddSubscription, 0, 3) layout.addWidget(self.__monitorSubscriptions, 2, 0, 3, 4) + # pylint: disable=no-member self.__btnShowProperties.clicked.connect(self.__showProperties) self.__btnAddSubscription.clicked.connect(self.__addSubscription) self.__comboShows.currentIndexChanged.connect(self.setShow) - # pylint: disable=no-member - QtGui.qApp.view_object.connect(self.setShow) - QtGui.qApp.facility_changed.connect(self.changeFacility) # pylint: enable=no-member + self.app.view_object.connect(self.setShow) + self.app.facility_changed.connect(self.changeFacility) self.__menuActions = 
cuegui.MenuActions.MenuActions( self, self.updateSoon, self.selectedObjects) diff --git a/cuegui/cuegui/TagsWidget.py b/cuegui/cuegui/TagsWidget.py index cde10b910..548ba2b53 100644 --- a/cuegui/cuegui/TagsWidget.py +++ b/cuegui/cuegui/TagsWidget.py @@ -23,7 +23,7 @@ from builtins import str import re -from PySide2 import QtWidgets +from qtpy import QtWidgets import cuegui.AbstractDialog import cuegui.Constants @@ -61,7 +61,7 @@ def __init__(self, allowed_tags=None, parent=None): self.__enable_custom = QtWidgets.QCheckBox('Custom Tags', self) self.__custom = QtWidgets.QLineEdit(self) self.__custom.setDisabled(True) - self.__enable_custom.toggled.connect(self.toggleCustom) + self.__enable_custom.toggled.connect(self.toggleCustom) # pylint: disable=no-member layout.addWidget(self.__enable_custom) layout.addWidget(self.__custom) layout.setContentsMargins(0, 0, 0, 0) diff --git a/cuegui/cuegui/TasksDialog.py b/cuegui/cuegui/TasksDialog.py index f624fcf6e..2f820d76c 100644 --- a/cuegui/cuegui/TasksDialog.py +++ b/cuegui/cuegui/TasksDialog.py @@ -23,8 +23,8 @@ from builtins import map from builtins import str -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue.exception @@ -76,12 +76,14 @@ def __init__(self, show, parent=None): glayout.addWidget(self.__btnRefresh, 4, 1) glayout.addWidget(self.__btnDone, 4, 2) + # pylint: disable=no-member self.__btnMinCores.clicked.connect(self.setMinCores) self.__checkManaged.clicked.connect(self.setManaged) self.__btnAddTask.clicked.connect(self.__tasks.createTask) self.__btnRefresh.clicked.connect(self.refresh) self.__comboDepartments.currentIndexChanged.connect(self.setDepartment) self.__btnDone.clicked.connect(self.accept) + # pylint: enable=no-member self.getDepartments() diff --git a/cuegui/cuegui/TextEditDialog.py b/cuegui/cuegui/TextEditDialog.py index d6a14526d..a45687d92 100644 --- a/cuegui/cuegui/TextEditDialog.py +++ b/cuegui/cuegui/TextEditDialog.py 
@@ -20,8 +20,8 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets class TextEditDialog(QtWidgets.QDialog): @@ -57,8 +57,10 @@ def __init__(self, title, text, default="", parent=None): self.setMaximumSize(400,300) self.setWindowTitle(title) + # pylint: disable=no-member __btn_accept.clicked.connect(self.accept) __btn_cancel.clicked.connect(self.reject) + # pylint: enable=no-member self.__textEdit.setText(default) self.__textEdit.setFocus(QtCore.Qt.OtherFocusReason) diff --git a/cuegui/cuegui/ThreadPool.py b/cuegui/cuegui/ThreadPool.py index 73d791d50..fa6e6c412 100644 --- a/cuegui/cuegui/ThreadPool.py +++ b/cuegui/cuegui/ThreadPool.py @@ -49,8 +49,7 @@ def someWorkCallback(work, result): from builtins import range import os -from PySide2 import QtCore -from PySide2 import QtGui +from qtpy import QtCore import cuegui.Logger @@ -74,6 +73,7 @@ class ThreadPool(QtCore.QObject): def __init__(self, num_threads, max_queue=20, parent=None): QtCore.QObject.__init__(self, parent=parent) + self.app = cuegui.app() self.__threads = [] self.__started = False self.__max_queue = max_queue @@ -90,9 +90,7 @@ def start(self): self.__started = True for i in range(0, self.__num_threads): thread = ThreadPool.WorkerThread(i, self) - # pylint: disable=no-member - QtGui.qApp.threads.append(thread) - # pylint: enable=no-member + self.app.threads.append(thread) self.__threads.append(thread) self.__threads[i].start() self.__threads[i].workComplete.connect(self.runCallback, diff --git a/cuegui/cuegui/UnbookDialog.py b/cuegui/cuegui/UnbookDialog.py index e36d3e6ac..a661faa93 100644 --- a/cuegui/cuegui/UnbookDialog.py +++ b/cuegui/cuegui/UnbookDialog.py @@ -25,8 +25,9 @@ from builtins import object import re -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets +import six import opencue @@ -132,6 +133,7 @@ 
def __createRangeBox(self, layout, name, units, max_frame): __layout.addWidget(__maxLabel, 0, 4) # Setting the minimum should disable the right hand side of the range + # pylint: disable=no-member __lessThan.toggled.connect(__min.setDisabled) __lessThan.toggled.connect(__toLabel.setDisabled) __lessThan.toggled.connect(__minLabel.setDisabled) @@ -140,6 +142,7 @@ def __createRangeBox(self, layout, name, units, max_frame): __moreThan.toggled.connect(__max.setDisabled) __moreThan.toggled.connect(__toLabel.setDisabled) __moreThan.toggled.connect(__maxLabel.setDisabled) + # pylint: enable=no-member layout.addWidget(__group) @@ -163,22 +166,23 @@ def _convert(val): return int(convert(float(val))) if isinstance(mixed, (float, int)): - result = opencue.api.criterion_pb2.GreaterThanIntegerSearchCriterion(_convert(mixed)) - elif isinstance(mixed, str): + result = opencue.api.criterion_pb2.GreaterThanIntegerSearchCriterion( + value=_convert(mixed)) + elif isinstance(mixed, six.string_types): if mixed.startswith("gt"): result = opencue.api.criterion_pb2.GreaterThanIntegerSearchCriterion( - _convert(mixed[2:])) + value=_convert(mixed[2:])) elif mixed.startswith("lt"): result = opencue.api.criterion_pb2.LessThanIntegerSearchCriterion( - _convert(mixed[2:])) + value=_convert(mixed[2:])) elif mixed.find("-") > -1: min_frame, max_frame = mixed.split("-", 1) result = opencue.api.criterion_pb2.InRangeIntegerSearchCriterion( - _convert(min_frame), _convert(max_frame)) + min=_convert(min_frame), max=_convert(max_frame)) else: try: result = opencue.api.criterion_pb2.GreaterThanIntegerSearchCriterion( - _convert(mixed)) + value=_convert(mixed)) except ValueError: raise Exception("invalid int search input value: " + str(mixed)) elif issubclass(mixed.__class__, opencue.api.criterion_pb2.EqualsIntegerSearchCriterion): @@ -195,15 +199,17 @@ def accept(self): self.close() procSearch = opencue.search.ProcSearch() - procSearch.maxResults = [int(self.__amount.value())] - procSearch.jobs = 
self.__jobs - procSearch.allocs = [str(checkedBox.text()) for checkedBox in self.__matrix.checkedBoxes()] + procSearch.options['maxResults'] = [int(self.__amount.value())] + procSearch.options['jobs'] = self.__jobs + procSearch.options['allocs'] = [ + str(checkedBox.text()) for checkedBox in self.__matrix.checkedBoxes()] memoryRange = self.__memoryRangeBox.result() if memoryRange: - procSearch.memoryRange = self.handleIntCriterion(memoryRange, lambda mb: (mb*1024)) + procSearch.options['memoryRange'] = self.handleIntCriterion( + memoryRange, lambda mb: (mb*1024)) runtimeRange = self.__runtimeRangeBox.result() if runtimeRange: - procSearch.durationRange = self.handleIntCriterion( + procSearch.options['durationRange'] = self.handleIntCriterion( runtimeRange, lambda rangeMin: (rangeMin*60)) if self.__redirect.isChecked(): @@ -253,7 +259,7 @@ def accept(self): group = groups[str(group)] if job or group: - procs = opencue.api.getProcs(procSearch) + procs = opencue.api.getProcs(**procSearch.options) kill = self.__kill.isChecked() amount = 0 @@ -277,7 +283,7 @@ def accept(self): if dialog.result(): self.close() else: - procs = opencue.api.getProcs(procSearch) + procs = opencue.api.getProcs(**procSearch.options) amount = 0 for proc in procs: try: @@ -341,7 +347,7 @@ def __init__( self.__filter = QtWidgets.QLineEdit("", self) self.layout().addWidget(self.__filter, 2, 0) - self.__filter.textChanged.connect(self.filterJobs) + self.__filter.textChanged.connect(self.filterJobs) # pylint: disable=no-member self.__list = QtWidgets.QListWidget(self) self.__list.setSelectionMode(selectionMode) @@ -400,7 +406,7 @@ def __init__(self, procSearch, parent=None): self.setWindowTitle("Unbook and kill frames?") self.__procSearch = procSearch - self.__procs = opencue.api.getProcs(procSearch) + self.__procs = opencue.api.getProcs(**procSearch.options) self.__amount = len(self.__procs) if self.__amount == 1: @@ -413,7 +419,7 @@ def __init__(self, procSearch, parent=None): self.__jobList = 
QtWidgets.QTextEdit(self) self.__jobList.setText( "\n".join( - ["%s %s" % (proc.data.jobName, proc.data.frameName) for proc in self.__procs])) + ["%s %s" % (proc.data.job_name, proc.data.frame_name) for proc in self.__procs])) self.__jobList.setReadOnly(True) self.__jobList.setSizePolicy( QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)) @@ -427,8 +433,10 @@ def __init__(self, procSearch, parent=None): layout.addWidget(self.__jobList) layout.addWidget(self.__buttons) + # pylint: disable=no-member self.__buttons.accepted.connect(self.accept) self.__buttons.rejected.connect(self.reject) + # pylint: enable=no-member def accept(self): """Kills the procs.""" diff --git a/cuegui/cuegui/Utils.py b/cuegui/cuegui/Utils.py index 98ef4a6fb..4666797b5 100644 --- a/cuegui/cuegui/Utils.py +++ b/cuegui/cuegui/Utils.py @@ -32,12 +32,10 @@ import traceback import webbrowser -from PySide2 import QtCore -from PySide2 import QtGui -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtGui +from qtpy import QtWidgets import six -import yaml -from yaml.scanner import ScannerError import opencue import opencue.wrappers.group @@ -285,7 +283,7 @@ def checkShellOut(cmdList, lockGui=False): if not lockGui and platform.system() != "Windows": cmdList.append('&') try: - subprocess.check_call(cmdList) + subprocess.check_call(" ".join(cmdList), shell=True) except subprocess.CalledProcessError as e: text = 'Command {cmd} failed with returncode {code}. {msg}.\n' \ 'Please check your EDITOR environment variable and the ' \ @@ -389,26 +387,16 @@ def memoryToString(kmem, unit=None): return "%.01fG" % (float(kmem) / pow(k, 2)) -def getResourceConfig(path=None): +def getResourceConfig(): """Reads the given yaml file and returns the entries as a dictionary. 
If no config path is given, the default resources config will be read If the given path does not exist, a warning will be printed and an empty dictionary will be returned - @param path: The path for the yaml file to read - @type path: str - @return: The entries in the given yaml file + @return: Resource config settings @rtype: dict """ - config = {} - if not path: - path = '{}/cue_resources.yaml'.format(cuegui.Constants.DEFAULT_INI_PATH) - try: - with open(path, 'r') as fileObject: - config = yaml.load(fileObject, Loader=yaml.SafeLoader) - except (IOError, ScannerError) as e: - print('WARNING: Could not read config file %s: %s' % (path, e)) - return config + return cuegui.Constants.RESOURCE_LIMITS ################################################################################ @@ -416,8 +404,19 @@ def getResourceConfig(path=None): ################################################################################ def getFrameLogFile(job, frame): - """Get the log file associated with a frame.""" - return os.path.join(job.data.log_dir, "%s.%s.rqlog" % (job.data.name, frame.data.name)) + """Get the log file associated with a frame. 
Return path based on the + current OS path using Constants.LOG_ROOT_OS to translate paths.""" + my_os = platform.system().lower() + job_os = job.data.os.lower() + + log_dir = job.data.log_dir + if my_os != job_os and \ + my_os in cuegui.Constants.LOG_ROOT_OS and \ + job_os in cuegui.Constants.LOG_ROOT_OS: + log_dir = log_dir.replace(cuegui.Constants.LOG_ROOT_OS[job_os], + cuegui.Constants.LOG_ROOT_OS[my_os], 1) + + return os.path.join(log_dir, "%s.%s.rqlog" % (job.data.name, frame.data.name)) def getFrameLLU(job, frame): @@ -448,17 +447,17 @@ def getLastLine(path): ansiEscape = r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]' try: - fp = open(path, 'r') + fp = open(path, 'rb') fp.seek(0, 2) backseek = min(4096, fp.tell()) fp.seek(-backseek, 1) buf = fp.read(4096) - newline_pos = buf.rfind("\n",0,len(buf)-1) + newline_pos = buf.rfind(b'\n', 0, len(buf)-1) fp.close() - line = buf[newline_pos+1:].strip() + line = buf[newline_pos+1:].strip().decode("utf-8") return re.sub(ansiEscape, "", line) except IOError: @@ -478,14 +477,13 @@ def popupView(file, facility=None): """Opens the given file in your editor.""" if file and not popupWeb(file, facility): editor_from_env = os.getenv('EDITOR') - # pylint: disable=no-member + app = cuegui.app() if editor_from_env: job_log_cmd = editor_from_env.split() - elif QtGui.qApp.settings.contains('LogEditor'): - job_log_cmd = QtGui.qApp.settings.value("LogEditor") + elif app.settings.contains('LogEditor'): + job_log_cmd = app.settings.value("LogEditor") else: job_log_cmd = cuegui.Constants.DEFAULT_EDITOR.split() - # pylint: enable=no-member job_log_cmd.append(str(file)) checkShellOut(job_log_cmd) @@ -625,3 +623,50 @@ def shutdownThread(thread): """Shuts down a WorkerThread.""" thread.stop() return thread.wait(1500) + +def getLLU(item): + """ LLU time from log_path """ + if isProc(item): + logFile = item.data.log_path + elif isFrame(item): + logFile = item.log_path + else: + return "" + try: + statInfo = os.path.getmtime(logFile) + except Exception as 
e: + logger.info("not able to extract LLU: %s", e) + return None + + lluTime = time.time() - statInfo + + return lluTime + +def numFormat(num, _type): + """ format LLU time """ + if num == "" or num < .001 or num is None: + return "" + if _type == "t": + return secondsToHHMMSS(int(num)) + if _type == "f": + return "%.2f" % float(num) + +def byteConversion(amount, btype): + """ convert unit of memory size into bytes for comparing different + unit measures + + :param amount: unit of memory size + :ptype amount: float + :param btype: unit type + :ptype btype: string + :return: unit in bytes + :rtype: float + """ + n = 1 + conversionMap = {"KB": 1, "TB": 4, "GB": 3, "MB": 2} + _bytes = amount + if btype.upper() in conversionMap: + n = conversionMap[btype.upper()] + for _ in range(n): + _bytes *= 1024 + return _bytes diff --git a/cuegui/cuegui/__init__.py b/cuegui/cuegui/__init__.py index d9c80f13d..ce0dbd72e 100644 --- a/cuegui/cuegui/__init__.py +++ b/cuegui/cuegui/__init__.py @@ -11,3 +11,11 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. + +"""Top level module for the CueGUI application. + +See __main__.py and Main.py for the application entrypoint. +""" + +from .App import create_app +from .App import app diff --git a/cuegui/cuegui/config/cue_resources.yaml b/cuegui/cuegui/config/cue_resources.yaml deleted file mode 100644 index d54cfcbfc..000000000 --- a/cuegui/cuegui/config/cue_resources.yaml +++ /dev/null @@ -1,25 +0,0 @@ - - -# Host Specs: -# Use this section to set the max cores and max memory based on the available -# hardware. -# These values are used by: -# - layer-properties -# - redirect plugin -# - service properties -max_cores: 32 -max_memory: 128 - - -# Redirect Plugin maximum allowed core-hour cutoff. 
-# Users will not be able to search for procs with frames that have been -# already used more than this many core-hours: -max_proc_hour_cutoff: 30 - -# Redirect plugin wasted cores threshold: -# When redirecting, and the target show is at or very close to subscription -# burst, killing frames will free up cores that may not be picked up by the -# target job. The plugin will warn the user if the number of potentially lost -# cores is higher that this threshold. To disable this warning, set the -# threshold to -1 -redirect_wasted_cores_threshold: 100 diff --git a/cuegui/cuegui/config/cuegui.yaml b/cuegui/cuegui/config/cuegui.yaml new file mode 100644 index 000000000..38f492cb7 --- /dev/null +++ b/cuegui/cuegui/config/cuegui.yaml @@ -0,0 +1,112 @@ +# Default CueGUI config file + +logger.format: '%(levelname)-9s %(module)-10s %(message)s' +logger.level: 'WARNING' + +# Path for static resources like images/icons. +paths.resources: './images' +# Path for various config files. +paths.config: './config' +# Path for the default application layout .ini file. If users do not have a layout stored +# in their local filesystem, the layout stored here will be copied. This value can also +# be set via the CUEGUI_DEFAULT_INI_PATH environment variable. +paths.default_ini_path: './config' +# Paths for CueGUI plugins. +paths.plugins: ['./plugins'] + +# How often the UI will refresh its contents. All values in milliseconds. +refresh.job_update_delay: 10000 +refresh.layer_update_delay: 10000 +refresh.frame_update_delay: 10000 +refresh.host_update_delay: 20000 +refresh.after_action_update_delay: 1000 +refresh.min_update_interval: 5000 + +# Log roots used by various operating systems. Used for remapping paths so logs produced on +# one platform will be accessible locally. +render_logs.root: + windows: 'S:' + mac: '/Users/shots' + darwin: '/Users/shots' + linux: '/shots' + rhel7: '/shots' +# Substrings which, when found in render logs, will cause that line to be highlighted. 
+render_logs.highlight.error: [ + 'error', 'aborted', 'fatal', 'failed', 'killed', 'command not found', + 'no licenses could be found', 'killMessage'] +render_logs.highlight.warning: ['warning', 'not found'] +render_logs.highlight.info: ['info:', 'rqd cmd:'] + +# File should be stored in paths.config. +style.style_sheet: 'darkpalette.qss' +style.font.family: 'Luxi Sans' +style.font.size: 10 +style.color_theme: 'plastique' +# RGB values. +style.colors.background: [ + [50, 50, 100], + [100, 100, 50], + [0, 50, 0], + [50, 30, 0], +] +style.colors.frame_state: + DEAD: [255, 0, 0] + DEPEND: [160, 32, 240] + EATEN: [150, 0, 0] + RUNNING: [200, 200, 55] + SETUP: [160, 32, 240] + SUCCEEDED: [55, 200, 55] + WAITING: [135, 207, 235] + CHECKPOINT: [61, 98, 247] + +# Default editor to use for viewing log files. +editor.windows: 'notepad' +editor.mac: 'open -t' +editor.linux: 'gview -R -m -M -U {config_path}/gvimrc +' + +resources: + # The max cores and max memory based on the available hardware. + # These values are used by: + # - layer-properties + # - redirect plugin + # - service properties + max_cores: 32 + max_memory: 128 + max_gpus: 8 + max_gpu_memory: 128 + # Redirect Plugin maximum allowed core-hour cutoff. + # Users will not be able to search for procs with frames that have been + # already used more than this many core-hours: + max_proc_hour_cutoff: 30 + # Redirect plugin wasted cores threshold: + # When redirecting, and the target show is at or very close to subscription + # burst, killing frames will free up cores that may not be picked up by the + # target job. The plugin will warn the user if the number of potentially lost + # cores is higher that this threshold. To disable this warning, set the + # threshold to -1. + redirect_wasted_cores_threshold: 100 + +links.user_guide: 'https://www.opencue.io/docs/' +links.issue.create: 'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new' +# Appended to `links.issue.create`. 
+links.issue.suggestion: '?labels=enhancement&template=enhancement.md' +# Appended to `links.issue.create`. +links.issue.bug: '?labels=bug&template=bug_report.md' + +# List of tags to be used when viewing or editing tags. +allowed_tags: ['general', 'desktop', 'playblast', 'util', 'preprocess', 'wan', 'cuda', 'splathw', + 'naiad', 'massive'] + +email.subject_prefix: 'cuemail: please check ' +email.body_prefix: 'Your PSTs request that you check ' +email.body_suffix: "\n\n" +email.domain: '' + +# Unix epoch timestamp. If the user last viewed the startup notice before this time, the +# notice will be shown. +startup_notice.date: 0 +# Notice message. +startup_notice.msg: '' + +# Memory usage above this level will be displayed in a different color. +memory_warning_level: 5242880 diff --git a/cuegui/cuegui/images/add.png b/cuegui/cuegui/images/add.png new file mode 100755 index 000000000..783b35924 Binary files /dev/null and b/cuegui/cuegui/images/add.png differ diff --git a/cuegui/cuegui/images/crystal/icons_rcc.py b/cuegui/cuegui/images/crystal/icons_rcc.py index 0ce55b24f..d66df7306 100644 --- a/cuegui/cuegui/images/crystal/icons_rcc.py +++ b/cuegui/cuegui/images/crystal/icons_rcc.py @@ -15,13 +15,15 @@ # -*- coding: utf-8 -*- +# pylint: disable=missing-function-docstring,missing-module-docstring + # Resource object code # # Created by: The Resource Compiler for PyQt (Qt v4.6.2) # # WARNING! All changes made in this file will be lost! 
-from PySide2 import QtCore +from qtpy import QtCore qt_resource_data = b"\ \x00\x00\x02\xfc\ diff --git a/cuegui/cuegui/images/icons_rcc.py b/cuegui/cuegui/images/icons_rcc.py index cc9df44af..4a9664ee7 100644 --- a/cuegui/cuegui/images/icons_rcc.py +++ b/cuegui/cuegui/images/icons_rcc.py @@ -14,6 +14,8 @@ # -*- coding: utf-8 -*- +# pylint: disable=missing-function-docstring,missing-module-docstring + # Resource object code # # Created: Tue Jul 15 11:26:55 2008 @@ -26,7 +28,7 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtCore +from qtpy import QtCore qt_resource_data = b"\ diff --git a/cuegui/cuegui/plugins/AllocationsPlugin.py b/cuegui/cuegui/plugins/AllocationsPlugin.py index b181a350b..a7d18162e 100644 --- a/cuegui/cuegui/plugins/AllocationsPlugin.py +++ b/cuegui/cuegui/plugins/AllocationsPlugin.py @@ -22,7 +22,7 @@ from builtins import map -from PySide2 import QtGui, QtWidgets +from qtpy import QtWidgets import opencue @@ -104,10 +104,7 @@ def __init__(self, parent): self.setDragEnabled(True) self.setDragDropMode(QtWidgets.QAbstractItemView.DragDrop) - # Signals are defined in code, so pylint thinks they don't exist. 
- # pylint: disable=no-member - QtGui.qApp.facility_changed.connect(self._update) - # pylint: enable=no-member + self.app.facility_changed.connect(self._update) self.setUpdateInterval(60) diff --git a/cuegui/cuegui/plugins/AttributesPlugin.py b/cuegui/cuegui/plugins/AttributesPlugin.py index da4693d06..c25480f85 100644 --- a/cuegui/cuegui/plugins/AttributesPlugin.py +++ b/cuegui/cuegui/plugins/AttributesPlugin.py @@ -24,9 +24,8 @@ from builtins import str import time -from PySide2 import QtGui -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue import opencue.compiled_proto.depend_pb2 @@ -89,6 +88,8 @@ class Attributes(QtWidgets.QWidget): """ def __init__(self, parent=None): QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() + layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) @@ -106,9 +107,7 @@ def __init__(self, parent=None): self.__scrollWidget.layout().addWidget(self.__stack) self.__scrollArea.setWidget(self.__scrollWidget) layout.addWidget(self.__scrollArea) - # pylint: disable=no-member - QtGui.qApp.single_click.connect(self.setWidget) - # pylint: enable=no-member + self.app.single_click.connect(self.setWidget) self.__load = None @@ -138,13 +137,10 @@ def setWidget(self, item): # called in a worker thread prior to the creation of the widget. # Otherwise the widget will just be created now. 
if hasattr(function, "preload"): - if hasattr(QtGui.qApp, "threadpool"): + if self.app.threadpool is not None: self.__load = {"item": item, "function": function} - # pylint: disable=no-member - QtGui.qApp.threadpool.queue(self.__getUpdate, - self.__processResults, - "getting data for %s" % self.__class__) - # pylint: enable=no-member + self.app.threadpool.queue( + self.__getUpdate, self.__processResults, "getting data for %s" % self.__class__) else: logger.warning("threadpool not found, doing work in gui thread") return self.__createItemAttribute(item, function, function.preload(item)) @@ -224,7 +220,7 @@ def addData(parent, value): self.addTopLevelItem(root) self.expandAll() - self.itemSelectionChanged.connect(self.itemSingleClickedCopy) + self.itemSelectionChanged.connect(self.itemSingleClickedCopy) # pylint: disable=no-member def itemSingleClickedCopy(self): """Event handler that copies the text of the selected line to the clipboard on click.""" diff --git a/cuegui/cuegui/plugins/LogViewPlugin.py b/cuegui/cuegui/plugins/LogViewPlugin.py index fb40734e4..5c36ec717 100644 --- a/cuegui/cuegui/plugins/LogViewPlugin.py +++ b/cuegui/cuegui/plugins/LogViewPlugin.py @@ -25,11 +25,13 @@ import os import re import string +import sys import time +import traceback -from PySide2 import QtGui -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.Constants import cuegui.AbstractDockWidget @@ -99,20 +101,22 @@ def __init__(self, parent): self.document().setDefaultFont(self.font) self._line_num_area = LineNumberArea(self) + # pylint: disable=no-member self.blockCountChanged.connect(self.update_line_number_area_width) self.updateRequest.connect(self.update_line_number_area) self.cursorPositionChanged.connect(self.highlight_current_line) + # pylint: enable=no-member self.update_line_number_area_width() self.setReadOnly(True) self.setMaximumBlockCount(20000) 
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu) - self.customContextMenuRequested.connect(self.context_menu) + self.customContextMenuRequested.connect(self.context_menu) # pylint: disable=no-member self.copy_action = QtWidgets.QAction('Copy', self) self.copy_action.setStatusTip('Copy Selection') self.copy_action.setShortcut('Ctrl+C') - self.copy_action.triggered[bool].connect(lambda triggered: + self.copy_action.triggered[bool].connect(lambda triggered: # pylint: disable=unsubscriptable-object self.copy_selection(QtGui.QClipboard.Clipboard)) self.addAction(self.copy_action) @@ -181,7 +185,7 @@ def get_line_number_area_width(self): while count >= 10: count /= 10 digits += 1 - space = 3 + self.fontMetrics().width('9') * digits + space = 3 + self.fontMetrics().horizontalAdvance('9') * digits return space def update_line_number_area_width(self): @@ -231,9 +235,11 @@ def highlight_current_line(self): crnt_selection = QtWidgets.QTextEdit.ExtraSelection() line_color = QtGui.QColor(QtCore.Qt.red).lighter(12) + # pylint: disable=no-member crnt_selection.format.setBackground(line_color) crnt_selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection, True) + # pylint: enable=no-member crnt_selection.cursor = self.textCursor() crnt_selection.cursor.clearSelection() self.setExtraSelections([crnt_selection]) @@ -288,17 +294,48 @@ def line_number_area_paint_event(self, event): bottom = top + self.blockBoundingRect(block).height() block_number += 1 +class LogLoadSignals(QtCore.QObject): + """Signals for the LoadLog action""" + SIG_LOG_LOAD_ERROR = QtCore.Signal(tuple) + SIG_LOG_LOAD_RESULT = QtCore.Signal(str, str) + SIG_LOG_LOAD_FINISHED = QtCore.Signal() + +class LogLoader(QtCore.QRunnable): + """A thread to load logs""" + def __init__(self, fn, *args, **kwargs): + super(LogLoader, self).__init__() + self.fn = fn + self.args = args + self.kwargs = kwargs + self.signals = LogLoadSignals() + + @QtCore.Slot() + def run(self): + # pylint: disable=bare-except + try: + 
content, log_mtime = self.fn(*self.args, **self.kwargs) + except: + exctype, value = sys.exc_info()[:2] + self.signals.SIG_LOG_LOAD_ERROR.emit( + (exctype, value, traceback.format_exc())) + else: + self.signals.SIG_LOG_LOAD_RESULT.emit(content, log_mtime) + finally: + self.signals.SIG_LOG_LOAD_FINISHED.emit() class LogViewWidget(QtWidgets.QWidget): - """Displays the log file for the selected frame.""" - + """ + Displays the log file for the selected frame + """ + SIG_CONTENT_UPDATED = QtCore.Signal(str, str) def __init__(self, parent=None): """ Create the UI elements """ + QtWidgets.QWidget.__init__(self, parent) + self.app = cuegui.app() # Main Widget - QtWidgets.QWidget.__init__(self, parent) layout = QtWidgets.QVBoxLayout(self) layout.setContentsMargins(0, 0, 0, 0) self._scrollArea = QtWidgets.QScrollArea() @@ -314,23 +351,23 @@ def __init__(self, parent=None): path_layout = QtWidgets.QHBoxLayout(path_widget) path_layout.setContentsMargins(0, 0, 0, 0) self._first_log_button = QtWidgets.QPushButton('<<', self) - self._first_log_button.clicked.connect( + self._first_log_button.clicked.connect( # pylint: disable=no-member lambda: self._load_other_log(float('inf'))) self._first_log_button.setEnabled(False) self._first_log_button.setToolTip('Load First Log') path_layout.addWidget(self._first_log_button) self._prev_log_button = QtWidgets.QPushButton('<', self) - self._prev_log_button.clicked.connect(lambda: self._load_other_log(1)) + self._prev_log_button.clicked.connect(lambda: self._load_other_log(1)) # pylint: disable=no-member self._prev_log_button.setEnabled(False) self._prev_log_button.setToolTip('Load Previous Log') path_layout.addWidget(self._prev_log_button) self._next_log_button = QtWidgets.QPushButton('>', self) - self._next_log_button.clicked.connect(lambda: self._load_other_log(-1)) + self._next_log_button.clicked.connect(lambda: self._load_other_log(-1)) # pylint: disable=no-member self._next_log_button.setEnabled(False) 
self._next_log_button.setToolTip('Load Next Log') path_layout.addWidget(self._next_log_button) self._last_log_button = QtWidgets.QPushButton('>>', self) - self._last_log_button.clicked.connect( + self._last_log_button.clicked.connect( # pylint: disable=no-member lambda: self._load_other_log(-float('inf'))) self._last_log_button.setEnabled(False) self._last_log_button.setToolTip('Load Current Log') @@ -353,7 +390,7 @@ def __init__(self, parent=None): self._word_wrap_checkbox.setFont(font) path_layout.addWidget(self._word_wrap_checkbox) self._word_wrap_checkbox.setCheckState(QtCore.Qt.Checked) - self._word_wrap_checkbox.stateChanged.connect(self._set_word_wrap) + self._word_wrap_checkbox.stateChanged.connect(self._set_word_wrap) # pylint: disable=no-member # Content content_widget = QtWidgets.QWidget(self) @@ -377,30 +414,30 @@ def __init__(self, parent=None): search_layout = QtWidgets.QHBoxLayout(search_widget) self._case_stv_checkbox = QtWidgets.QCheckBox('Aa') search_layout.addWidget(self._case_stv_checkbox) - self._case_stv_checkbox.stateChanged.connect(self._move_to_search_box) + self._case_stv_checkbox.stateChanged.connect(self._move_to_search_box) # pylint: disable=no-member self._search_box = QtWidgets.QLineEdit('', self) self._search_box.setClearButtonEnabled(True) self._search_box.setPlaceholderText('Search log...') search_layout.addWidget(self._search_box) self._search_box.show() - self._search_box.editingFinished.connect(self._find_text) + self._search_box.editingFinished.connect(self._find_text) # pylint: disable=no-member self._search_button = QtWidgets.QPushButton('Find', self) search_layout.addWidget(self._search_button) self._prev_button = QtWidgets.QPushButton('Prev') - self._prev_button.clicked.connect(self._move_to_prev_match) + self._prev_button.clicked.connect(self._move_to_prev_match) # pylint: disable=no-member self._next_button = QtWidgets.QPushButton('Next') - self._next_button.clicked.connect(self._move_to_next_match) + 
self._next_button.clicked.connect(self._move_to_next_match) # pylint: disable=no-member search_layout.addWidget(self._next_button) search_layout.addWidget(self._prev_button) search_refresh_button = QtWidgets.QPushButton('Refresh', self) search_layout.addWidget(search_refresh_button) - search_refresh_button.clicked.connect(self._move_to_search_box) + search_refresh_button.clicked.connect(self._move_to_search_box) # pylint: disable=no-member clear_search_button = QtWidgets.QPushButton('Clr', self) search_layout.addWidget(clear_search_button) - clear_search_button.clicked.connect(self._clear_search_text) - self._search_button.clicked.connect(self._find_text) + clear_search_button.clicked.connect(self._clear_search_text) # pylint: disable=no-member + self._search_button.clicked.connect(self._find_text) # pylint: disable=no-member matches_widget = QtWidgets.QWidget(self) matches_layout = QtWidgets.QHBoxLayout(matches_widget) @@ -424,9 +461,7 @@ def __init__(self, parent=None): pos = QtCore.QPoint(0, 0) self._highlight_cursor = self._content_box.cursorForPosition(pos) # Signals are defined in code, so pylint thinks they don't exist. 
- # pylint: disable=no-member - QtGui.qApp.display_log_file_content.connect(self._set_log_files) - # pylint: enable=no-member + self.app.display_log_file_content.connect(self._set_log_files) self._log_scrollbar = self._content_box.verticalScrollBar() self._log_scrollbar.valueChanged.connect(self._set_scrollbar_value) @@ -450,6 +485,9 @@ def __init__(self, parent=None): self._current_match = 0 self._content_box.mousePressedSignal.connect(self._on_mouse_pressed) + self.SIG_CONTENT_UPDATED.connect(self._update_log_content) + self.log_thread_pool = QtCore.QThreadPool() + def _on_mouse_pressed(self, pos): """ Mouse press event, to be called when the user scrolls by hand or moves @@ -699,7 +737,6 @@ def _highlight_matches(self): QtGui.QTextCursor.KeepAnchor, match[-1]) self._highlight_cursor.setCharFormat(self._format) - self._matches_to_highlight.discard(match) def _clear_search_text(self): """ @@ -721,19 +758,27 @@ def _clear_search_data(self): are also removed. """ + if not self._log_file: + return + + # find matched text to "unhighlight" red by resetting the char format + highlight = self._matches[max(self._current_match - 300, 0): + min(self._current_match + 300, len(self._matches))] + matches = list(set(highlight).intersection(self._matches_to_highlight)) + + for match in matches: + self._highlight_cursor.setPosition(match[0]) + self._highlight_cursor.movePosition(QtGui.QTextCursor.Right, + QtGui.QTextCursor.KeepAnchor, + match[-1]) + self._highlight_cursor.setCharFormat(QtGui.QTextCharFormat()) + self._highlight_cursor.clearSelection() + + # reset text matches self._matches = [] self._matches_to_highlight = set() self._search_timestamp = 0 self._matches_label.setText('') - if not self._log_file: - return - - charFormat = QtGui.QTextCharFormat() - self._highlight_cursor.setPosition(QtGui.QTextCursor.Start) - self._highlight_cursor.movePosition( - QtGui.QTextCursor.End, mode=QtGui.QTextCursor.KeepAnchor) - self._highlight_cursor.setCharFormat(charFormat) - 
self._highlight_cursor.clearSelection() def _set_scrollbar_value(self, val): """ @@ -778,12 +823,56 @@ def _display_log_content(self): """ try: - self._update_log() - self._new_log = False + if not os.path.exists(self._log_file): + self._log_file_exists = False + content = 'Log file does not exist: %s' % self._log_file + self._content_timestamp = time.time() + self._update_log_content(content, self._log_mtime) + else: + # Creating the load logs process as qrunnables so + # that they don't block the ui while loading + log_loader = LogLoader(self._load_log, self._log_file, + self._new_log, self._log_mtime) + log_loader.signals.SIG_LOG_LOAD_RESULT.connect( + self._receive_log_results) + log_loader.setAutoDelete(True) + self.log_thread_pool.start(log_loader) + self.log_thread_pool.waitForDone() + self._new_log = False finally: QtCore.QTimer.singleShot(5000, self._display_log_content) - def _update_log(self): + # pylint: disable=no-self-use + @QtCore.Slot() + def _load_log(self, log_file, new_log, curr_log_mtime): + content = None + log_size = int(os.stat(log_file).st_size) + if log_size > 1 * 1e6: + content = ('Log file size (%0.1f MB) exceeds the size ' + 'threshold (1.0 MB).' + % float(log_size / (1024 * 1024))) + elif not new_log and os.path.exists(log_file): + log_mtime = os.path.getmtime(log_file) + if log_mtime > curr_log_mtime: + curr_log_mtime = log_mtime # no new updates + content = '' + + if content is None: + content = '' + try: + with open(log_file, 'r') as f: + content = f.read() + except IOError: + content = 'Can not access log file: %s' % log_file + + return content, curr_log_mtime + + @QtCore.Slot() + def _receive_log_results(self, content, log_mtime): + self.SIG_CONTENT_UPDATED.emit(content, log_mtime) + + @QtCore.Slot(str, str) + def _update_log_content(self, content, log_mtime): """ Updates the content of the content box with the content of the log file, if necessary. 
The full path to the log file will be populated in @@ -803,51 +892,23 @@ def _update_log(self): (if necessary) """ - # Get the content of the log file - if not self._log_file: - return # There's no log file, nothing to do here! - self._path.setText(self._log_file) - content = None - if not os.path.exists(self._log_file): - self._log_file_exists = False - content = 'Log file does not exist: %s' % self._log_file - self._content_timestamp = time.time() - else: - log_size = int(os.stat(self._log_file).st_size) - if log_size > 5 * 1e6: - content = ('Log file size (%0.1f MB) exceeds the size ' - 'threshold (5.0 MB).' - % float(log_size / (1024 * 1024))) - elif not self._new_log and os.path.exists(self._log_file): - log_mtime = os.path.getmtime(self._log_file) - if log_mtime > self._log_mtime: - self._log_mtime = log_mtime # no new updates - content = '' + self._log_mtime = log_mtime - if content is None: - content = '' - try: - with open(self._log_file, 'r') as f: - content = f.read() - except IOError: - content = 'Can not access log file: %s' % self._log_file - - # Do we need to scroll to the end? 
- scroll_to_end = (self._scrollbar_max == self._scrollbar_value - or self._new_log) + self.app.processEvents() # Update the content in the gui (if necessary) - current_text = (self._content_box.toPlainText() or '') - new_text = content.lstrip(str(current_text)) - if new_text: - if self._new_log: - self._content_box.setPlainText(content) - else: + if self._new_log: + self._content_box.setPlainText(content) + else: + current_text = (self._content_box.toPlainText() or '') + new_text = content.lstrip(str(current_text)) + if new_text: self._content_box.appendPlainText(new_text) - self._content_timestamp = time.time() - # pylint: disable=no-member - QtGui.qApp.processEvents() - # pylint: enable=no-member + self._content_timestamp = time.time() + self._path.setText(self._log_file) + + scroll_to_end = (self._scrollbar_max == self._scrollbar_value + or self._new_log) # Adjust scrollbar value (if necessary) self._scrollbar_max = self._log_scrollbar.maximum() diff --git a/cuegui/cuegui/plugins/MonitorCuePlugin.py b/cuegui/cuegui/plugins/MonitorCuePlugin.py index 6f07b80e0..7884bfc5a 100644 --- a/cuegui/cuegui/plugins/MonitorCuePlugin.py +++ b/cuegui/cuegui/plugins/MonitorCuePlugin.py @@ -25,9 +25,9 @@ import re import weakref -from PySide2 import QtGui -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -78,9 +78,7 @@ def __init__(self, parent): self.layout().addLayout(self.__hlayout) - # pylint: disable=no-member - self.__monitorCue.view_object.connect(QtGui.qApp.view_object.emit) - # pylint: enable=no-member + self.__monitorCue.view_object.connect(self.app.view_object.emit) self.pluginRegisterSettings([("shows", self.__monitorCue.getShowNames, @@ -98,9 +96,7 @@ def __init__(self, parent): self.addShows([os.getenv('SHOW')]) def __cueStateBarSetup(self, layout): - # pylint: disable=no-member - cueStateBarEnabled = QtGui.qApp.settings.value("CueStateBar", False) - # pylint: 
enable=no-member + cueStateBarEnabled = self.app.settings.value("CueStateBar", False) if cueStateBarEnabled: self.__cueStateBar = cuegui.CueStateBarWidget.CueStateBarWidget(self.__monitorCue, self) layout.addWidget(self.__cueStateBar) @@ -112,7 +108,7 @@ def __expandAllSetup(self): btn.setIcon(QtGui.QIcon(":down.png")) btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setToolTip("Expand all groups") - btn.clicked.connect(self.__monitorCue.expandAll) + btn.clicked.connect(self.__monitorCue.expandAll) # pylint: disable=no-member def __collapseAllSetup(self): """Sets up the collapse all button""" @@ -121,7 +117,7 @@ def __collapseAllSetup(self): btn.setIcon(QtGui.QIcon(":up.png")) btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setToolTip("Collapse all groups") - btn.clicked.connect(self.__monitorCue.collapseAll) + btn.clicked.connect(self.__monitorCue.collapseAll) # pylint: disable=no-member def __buttonSetup(self, layout): btn = QtWidgets.QPushButton(QtGui.QIcon(":eat.png"), "") @@ -129,35 +125,35 @@ def __buttonSetup(self, layout): btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setFlat(True) layout.addWidget(btn) - btn.clicked.connect(self.__monitorCue.actionEatSelectedItems) + btn.clicked.connect(self.__monitorCue.actionEatSelectedItems) # pylint: disable=no-member btn = QtWidgets.QPushButton(QtGui.QIcon(":retry.png"), "") btn.setToolTip("Retries all dead frames for selected jobs") btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setFlat(True) layout.addWidget(btn) - btn.clicked.connect(self.__monitorCue.actionRetrySelectedItems) + btn.clicked.connect(self.__monitorCue.actionRetrySelectedItems) # pylint: disable=no-member btn = QtWidgets.QPushButton(QtGui.QIcon(":kill.png"), "") btn.setToolTip("Kill selected jobs") btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setFlat(True) layout.addWidget(btn) - btn.clicked.connect(self.__monitorCue.actionKillSelectedItems) + btn.clicked.connect(self.__monitorCue.actionKillSelectedItems) # pylint: disable=no-member btn = 
QtWidgets.QPushButton(QtGui.QIcon(":pause.png"), "") btn.setToolTip("Pause selected jobs") btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setFlat(True) layout.addWidget(btn) - btn.clicked.connect(self.__monitorCue.actionPauseSelectedItems) + btn.clicked.connect(self.__monitorCue.actionPauseSelectedItems) # pylint: disable=no-member btn = QtWidgets.QPushButton(QtGui.QIcon(":unpause.png"), "") btn.setToolTip("Unpause selected jobs") btn.setFocusPolicy(QtCore.Qt.NoFocus) btn.setFlat(True) layout.addWidget(btn) - btn.clicked.connect(self.__monitorCue.actionResumeSelectedItems) + btn.clicked.connect(self.__monitorCue.actionResumeSelectedItems) # pylint: disable=no-member ################################################################################ @@ -168,17 +164,15 @@ def __showMenuSetup(self): """Sets up the show selection menu""" self.__showMenuBtn = QtWidgets.QPushButton("Shows ",self) self.__showMenuBtn.setIcon(QtGui.QIcon(":show.png")) - self.__showMenuBtn.pressed.connect(self.__showMenuCheck) + self.__showMenuBtn.pressed.connect(self.__showMenuCheck) # pylint: disable=no-member self.__toolbar.addWidget(self.__showMenuBtn) self.__showMenu = QtWidgets.QMenu(self) self.__showMenuBtn.setMenu(self.__showMenu) self.__showMenuBtn.setFocusPolicy(QtCore.Qt.NoFocus) self.__showMenu.setFont(cuegui.Constants.STANDARD_FONT) - self.__showMenu.triggered.connect(self.__showMenuHandle) - # pylint: disable=no-member - QtGui.qApp.facility_changed.connect(self.__showMenuUpdate) - # pylint: enable=no-member + self.__showMenu.triggered.connect(self.__showMenuHandle) # pylint: disable=no-member + self.app.facility_changed.connect(self.__showMenuUpdate) self.__showMenuUpdate() @@ -248,23 +242,23 @@ def __selectJobsSetup(self): select_btn = QtWidgets.QPushButton("Select:") select_btn.setFocusPolicy(QtCore.Qt.NoFocus) self.__toolbar.addWidget(select_btn) - select_btn.clicked.connect(self.__selectJobsHandle) + select_btn.clicked.connect(self.__selectJobsHandle) # pylint: disable=no-member 
self._selectJobsEditBox = JobSelectEditBox(self) self.__toolbar.addWidget(self._selectJobsEditBox) - self._selectJobsEditBox.returnPressed.connect(self.__selectJobsHandle) + self._selectJobsEditBox.returnPressed.connect(self.__selectJobsHandle) # pylint: disable=no-member clear_btn = QtWidgets.QPushButton("Clr") clear_btn.setFocusPolicy(QtCore.Qt.NoFocus) clear_btn.setFixedWidth(24) self.__toolbar.addWidget(clear_btn) - clear_btn.clicked.connect(self._selectJobsEditBox.actionClear) + clear_btn.clicked.connect(self._selectJobsEditBox.actionClear) # pylint: disable=no-member mine_btn = QtWidgets.QPushButton("selectMine") mine_btn.setFocusPolicy(QtCore.Qt.NoFocus) mine_btn.setFixedWidth(70) self.__toolbar.addWidget(mine_btn) - mine_btn.clicked.connect(self.__selectJobsHandleMine) + mine_btn.clicked.connect(self.__selectJobsHandleMine) # pylint: disable=no-member def __selectJobsHandle(self, value = None): """This will select all jobs that have a name that contain the substring @@ -379,7 +373,7 @@ def actionClear(self): def _actionSelect(self): """Signals that a return was pressed""" - self.returnPressed.emit() + self.returnPressed.emit() # pylint: disable=no-member def keyPressEvent(self, event): """Let the parent handle any space key presses""" diff --git a/cuegui/cuegui/plugins/MonitorHostsPlugin.py b/cuegui/cuegui/plugins/MonitorHostsPlugin.py index cee8413fe..be40d7453 100644 --- a/cuegui/cuegui/plugins/MonitorHostsPlugin.py +++ b/cuegui/cuegui/plugins/MonitorHostsPlugin.py @@ -20,8 +20,8 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.AbstractDockWidget import cuegui.HostMonitor diff --git a/cuegui/cuegui/plugins/MonitorJobDetailsPlugin.py b/cuegui/cuegui/plugins/MonitorJobDetailsPlugin.py index b731736c4..bb1b61d02 100644 --- a/cuegui/cuegui/plugins/MonitorJobDetailsPlugin.py +++ 
b/cuegui/cuegui/plugins/MonitorJobDetailsPlugin.py @@ -22,9 +22,8 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtGui -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -64,13 +63,11 @@ def __init__(self, parent): self.__splitter.addWidget(self.__monitorLayers) self.__splitter.addWidget(self.__monitorFrames) - # pylint: disable=no-member - QtGui.qApp.view_object.connect(self.__setJob) - QtGui.qApp.unmonitor.connect(self.__unmonitor) - QtGui.qApp.facility_changed.connect(self.__setJob) - # pylint: enable=no-member + self.app.view_object.connect(self.__setJob) + self.app.unmonitor.connect(self.__unmonitor) + self.app.facility_changed.connect(self.__setJob) self.__monitorLayers.handle_filter_layers_byLayer.connect(self.handleLayerFilter) - self.__splitter.splitterMoved.connect(self.__splitterMoved) + self.__splitter.splitterMoved.connect(self.__splitterMoved) # pylint: disable=no-member self.pluginRegisterSettings([("splitterSize", self.__splitter.sizes, diff --git a/cuegui/cuegui/plugins/MonitorJobsPlugin.py b/cuegui/cuegui/plugins/MonitorJobsPlugin.py index c2f970b59..875da70db 100644 --- a/cuegui/cuegui/plugins/MonitorJobsPlugin.py +++ b/cuegui/cuegui/plugins/MonitorJobsPlugin.py @@ -22,12 +22,13 @@ from builtins import str from builtins import map +import datetime import re import weakref -from PySide2 import QtGui -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtGui +from qtpy import QtCore +from qtpy import QtWidgets import opencue @@ -45,7 +46,8 @@ PLUGIN_CATEGORY = "Cuetopia" PLUGIN_DESCRIPTION = "Monitors a list of jobs" PLUGIN_PROVIDES = "MonitorJobsDockWidget" - +JOB_RESTORE_THRESHOLD_DAYS = 3 +JOB_RESTORE_THRESHOLD_LIMIT = 200 class MonitorJobsDockWidget(cuegui.AbstractDockWidget.AbstractDockWidget): """Plugin for listing active jobs and managing them.""" @@ -67,10 +69,8 @@ def __init__(self, parent): 
self.layout().addWidget(self.jobMonitor) # Signals in - # pylint: disable=no-member - QtGui.qApp.view_object.connect(self.addJob) - QtGui.qApp.facility_changed.connect(self.jobMonitor.removeAllItems) - # pylint: enable=no-member + self.app.view_object.connect(self.addJob) + self.app.facility_changed.connect(self.jobMonitor.removeAllItems) # Signals out self.jobMonitor.view_object.connect(self.view_object.emit) @@ -87,9 +87,18 @@ def __init__(self, parent): ("columnWidths", self.jobMonitor.getColumnWidths, self.jobMonitor.setColumnWidths), - ("columnOrder", + ("columnOrder", self.jobMonitor.getColumnOrder, - self.jobMonitor.setColumnOrder)]) + self.jobMonitor.setColumnOrder), + ("loadFinished", + self.__loadFinishedJobsCheckBox.isChecked, + self.__loadFinishedJobsCheckBox.setChecked), + ("grpDependentCb", + self.getGrpDependent, + self.setGrpDependent), + ("autoLoadMineCb", + self.getAutoLoadMine, + self.setAutoLoadMine)]) def addJob(self, rpcObject): """Adds a job to be monitored.""" @@ -97,7 +106,7 @@ def addJob(self, rpcObject): rpcObject = cuegui.Utils.findJob(rpcObject.data.job_name) elif not cuegui.Utils.isJob(rpcObject): return - self.jobMonitor.addJob(rpcObject) + self.jobMonitor.addJob(rpcObject, loading_from_config=True) self.raise_() def getJobIds(self): @@ -105,14 +114,36 @@ def getJobIds(self): return list(map(opencue.id, self.jobMonitor.getJobProxies())) def restoreJobIds(self, jobIds): - """Monitors a list of jobs.""" - for jobId in jobIds: - try: - self.jobMonitor.addJob(jobId) - except opencue.EntityNotFoundException: - logger.warning("Unable to load previously loaded job since " - "it was moved to the historical " - "database: %s", jobId) + """Restore monitored jobs from previous saved state + Only load jobs that have a timestamp less than or equal to the time a job lives on the farm + (jobs are moved to historical database) + + :param jobIds: monitored jobs ids and their timestamp from previous working state + (loaded from config.ini file) + ex: 
[("Job.f156be87-987a-48b9-b9da-774cd58674a3", 1612482716.170947),... + :type jobIds: list[tuples] + """ + today = datetime.datetime.now() + limit = JOB_RESTORE_THRESHOLD_LIMIT if len(jobIds) > \ + JOB_RESTORE_THRESHOLD_LIMIT else len(jobIds) + msg = ('Unable to load previously loaded job since it was moved ' + 'to the historical database: {0}') + + try: + for jobId, timestamp in jobIds[:limit]: + loggedTime = datetime.datetime.fromtimestamp(timestamp) + if (today - loggedTime).days <= JOB_RESTORE_THRESHOLD_DAYS: + try: + self.jobMonitor.addJob(jobId, timestamp) + except opencue.EntityNotFoundException: + logger.info(msg, jobId) + except ValueError: + # load older format + for jobId in jobIds[:limit]: + try: + self.jobMonitor.addJob(jobId) + except opencue.EntityNotFoundException: + logger.info(msg, jobId) def pluginRestoreState(self, saved_settings): """Called on plugin start with any previously saved state. @@ -143,11 +174,11 @@ def _regexLoadJobsSetup(self, layout): btn = QtWidgets.QPushButton("Load:") btn.setFocusPolicy(QtCore.Qt.NoFocus) layout.addWidget(btn) - btn.clicked.connect(self._regexLoadJobsHandle) + btn.clicked.connect(self._regexLoadJobsHandle) # pylint: disable=no-member self.__regexLoadJobsEditBox = JobRegexLoadEditBox(self) layout.addWidget(self.__regexLoadJobsEditBox) - self.__regexLoadJobsEditBox.returnPressed.connect(self._regexLoadJobsHandle) + self.__regexLoadJobsEditBox.returnPressed.connect(self._regexLoadJobsHandle) # pylint: disable=no-member def _loadFinishedJobsSetup(self, layout): """Ensures that when querying jobs that finished jobs are included. 
@@ -156,7 +187,7 @@ def _loadFinishedJobsSetup(self, layout): @type layout: QLayout""" self.__loadFinishedJobsCheckBox = JobLoadFinishedCheckBox(self) layout.addWidget(self.__loadFinishedJobsCheckBox) - self.__loadFinishedJobsCheckBox.stateChanged.connect(self._regexLoadJobsHandle) + self.__loadFinishedJobsCheckBox.stateChanged.connect(self._regexLoadJobsHandle) # pylint: disable=no-member def _regexLoadJobsHandle(self): """This will select all jobs that have a name that contain the substring @@ -164,93 +195,120 @@ def _regexLoadJobsHandle(self): substring = str(self.__regexLoadJobsEditBox.text()).strip() load_finished_jobs = self.__loadFinishedJobsCheckBox.isChecked() - if cuegui.Utils.isStringId(substring): - # If a uuid is provided, load it - self.jobMonitor.addJob(substring) - elif load_finished_jobs or re.search( + self.jobMonitor.removeAllItems() + + if substring: + if cuegui.Utils.isStringId(substring): + # If a uuid is provided, load it + self.jobMonitor.addJob(substring) + elif load_finished_jobs or re.search( r"^([a-z0-9_]+)\-([a-z0-9\.]+)\-", substring, re.IGNORECASE): - # If show and shot is provided, or if "load finished" checkbox is checked, load all jobs - for job in opencue.api.getJobs(substr=[substring], include_finished=True): - self.jobMonitor.addJob(job) - else: - # Otherwise, just load current matching jobs - for job in opencue.api.getJobs(regex=[substring]): - self.jobMonitor.addJob(job) + # Load all ff show and shot is provided or if "load finished" checkbox is checked + for job in opencue.api.getJobs(regex=[substring], include_finished=True): + self.jobMonitor.addJob(job) + else: + # Otherwise, just load current matching jobs (except for the empty string) + for job in opencue.api.getJobs(regex=[substring]): + self.jobMonitor.addJob(job) + + def getGrpDependent(self): + """Is group dependent checked""" + return bool(self.grpDependentCb.isChecked()) + + def setGrpDependent(self, state): + """Set group dependent""" + 
self.grpDependentCb.setChecked(bool(state)) + + def getAutoLoadMine(self): + """Is autoload mine checked""" + return bool(self.autoLoadMineCb.isChecked()) + + def setAutoLoadMine(self, state): + """Set autoload mine""" + self.autoLoadMineCb.setChecked(bool(state)) def _buttonSetup(self, layout): clearButton = QtWidgets.QPushButton("Clr") clearButton.setFocusPolicy(QtCore.Qt.NoFocus) clearButton.setFixedWidth(24) layout.addWidget(clearButton) - clearButton.clicked.connect(self.__regexLoadJobsEditBox.actionClear) + clearButton.clicked.connect(self.__regexLoadJobsEditBox.actionClear) # pylint: disable=no-member spacer = QtWidgets.QWidget() spacer.setFixedWidth(20) layout.addWidget(spacer) - mineCheckbox = QtWidgets.QCheckBox("Autoload Mine") - mineCheckbox.setFocusPolicy(QtCore.Qt.NoFocus) - mineCheckbox.setChecked(True) - layout.addWidget(mineCheckbox) - mineCheckbox.stateChanged.connect(self.jobMonitor.setLoadMine) + self.autoLoadMineCb = QtWidgets.QCheckBox("Autoload Mine") + self.autoLoadMineCb.setFocusPolicy(QtCore.Qt.NoFocus) + self.autoLoadMineCb.setChecked(True) + layout.addWidget(self.autoLoadMineCb) + self.autoLoadMineCb.stateChanged.connect(self.jobMonitor.setLoadMine) # pylint: disable=no-member self._loadFinishedJobsSetup(self.__toolbar) + self.grpDependentCb = QtWidgets.QCheckBox("Group Dependent") + self.grpDependentCb.setFocusPolicy(QtCore.Qt.NoFocus) + self.grpDependentCb.setChecked(True) + layout.addWidget(self.grpDependentCb) + # pylint: disable=no-member + self.grpDependentCb.stateChanged.connect(self.jobMonitor.setGroupDependent) + # pylint: enable=no-member + finishedButton = QtWidgets.QPushButton(QtGui.QIcon(":eject.png"), "Finished") finishedButton.setToolTip("Unmonitor finished jobs") finishedButton.setFocusPolicy(QtCore.Qt.NoFocus) finishedButton.setFlat(True) layout.addWidget(finishedButton) - finishedButton.clicked.connect(self.jobMonitor.removeFinishedItems) + finishedButton.clicked.connect(self.jobMonitor.removeFinishedItems) # pylint: 
disable=no-member allButton = QtWidgets.QPushButton(QtGui.QIcon(":eject.png"), "All") allButton.setToolTip("Unmonitor all jobs") allButton.setFocusPolicy(QtCore.Qt.NoFocus) allButton.setFlat(True) layout.addWidget(allButton) - allButton.clicked.connect(self.jobMonitor.removeAllItems) + allButton.clicked.connect(self.jobMonitor.removeAllItems) # pylint: disable=no-member removeSelectedButton = QtWidgets.QPushButton(QtGui.QIcon(":eject.png"), "") removeSelectedButton.setToolTip("Unmonitor selected jobs") removeSelectedButton.setFocusPolicy(QtCore.Qt.NoFocus) removeSelectedButton.setFlat(True) layout.addWidget(removeSelectedButton) - removeSelectedButton.clicked.connect(self.jobMonitor.actionRemoveSelectedItems) + removeSelectedButton.clicked.connect(self.jobMonitor.actionRemoveSelectedItems) # pylint: disable=no-member eatSelectedButton = QtWidgets.QPushButton(QtGui.QIcon(":eat.png"), "") eatSelectedButton.setToolTip("Eats all dead frames for selected jobs") eatSelectedButton.setFocusPolicy(QtCore.Qt.NoFocus) eatSelectedButton.setFlat(True) layout.addWidget(eatSelectedButton) - eatSelectedButton.clicked.connect(self.jobMonitor.actionEatSelectedItems) + eatSelectedButton.clicked.connect(self.jobMonitor.actionEatSelectedItems) # pylint: disable=no-member retryButton = QtWidgets.QPushButton(QtGui.QIcon(":retry.png"), "") retryButton.setToolTip("Retries all dead frames for selected jobs") retryButton.setFocusPolicy(QtCore.Qt.NoFocus) retryButton.setFlat(True) layout.addWidget(retryButton) - retryButton.clicked.connect(self.jobMonitor.actionRetrySelectedItems) + retryButton.clicked.connect(self.jobMonitor.actionRetrySelectedItems) # pylint: disable=no-member killButton = QtWidgets.QPushButton(QtGui.QIcon(":kill.png"), "") killButton.setToolTip("Kill selected jobs") killButton.setFocusPolicy(QtCore.Qt.NoFocus) killButton.setFlat(True) layout.addWidget(killButton) - killButton.clicked.connect(self.jobMonitor.actionKillSelectedItems) + 
killButton.clicked.connect(self.jobMonitor.actionKillSelectedItems) # pylint: disable=no-member pauseButton = QtWidgets.QPushButton(QtGui.QIcon(":pause.png"), "") pauseButton.setToolTip("Pause selected jobs") pauseButton.setFocusPolicy(QtCore.Qt.NoFocus) pauseButton.setFlat(True) layout.addWidget(pauseButton) - pauseButton.clicked.connect(self.jobMonitor.actionPauseSelectedItems) + pauseButton.clicked.connect(self.jobMonitor.actionPauseSelectedItems) # pylint: disable=no-member unpauseButton = QtWidgets.QPushButton(QtGui.QIcon(":unpause.png"), "") unpauseButton.setToolTip("Unpause selected jobs") unpauseButton.setFocusPolicy(QtCore.Qt.NoFocus) unpauseButton.setFlat(True) layout.addWidget(unpauseButton) - unpauseButton.clicked.connect(self.jobMonitor.actionResumeSelectedItems) + unpauseButton.clicked.connect(self.jobMonitor.actionResumeSelectedItems) # pylint: disable=no-member class JobLoadFinishedCheckBox(QtWidgets.QCheckBox): @@ -312,7 +370,7 @@ def actionClear(self): self.setText("") def _actionLoad(self): - self.returnPressed.emit() + self.returnPressed.emit() # pylint: disable=no-member def toggleReadOnly(self): """Toggles the textbox readonly setting.""" diff --git a/cuegui/cuegui/plugins/ServicePlugin.py b/cuegui/cuegui/plugins/ServicePlugin.py index cc2a99bef..009ed450b 100644 --- a/cuegui/cuegui/plugins/ServicePlugin.py +++ b/cuegui/cuegui/plugins/ServicePlugin.py @@ -20,8 +20,6 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtGui - import cuegui.AbstractDockWidget import cuegui.ServiceDialog @@ -41,6 +39,4 @@ def __init__(self, parent): self.setWindowTitle("Facility Service Defaults") self.__serviceManager = cuegui.ServiceDialog.ServiceManager(None, self) self.layout().addWidget(self.__serviceManager) - # pylint: disable=no-member - QtGui.qApp.facility_changed.connect(self.__serviceManager.refresh) - # pylint: enable=no-member + self.app.facility_changed.connect(self.__serviceManager.refresh) diff --git 
a/cuegui/cuegui/plugins/ShowsPlugin.py b/cuegui/cuegui/plugins/ShowsPlugin.py index 89ef37754..9d292a5a8 100644 --- a/cuegui/cuegui/plugins/ShowsPlugin.py +++ b/cuegui/cuegui/plugins/ShowsPlugin.py @@ -20,7 +20,10 @@ from __future__ import division from __future__ import absolute_import +from qtpy import QtWidgets + import cuegui.AbstractDockWidget +import cuegui.CreateShowDialog import cuegui.ShowsWidget @@ -38,7 +41,11 @@ def __init__(self, parent): super(ShowsDockWidget, self).__init__(parent, PLUGIN_NAME) self.__showsWidget = cuegui.ShowsWidget.ShowsWidget(self) + self.__createShowButton = QtWidgets.QPushButton("Create Show") + self.__createShowButton.setFixedWidth(150) + self.__createShowButton.clicked.connect(self.onCreateShowClicked) # pylint: disable=no-member + self.layout().addWidget(self.__createShowButton) self.layout().addWidget(self.__showsWidget) self.pluginRegisterSettings([("columnVisibility", @@ -47,3 +54,8 @@ def __init__(self, parent): ("columnOrder", self.__showsWidget.getColumnOrder, self.__showsWidget.setColumnOrder)]) + + def onCreateShowClicked(self): + """Show the dialog for creating new shows""" + d = cuegui.CreateShowDialog.CreateShowDialog(self) + d.exec_() diff --git a/cuegui/cuegui/plugins/StuckFramePlugin.py b/cuegui/cuegui/plugins/StuckFramePlugin.py new file mode 100644 index 000000000..a44c90196 --- /dev/null +++ b/cuegui/cuegui/plugins/StuckFramePlugin.py @@ -0,0 +1,1839 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Plugin for managing stuck frames.""" + + +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +from builtins import str +from builtins import map +import datetime +import re +import os +from datetime import datetime +import time +import socket +import signal +import yaml + +from qtpy import QtGui +from qtpy import QtCore +from qtpy import QtWidgets + +import opencue.wrappers.frame + +import cuegui.AbstractDockWidget +import cuegui.AbstractTreeWidget +import cuegui.AbstractWidgetItem +import cuegui.Action +import cuegui.Constants +import cuegui.JobMonitorTree +import cuegui.Logger +import cuegui.MenuActions +import cuegui.Style +import cuegui.Utils + +logger = cuegui.Logger.getLogger(__file__) + +PLUGIN_NAME = "Stuck Frame" +PLUGIN_CATEGORY = "Cuecommander" +PLUGIN_DESCRIPTION = "Work with stuck frames." +PLUGIN_REQUIRES = "CueCommander" +PLUGIN_PROVIDES = "StuckWidget" +CUE_SLEEP = 30 + +NAME_COLUMN = 0 +COMMENT_COLUMN = 1 +FRAME_COLUMN = 2 +LLU_COLUMN = 3 +RUNTIME_COLUMN = 4 +LASTLINE_COLUMN = 7 + + +class StuckWidget(cuegui.AbstractDockWidget.AbstractDockWidget): + """This builds what is displayed on the dock widget""" + + def __init__(self, parent): + cuegui.AbstractDockWidget.AbstractDockWidget.__init__(self, parent, PLUGIN_NAME) + self.__stuckWidget = StuckFrameWidget(self) + self.layout().addWidget(self.__stuckWidget) + + def pluginSaveState(self): + """Saves current state of the plugin and returns it as dict""" + filters = self.__stuckWidget.getControls().getFilters() + save = {} + for frame_filter in filters: + save[frame_filter.getServiceBoxText()] = [frame_filter.getRegexText(), + frame_filter.getTime(), + frame_filter.getMinLLu(), + frame_filter.getAvgCompTime(), + frame_filter.getRunTime(), + frame_filter.getEnabled().isChecked()] + return save + + def pluginRestoreState(self, 
saved_settings): + """Restores state based on the saved settings.""" + if saved_settings: + if len(saved_settings) > 1: + current_settings = saved_settings["All Other Types"] + frame_filter = self.__stuckWidget.getControls().getFilters()[0] + frame_filter.getServiceBox().setText("All Other Types") + frame_filter.getRegex().setText(current_settings[0]) + frame_filter.getEnabled().setChecked(current_settings[5]) + frame_filter.getLLUFilter().setValue(current_settings[2]) + frame_filter.getPercentFilter().setValue(current_settings[1]) + frame_filter.getCompletionFilter().setValue(current_settings[3]) + frame_filter.getRunFilter().setValue(current_settings[4]) + top_filter = self.__stuckWidget.getControls().getFilters()[0] + else: + settings_text = "All (Click + to Add Specific Filter)" + current_settings = saved_settings[settings_text] + frame_filter = self.__stuckWidget.getControls().getFilters()[0] + frame_filter.getServiceBox().setText(settings_text) + frame_filter.getRegex().setText(current_settings[0]) + frame_filter.getEnabled().setChecked(current_settings[5]) + frame_filter.getLLUFilter().setValue(current_settings[2]) + frame_filter.getPercentFilter().setValue(current_settings[1]) + frame_filter.getCompletionFilter().setValue(current_settings[3]) + frame_filter.getRunFilter().setValue(current_settings[4]) + return + + for frame_filter in saved_settings.keys(): + if (not frame_filter == "All Other Types" and + not frame_filter == "All (Click + to Add Specific Filter)"): + current_settings = saved_settings[frame_filter] + new_filter = top_filter.addFilter() + new_filter.getServiceBox().setText(frame_filter) + new_filter.getRegex().setText(current_settings[0]) + new_filter.getEnabled().setChecked(current_settings[5]) + new_filter.getLLUFilter().setValue(current_settings[2]) + new_filter.getPercentFilter().setValue(current_settings[1]) + new_filter.getCompletionFilter().setValue(current_settings[3]) + new_filter.getRunFilter().setValue(current_settings[4]) + return 
+ + +class ShowCombo(QtWidgets.QComboBox): + """Combobox with show names""" + + def __init__(self, selected="pipe", parent=None): + QtWidgets.QComboBox.__init__(self, parent) + self.refresh() + self.setCurrentIndex(self.findText(selected)) + + def refresh(self): + """Refreshes the show list.""" + self.clear() + shows = opencue.api.getActiveShows() + shows.sort() + + for show in shows: + self.addItem(show.data.name, show) + + def getShow(self): + """Returns show name.""" + return str(self.setCurrentText()) + + +class StuckFrameControls(QtWidgets.QWidget): + """ + A widget that contains all search options for stuck frames + """ + + def __init__(self, parent=None): + QtWidgets.QWidget.__init__(self, parent) + + self.__current_show = opencue.api.findShow(os.getenv("SHOW", "pipe")) + self.__show_combo = ShowCombo(self.__current_show.data.name, self) + self.__show_label = QtWidgets.QLabel("Show:", self) + self.__show_label.setToolTip("The show you want to find stuck frames for.") + + self.__clear_btn = QtWidgets.QPushButton("Clear") + self.__clear_btn.setFocusPolicy(QtCore.Qt.NoFocus) + self.__clear_btn.setMaximumWidth(150) + self.__clear_btn.setMinimumWidth(150) + + self.__search_btn = QtWidgets.QPushButton("Refresh", self) + self.__search_btn.setMinimumWidth(150) + self.__search_btn.setMaximumWidth(150) + self.__search_btn.setFocusPolicy(QtCore.Qt.NoFocus) + + self.__auto_refresh_btn = QtWidgets.QCheckBox("Auto-refresh", self) + self.__auto_refresh_btn.setToolTip("""Automatically get a new set of + frames approximately every 30 minutes.""") + self.__notification_btn = QtWidgets.QCheckBox("Notification", self) + self.__notification_btn.setEnabled(False) + self.__notification_btn.setToolTip("Get a notification when an auto-refresh has completed.") + + self.__progress = QtWidgets.QProgressBar(self) + self.__progress.setRange(0, 1000) + self.__progress.setMaximumWidth(150) + self.__progress.setMinimumWidth(150) + self.__progress.setMinimumHeight(20) + 
self.__progress.setMaximumHeight(20) + self.__progress.setFocusPolicy(QtCore.Qt.NoFocus) + + self.__progressLabel = QtWidgets.QLabel(self) + self.__progressLabel.setText("Progress: ") + + self.__group_filters = QtWidgets.QGroupBox("Search Filters") + + controls = QtWidgets.QHBoxLayout() + controls.addSpacing(10) + controls.addWidget(self.__show_label) + controls.addWidget(self.__show_combo) + controls.addWidget(self.__search_btn) + controls.addWidget(self.__clear_btn) + controls.addWidget(self.__auto_refresh_btn) + controls.addWidget(self.__notification_btn) + controls.addStretch() + controls.addWidget(self.__progressLabel) + controls.addWidget(self.__progress) + controls.addSpacing(10) + + self.__service_label = QtWidgets.QLabel("Layer Service", self) + self.__service_label.setToolTip("Apply filters to only this service.") + + self.__percent_label = QtWidgets.QLabel("% of Run Since LLU", self) + self.__percent_label.setToolTip("Percentage of the frame's running time spent" + + " with the same last log update.") + + self.__llu_label = QtWidgets.QLabel("Min LLU", self) + self.__llu_label.setToolTip("Only show frames whose last log update is more " + + "than this many minutes ago.") + + self.__completion_label = QtWidgets.QLabel("% of Average Completion Time ", self) + self.__completion_label.setToolTip(""" + Only show frames who are running at this percentage of + the average completion time for the same layer. If there is no + average yet, all frames will qualify. + """) + + self.__run_label = QtWidgets.QLabel("Total Runtime", self) + self.__run_label.setToolTip("Only show frames running for this long") + + self.__exclude_label = QtWidgets.QLabel("Exclude Keywords", self) + self.__exclude_label.setToolTip("Keywords to exclude certain layers or jobs. 
" + + "Separate by commas.") + + self.__enable_label = QtWidgets.QLabel("Enable", self) + self.__enable_label.setToolTip("Uncheck to disable a filter") + + self.__remove_btn = QtWidgets.QPushButton(QtGui.QIcon(":up.png"), "") + self.__remove_btn.setToolTip("Remove Filter") + self.__remove_btn.setFocusPolicy(QtCore.Qt.NoFocus) + self.__remove_btn.setFlat(True) + + self.__labels = QtWidgets.QHBoxLayout() + self.__labels.addSpacing(30) + self.__labels.addWidget(self.__service_label) + self.__labels.addSpacing(90) + self.__labels.addWidget(self.__exclude_label) + self.__labels.addSpacing(10) + self.__labels.addWidget(self.__percent_label) + self.__labels.addWidget(self.__llu_label) + self.__labels.addSpacing(30) + self.__labels.addWidget(self.__completion_label) + self.__labels.addWidget(self.__run_label) + self.__labels.addWidget(self.__enable_label) + self.__labels.addWidget(self.__remove_btn) + self.__labels.addStretch() + + filters = StuckFrameBar(True, self) + self.__all_filters = [filters] + self.showing = True + + filters3 = QtWidgets.QVBoxLayout() + self.filters4 = QtWidgets.QVBoxLayout() + self.filters4.addLayout(self.__labels) + self.filters4.addWidget(filters) + self.filters4.setSpacing(0) + + filters3.addLayout(self.filters4) + + self.__group_filters.setLayout(filters3) + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(self.__group_filters) + layout.addLayout(controls) + + self.connect(self.__show_combo, + QtCore.SIGNAL("currentIndexChanged(QString)"), + self.showChanged) + + self.connect(self.__remove_btn, + QtCore.SIGNAL("clicked()"), + self.hideButtonRequest) + + def addFilter(self): + """Adds new filter.""" + newFilter = StuckFrameBar(self) + self.__all_filters.append(newFilter) + self.filters4.addWidget(newFilter) + + def showChanged(self, show): + """Sets current show the one provided.""" + self.__current_show = opencue.api.findShow(str(show)) + + def getFilterBar(self): + """Returns filter bar.""" + return self.filters4 + + def 
getAllFilters(self): + """Returns all filters.""" + return self.__all_filters + + def hideButtonRequest(self): + """If filters are showed, hides all filters except the first and sets the remove + button icon as downward facing arrow. Otherwise, shows all filters.""" + if self.showing: + self.showing = False + for frame_filter in self.__all_filters: + if not frame_filter.isFirst(): + frame_filter.hide() + self.__remove_btn.setIcon(QtGui.QIcon(":down.png")) + else: + self.openAll() + + def openAll(self): + """Shows all filters and sets the remove button icon as upward facing arrow.""" + self.showing = True + for frame_filter in self.__all_filters: + frame_filter.show() + self.__remove_btn.setIcon(QtGui.QIcon(":up.png")) + + def getRegexString(self): + """Returns regex string.""" + return str(self.__exclude_regex.text()).strip() + + def getSearchButton(self): + """Returns search button.""" + return self.__search_btn + + def getProgress(self): + """Returns progress bar.""" + return self.__progress + + def getClearButton(self): + """Returns clear button.""" + return self.__clear_btn + + def getAutoRefresh(self): + """Returns auto refresh button.""" + return self.__auto_refresh_btn + + def getNotification(self): + """Returns notification button.""" + return self.__notification_btn + + def getShow(self): + """Returns current show.""" + return self.__current_show + + def getFilters(self): + """Returns all filters""" + return self.__all_filters + + def add(self): + """Adds new filter""" + # TODO: check if this is the correct implementation + return self.__all_filters + + +class StuckFrameBar(QtWidgets.QWidget): + """Bar with filters""" + + def __init__(self, first, parent=None): + self.defaults = {'preprocess': [1, 1, 115, 10], 'nuke': [50, 5, 115, 10], + 'arnold': [50, 60, 115, 120]} + + QtWidgets.QWidget.__init__(self, parent) + + self.__percent_spin = QtWidgets.QSpinBox(self) + self.__percent_spin.setRange(1, 100) + self.__percent_spin.setValue(50) + 
self.__percent_spin.setMaximumWidth(100) + self.__percent_spin.setMinimumWidth(100) + self.__percent_spin.setSuffix("%") + self.__percent_spin.setAlignment(QtCore.Qt.AlignRight) + + self.__run_sping = QtWidgets.QSpinBox(self) + self.__run_sping.setRange(1, 50000) + self.__run_sping.setValue(60) + self.__run_sping.setSuffix("min") + self.__run_sping.setAlignment(QtCore.Qt.AlignRight) + + self.__llu_spin = QtWidgets.QSpinBox(self) + self.__llu_spin.setRange(1, 50000) + self.__llu_spin.setValue(30) + self.__llu_spin.setSuffix(" min") + self.__llu_spin.setAlignment(QtCore.Qt.AlignRight) + + self.__completion_spin = QtWidgets.QSpinBox(self) + self.__completion_spin.setRange(1, 50000) + self.__completion_spin.setValue(115) + self.__completion_spin.setMaximumWidth(175) + self.__completion_spin.setMinimumWidth(175) + self.__completion_spin.setSuffix("%") + self.__completion_spin.setAlignment(QtCore.Qt.AlignRight) + + self.__exclude_regex = QtWidgets.QLineEdit(self) + self.__exclude_regex.setMaximumWidth(150) + self.__exclude_regex.setMinimumWidth(150) + + self.__service_type = ServiceBox(self) + self.__service_type.setMaximumWidth(200) + self.__service_type.setMinimumWidth(200) + self.__service_type.setTextMargins(5, 0, 0, 0) + + self.__enable = QtWidgets.QCheckBox(self) + self.__enable.setChecked(True) + + self.__filters = QtWidgets.QHBoxLayout(self) + self.__filters.addWidget(self.__service_type) + self.__filters.addWidget(self.__exclude_regex) + self.__filters.addWidget(self.__percent_spin) + self.__filters.addSpacing(30) + self.__filters.addWidget(self.__llu_spin) + self.__filters.addWidget(self.__completion_spin) + self.__filters.addSpacing(35) + self.__filters.addWidget(self.__run_sping) + self.__filters.addSpacing(25) + self.__filters.addWidget(self.__enable) + + if not first: + self.__remove_btn = QtWidgets.QPushButton(QtGui.QIcon(":kill.png"), "") + self.__remove_btn.setToolTip("Remove Filter") + self.__remove_btn.setFocusPolicy(QtCore.Qt.NoFocus) + 
self.__remove_btn.setFlat(True) + + self.connect(self.__remove_btn, + QtCore.SIGNAL("clicked()"), + self.removeFilter) + self.__filters.addWidget(self.__remove_btn) + self.__isFirst = False + else: + self.__service_type.setText("All (Click + to Add Specific Filter)") + self.__service_type.setReadOnly(True) + self.__add_btn = QtWidgets.QPushButton(QtGui.QIcon('%s/add.png' % + cuegui.Constants.RESOURCE_PATH), "") + self.__add_btn.setToolTip("Add Filter") + self.__add_btn.setFocusPolicy(QtCore.Qt.NoFocus) + self.__add_btn.setFlat(True) + + self.connect(self.__add_btn, + QtCore.SIGNAL("clicked()"), + self.addFilter) + self.__filters.addWidget(self.__add_btn) + self.__isFirst = True + + self.__filters.addStretch() + + def getServiceBox(self): + """Returns service box.""" + return self.__service_type + + def getRegex(self): + """Returns regex.""" + return self.__exclude_regex + + def getServiceBoxText(self): + """Returns service box text.""" + return str(self.__service_type.text()).strip() + + def getRegexText(self): + """Returns regex text.""" + return str(self.__exclude_regex.text()).strip() + + def getEnabled(self): + """Returns enable checkbox.""" + return self.__enable + + def removeFilter(self): + """Removes filter.""" + self.parent().parent().getFilterBar().removeWidget(self) + self.parent().parent().getAllFilters().remove(self) + if len(self.parent().parent().getAllFilters()) == 1: + self.parent().parent().getAllFilters()[0]\ + .getServiceBox().setText("All (Click + to Add Specific Filter)") + self.hide() + + def addFilter(self): + """Adds new filter.""" + newFilter = StuckFrameBar(False, self.parent()) + self.parent().parent().getFilterBar().addWidget(newFilter) + self.parent().parent().getAllFilters().append(newFilter) + self.parent().parent().getAllFilters()[0].getServiceBox().setText("All Other Types") + self.parent().parent().openAll() + self.parent().parent().parent().addConnections(newFilter) + return newFilter + + def isFirst(self): + """Returns true if 
first.""" + return self.__isFirst + + def getFilters(self): + """Returns filters.""" + return self.__filters + + def getTime(self): + """Returns time value as int""" + return int(self.__percent_spin.value()) + + def getMinLLu(self): + """Returns min LLU.""" + return int(self.__llu_spin.value()) + + def getAvgCompTime(self): + """Returns average completion time as int.""" + return int(self.__completion_spin.value()) + + def getMVTime(self): + """Returns MV time as int.""" + return int(self.__mkvid_spin.value()) + + def getPrepTime(self): + """Returns preparation time as int.""" + return int(self.__prep_spin.value()) + + def getLLUFilter(self): + """Returns LLU filter.""" + return self.__llu_spin + + def getPercentFilter(self): + """Returns percent filter.""" + return self.__percent_spin + + def getCompletionFilter(self): + """Return completion filter.""" + return self.__completion_spin + + def getRunFilter(self): + """Returns run filter.""" + return self.__run_sping + + def getRunTime(self): + """Returns run time as int.""" + return int(self.__run_sping.value()) + + def enable(self): + """Enables filters.""" + self.__percent_spin.setEnabled(not self.__percent_spin.isEnabled()) + self.__run_sping.setEnabled(not self.__run_sping.isEnabled()) + self.__llu_spin.setEnabled(not self.__llu_spin.isEnabled()) + self.__completion_spin.setEnabled(not self.__completion_spin.isEnabled()) + self.__exclude_regex.setEnabled(not self.__exclude_regex.isEnabled()) + self.__service_type.setEnabled(not self.__service_type.isEnabled()) + + def checkForDefaults(self): + """If service is in defaults, the filter values will be set to the service.""" + service = str(self.__service_type.text()).strip() + if service in self.defaults: + self.__percent_spin.setValue(self.defaults[service][0]) + self.__llu_spin.setValue(self.defaults[service][1]) + self.__completion_spin.setValue(self.defaults[service][2]) + self.__run_sping.setValue(self.defaults[service][3]) + + +class 
ServiceBox(QtWidgets.QLineEdit): + """ + A text box that auto-completes job names. + """ + + def __init__(self, parent=None): + QtWidgets.QLineEdit.__init__(self, parent) + self.__c = None + self.refresh() + + def refresh(self): + """Refreshes the show list.""" + slist = opencue.api.getDefaultServices() + slist.sort() + self.__c = QtWidgets.QCompleter(slist, self) + self.__c.setCaseSensitivity(QtCore.Qt.CaseInsensitive) + self.setCompleter(self.__c) + + +class StuckFrameWidget(QtWidgets.QWidget): + """ + Displays controls for finding stuck frames and a tree of the findings. + """ + + def __init__(self, parent): + QtWidgets.QWidget.__init__(self, parent) + + self.controls = StuckFrameControls(self) + self.tree = StuckFrameMonitorTree(self) + + layout = QtWidgets.QVBoxLayout(self) + layout.addWidget(self.controls) + layout.addWidget(self.tree) + + if self.tree.enableRefresh: + self.controls.getAutoRefresh().setCheckState(QtCore.Qt.Checked) + + self.connect(self.controls.getAutoRefresh(), + QtCore.SIGNAL('stateChanged(int)'), + self.__refreshToggleCheckBoxHandle) + + self.connect(self.controls.getNotification(), + QtCore.SIGNAL('stateChanged(int)'), + self.__refreshNotificationCheckBoxHandle) + + self.connect(self.tree, + QtCore.SIGNAL("updated()"), + self._refreshButtonDisableHandle) + + self.connect(self.controls.getSearchButton(), + QtCore.SIGNAL("clicked()"), + self.updateRequest) + filters = self.controls.getFilters() + for frame_filter in filters: + self.addConnections(frame_filter) + + self.connect(self.controls.getClearButton(), + QtCore.SIGNAL("clicked()"), + self.clearButtonRequest) + + def addConnections(self, frame_filter): + """Connects to the widget based on the filter provided""" + self.connect(frame_filter.getLLUFilter(), + QtCore.SIGNAL("valueChanged(int)"), + self.updateFilters) + + self.connect(frame_filter.getPercentFilter(), + QtCore.SIGNAL("valueChanged(int)"), + self.updateFilters) + + self.connect(frame_filter.getCompletionFilter(), + 
QtCore.SIGNAL("valueChanged(int)"), + self.updateFilters) + + self.connect(frame_filter.getRunFilter(), + QtCore.SIGNAL("valueChanged(int)"), + self.updateFilters) + + self.connect(frame_filter.getRegex(), + QtCore.SIGNAL("textChanged(QString)"), + self.updateFilters) + + self.connect(frame_filter.getServiceBox(), + QtCore.SIGNAL("textChanged(QString)"), + self.updateFilters) + + self.connect(frame_filter.getEnabled(), + QtCore.SIGNAL("stateChanged(int)"), + self.updateFilters) + + self.connect(frame_filter.getEnabled(), + QtCore.SIGNAL("stateChanged(int)"), + frame_filter.enable) + + self.connect(frame_filter.getServiceBox(), + QtCore.SIGNAL("textChanged(QString)"), + frame_filter.checkForDefaults) + + def __refreshToggleCheckBoxHandle(self, state): + self.tree.enableRefresh = bool(state) + self.controls.getNotification().setEnabled(bool(state)) + + def __refreshNotificationCheckBoxHandle(self, state): + self.tree.enableNotification = bool(state) + + def _refreshButtonEnableHandle(self): + self.controls.getSearchButton().setEnabled(True) + + def _refreshButtonDisableHandle(self): + self.controls.getSearchButton().setEnabled(False) + QtCore.QTimer.singleShot(5000, self._refreshButtonEnableHandle) + + def updateRequest(self): + """Updates filter list with only enabled filters and then updates the tree widget.""" + allFilters = {} + filters = self.controls.getFilters() + for frame_filter in filters: + if frame_filter.getEnabled().isChecked(): + allFilters[frame_filter.getServiceBoxText()] = [frame_filter.getRegexText(), + frame_filter.getTime(), + frame_filter.getMinLLu(), + frame_filter.getAvgCompTime(), + frame_filter.getRunTime()] + + self.tree.updateFilters(allFilters, self.controls.getShow()) + self.tree.setCompleteRefresh(True) + self.tree.updateRequest() + + def updateFilters(self): + """Updates filter list with only enabled filters.""" + allFilters = {} + filters = self.controls.getFilters() + for frame_filter in filters: + if 
frame_filter.getEnabled().isChecked(): + allFilters[frame_filter.getServiceBoxText()] = [ + frame_filter.getRegexText(), + frame_filter.getTime(), + frame_filter.getMinLLu(), + frame_filter.getAvgCompTime(), + frame_filter.getRunTime() + ] + self.tree.updateFilters(allFilters, self.controls.getShow()) + + def clearButtonRequest(self): + """Clears tree widget.""" + self.tree.clearItems() + self.tree.enableRefresh = False + self.controls.getAutoRefresh().setCheckState(QtCore.Qt.Unchecked) + + def getControls(self): + """Returns controls.""" + return self.controls + + +class StuckFrameMonitorTree(cuegui.AbstractTreeWidget.AbstractTreeWidget): + """Tree widget with stuck frames""" + + _updateProgress = QtCore.Signal(int) + _updateProgressMax = QtCore.Signal(int) + _itemSingleClickedComment = QtCore.Signal(QtWidgets.QTreeWidgetItem, int) + + def __init__(self, parent): + self.parent = parent + self.startColumnsForType(cuegui.Constants.TYPE_FRAME) + self.addColumn("Name", 300, id=1, + data=lambda item: (item.data.name or ""), + tip="The job name.") + self.addColumn("_Comment", 20, id=2, + tip="A comment icon will appear if a job has a comment. 
You\n" + "may click on it to view the comments.") + self.addColumn("Frame", 40, id=3, + data=lambda item: (item.number or ""), + tip="Frame number") + self.addColumn("Host", 120, id=4, + data=lambda item: (item.lastResource or ""), + tip="Host the frame is currently running on") + self.addColumn("LLU", 60, id=5, + data=lambda item: (self.numFormat(item.lastLogUpdate, "t") or ""), + tip="Last Log Update") + self.addColumn("Runtime", 60, id=6, + data=lambda item: (self.numFormat(item.timeRunning, "t") or ""), + tip="Length the Frame has been running") + self.addColumn("% Stuck", 50, id=7, + data=lambda item: (self.numFormat(item.stuckness, "f") or ""), + tip="Percent of frame's total runtime that the log has not been updated") + self.addColumn("Average", 60, id=8, + data=lambda item: (self.numFormat(item.averageFrameTime, "t") or ""), + tip="Average time for a frame of this type to complete") + self.addColumn("Last Line", 250, id=9, + data=lambda item: (cuegui.Utils.getLastLine(item.log_path) or ""), + tip="The last line of a running frame's log file.") + + self.startColumnsForType(cuegui.Constants.TYPE_GROUP) + self.addColumn("", 0, id=1, + data=lambda group: (group.data.name), sort=lambda group: (group.data.name)) + self.addColumn("", 0, id=2) + self.addColumn("", 0, id=3) + self.addColumn("", 0, id=4) + self.addColumn("", 0, id=5) + self.addColumn("", 0, id=6) + self.addColumn("", 0, id=7) + self.addColumn("", 0, id=8) + self.addColumn("", 0, id=9) + + cuegui.AbstractTreeWidget.AbstractTreeWidget.__init__(self, parent) + self.procSearch = opencue.search.ProcSearch() + + # Used to build right click context menus + self.__menuActions = cuegui.MenuActions.MenuActions(self, self.updateSoon, + self.selectedObjects, self.getJob) + + self.setDropIndicatorShown(True) + self.setDragEnabled(True) + + self.layer_cache = {} + self.jobs_created = {} + self.groups_created = {} + self.currentHosts = [] + + # Bring Up a comment if it exists + 
self._itemSingleClickedComment.connect(self.__itemSingleClickedComment) + # Set progress bar current value + self._updateProgress.connect(self.updateProgress) + + # Set total number of procs for the progress bar max + self._updateProgressMax.connect(self.updateProgressMax) + + # Don't use the standard space bar to refres + self.disconnect(self.app, + QtCore.SIGNAL('request_update()'), + self.updateRequest) + + self.run_log = LogFinal() + self.frames = {} + self.ticksSinceLogFlush = 0 + self.startTicksUpdate(2000) + + # Don't start refreshing until the user sets a filter or hits refresh + self.ticksWithoutUpdate = -1 + self.enableRefresh = False + self.completeRefresh = False + self.enableNotification = False + + self.runtime_filter = None + self.min_llu_filter = None + self.time_filter = None + self.avg_comp_filter = None + self.excludes = None + self.groups = None + self.showData = None + self.filters = None + + def logIt(self): + """Logs cache to a file.""" + if self.app.threadpool is not None: + print("Stuck Frame Log cache is being written to file.") + self.app.threadpool.queue( + self.run_log.finalize, self.logResult, "Writing out log", self.frames, self.show) + else: + logger.warning("threadpool not found, doing work in gui thread") + + # pylint: disable=missing-function-docstring,unused-argument + def logResult(self, work, rpcObjects): + self.frames = {} + + # pylint: disable=redefined-builtin,inconsistent-return-statements,no-self-use + def numFormat(self, num, type): + """Returns string formatting based on the number""" + if num == "" or num < .001 or num is None: + return "" + if type == "t": + return cuegui.Utils.secondsToHHMMSS(int(num)) + if type == "f": + return "%.2f" % float(num) + + def setCompleteRefresh(self, value): + """Sets complete refresh based on given value.""" + self.completeRefresh = value + + def tick(self): + """Handles update on single tick.""" + if self.ticksSinceLogFlush >= 400 and len(self.frames) > 0: + self.ticksSinceLogFlush = 0 
+ self.logIt() + + if self.completeRefresh: + self.ticksWithoutUpdate = 0 + self.completeRefresh = False + self._update() + return + + if (self.ticksWithoutUpdate % 40 == 0 and + self.ticksWithoutUpdate != self.updateInterval and not self.window().isMinimized()): + self.ticksWithoutUpdate += 1 + if len(self.currentHosts) > 0: + self.confirm(1) + return + + if (self.ticksWithoutUpdate >= self.updateInterval and + self.enableRefresh and not self.window().isMinimized()): + self.ticksWithoutUpdate = 0 + self._update() + if self.enableNotification: + message = QtWidgets.QMessageBox(self) + message.setText("Stuck Frames have refreshed!.") + message.exec_() + return + + self.ticksSinceLogFlush += 1 + if not self.window().isMinimized(): + self.ticksWithoutUpdate += 1 + + def __itemSingleClickedComment(self, item, col): + """If the comment column is clicked on, and there is a comment on the + host, this pops up the comments dialog + @type item: QTreeWidgetItem + @param item: The item clicked on + @type col: int + @param col: The column clicked on""" + commentItem = item.rpcObject + if (col == COMMENT_COLUMN and + cuegui.Utils.isJob(commentItem) and commentItem.data.hasComment): + self.__menuActions.jobs().viewComments([commentItem]) + self.update() + + def updateProgressMax(self, newMax): + """Send an update to the progress bar of the new maximum value""" + self.parent.getControls().getProgress().setMaximum(newMax) + + def updateProgress(self, currentValue): + """Send an update of the current value for the progress bar""" + self.parent.getControls().getProgress().setValue(currentValue) + + def updateSoon(self): + """Returns immediately. 
Causes an update to happen + Constants.AFTER_ACTION_UPDATE_DELAY after calling this function.""" + QtCore.QTimer.singleShot(cuegui.Constants.AFTER_ACTION_UPDATE_DELAY, + self.updateRequest) + + def getJob(self): + """Returns the current job + @return: The current job + @rtype: job""" + return cuegui.Utils.findJob(self.selectedObjects()[0].data.name) + + def clearItems(self): + """Clears all items""" + self.clearSelection() + self.removeAllItems() + self.currentHosts = [] + + def updateRequest(self): + """Updates the items in the TreeWidget if sufficient time has passed + since last updated""" + self.ticksWithoutUpdate = 999 + self.completeRefresh = True + + # pylint: disable=no-self-use + def get_frame_run_time(self, item): + """Returns frame run time.""" + if cuegui.Utils.isProc(item): + start_time = item.data.dispatch_time + elif cuegui.Utils.isFrame(item): + start_time = item.data.start_time + else: + return "" + current_time = time.time() + run_time = current_time - start_time + return run_time + + # pylint: disable=no-self-use + def get_llu_time(self, item): + """Returns LLU time.""" + if cuegui.Utils.isProc(item): + log_file = item.data.log_path + elif cuegui.Utils.isFrame(item): + log_file = item.log_path + else: + return "" + # pylint: disable=broad-except + try: + stat_info = os.path.getmtime(log_file) + except Exception: + return "None" + current_time = time.time() + llu_time = current_time - stat_info + + return llu_time + + def find_layer(self, proc): + """Return layer based on proc.""" + jobName = proc.data.job_name + layerName = proc.data.frame_name.split("-")[1] + key = "%s/%s" % (jobName, layerName) + + if not self.layer_cache.get(key, None): + # pylint: disable=broad-except + try: + self.layer_cache[key] = proc.getLayer() + except Exception: + return "None" + + return self.layer_cache[key] + + def confirm(self, update): + """Confirm frame filter.""" + currentHostsNew = [] + nextIndex = 2 + for index in range(len(self.currentHosts)): + if index == 
nextIndex: + frame = self.currentHosts[index] + nextIndex = nextIndex + 3 + + if frame.service in self.filters.keys(): + self.runtime_filter = self.filters[frame.service][4] + self.min_llu_filter = self.filters[frame.service][2] + self.time_filter = self.filters[frame.service][1] + self.avg_comp_filter = self.filters[frame.service][3] + self.excludes = [x.strip() for x in self.filters[frame.service][0].split(',') + if x != ""] + else: + if "All (Click + to Add Specific Filter)" in self.filters.keys(): + key = "All (Click + to Add Specific Filter)" + elif "All Other Types" in self.filters.keys(): + key = "All Other Types" + else: + continue + self.runtime_filter = self.filters[key][4] + self.min_llu_filter = self.filters[key][2] + self.time_filter = self.filters[key][1] + self.avg_comp_filter = self.filters[key][3] + self.excludes = [x.strip() for x in self.filters[key][0].split(',') if x != ""] + + # layerName = frame.data.layer_name + frameRunTime = self.get_frame_run_time(frame) + # jobName = frame.data.name + lluTime = self.get_llu_time(frame) + avgFrameTime = frame.averageFrameTime + percentStuck = lluTime / frameRunTime + + frame.stuckness = percentStuck + frame.lastLogUpdate = lluTime + frame.timeRunning = frameRunTime + + if ((lluTime > (self.min_llu_filter * 60)) and + (percentStuck * 100 > self.time_filter) and + (frameRunTime > (avgFrameTime * self.avg_comp_filter / 100) and + percentStuck < 1.1 and frameRunTime > 500)): + currentHostsNew.append(self.currentHosts[index - 2]) + currentHostsNew.append(self.currentHosts[index - 1]) + currentHostsNew.append(frame) + + self.currentHosts[:] = [] + self.currentHosts = currentHostsNew + + if update == 1: + self._processUpdate(None, self.currentHosts) + + def _getUpdate(self): + """Returns the proper data from the cuebot""" + # pylint: disable=broad-except,too-many-nested-blocks + try: + treeItems = [] + procs = [] + self.groups = [] + self.procSearch.hosts = [] + self.procSearch.shows = [self.show] + procs = 
opencue.api.getProcs() + + current_prog = 0 + self.emit(QtCore.SIGNAL("updatedProgressMax"), (len(procs))) + self._updateProgressMax.emit(len(procs)) + for proc in procs: + if proc.data.services[0] in self.filters.keys(): + self.runtime_filter = self.filters[proc.data.services[0]][4] + self.min_llu_filter = self.filters[proc.data.services[0]][2] + self.time_filter = self.filters[proc.data.services[0]][1] + self.avg_comp_filter = self.filters[proc.data.services[0]][3] + self.excludes = [x.strip() + for x in self.filters[proc.data.services[0]][0].split(',') + if x != ""] + else: + if "All (Click + to Add Specific Filter)" in self.filters.keys(): + key = "All (Click + to Add Specific Filter)" + elif "All Other Types" in self.filters.keys(): + key = "All Other Types" + else: + continue + self.runtime_filter = self.filters[key][4] + self.min_llu_filter = self.filters[key][2] + self.time_filter = self.filters[key][1] + self.avg_comp_filter = self.filters[key][3] + self.excludes = [x.strip() for x in self.filters[key][0].split(',') if x != ""] + + jobName = proc.data.job_name + (frameNumber, layerName) = proc.data.frame_name.split("-") + + frameRunTime = self.get_frame_run_time(proc) + # frameResource = proc.data.name + + if frameRunTime >= self.runtime_filter * 60: + # Get average completion time of the layer + layer = self.find_layer(proc) + # Skip processing if the layer obj doesn't exist. 
i.e frame finished + if layer == "None": + continue + avgFrameTime = layer.avgFrameTimeSeconds() + + if frameRunTime > (avgFrameTime * self.avg_comp_filter / 100): + # log_path = proc.data.log_path # Get the log file path for last line + lluTime = self.get_llu_time(proc) + if lluTime == "None": + # Skip processing if there was any error with reading + # the log file(path did not exist,permissions) + continue + if lluTime > self.min_llu_filter * 60: + percentStuck = 0 + if frameRunTime > 0: + percentStuck = lluTime / frameRunTime + + if percentStuck * 100 > self.time_filter and percentStuck < 1.1: + please_exclude = False + for exclude in self.excludes: + if (layerName.__contains__(exclude) or + jobName.__contains__(exclude)): + please_exclude = True + continue + + if please_exclude: + continue + + # Job may have finished/killed put in a try + # Injecting into rpcObjects extra data not available via client API + # to support cue3 iceObject backwards capability + try: + frame = opencue.api.findFrame(jobName, + layerName, int(frameNumber)) + frame.data.layer_name = layerName + frame.__dict__['job_name'] = jobName + frame.__dict__['log_path'] = proc.data.log_path + frame.__dict__['number'] = frame.data.number + frame.__dict__['lastLogUpdate'] = lluTime + frame.__dict__['averageFrameTime'] = avgFrameTime + frame.__dict__['stuckness'] = percentStuck + frame.__dict__['timeRunning'] = frameRunTime + frame.__dict__['lastResource'] = frame.data.last_resource + frame.__dict__['service'] = proc.data.services[0] + + job = opencue.api.findJob(jobName) + job.__dict__['log_path'] = job.data.log_dir + job.__dict__['lastLogUpdate'] = "" + job.__dict__['averageFrameTime'] = "" + job.__dict__['number'] = "" + job.__dict__['stuckness'] = "" + job.__dict__['timeRunning'] = "" + job.__dict__['lastResource'] = "" + job.__dict__['hostUsage'] = "" + job.__dict__['service'] = proc.data.services[0] + if self.show == job.data.show: + group = opencue.api.findGroup(self.show, job.data.group) + + 
treeItems.append(group) + treeItems.append(job) + treeItems.append(frame) + except Exception: + # Can safely ignore if a Job has already completed + pass + + current_prog = current_prog + 1 + self._updateProgress.emit(current_prog) + + self._updateProgress.emit(len(procs)) + self.currentHosts[:] = [] + self.currentHosts = treeItems + + self.confirm(0) + + return self.currentHosts + except Exception as e: + print(cuegui.Utils.exceptionOutput(e)) + map(logger.warning, cuegui.Utils.exceptionOutput(e)) + return [] + + def _createItem(self, object, parent=None): + """Creates and returns the proper item + @type object: Host + @param object: The object for this item + @type parent: QTreeWidgetItem + @param parent: Optional parent for this item + @rtype: QTreeWidgetItem + @return: The created item""" + + if cuegui.Utils.isGroup(object): + groupWidget = GroupWidgetItem(object, self) + self.groups_created[object.data.name] = groupWidget # Store parents created + groupWidget.setExpanded(True) + return groupWidget + if cuegui.Utils.isJob(object): + jobWidget = HostWidgetItem(object, self.groups_created[object.data.group]) + self.jobs_created[object.data.name] = jobWidget # Store parents created + jobWidget.setExpanded(True) + return jobWidget + if cuegui.Utils.isFrame(object): + frameWidget = HostWidgetItem(object, + # Find the Job to serve as its parent + self.jobs_created[object.job_name]) + return frameWidget + + def contextMenuEvent(self, e): + """When right clicking on an item, this raises a context menu""" + + menu = QtWidgets.QMenu() + + # Since we want different menu options based on what is chosen, we need to figure this out + isJob = False + isFrame = False + sameJob = True + jobName = None + # isGroup = True + for item in self.selectedObjects(): + if cuegui.Utils.isJob(item): + isJob = True + elif cuegui.Utils.isFrame(item): + isFrame = True + # elif cuegui.Utils.isGroup(item): + # isGroup = True + if not jobName: + jobName = item.data.name + else: + if 
item.data.name != jobName: + sameJob = False + + if isJob and not isFrame and sameJob: + self.__menuActions.jobs().addAction(menu, "viewComments") + self.__menuActions.jobs().addAction(menu, "emailArtist") + menu.addAction( + cuegui.Action.create(self, "Email and Comment", "Email and Comment", + self.emailComment, "mail")) + menu.addSeparator() + menu.addAction(cuegui.Action.create(self, "Job Not Stuck", "Job Not Stuck", + self.RemoveJob, "warning")) + menu.addAction( + cuegui.Action.create(self, "Add Job to Excludes", "Add Job to Excludes", + self.AddJobToExcludes, "eject")) + menu.addAction(cuegui.Action.create(self, "Exclude and Remove Job", + "Exclude and Remove Job", + self.AddJobToExcludesandRemove, "unbookkill")) + menu.addSeparator() + menu.addAction(cuegui.Action.create(self, "Core Up", "Core Up", self.coreup, "up")) + menu.exec_(e.globalPos()) + + if isFrame and not isJob and sameJob: + count = len(self.selectedItems()) + + self.__menuActions.frames().addAction(menu, "tail") + self.__menuActions.frames().addAction(menu, "view") + + if count == 1: + if self.selectedObjects()[0].data.retry_count >= 1: + self.__menuActions.frames().addAction(menu, "viewLastLog") + + if count >= 3: + self.__menuActions.frames().addAction(menu, "xdiff3") + elif count == 2: + self.__menuActions.frames().addAction(menu, "xdiff2") + + if count == 1: + menu.addSeparator() + menu.addAction(cuegui.Action.create(self, "Top Machine", "Top Machine", + self.topMachine, "up")) + if self.app.applicationName() == "CueCommander3": + self.__menuActions.frames().addAction(menu, "viewHost") + + menu.addSeparator() + menu.addAction(cuegui.Action.create(self, "Retry", "Retry", self.retryFrame, "retry")) + menu.addAction(cuegui.Action.create(self, "Eat", "Eat", self.eatFrame, "eat")) + menu.addAction(cuegui.Action.create(self, "Kill", "Kill", self.killFrame, "kill")) + menu.addSeparator() + if count == 1: + menu.addAction(cuegui.Action.create(self, "Log Stuck Frame", "Log Stuck Frame", + 
self.log, "loglast")) + elif count > 1: + menu.addAction(cuegui.Action.create(self, "Log Stuck Frames", "Log Stuck Frames", + self.log, "loglast")) + menu.addAction(cuegui.Action.create(self, "Log and Retry", "Log and Retry", + self.logRetry, "retry")) + menu.addAction(cuegui.Action.create(self, "Log and Eat", "Log and Eat", + self.logEat, "eat")) + menu.addAction(cuegui.Action.create(self, "Log and Kill", "Log and Kill", + self.logKill, "kill")) + menu.addSeparator() + menu.addAction(cuegui.Action.create(self, "Frame Not Stuck", "Frame Not Stuck", + self.remove, "warning")) + menu.addAction( + cuegui.Action.create(self, "Add Job to Excludes", "Add Job to Excludes", + self.AddJobToExcludes, "eject")) + menu.addAction(cuegui.Action.create(self, "Exclude and Remove Job", + "Exclude and Remove Job", + self.AddJobToExcludesandRemove, "unbookkill")) + menu.addSeparator() + menu.addAction(cuegui.Action.create(self, "Core Up", "Core Up", self.coreup, "up")) + + menu.exec_(e.globalPos()) + + def coreup(self): + """PST Menu Plugin entry point.""" + job = self.getJob() + win = CoreUpWindow(self, {job: job.getLayers()}) + win.show() + + def _processUpdate(self, work, rpcObjects): + """A generic function that Will: + Create new TreeWidgetItems if an item does not exist for the object. + Update existing TreeWidgetItems if an item already exists for the object. + Remove items that were not updated with rpcObjects. 
+ @param work: + @type work: from ThreadPool + @param rpcObjects: A list of ice objects + @type rpcObjects: list """ + self._itemsLock.lockForWrite() + try: + updated = [] + for rpcObject in rpcObjects: + updated.append(cuegui.Utils.getObjectKey(rpcObject)) # rpcObject) + # If id already exists, update it + if cuegui.Utils.getObjectKey(rpcObject) in self._items: + self._items[cuegui.Utils.getObjectKey(rpcObject)].update(rpcObject) + # If id does not exist, create it + else: + self._items[cuegui.Utils.getObjectKey(rpcObject)] = self._createItem(rpcObject) + # Remove any items that were not updated + for rpcObject in list(set(self._items.keys()) - set(updated)): + self._removeItem(rpcObject) + self.redraw() + finally: + self._itemsLock.unlock() + + def topMachine(self): + signal.signal(signal.SIGALRM, self.handler) + signal.alarm(int(30)) + + job = self.selectedObjects()[0] + + command = (' xhost ' + job.lastResource.split('/')[0] + '; rsh ' + + job.lastResource.split('/')[0] + ' \"setenv DISPLAY ' + + str(socket.gethostname()).split('.', maxsplit=1)[0] + ':0; xterm -e top\" &') + os.system(command) + signal.alarm(0) + + def remove(self): + currentHostsNew = [] + nextIndex = 2 + for index in range(len(self.currentHosts)): + if index == nextIndex: + + nextIndex = nextIndex + 3 + + if self.currentHosts[index] not in self.selectedObjects(): + currentHostsNew.append(self.currentHosts[index - 2]) + currentHostsNew.append(self.currentHosts[index - 1]) + currentHostsNew.append(self.currentHosts[index]) + self.currentHosts[:] = [] + self.currentHosts = currentHostsNew + # self.currentHosts = [x for x in self.currentHosts if x not in self.selectedObjects() ] - + + self._processUpdate(None, self.currentHosts) + + def emailComment(self): + job = self.getJob() + self.__menuActions.jobs().emailArtist([job]) + job.addComment("Emailed artists", "Emailed Artist but took no further action") + + def logRetry(self): + names = [frame.name() for frame in self.selectedObjects()] + + if 
cuegui.Utils.questionBoxYesNo(self, "Confirm", "Retry selected frames?", names): + self.log() + for frame in self.selectedObjects(): + frame.retry() + self.remove() + + def logEat(self): + names = [frame.name() for frame in self.selectedObjects()] + + if cuegui.Utils.questionBoxYesNo(self, "Confirm", "Eat selected frames?", names): + self.log() + for frame in self.selectedObjects(): + frame.eat() + self.remove() + + def logKill(self): + names = [frame.name() for frame in self.selectedObjects()] + + if cuegui.Utils.questionBoxYesNo(self, "Confirm", "Kill selected frames?", names): + self.log() + for frame in self.selectedObjects(): + frame.kill() + self.remove() + + def retryFrame(self): + names = [frame.name() for frame in self.selectedObjects()] + + if cuegui.Utils.questionBoxYesNo(self, "Confirm", "Retry selected frames?", names): + for frame in self.selectedObjects(): + frame.retry() + self.remove() + + def eatFrame(self): + names = [frame.name() for frame in self.selectedObjects()] + + if cuegui.Utils.questionBoxYesNo(self, "Confirm", "Eat selected frames?", names): + for frame in self.selectedObjects(): + frame.eat() + self.remove() + + def killFrame(self): + names = [frame.name() for frame in self.selectedObjects()] + + if cuegui.Utils.questionBoxYesNo(self, "Confirm", "Kill selected frames?", names): + for frame in self.selectedObjects(): + frame.kill() + self.remove() + + def handler(self): + message = QtWidgets.QMessageBox(self) + message.setText("""Unable to connect to host after 30 sec. + It may need to be put into repair state. 
""") + message.exec_() + + def log(self): + self.ticksSinceLogFlush = 0 + currentJob = self.selectedObjects()[0].data.name + framesForJob = {} + for frame in self.selectedObjects(): + frameData = {} + frameData['layer'] = frame.job_name + frameData['host'] = frame.lastResource + frameData['llu'] = self.get_llu_time(frame) + frameData['runtime'] = self.get_frame_run_time(frame) + frameData['average'] = frame.averageFrameTime + frameData['log'] = cuegui.Utils.getLastLine(frame.log_path) + framesForJob[str(frame.data.number) + '-' + str(time.time())] = frameData + + self.frames[currentJob] = framesForJob + + def AddJobToExcludes(self): + currentJob = self.selectedObjects()[0] + currentJobName = currentJob.data.name + currentJobService = currentJob.service + filters = self.parent().getControls().getFilters() + + key = "" + for filter in filters: + if currentJobService == filter.getServiceBoxText(): + key = currentJobService + filterChange = filter + break + if key == "": + for filter in filters: + if filter.getServiceBoxText() == "All (Click + to Add Specific Filter)": + key = "All (Click + to Add Specific Filter)" + filterChange = filter + break + if key == "": + for filter in filters: + if filter.getServiceBoxText() == "All Other Types": + key = "All Other Types" + filterChange = filter + break + + if len(filterChange.getRegexText()) > 0: + filterChange.getRegex().setText(filterChange.getRegexText() + ", " + currentJobName) + else: + filterChange.getRegex().setText(currentJobName) + + return currentJobName + + def AddJobToExcludesandRemove(self): + self.AddJobToExcludes() + self.RemoveJob() + + def RemoveJob(self): + jobName = self.selectedObjects()[0].data.name + currentHostsNew = [] + nextIndex = 2 + for index in range(len(self.currentHosts)): + if index == nextIndex: + + nextIndex = nextIndex + 3 + + if self.currentHosts[index].data.name != jobName: + currentHostsNew.append(self.currentHosts[index - 2]) + currentHostsNew.append(self.currentHosts[index - 1]) + 
currentHostsNew.append(self.currentHosts[index]) + self.currentHosts[:] = [] + self.currentHosts = currentHostsNew + + self._processUpdate(None, self.currentHosts) + + def startDrag(self, dropActions): + cuegui.Utils.startDrag(self, dropActions, self.selectedObjects()) + + def dragEnterEvent(self, event): + cuegui.Utils.dragEnterEvent(event, "application/x-host-ids") + + def dragMoveEvent(self, event): + cuegui.Utils.dragMoveEvent(event, "application/x-host-ids") + + def updateFilters(self, filters, show): + self.showData = show + self.show = show.data.name + self.filters = filters + + def _removeItem(self, item): + """Removes an item from the TreeWidget without locking + @type item: AbstractTreeWidgetItem or String + @param item: A tree widget item or the string with the id of the item""" + + if item in self._items: + item = self._items[item] + elif not isinstance(item, cuegui.AbstractWidgetItem.AbstractWidgetItem): + # if the parent was already deleted, then this one was too + return + + # If it has children, they must be deleted first + if item.childCount() > 0: + for child in item.takeChildren(): + self._removeItem(child) + + if item.isSelected(): + item.setSelected(False) + + if item.parent(): + self.invisibleRootItem().removeChild(item) + self.takeTopLevelItem(self.indexOfTopLevelItem(item)) + objectClass = item.rpcObject.__class__.__name__ + objectId = item.rpcObject.id() + try: + del self._items['{}.{}'.format(objectClass, objectId)] + except KeyError: + # Dependent jobs are not stored in as keys the main self._items + # dictionary, trying to remove dependent jobs from self._items + # raises a KeyError, which we can safely ignore + pass + + def finalize(self, frames): + + dict = frames + yaml_path = "/shots/" + self.show + "/home/etc/stuck_frames_db.yaml" + + if not os.path.exists(yaml_path): + yaml_ob = open(yaml_path, 'w') + yaml.dump(dict, yaml_ob) + yaml_ob.close() + + else: + yaml_ob = open(yaml_path, 'r') + old_dict = yaml.load(yaml_ob) + 
yaml_ob.close() + + yaml_ob = open(yaml_path, 'w') + + for key in dict: # updates old dict + old_dict[key] = dict[key] + + yaml.dump(old_dict, yaml_ob) + yaml_ob.close() + + +class CommentWidget(QtWidgets.QWidget): + """Represents a comment.""" + def __init__(self, subject, message, parent=None): + QtWidgets.QWidget.__init__(self, parent) + self.__textSubject = subject + self.__textMessage = message + + # def getSubject(self): + # return str(self.__textSubject.text()) + # + # def getMessage(self): + # return str(self.__textMessage.toPlainText()) + + +class GroupWidgetItem(cuegui.AbstractWidgetItem.AbstractWidgetItem): + """Represents a group entry in the MonitorCue widget.""" + __initialized = False + + def __init__(self, rpcObject, parent): + # pylint: disable=protected-access + if not self.__initialized: + self.__class__.__initialized = True + self.__class__.__icon = QtGui.QIcon(":group.png") + self.__class__.__foregroundColor = cuegui.Style.ColorTheme.COLOR_GROUP_FOREGROUND + self.__class__.__backgroundColor = cuegui.Style.ColorTheme.COLOR_GROUP_BACKGROUND + self.__class__.__type = cuegui.Constants.TYPE_GROUP + + cuegui.AbstractWidgetItem.AbstractWidgetItem.__init__( + self, cuegui.Constants.TYPE_GROUP, rpcObject, parent) + + def data(self, col, role): + """Returns the proper display data for the given column and role + @type col: int + @param col: The column being displayed + @type role: QtCore.Qt.ItemDataRole + @param role: The role being displayed + @rtype: QtCore.QVariant + @return: The desired data wrapped in a QVariant""" + if role == QtCore.Qt.DisplayRole: + return self.column_info[col][cuegui.Constants.COLUMN_INFO_DISPLAY](self.rpcObject) + + if role == QtCore.Qt.ForegroundRole: + return self.__foregroundColor + + if role == QtCore.Qt.BackgroundRole: + return self.__backgroundColor + + if role == QtCore.Qt.DecorationRole and col == 0: + return self.__icon + + if role == QtCore.Qt.UserRole: + return self.__type + + return cuegui.Constants.QVARIANT_NULL + 
+ +class LogFinal(): + """Utility class for logging to yaml.""" + # pylint: disable=no-self-use + def finalize(self, frames, show): + """Saves logs to yaml. If file not created, will create one.""" + frames_dict = frames + + yaml_path = "/shots/" + show + "/home/etc/stuck_frames_db.yaml" + if not os.path.exists(yaml_path): + yaml_ob = open(yaml_path, 'w') + yaml.dump(frames_dict, yaml_ob) + yaml_ob.close() + else: + yaml_ob = open(yaml_path, 'r') + old_dict = yaml.load(yaml_ob) + yaml_ob.close() + + yaml_ob = open(yaml_path, 'w') + + for key in frames_dict: # updates old dict + old_dict[key] = frames_dict[key] + + yaml.dump(old_dict, yaml_ob) + yaml_ob.close() + + +class HostWidgetItem(cuegui.AbstractWidgetItem.AbstractWidgetItem): + """Represents a host widget.""" + __initialized = False + # pylint: disable=redefined-builtin,protected-access + def __init__(self, object, parent): + if not self.__initialized: + self.__class__.__initialized = True + self.__class__.__commentIcon = QtGui.QIcon(":comment.png") + self.__class__.__backgroundColor = cuegui.app().palette().color(QtGui.QPalette.Base) + self.__class__.__foregroundColor = cuegui.Style.ColorTheme.COLOR_JOB_FOREGROUND + + cuegui.AbstractWidgetItem.AbstractWidgetItem.__init__(self, cuegui.Constants.TYPE_FRAME, + object, parent) + + def data(self, col, role): + if role == QtCore.Qt.DisplayRole: + if col not in self._cache: + self._cache[col] = self.column_info[col][cuegui.Constants.COLUMN_INFO_DISPLAY]( + self.rpcObject) + return self._cache.get(col, cuegui.Constants.QVARIANT_NULL) + if role == QtCore.Qt.DecorationRole: + # todo: get rpcOject comment!! 
+ if col == COMMENT_COLUMN and cuegui.Utils.isJob(self.rpcObject): + # and self.rpcObject.hasComment: + return self.__commentIcon + elif role == QtCore.Qt.ForegroundRole: + return self.__foregroundColor + return cuegui.Constants.QVARIANT_NULL + + +class CoreUpWindow(QtWidgets.QDialog): + """A dialog box for adding more cores to a job.""" + + # pylint: disable=non-parent-init-called,super-init-not-called + def __init__(self, parent, jobs, selected=False): + QtWidgets.QWidget.__init__(self, parent) + self.setWindowTitle('Core Up') + self.jobs = jobs + self.dj = DJArnold() + self.setupUI(selected) + + def setupUI(self, selected=False): + """Setup the initial dialog box layout.""" + # Create initial layout + build_times = {} + for job, layers in self.jobs.iteritems(): + build_times[job] = self.dj.getBuildTimes(job, layers) + layout = QtWidgets.QVBoxLayout() + self.setLayout(layout) + + self.listWidget = QtWidgets.QListWidget(self) + self._layers = {} + for job, layers in self.jobs.iteritems(): + for layer in layers: + self._layers[layer.name()] = (job, layer) + layer_label = layer.name() + # if build_times.has_key(layer.name()): + # layer_label += ' - %s' % build_times[layer.name()] + listItem = QtWidgets.QListWidgetItem(layer_label) + self.listWidget.addItem(listItem) + self.listWidget.setSelectionMode(3) # Multi Selection Mode + layout.addWidget(self.listWidget) + + buttonLayout = QtWidgets.QHBoxLayout() + self.core2btn = QtWidgets.QPushButton('2 Cores') + self.connect(self.core2btn, QtCore.SIGNAL('clicked()'), self.core2btn_callback) + self.core4btn = QtWidgets.QPushButton('4 Cores') + self.connect(self.core4btn, QtCore.SIGNAL('clicked()'), self.core4btn_callback) + self.core8btn = QtWidgets.QPushButton('8 Cores') + self.connect(self.core8btn, QtCore.SIGNAL('clicked()'), self.core8btn_callback) + buttonLayout.addWidget(self.core2btn) + buttonLayout.addWidget(self.core4btn) + buttonLayout.addWidget(self.core8btn) + layout.addLayout(buttonLayout) + + coreLayout = 
QtWidgets.QHBoxLayout() + self.coreSpinner = QtWidgets.QSpinBox() + self.coreSpinner.setRange(1, 16) + self.coreSpinner.setWrapping(True) + self.coreSpinner.setSingleStep(1) + self.coreUpButton = QtWidgets.QPushButton('Core Up') + self.connect(self.coreUpButton, QtCore.SIGNAL('clicked()'), self.coreUpbtn_callback) + coreLayout.addWidget(self.coreSpinner) + coreLayout.addWidget(self.coreUpButton) + layout.addLayout(coreLayout) + + controlLayout = QtWidgets.QHBoxLayout() + self.retryFramesCB = QtWidgets.QCheckBox('Retry Frames') + self.retryThresholdSpinner = QtWidgets.QSpinBox() + self.retryThresholdSpinner.setRange(0, 100) + self.retryThresholdSpinner.setWrapping(True) + self.retryThresholdSpinner.setSingleStep(5) + self.retryThresholdSpinner.setSuffix('%') + self.retryThresholdSpinner.setEnabled(False) + self.retryThresholdSpinner.setValue(70) + controlLayout.addWidget(self.retryFramesCB) + controlLayout.addWidget(self.retryThresholdSpinner) + layout.addLayout(controlLayout) + + self.connect(self.retryFramesCB, QtCore.SIGNAL('stateChanged(int)'), + self.retryFrameCB_callback) + + if selected: + self.listWidget.selectAll() + + def selectedLayers(self): + """Return a list of selected layer rpcObjects.""" + indexs = map(lambda x: str(x.text()), self.listWidget.selectedItems()) + return [self._layers[index] for index in indexs] + + def coreup(self, cores): + """Set Min Cores to cores for all selected layers of job.""" + for job, layer in self.selectedLayers(): + print("Setting max cores to %d for %s" % (cores, layer.name())) + layer.setMinCores(cores * 1.0) + time.sleep(CUE_SLEEP) + if self.retryFramesCB.isChecked(): + fs = opencue.search.FrameSearch() + fs.state = [opencue.wrappers.frame.Frame().FrameState(2)] + frames = layer.getFrames(fs) + for frame in frames: + precentage = self.dj.getCompletionAmount(job, frame) + if precentage >= 0: + if precentage < self.retryThresholdSpinner.value(): + print('Retrying frame %s %s' % (job.name(), frame.frame())) + frame.kill() 
+ time.sleep(CUE_SLEEP) + self.close() + + def core2btn_callback(self): + """2 Core Button Callback.""" + self.coreup(2) + + def core4btn_callback(self): + """4 Core Button Callback.""" + self.coreup(4) + + def core8btn_callback(self): + """8 Core Button Callback.""" + self.coreup(8) + + def coreUpbtn_callback(self): + """Core Up Button Callback.""" + cores = int(self.coreSpinner.value()) + self.coreup(cores) + + def retryFrameCB_callback(self, value): + """Retries frame if value is given.""" + if value: + self.retryThresholdSpinner.setEnabled(True) + else: + self.retryThresholdSpinner.setEnabled(False) + + +class DJArnold(object): + """Represents arnold engine.""" + completion_pattern = re.compile( + # pylint: disable=line-too-long + r'[INFO BatchMain]: [0-9][0-9]:[0-9][0-9]:[0-9][0-9] [0-9]{1,8}mb | (?P<total>[0-9]{1,3})% done - [0-9]{1,5} rays/pixel') + + def __init__(self, show=None): + if not show: + show = os.environ.get('SHOW') + self.show = show + + # pylint: disable=no-self-use + def getLog(self, job, frame): + """Return the contents of a log given a job and a frame.""" + log_dir = job.logDir() + log_name = '%s.%s.rqlog' % (job.name(), frame.data.name) + log_file = os.path.join(log_dir, log_name) + if not os.path.exists(log_file): + return [] + f = open(log_file, 'r') + log_lines = [line.strip() for line in f.readlines() if line.strip()] + f.close() + return log_lines + + def getBuildTimes(self, job, layers=None): + """Return a dictionary with layer names as keys, and build times as + values.
+ """ + results = {} + if not layers: + layers = job.getLayers() + for layer in layers: + if isinstance(layer, str): + layer = job.getLayer(layer) + if 'preprocess' in layer.name(): + continue + built_frames = [] + cores = 0 + cores_list = [] + fs = opencue.search.FrameSearch() + fs.states = [opencue.wrappers.frame.Frame().FrameState(3)] + frames = layer.getFrames(fs) + if not frames: + fs.states = [opencue.wrappers.frame.Frame().FrameState(2)] + frames = layer.getFrames(fs) + for frame in frames: + frame_cores = float(frame.lastResource.split('/')[1]) + if frame_cores != cores: + if frame_cores not in cores_list: + built_frames.append((frame, frame_cores)) + cores_list.append(frame_cores) + build_times = [] + for frame, cores in built_frames: + log_lines = self.getLog(job, frame) + for line in log_lines: + if "[kat] Building scene done." in line: + line = line.replace('[INFO BatchMain]: ', '') + build_time = line.split()[0] + hours, minutes, seconds = build_time.split(':') + seconds = int(seconds) + seconds += (int(minutes) * 60) + seconds += (int(hours) * 3600) + build_times.append(seconds) + if build_times: + avg = sum(build_times) / len(build_times) + seconds = int(avg % 60) + minutes = int((avg / 60) % 60) + hours = int(avg / 3600) + results[layer.name()] = (layer, datetime.time(hours, minutes, seconds)) + return results + + def getCompletionAmount(self, job, frame): + """Return an integer representing the last reported completed percentage on arnold job.""" + log_lines = self.getLog(job, frame) + log_lines.reverse() + complete = -1 + for line in log_lines: + if line.startswith('[INFO BatchMain]:'): + matches = self.completion_pattern.search(line) + if matches: + complete = int(matches.group('total')) + break + return complete diff --git a/cuegui/cuegui/plugins/SubscriptionsGraphPlugin.py b/cuegui/cuegui/plugins/SubscriptionsGraphPlugin.py index 80dde33ca..a8c0078ef 100644 --- a/cuegui/cuegui/plugins/SubscriptionsGraphPlugin.py +++
b/cuegui/cuegui/plugins/SubscriptionsGraphPlugin.py @@ -20,8 +20,8 @@ from __future__ import division from __future__ import print_function -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.AbstractDockWidget import cuegui.SubscriptionGraphWidget diff --git a/cuegui/cuegui/plugins/SubscriptionsPlugin.py b/cuegui/cuegui/plugins/SubscriptionsPlugin.py index 7c0d11731..e40081802 100644 --- a/cuegui/cuegui/plugins/SubscriptionsPlugin.py +++ b/cuegui/cuegui/plugins/SubscriptionsPlugin.py @@ -20,8 +20,8 @@ from __future__ import division from __future__ import absolute_import -from PySide2 import QtCore -from PySide2 import QtWidgets +from qtpy import QtCore +from qtpy import QtWidgets import cuegui.AbstractDockWidget import cuegui.SubscriptionsWidget diff --git a/cuegui/setup.py b/cuegui/setup.py index eff519b23..2068936e0 100644 --- a/cuegui/setup.py +++ b/cuegui/setup.py @@ -46,6 +46,8 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], packages=find_packages(), package_data={ @@ -63,6 +65,7 @@ 'grpcio-tools', 'PySide2', 'PyYAML', + 'QtPy', ] ) diff --git a/cuegui/tests/Comments_tests.py b/cuegui/tests/Comments_tests.py new file mode 100644 index 000000000..29bdf613f --- /dev/null +++ b/cuegui/tests/Comments_tests.py @@ -0,0 +1,74 @@ +# Copyright (c) OpenCue Project Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +"""Tests for cuegui.Comments.""" + + +import time +import unittest + +import mock + +from qtpy import QtCore +from qtpy import QtWidgets + +import opencue.compiled_proto.comment_pb2 +import opencue.compiled_proto.job_pb2 +import opencue.wrappers.comment +import opencue.wrappers.job + +import cuegui.Comments +import cuegui.Style + +from . import test_utils + + +@mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) +class CommentsTests(unittest.TestCase): + @mock.patch('opencue.cuebot.Cuebot.getStub') + def setUp(self, getStubMock): + app = test_utils.createApplication() + app.settings = QtCore.QSettings() + cuegui.Style.init() + + commentProto = opencue.compiled_proto.comment_pb2.Comment( + id='comment-id-1', timestamp=int(time.time()), user='user-who-made-comment', + subject='comment-subject', message='this is the comment message body') + self.comment = opencue.wrappers.comment.Comment(commentProto) + getStubMock.return_value.GetComments.return_value = \ + opencue.compiled_proto.job_pb2.JobGetCommentsResponse( + comments=opencue.compiled_proto.comment_pb2.CommentSeq(comments=[commentProto])) + + self.job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name='fooJob')) + self.parentWidget = QtWidgets.QWidget() + self.commentListDialog = cuegui.Comments.CommentListDialog( + self.job, parent=self.parentWidget) + + def test_shouldDisplayComment(self): + self.assertEqual( + 1, self.commentListDialog._CommentListDialog__treeSubjects.topLevelItemCount()) + gotTreeWidgetItem = self.commentListDialog._CommentListDialog__treeSubjects.topLevelItem(0) + gotComment = gotTreeWidgetItem._Comment__comment + self.assertEqual(self.comment.timestamp(), gotComment.timestamp()) + self.assertEqual(self.comment.user(), gotComment.user()) + self.assertEqual(self.comment.subject(), gotComment.subject()) + self.assertEqual(self.comment.message(), 
gotComment.message()) + + def test_shouldRefreshJobComments(self): + self.job.getComments = mock.Mock(return_value=[]) + + self.commentListDialog.refreshComments() + + self.job.getComments.assert_called() diff --git a/cuegui/tests/Constants_tests.py b/cuegui/tests/Constants_tests.py new file mode 100644 index 000000000..3cfe3866f --- /dev/null +++ b/cuegui/tests/Constants_tests.py @@ -0,0 +1,171 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Tests for cuegui.Constants""" + + +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +import importlib +import os + +import mock +import pyfakefs.fake_filesystem_unittest +from qtpy import QtGui + +import opencue +import cuegui.Constants + + +CONFIG_YAML = ''' +unused_setting: some value +version: 98.707.68 +refresh.job_update_delay: 30000 + +logger.level: INFO +''' + + +# pylint: disable=import-outside-toplevel,redefined-outer-name,reimported +class ConstantsTests(pyfakefs.fake_filesystem_unittest.TestCase): + def setUp(self): + self.setUpPyfakefs() + self.fs.add_real_file( + os.path.join(os.path.dirname(cuegui.__file__), 'config', 'cuegui.yaml'), read_only=True) + if 'CUEGUI_CONFIG_FILE' in os.environ: + del os.environ['CUEGUI_CONFIG_FILE'] + + def test__should_load_user_config_from_env_var(self): + config_file_path = '/path/to/config.yaml' + self.fs.create_file(config_file_path, contents=CONFIG_YAML) + os.environ['CUEGUI_CONFIG_FILE'] = config_file_path + + import cuegui.Constants + result = importlib.reload(cuegui.Constants) + + self.assertEqual('98.707.68', result.VERSION) + self.assertEqual(30000, result.JOB_UPDATE_DELAY) + self.assertEqual(10000, result.LAYER_UPDATE_DELAY) + + @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) + @mock.patch('os.path.expanduser', new=mock.Mock(return_value='/home/username')) + def test__should_load_user_config_from_user_profile(self): + config_file_path = '/home/username/.config/opencue/cuegui.yaml' + self.fs.create_file(config_file_path, contents=CONFIG_YAML) + + import cuegui.Constants + result = importlib.reload(cuegui.Constants) + + self.assertEqual('98.707.68', result.VERSION) + self.assertEqual(30000, result.JOB_UPDATE_DELAY) + self.assertEqual(10000, result.LAYER_UPDATE_DELAY) + + @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) + def test__should_use_default_values(self): + import cuegui.Constants + result = 
importlib.reload(cuegui.Constants) + + self.assertNotEqual('98.707.68', result.VERSION) + self.assertEqual(0, result.STARTUP_NOTICE_DATE) + self.assertEqual('', result.STARTUP_NOTICE_MSG) + self.assertEqual(10000, result.JOB_UPDATE_DELAY) + self.assertEqual(10000, result.LAYER_UPDATE_DELAY) + self.assertEqual(10000, result.FRAME_UPDATE_DELAY) + self.assertEqual(20000, result.HOST_UPDATE_DELAY) + self.assertEqual(1000, result.AFTER_ACTION_UPDATE_DELAY) + self.assertEqual(5, result.MINIMUM_UPDATE_INTERVAL) + self.assertEqual('Luxi Sans', result.FONT_FAMILY) + self.assertEqual(10, result.FONT_SIZE) + self.assertEqual( + os.path.join(os.path.dirname(cuegui.__file__), 'images'), result.RESOURCE_PATH) + self.assertEqual( + os.path.join(os.path.dirname(cuegui.__file__), 'config'), result.CONFIG_PATH) + self.assertEqual( + os.path.join(os.path.dirname(cuegui.__file__), 'config'), result.DEFAULT_INI_PATH) + self.assertEqual( + [os.path.join(os.path.dirname(cuegui.__file__), 'plugins')], + result.DEFAULT_PLUGIN_PATHS) + self.assertEqual('%(levelname)-9s %(module)-10s %(message)s', result.LOGGER_FORMAT) + self.assertEqual('WARNING', result.LOGGER_LEVEL) + self.assertEqual('cuemail: please check ', result.EMAIL_SUBJECT_PREFIX) + self.assertEqual('Your PSTs request that you check ', result.EMAIL_BODY_PREFIX) + self.assertEqual('\n\n', result.EMAIL_BODY_SUFFIX) + self.assertEqual('', result.EMAIL_DOMAIN) + self.assertEqual( + 'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new', + result.GITHUB_CREATE_ISSUE_URL) + self.assertEqual('https://www.opencue.io/docs/', result.URL_USERGUIDE) + self.assertEqual( + 'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new' + '?labels=enhancement&template=enhancement.md', result.URL_SUGGESTION) + self.assertEqual( + 'https://github.com/AcademySoftwareFoundation/OpenCue/issues/new' + '?labels=bug&template=bug_report.md', result.URL_BUG) + self.assertEqual( + 'gview -R -m -M -U %s +' % os.path.join( + 
os.path.dirname(cuegui.__file__), 'config', 'gvimrc'), + result.DEFAULT_EDITOR) + self.assertEqual({ + 'rhel7': '/shots', + 'linux': '/shots', + 'windows': 'S:', + 'mac': '/Users/shots', + 'darwin': '/Users/shots', + }, result.LOG_ROOT_OS) + self.assertEqual(( + 'general', 'desktop', 'playblast', 'util', 'preprocess', 'wan', 'cuda', 'splathw', + 'naiad', 'massive'), result.ALLOWED_TAGS) + self.assertEqual( + os.path.join(os.path.dirname(cuegui.__file__), 'config', 'darkpalette.qss'), + result.DARK_STYLE_SHEET) + self.assertEqual('plastique', result.COLOR_THEME) + self.assertEqual(QtGui.QColor(50, 50, 100), result.COLOR_USER_1) + self.assertEqual(QtGui.QColor(100, 100, 50), result.COLOR_USER_2) + self.assertEqual(QtGui.QColor(0, 50, 0), result.COLOR_USER_3) + self.assertEqual(QtGui.QColor(50, 30, 00), result.COLOR_USER_4) + self.assertEqual({ + opencue.api.job_pb2.DEAD: QtGui.QColor(255, 0, 0), + opencue.api.job_pb2.DEPEND: QtGui.QColor(160, 32, 240), + opencue.api.job_pb2.EATEN: QtGui.QColor(150, 0, 0), + opencue.api.job_pb2.RUNNING: QtGui.QColor(200, 200, 55), + opencue.api.job_pb2.SETUP: QtGui.QColor(160, 32, 240), + opencue.api.job_pb2.SUCCEEDED: QtGui.QColor(55, 200, 55), + opencue.api.job_pb2.WAITING: QtGui.QColor(135, 207, 235), + opencue.api.job_pb2.CHECKPOINT: QtGui.QColor(61, 98, 247), + }, result.RGB_FRAME_STATE) + self.assertEqual(5242880, result.MEMORY_WARNING_LEVEL) + self.assertEqual( + ['error', 'aborted', 'fatal', 'failed', 'killed', 'command not found', + 'no licenses could be found', 'killMessage'], result.LOG_HIGHLIGHT_ERROR) + self.assertEqual(['warning', 'not found'], result.LOG_HIGHLIGHT_WARN) + self.assertEqual(['info:', 'rqd cmd:'], result.LOG_HIGHLIGHT_INFO) + self.assertEqual(2147483647, result.QT_MAX_INT) + self.assertEqual({ + 'max_cores': 32, + 'max_gpu_memory': 128, + 'max_gpus': 8, + 'max_memory': 128, + 'max_proc_hour_cutoff': 30, + 'redirect_wasted_cores_threshold': 100, + }, result.RESOURCE_LIMITS) + + 
@mock.patch('platform.system', new=mock.Mock(return_value='Darwin')) + def test__should_use_mac_editor(self): + import cuegui.Constants + result = importlib.reload(cuegui.Constants) + + self.assertEqual('open -t', result.DEFAULT_EDITOR) diff --git a/cuegui/tests/CueJobMonitorTree_tests.py b/cuegui/tests/CueJobMonitorTree_tests.py index 87dd0a635..2698ccedc 100644 --- a/cuegui/tests/CueJobMonitorTree_tests.py +++ b/cuegui/tests/CueJobMonitorTree_tests.py @@ -19,9 +19,9 @@ import unittest import mock -import PySide2.QtCore -import PySide2.QtGui -import PySide2.QtWidgets +import qtpy.QtCore +import qtpy.QtGui +import qtpy.QtWidgets import opencue.compiled_proto.job_pb2 import opencue.compiled_proto.show_pb2 @@ -39,8 +39,8 @@ class CueJobMonitorTreeTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub') def setUp(self, get_stub_mock): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() self.show_name = 'arbitrary-show-name' @@ -60,7 +60,7 @@ def setUp(self, get_stub_mock): name=self.show_name, jobs=self.jobs)) - self.main_window = PySide2.QtWidgets.QMainWindow() + self.main_window = qtpy.QtWidgets.QMainWindow() self.widget = cuegui.plugins.MonitorCuePlugin.MonitorCueDockWidget(self.main_window) self.cue_job_monitor_tree = cuegui.CueJobMonitorTree.CueJobMonitorTree(self.widget) self.cue_job_monitor_tree.addShow(self.show_name) diff --git a/cuegui/tests/DependWizard_tests.py b/cuegui/tests/DependWizard_tests.py index 0e8667460..5efb49f74 100644 --- a/cuegui/tests/DependWizard_tests.py +++ b/cuegui/tests/DependWizard_tests.py @@ -19,10 +19,10 @@ import unittest import mock -import PySide2.QtCore -import PySide2.QtGui -import PySide2.QtWidgets -import PySide2.QtTest +import qtpy.QtCore +import qtpy.QtGui +import qtpy.QtWidgets +import qtpy.QtTest import opencue.compiled_proto.job_pb2 import opencue.wrappers.frame @@ 
-40,11 +40,11 @@ class DependWizardTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() - self.parentWidget = PySide2.QtWidgets.QWidget() + self.parentWidget = qtpy.QtWidgets.QWidget() @mock.patch('cuegui.Cuedepend.createJobOnLayerDepend') @mock.patch('opencue.api.findJob') diff --git a/cuegui/tests/FilterDialog_tests.py b/cuegui/tests/FilterDialog_tests.py index 071a2dab9..536d65ec4 100644 --- a/cuegui/tests/FilterDialog_tests.py +++ b/cuegui/tests/FilterDialog_tests.py @@ -20,10 +20,10 @@ import mock -import PySide2.QtCore -import PySide2.QtGui -import PySide2.QtWidgets -import PySide2.QtTest +import qtpy.QtCore +import qtpy.QtGui +import qtpy.QtWidgets +import qtpy.QtTest import opencue.compiled_proto.show_pb2 import opencue.compiled_proto.filter_pb2 @@ -41,8 +41,8 @@ class FilterDialogTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub') def setUp(self, getStubMock): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() self.show = opencue.wrappers.show.Show(opencue.compiled_proto.show_pb2.Show(name='fooShow')) @@ -54,7 +54,7 @@ def setUp(self, getStubMock): opencue.compiled_proto.show_pb2.ShowGetFiltersResponse( filters=opencue.compiled_proto.filter_pb2.FilterSeq(filters=[filterProto])) - self.parentWidget = PySide2.QtWidgets.QWidget() + self.parentWidget = qtpy.QtWidgets.QWidget() self.filterDialog = cuegui.FilterDialog.FilterDialog(self.show, parent=self.parentWidget) def test_shouldTriggerRefresh(self): @@ -64,7 +64,7 @@ def test_shouldTriggerRefresh(self): self.show.getFilters.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + 
@mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_shouldAddFilter(self, getTextMock): newFilterId = 'new-filter-id' newFilterName = 'new-filter-name' @@ -78,7 +78,7 @@ def test_shouldAddFilter(self, getTextMock): self.show.createFilter.assert_called_with(newFilterName) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_shouldCancelAddingFilter(self, getTextMock): self.show.createFilter = mock.Mock() getTextMock.return_value = (None, False) @@ -142,11 +142,11 @@ def test_shouldTriggerCreateAction(self): self.filterDialog._FilterDialog__actions.createAction.assert_called() def test_shouldCloseDialog(self): - self.assertEqual(PySide2.QtWidgets.QDialog.DialogCode.Rejected, self.filterDialog.result()) + self.assertEqual(qtpy.QtWidgets.QDialog.DialogCode.Rejected, self.filterDialog.result()) self.filterDialog._FilterDialog__btnDone.click() - self.assertEqual(PySide2.QtWidgets.QDialog.DialogCode.Accepted, self.filterDialog.result()) + self.assertEqual(qtpy.QtWidgets.QDialog.DialogCode.Accepted, self.filterDialog.result()) @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) @@ -154,8 +154,8 @@ class FilterMonitorTreeTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub') def setUp(self, getStubMock): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() show = opencue.wrappers.show.Show(opencue.compiled_proto.show_pb2.Show(name='fooShow')) @@ -169,7 +169,7 @@ def setUp(self, getStubMock): opencue.compiled_proto.show_pb2.ShowGetFiltersResponse( filters=opencue.compiled_proto.filter_pb2.FilterSeq(filters=filters)) - self.parentWidget = PySide2.QtWidgets.QWidget() + self.parentWidget = qtpy.QtWidgets.QWidget() self.filterDialog = cuegui.FilterDialog.FilterDialog(show, parent=self.parentWidget) self.filterMonitorTree = 
self.filterDialog._FilterDialog__filters @@ -182,13 +182,13 @@ def test_shouldPopulateFiltersList(self): self.assertEqual('2', secondItem.text(0)) self.assertEqual(False, self.filterMonitorTree.itemWidget(secondItem, 1).isChecked()) - @mock.patch('PySide2.QtWidgets.QMenu') + @mock.patch('qtpy.QtWidgets.QMenu') def test_shouldRaiseContextMenu(self, qMenuMock): filterBeingSelected = self.filterMonitorTree.topLevelItem(0) self.filterMonitorTree.contextMenuEvent( - PySide2.QtGui.QContextMenuEvent( - PySide2.QtGui.QContextMenuEvent.Reason.Mouse, + qtpy.QtGui.QContextMenuEvent( + qtpy.QtGui.QContextMenuEvent.Reason.Mouse, self.filterMonitorTree.visualItemRect(filterBeingSelected).center())) qMenuMock.return_value.exec_.assert_called() @@ -199,8 +199,8 @@ class MatcherMonitorTreeTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub') def setUp(self, getStubMock): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() self.matchers = [ @@ -219,7 +219,7 @@ def setUp(self, getStubMock): opencue.wrappers.filter.Matcher(matcher) for matcher in self.matchers] self.filter = opencue.wrappers.filter.Filter(opencue.compiled_proto.filter_pb2.Filter()) - self.parentWidget = PySide2.QtWidgets.QWidget() + self.parentWidget = qtpy.QtWidgets.QWidget() self.matcherMonitorTree = cuegui.FilterDialog.MatcherMonitorTree(None, self.parentWidget) def test_shouldPopulateMatchersList(self): @@ -238,8 +238,8 @@ def test_shouldPopulateMatchersList(self): self.assertEqual('IS', self.matcherMonitorTree.itemWidget(secondItem, 1).currentText()) self.assertEqual('showName', self.matcherMonitorTree.itemWidget(secondItem, 2).text()) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def 
test_shouldAddMatcher(self, getItemMock, getTextMock): matcherSubject = opencue.compiled_proto.filter_pb2.FACILITY matcherType = opencue.compiled_proto.filter_pb2.CONTAINS @@ -270,8 +270,8 @@ def test_shouldAddMatcher(self, getItemMock, getTextMock): 'CONTAINS', self.matcherMonitorTree.itemWidget(matcherWidget, 1).currentText()) self.assertEqual(matcherText, self.matcherMonitorTree.itemWidget(matcherWidget, 2).text()) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_shouldCancelMatcherAdditionAtFirstPrompt(self, getItemMock, getTextMock): self.filter.createMatcher = mock.Mock() getItemMock.side_effect = [ @@ -285,8 +285,8 @@ def test_shouldCancelMatcherAdditionAtFirstPrompt(self, getItemMock, getTextMock self.filter.createMatcher.assert_not_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_shouldCancelMatcherAdditionAtSecondPrompt(self, getItemMock, getTextMock): self.filter.createMatcher = mock.Mock() getItemMock.side_effect = [ @@ -300,8 +300,8 @@ def test_shouldCancelMatcherAdditionAtSecondPrompt(self, getItemMock, getTextMoc self.filter.createMatcher.assert_not_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_shouldCancelMatcherAdditionAtThirdrompt(self, getItemMock, getTextMock): self.filter.createMatcher = mock.Mock() getItemMock.side_effect = [ @@ -316,8 +316,8 @@ def test_shouldCancelMatcherAdditionAtThirdrompt(self, getItemMock, getTextMock) self.filter.createMatcher.assert_not_called() @mock.patch( - 
'PySide2.QtWidgets.QMessageBox.question', - new=mock.Mock(return_value=PySide2.QtWidgets.QMessageBox.Yes)) + 'qtpy.QtWidgets.QMessageBox.question', + new=mock.Mock(return_value=qtpy.QtWidgets.QMessageBox.Yes)) def test_shouldDeleteAllMatchers(self): self.filter.getMatchers = mock.Mock(return_value=self.matcherWrappers) for matcher in self.matcherWrappers: @@ -330,8 +330,8 @@ def test_shouldDeleteAllMatchers(self): matcher.delete.assert_called() @mock.patch( - 'PySide2.QtWidgets.QMessageBox.question', - new=mock.Mock(return_value=PySide2.QtWidgets.QMessageBox.No)) + 'qtpy.QtWidgets.QMessageBox.question', + new=mock.Mock(return_value=qtpy.QtWidgets.QMessageBox.No)) def test_shouldNotDeleteAnyMatchers(self): self.filter.getMatchers = mock.Mock(return_value=self.matcherWrappers) for matcher in self.matcherWrappers: @@ -345,7 +345,7 @@ def test_shouldNotDeleteAnyMatchers(self): @mock.patch('cuegui.Utils.questionBoxYesNo', new=mock.Mock(return_value=True)) @mock.patch('cuegui.TextEditDialog.TextEditDialog') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_shouldAddMultipleMatchers(self, getItemMock, textEditDialogMock): matcherSubject = opencue.compiled_proto.filter_pb2.SHOT matcherType = opencue.compiled_proto.filter_pb2.IS @@ -382,7 +382,7 @@ def test_shouldAddMultipleMatchers(self, getItemMock, textEditDialogMock): @mock.patch('cuegui.Utils.questionBoxYesNo', new=mock.Mock(return_value=True)) @mock.patch('cuegui.TextEditDialog.TextEditDialog') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_shouldReplaceAllMatchers(self, getItemMock, textEditDialogMock): matcherSubject = opencue.compiled_proto.filter_pb2.SHOT matcherType = opencue.compiled_proto.filter_pb2.IS diff --git a/cuegui/tests/FrameMonitorTree_tests.py b/cuegui/tests/FrameMonitorTree_tests.py index e28e8229f..75521572f 100644 --- a/cuegui/tests/FrameMonitorTree_tests.py +++ 
b/cuegui/tests/FrameMonitorTree_tests.py @@ -19,10 +19,10 @@ import unittest import mock -import PySide2.QtCore -import PySide2.QtGui -import PySide2.QtTest -import PySide2.QtWidgets +import qtpy.QtCore +import qtpy.QtGui +import qtpy.QtTest +import qtpy.QtWidgets import opencue.compiled_proto.job_pb2 import opencue.wrappers.frame @@ -43,11 +43,10 @@ class FrameMonitorTreeTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): - test_utils.createApplication() - - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() - self.parentWidget = PySide2.QtWidgets.QWidget() + self.parentWidget = qtpy.QtWidgets.QWidget() self.frameMonitorTree = cuegui.FrameMonitorTree.FrameMonitorTree(self.parentWidget) self.job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(id='foo')) self.frameMonitorTree.setJob(self.job) @@ -120,18 +119,18 @@ def test_tickFullUpdate(self, getFramesMock, getUpdatedFramesMock): def test_getCores(self): frame = opencue.wrappers.frame.Frame( - opencue.compiled_proto.job_pb2.Frame(last_resource='foo/125.82723')) + opencue.compiled_proto.job_pb2.Frame(last_resource='foo/125.82723/0')) self.assertEqual(125.82723, self.frameMonitorTree.getCores(frame)) self.assertEqual('125.83', self.frameMonitorTree.getCores(frame, format_as_string=True)) @mock.patch.object(cuegui.FrameMonitorTree.FrameContextMenu, 'exec_') def test_rightClickItem(self, execMock): - mouse_position = PySide2.QtCore.QPoint() + mouse_position = qtpy.QtCore.QPoint() self.frameMonitorTree.contextMenuEvent( - PySide2.QtGui.QContextMenuEvent( - PySide2.QtGui.QContextMenuEvent.Reason.Mouse, mouse_position, mouse_position)) + qtpy.QtGui.QContextMenuEvent( + qtpy.QtGui.QContextMenuEvent.Reason.Mouse, mouse_position, mouse_position)) execMock.assert_called_with(mouse_position) @@ -153,7 +152,7 @@ def setUp(self): 
checkpoint_state=opencue.compiled_proto.job_pb2.ENABLED)) # The widget needs a var, otherwise it gets garbage-collected before tests can run. - parentWidget = PySide2.QtWidgets.QWidget() + parentWidget = qtpy.QtWidgets.QWidget() self.frameWidgetItem = cuegui.FrameMonitorTree.FrameWidgetItem( self.frame, @@ -166,46 +165,46 @@ def test_data(self): self.assertEqual( self.dispatch_order, - self.frameWidgetItem.data(dispatch_order_col, PySide2.QtCore.Qt.DisplayRole)) + self.frameWidgetItem.data(dispatch_order_col, qtpy.QtCore.Qt.DisplayRole)) self.assertEqual( cuegui.Style.ColorTheme.COLOR_JOB_FOREGROUND, - self.frameWidgetItem.data(dispatch_order_col, PySide2.QtCore.Qt.ForegroundRole)) + self.frameWidgetItem.data(dispatch_order_col, qtpy.QtCore.Qt.ForegroundRole)) self.assertEqual( cuegui.FrameMonitorTree.QCOLOR_BLACK, self.frameWidgetItem.data( - cuegui.FrameMonitorTree.STATUS_COLUMN, PySide2.QtCore.Qt.ForegroundRole)) + cuegui.FrameMonitorTree.STATUS_COLUMN, qtpy.QtCore.Qt.ForegroundRole)) self.assertEqual( cuegui.FrameMonitorTree.QCOLOR_GREEN, self.frameWidgetItem.data( - cuegui.FrameMonitorTree.PROC_COLUMN, PySide2.QtCore.Qt.ForegroundRole)) + cuegui.FrameMonitorTree.PROC_COLUMN, qtpy.QtCore.Qt.ForegroundRole)) self.assertEqual( cuegui.Constants.RGB_FRAME_STATE[self.state], self.frameWidgetItem.data( - cuegui.FrameMonitorTree.STATUS_COLUMN, PySide2.QtCore.Qt.BackgroundRole)) + cuegui.FrameMonitorTree.STATUS_COLUMN, qtpy.QtCore.Qt.BackgroundRole)) self.assertEqual( - PySide2.QtGui.QIcon, + qtpy.QtGui.QIcon, self.frameWidgetItem.data( cuegui.FrameMonitorTree.CHECKPOINT_COLUMN, - PySide2.QtCore.Qt.DecorationRole).__class__) + qtpy.QtCore.Qt.DecorationRole).__class__) self.assertEqual( - PySide2.QtCore.Qt.AlignCenter, + qtpy.QtCore.Qt.AlignCenter, self.frameWidgetItem.data( - cuegui.FrameMonitorTree.STATUS_COLUMN, PySide2.QtCore.Qt.TextAlignmentRole)) + cuegui.FrameMonitorTree.STATUS_COLUMN, qtpy.QtCore.Qt.TextAlignmentRole)) self.assertEqual( - 
PySide2.QtCore.Qt.AlignRight, + qtpy.QtCore.Qt.AlignRight, self.frameWidgetItem.data( - cuegui.FrameMonitorTree.PROC_COLUMN, PySide2.QtCore.Qt.TextAlignmentRole)) + cuegui.FrameMonitorTree.PROC_COLUMN, qtpy.QtCore.Qt.TextAlignmentRole)) self.assertEqual( cuegui.Constants.TYPE_FRAME, - self.frameWidgetItem.data(dispatch_order_col, PySide2.QtCore.Qt.UserRole)) + self.frameWidgetItem.data(dispatch_order_col, qtpy.QtCore.Qt.UserRole)) if __name__ == '__main__': diff --git a/cuegui/tests/LayerDialog_tests.py b/cuegui/tests/LayerDialog_tests.py index 1f9624d59..f50e9cfab 100644 --- a/cuegui/tests/LayerDialog_tests.py +++ b/cuegui/tests/LayerDialog_tests.py @@ -20,9 +20,9 @@ import mock -import PySide2.QtCore -import PySide2.QtGui -import PySide2.QtWidgets +import qtpy.QtCore +import qtpy.QtGui +import qtpy.QtWidgets import opencue.compiled_proto.show_pb2 import opencue.compiled_proto.filter_pb2 @@ -47,21 +47,23 @@ class LayerPropertiesDialogTests(unittest.TestCase): @mock.patch('opencue.api.getLayer') @mock.patch('opencue.cuebot.Cuebot.getStub') def setUp(self, get_stub_mock, get_layer_mock, get_limits_mock): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() self.layers = { 'layer1Id': opencue.wrappers.layer.Layer( opencue.compiled_proto.job_pb2.Layer( id='layer1Id', name='layer1Name', range='1-5', tags=['tag1', 'tag2'], - min_cores=1, max_cores=3, is_threadable=False, min_memory=2097152, min_gpu=1, + min_cores=1, max_cores=3, is_threadable=False, + min_memory=2097152, min_gpu_memory=1, chunk_size=1, timeout=30, timeout_llu=1, memory_optimizer_enabled=True, limits=['limit1Name', 'limit2Name'])), 'layer2Id': opencue.wrappers.layer.Layer( opencue.compiled_proto.job_pb2.Layer( id='layer2Id', name='layer2Name', range='2-22', tags=['tag2', 'tag3'], - min_cores=2, max_cores=2, is_threadable=True, min_memory=6291456, min_gpu=2, + 
min_cores=2, max_cores=2, is_threadable=True, + min_memory=6291456, min_gpu_memory=2, chunk_size=5, timeout=60, timeout_llu=5, memory_optimizer_enabled=False, limits=['limit2Name', 'limit3Name'])), } @@ -78,7 +80,7 @@ def setUp(self, get_stub_mock, get_layer_mock, get_limits_mock): opencue.compiled_proto.limit_pb2.Limit(id='limit4Id', name='limit4Name')), ] - self.parent_widget = PySide2.QtWidgets.QWidget() + self.parent_widget = qtpy.QtWidgets.QWidget() self.layer_properties_dialog = cuegui.LayerDialog.LayerPropertiesDialog( ['layer1Id', 'layer2Id'], parent=self.parent_widget) @@ -124,12 +126,12 @@ def test__should_display_current_values(self): self.assertTrue(self.layer_properties_dialog._LayerPropertiesDialog__thread.isChecked()) self.assertEqual( - int(self.layer_properties_dialog.gpu_min_gb * 1024 * 1024), - self.layer_properties_dialog._LayerPropertiesDialog__gpu.slider.minimum()) + int(self.layer_properties_dialog.gpu_mem_min_gb * 1024 * 1024), + self.layer_properties_dialog._LayerPropertiesDialog__gpu_mem.slider.minimum()) self.assertEqual( - int(self.layer_properties_dialog.gpu_max_gb * 1024 * 1024) // - int(self.layer_properties_dialog.gpu_tick_gb * 1024 * 1024), - self.layer_properties_dialog._LayerPropertiesDialog__gpu.slider.maximum()) + int(self.layer_properties_dialog.gpu_mem_max_gb * 1024 * 1024) // + int(self.layer_properties_dialog.gpu_mem_tick_gb * 1024 * 1024), + self.layer_properties_dialog._LayerPropertiesDialog__gpu_mem.slider.maximum()) # Layer with the highest timeout determines the initial value. 
self.assertEqual(60, self.layer_properties_dialog._LayerPropertiesDialog__timeout.value()) @@ -163,13 +165,13 @@ def test__should_fail_on_memory_too_low(self): self.assertFalse(self.layer_properties_dialog.verify()) def test__should_fail_on_gpu_too_high(self): - self.layer_properties_dialog._LayerPropertiesDialog__gpu.slider.setValue( - self.layer_properties_dialog.gpu_max_kb * 2) + self.layer_properties_dialog._LayerPropertiesDialog__gpu_mem.slider.setValue( + self.layer_properties_dialog.gpu_mem_max_kb * 2) self.assertFalse(self.layer_properties_dialog.verify()) def test__should_fail_on_gpu_too_low(self): - self.layer_properties_dialog._LayerPropertiesDialog__gpu.slider.setValue( - self.layer_properties_dialog.gpu_min_kb / 3) + self.layer_properties_dialog._LayerPropertiesDialog__gpu_mem.slider.setValue( + self.layer_properties_dialog.gpu_mem_min_kb / 3) self.assertFalse(self.layer_properties_dialog.verify()) def test__should_apply_new_settings(self): @@ -206,9 +208,10 @@ def test__should_apply_new_settings(self): self.layer_properties_dialog._LayerPropertiesDialog__thread.parent().parent().enable(True) self.layer_properties_dialog._LayerPropertiesDialog__thread.setChecked(new_is_threadable) - new_min_gpu = 6 - self.layer_properties_dialog._LayerPropertiesDialog__gpu.parent().parent().enable(True) - self.layer_properties_dialog._LayerPropertiesDialog__gpu.slider.setValue(new_min_gpu) + new_min_gpu_memory = 6 + self.layer_properties_dialog._LayerPropertiesDialog__gpu_mem.parent().parent().enable(True) + self.layer_properties_dialog._LayerPropertiesDialog__gpu_mem.slider.setValue( + new_min_gpu_memory) new_timeout = 20 self.layer_properties_dialog._LayerPropertiesDialog__timeout.parent().parent().enable(True) @@ -239,10 +242,10 @@ def test__should_apply_new_settings(self): layer2_mock.setMaxCores.assert_called_with(100 * new_max_cores) layer1_mock.setThreadable.assert_called_with(new_is_threadable) layer2_mock.setThreadable.assert_called_with(new_is_threadable) - 
layer1_mock.setMinGpu.assert_called_with( - new_min_gpu * self.layer_properties_dialog.gpu_tick_kb) - layer2_mock.setMinGpu.assert_called_with( - new_min_gpu * self.layer_properties_dialog.gpu_tick_kb) + layer1_mock.setMinGpuMemory.assert_called_with( + new_min_gpu_memory * self.layer_properties_dialog.gpu_mem_tick_kb) + layer2_mock.setMinGpuMemory.assert_called_with( + new_min_gpu_memory * self.layer_properties_dialog.gpu_mem_tick_kb) layer1_mock.setTimeout.assert_called_with(new_timeout) layer2_mock.setTimeout.assert_called_with(new_timeout) layer1_mock.setTimeoutLLU.assert_called_with(new_timeout_llu) diff --git a/cuegui/tests/Layout_tests.py b/cuegui/tests/Layout_tests.py new file mode 100644 index 000000000..2b6818f9c --- /dev/null +++ b/cuegui/tests/Layout_tests.py @@ -0,0 +1,93 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Tests for cuegui.Layout""" + + +from __future__ import print_function +from __future__ import division +from __future__ import absolute_import + +import os +import shutil +import tempfile +import unittest + +from qtpy import QtCore + +import cuegui.Layout + + +CONFIG_INI = ''' +[General] +Version=0.14 + +[CueCommander] +Open=true +Title=CustomWindowTitle +OtherAttr=arbitrary-value +''' + +CONFIG_WITH_RESTORE_FLAG = ''' +[General] +Version=0.14 +RevertLayout=true + +[CueCommander] +OtherAttr=arbitrary-value +''' + + +class LayoutTests(unittest.TestCase): + def setUp(self): + self.config_dir = tempfile.mkdtemp() + QtCore.QSettings.setPath( + QtCore.QSettings.IniFormat, QtCore.QSettings.UserScope, self.config_dir) + + def tearDown(self): + shutil.rmtree(self.config_dir) + + def test__should_load_user_layout(self): + app_name = 'arbitraryapp' + config_file_path = os.path.join(self.config_dir, '.%s' % app_name, 'config.ini') + os.mkdir(os.path.dirname(config_file_path)) + with open(config_file_path, 'w') as fp: + fp.write(CONFIG_INI) + + settings = cuegui.Layout.startup(app_name) + + self.assertEqual('0.14', settings.value('Version')) + self.assertEqual('true', settings.value('CueCommander/Open')) + self.assertEqual('CustomWindowTitle', settings.value('CueCommander/Title')) + self.assertEqual('arbitrary-value', settings.value('CueCommander/OtherAttr')) + + def test__should_load_default_layout(self): + settings = cuegui.Layout.startup('CueCommander') + + self.assertEqual('false', settings.value('CueCommander/Open')) + self.assertEqual('CueCommander', settings.value('CueCommander/Title')) + self.assertFalse(settings.value('CueCommander/OtherAttr', False)) + + def test__should_restore_default_layout(self): + config_file_path = os.path.join(self.config_dir, '.cuecommander', 'config.ini') + os.mkdir(os.path.dirname(config_file_path)) + with open(config_file_path, 'w') as fp: + fp.write(CONFIG_WITH_RESTORE_FLAG) + + settings = cuegui.Layout.startup('CueCommander') + + 
self.assertEqual('false', settings.value('CueCommander/Open')) + self.assertEqual('CueCommander', settings.value('CueCommander/Title')) + self.assertFalse(settings.value('CueCommander/OtherAttr', False)) diff --git a/cuegui/tests/MenuActions_tests.py b/cuegui/tests/MenuActions_tests.py index 87282eefe..e65c590da 100644 --- a/cuegui/tests/MenuActions_tests.py +++ b/cuegui/tests/MenuActions_tests.py @@ -23,8 +23,8 @@ import unittest import mock -import PySide2.QtGui -import PySide2.QtWidgets +import qtpy.QtGui +import qtpy.QtWidgets import opencue.compiled_proto.depend_pb2 import opencue.compiled_proto.facility_pb2 @@ -52,7 +52,7 @@ import cuegui.CueJobMonitorTree import cuegui.Main import cuegui.MenuActions - +from . import test_utils _GB_TO_KB = 1024 * 1024 @@ -60,25 +60,26 @@ @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) class JobActionsTests(unittest.TestCase): def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.job_actions = cuegui.MenuActions.JobActions(self.widgetMock, mock.Mock(), None, None) def test_jobs(self): - print(cuegui.MenuActions.MenuActions(self.widgetMock, None, None, None).jobs()) + cuegui.MenuActions.MenuActions(self.widgetMock, None, None, None).jobs() def test_unmonitor(self): self.job_actions.unmonitor() self.widgetMock.actionRemoveSelectedItems.assert_called_with() - @mock.patch('PySide2.QtGui.qApp') - def test_view(self, qAppMock): + def test_view(self): + self.app.view_object = mock.Mock() job_name = 'arbitrary-name' job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name=job_name)) self.job_actions.view(rpcObjects=[job, opencue.wrappers.frame.Frame()]) - qAppMock.view_object.emit.assert_called_once_with(job) + self.app.view_object.emit.assert_called_once_with(job) @mock.patch('cuegui.DependDialog.DependDialog') def test_viewDepends(self, dependDialogMock): @@ -100,7 +101,7 @@ def test_emailArtist(self, emailDialogMock): emailDialogMock.assert_called_with(job, 
self.widgetMock) emailDialogMock.return_value.show.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinCores(self, getDoubleMock): highest_current_core_count = 20 new_core_count = 50 @@ -122,7 +123,7 @@ def test_setMinCores(self, getDoubleMock): job1.setMinCores.assert_called_with(new_core_count) job2.setMinCores.assert_called_with(new_core_count) - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinCoresCanceled(self, getDoubleMock): job1 = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(min_cores=0)) job1.setMinCores = mock.Mock() @@ -135,7 +136,7 @@ def test_setMinCoresCanceled(self, getDoubleMock): job1.setMinCores.assert_not_called() job2.setMinCores.assert_not_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMaxCores(self, getDoubleMock): highest_current_core_count = 20 new_core_count = 50 @@ -157,7 +158,7 @@ def test_setMaxCores(self, getDoubleMock): job1.setMaxCores.assert_called_with(new_core_count) job2.setMaxCores.assert_called_with(new_core_count) - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMaxCoresCanceled(self, getDoubleMock): job1 = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(max_cores=0)) job1.setMaxCores = mock.Mock() @@ -170,7 +171,7 @@ def test_setMaxCoresCanceled(self, getDoubleMock): job1.setMaxCores.assert_not_called() job2.setMaxCores.assert_not_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') def test_setPriority(self, getIntMock): job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(priority=0)) job.setPriority = mock.Mock() @@ -181,7 +182,7 @@ def test_setPriority(self, getIntMock): 
job.setPriority.assert_called_with(new_priority) - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') def test_setPriorityCanceled(self, getIntMock): job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(priority=0)) job.setPriority = mock.Mock() @@ -191,7 +192,7 @@ def test_setPriorityCanceled(self, getIntMock): job.setPriority.assert_not_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') def test_setMaxRetries(self, getIntMock): job = opencue.wrappers.job.Job() job.setMaxRetries = mock.Mock() @@ -202,7 +203,7 @@ def test_setMaxRetries(self, getIntMock): job.setMaxRetries.assert_called_with(new_retries) - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') def test_setMaxRetriesCanceled(self, getIntMock): job = opencue.wrappers.job.Job() job.setMaxRetries = mock.Mock() @@ -232,6 +233,8 @@ def test_resume(self): def test_kill(self, yesNoMock): job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name='job-name')) job.kill = mock.Mock() + job.getWhatDependsOnThis = mock.Mock() + job.getWhatDependsOnThis.return_value = [] self.job_actions.kill(rpcObjects=[job]) @@ -353,8 +356,8 @@ def test_dependWizard(self, dependWizardMock): dependWizardMock.assert_called_with(self.widgetMock, jobs) - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_reorder(self, getTextMock, getItemMock): original_range = '1-10' new_order = 'REVERSE' @@ -370,8 +373,8 @@ def test_reorder(self, getTextMock, getItemMock): job.reorderFrames.assert_called_with(original_range, opencue.compiled_proto.job_pb2.REVERSE) - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + 
@mock.patch('qtpy.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_reorderCanceled(self, getTextMock, getItemMock): original_range = '1-10' job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name='job-name')) @@ -394,8 +397,8 @@ def test_reorderCanceled(self, getTextMock, getItemMock): job.reorderFrames.assert_not_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_stagger(self, getTextMock, getIntMock): original_range = '1-10' new_step = 28 @@ -411,8 +414,8 @@ def test_stagger(self, getTextMock, getIntMock): job.staggerFrames.assert_called_with(original_range, new_step) - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_staggerCanceled(self, getTextMock, getIntMock): original_range = '1-10' job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name='job-name')) @@ -443,34 +446,55 @@ def test_unbook(self, unbookDialogMock): unbookDialogMock.assert_called_with(jobs, self.widgetMock) unbookDialogMock.return_value.exec_.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('cuegui.CueJobMonitorTree.MoveDialog.move_items') @mock.patch('opencue.api.findShow') - def test_sendToGroup(self, findShowMock, getItemMock): + def test_sendToGroup(self, findShowMock, move_itemsMock): + + move_dialogMock = mock.Mock() + + move_dialogMock.open() group_name = 'arbitrary-group-name' job = opencue.wrappers.job.Job( opencue.compiled_proto.job_pb2.Job( name='arbitrary-job-name', show='arbitrary-show-name')) - show = opencue.wrappers.show.Show() + body_content = cuegui.CueJobMonitorTree.Body(group_names=[], + group_ids=[], + 
job_names=[job.name()], + job_ids=[job]) + group = opencue.wrappers.group.Group(opencue.compiled_proto.job_pb2.Group(name=group_name)) group.reparentJobs = mock.Mock() + + show = opencue.wrappers.show.Show() findShowMock.return_value = show show.getGroups = mock.Mock(return_value=[group]) - getItemMock.return_value = (group_name, True) - self.job_actions.sendToGroup(rpcObjects=[job]) + move_dialogMock.dst_groups = {str(group_name): group} + move_itemsMock.return_value = move_dialogMock.dst_groups[str(group_name)].reparentJobs( + body_content.job_ids) + move_dialogMock.accept() - group.reparentJobs.assert_called_with([job]) + group.reparentJobs.assert_called_with(body_content.job_ids) - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('cuegui.CueJobMonitorTree.MoveDialog.move_items') @mock.patch('opencue.api.findShow') - def test_sendToGroupCanceled(self, findShowMock, getItemMock): - job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name='job-name')) - group = opencue.wrappers.group.Group() + def test_sendToGroupCanceled(self, findShowMock, move_itemsMock): + + move_dialogMock = mock.Mock() + + move_dialogMock.open() + group_name = 'arbitrary-group-name' + job = opencue.wrappers.job.Job( + opencue.compiled_proto.job_pb2.Job( + name='arbitrary-job-name', show='arbitrary-show-name')) + group = opencue.wrappers.group.Group(opencue.compiled_proto.job_pb2.Group(name=group_name)) group.reparentJobs = mock.Mock() - findShowMock.getGroups.return_value = [] - getItemMock.return_value = (None, False) - self.job_actions.sendToGroup(rpcObjects=[job]) + show = opencue.wrappers.show.Show() + findShowMock.return_value = show + show.getGroups = mock.Mock(return_value=[group]) + move_itemsMock.return_value = (None, False) + move_dialogMock.reject() group.reparentJobs.assert_not_called() @@ -483,7 +507,7 @@ def test_useLocalCores(self, localBookingDialogMock): localBookingDialogMock.assert_called_with(job, self.widgetMock) 
localBookingDialogMock.return_value.exec_.assert_called() - @mock.patch('PySide2.QtWidgets.QApplication.clipboard') + @mock.patch('qtpy.QtWidgets.QApplication.clipboard') def test_copyLogFileDir(self, clipboardMock): logDir1 = '/some/random/dir' logDir2 = '/a/different/random/dir' @@ -522,6 +546,7 @@ class LayerActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.job = mock.create_autospec(opencue.wrappers.job.Job()) self.layer_actions = cuegui.MenuActions.LayerActions( @@ -546,7 +571,7 @@ def test_viewDepends(self, dependDialogMock): dependDialogMock.return_value.show.assert_called() @mock.patch.object(opencue.wrappers.layer.Layer, 'setMinCores', autospec=True) - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinCores(self, getDoubleMock, setMinCoresMock): highest_current_core_count = 20 new_core_count = 50 @@ -567,7 +592,7 @@ def test_setMinCores(self, getDoubleMock, setMinCoresMock): mock.call(layer1, new_core_count), mock.call(layer2, new_core_count)]) @mock.patch.object(opencue.wrappers.layer.Layer, 'setMinCores') - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinCoresCanceled(self, getDoubleMock, setMinCoresMock): layer1 = opencue.wrappers.layer.Layer( opencue.compiled_proto.job_pb2.Layer(min_cores=0)) @@ -580,7 +605,7 @@ def test_setMinCoresCanceled(self, getDoubleMock, setMinCoresMock): setMinCoresMock.assert_not_called() @mock.patch.object(opencue.wrappers.layer.Layer, 'setMinMemory', autospec=True) - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinMemoryKb(self, getDoubleMock, setMinMemoryMock): highest_current_mem_limit_gb = 20 new_mem_limit_gb = 50 @@ -605,7 +630,7 @@ def 
test_setMinMemoryKb(self, getDoubleMock, setMinMemoryMock): ]) @mock.patch.object(opencue.wrappers.layer.Layer, 'setMinMemory') - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinMemoryKbCanceled(self, getDoubleMock, setMinMemoryMock): layer1 = opencue.wrappers.layer.Layer(opencue.compiled_proto.job_pb2.Layer(min_memory=0)) layer2 = opencue.wrappers.layer.Layer(opencue.compiled_proto.job_pb2.Layer(min_memory=0)) @@ -733,8 +758,8 @@ def test_dependWizard(self, dependWizardMock): dependWizardMock.assert_called_with(self.widgetMock, [self.job], layers=layers) @mock.patch.object(opencue.wrappers.layer.Layer, 'reorderFrames', autospec=True) - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_reorder(self, getTextMock, getItemMock, reorderFramesMock): original_range = '1-10' new_order = 'REVERSE' @@ -750,8 +775,8 @@ def test_reorder(self, getTextMock, getItemMock, reorderFramesMock): layer, original_range, opencue.compiled_proto.job_pb2.REVERSE) @mock.patch.object(opencue.wrappers.layer.Layer, 'staggerFrames', autospec=True) - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_stagger(self, getTextMock, getIntMock, staggerFramesMock): original_range = '1-10' new_step = 28 @@ -770,6 +795,7 @@ class FrameActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.job = mock.create_autospec(opencue.wrappers.job.Job()) self.frame_actions = cuegui.MenuActions.FrameActions( @@ -847,8 +873,9 @@ def test_xdiff3(self, 
popupFrameXdiffMock): popupFrameXdiffMock.assert_called_with(self.job, frame1, frame2, frame3) @mock.patch('opencue.api.findHost') - @mock.patch('PySide2.QtGui.qApp') - def test_viewHost(self, qAppMock, findHostMock): + def test_viewHost(self, findHostMock): + self.app.view_hosts = mock.Mock() + self.app.single_click = mock.Mock() host_name = 'arbitrary-host-name' host = opencue.wrappers.host.Host( opencue.compiled_proto.host_pb2.Host(id='arbitrary-id', name=host_name)) @@ -858,8 +885,8 @@ def test_viewHost(self, qAppMock, findHostMock): self.frame_actions.viewHost(rpcObjects=[frame]) - qAppMock.view_hosts.emit.assert_called_with([host_name]) - qAppMock.single_click.emit.assert_called_with(host) + self.app.view_hosts.emit.assert_called_with([host_name]) + self.app.single_click.emit.assert_called_with(host) def test_getWhatThisDependsOn(self): frame = opencue.wrappers.frame.Frame() @@ -971,7 +998,7 @@ def test_markdone(self, yesNoMock): self.job.markdoneFrames.assert_called_with(name=[frame_name]) @mock.patch.object(opencue.wrappers.layer.Layer, 'reorderFrames', autospec=True) - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_reorder(self, getItemMock, reorderFramesMock): new_order = 'REVERSE' getItemMock.return_value = (new_order, True) @@ -988,7 +1015,7 @@ def test_reorder(self, getItemMock, reorderFramesMock): reorderFramesMock.assert_called_with( layer, str(frame_num), opencue.compiled_proto.job_pb2.REVERSE) - @mock.patch('PySide2.QtWidgets.QApplication.clipboard') + @mock.patch('qtpy.QtWidgets.QApplication.clipboard') @mock.patch('cuegui.Utils.getFrameLogFile') def test_copyLogFileName(self, getFrameLogFileMock, clipboardMock): frame_log_path = '/some/path/to/job/logs/job-name.frame-name.rqlog' @@ -1026,6 +1053,7 @@ class ShowActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock 
= mock.Mock() self.show_actions = cuegui.MenuActions.ShowActions( self.widgetMock, mock.Mock(), None, None) @@ -1062,6 +1090,7 @@ class GroupActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.group_actions = cuegui.MenuActions.GroupActions( self.widgetMock, mock.Mock(), None, None) @@ -1099,25 +1128,26 @@ class SubscriptionActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.subscription_actions = cuegui.MenuActions.SubscriptionActions( self.widgetMock, mock.Mock(), None, None) - @mock.patch('PySide2.QtWidgets.QMessageBox') - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QMessageBox') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_editSize(self, getDoubleMock, qMessageBoxMock): sub = opencue.wrappers.subscription.Subscription( opencue.compiled_proto.subscription_pb2.Subscription(size=382)) sub.setSize = mock.MagicMock() newSize = 8479 getDoubleMock.return_value = (newSize, True) - qMessageBoxMock.return_value.exec_.return_value = PySide2.QtWidgets.QMessageBox.Yes + qMessageBoxMock.return_value.exec_.return_value = qtpy.QtWidgets.QMessageBox.Yes self.subscription_actions.editSize(rpcObjects=[sub]) sub.setSize.assert_called_with(newSize*100.0) - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_editBurst(self, getDoubleMock): sub = opencue.wrappers.subscription.Subscription( opencue.compiled_proto.subscription_pb2.Subscription(burst=922)) @@ -1152,6 +1182,7 @@ class HostActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.host_actions 
= cuegui.MenuActions.HostActions( self.widgetMock, mock.Mock(), None, None) @@ -1165,15 +1196,15 @@ def test_viewComments(self, commentListDialogMock): commentListDialogMock.assert_called_with(host, mock.ANY) commentListDialogMock.return_value.show.assert_called() - @mock.patch('PySide2.QtGui.qApp') - def test_viewProc(self, qAppMock): + def test_viewProc(self): + self.app.view_procs = mock.Mock() hostName = 'arbitrary-name' host = opencue.wrappers.host.Host( opencue.compiled_proto.host_pb2.Host(id='arbitrary-id', name=hostName)) self.host_actions.viewProc(rpcObjects=[opencue.wrappers.layer.Layer, host, host]) - qAppMock.view_procs.emit.assert_called_with([hostName]) + self.app.view_procs.emit.assert_called_with([hostName]) def test_lock(self): host = opencue.wrappers.host.Host( @@ -1218,7 +1249,7 @@ def test_rebootWhenIdle(self): host.rebootWhenIdle.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_addTags(self, getTextMock): host = opencue.wrappers.host.Host( opencue.compiled_proto.host_pb2.Host(id='arbitrary-id')) @@ -1230,7 +1261,7 @@ def test_addTags(self, getTextMock): host.addTags.assert_called_with(['firstTag', 'anotherTag', 'oneMoreTag']) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_removeTags(self, getTextMock): host = opencue.wrappers.host.Host( opencue.compiled_proto.host_pb2.Host( @@ -1242,8 +1273,8 @@ def test_removeTags(self, getTextMock): host.removeTags.assert_called_with(['firstTag', 'anotherTag', 'oneMoreTag']) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') def test_renameTag(self, getItemMock, getTextMock): host = opencue.wrappers.host.Host( opencue.compiled_proto.host_pb2.Host(id='arbitrary-id')) @@ -1257,7 +1288,7 @@ def 
test_renameTag(self, getItemMock, getTextMock): host.renameTag.assert_called_with(oldTagName, newTagName) - @mock.patch('PySide2.QtWidgets.QInputDialog.getItem') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') @mock.patch('opencue.api.getAllocations') def test_changeAllocation(self, getAllocationsMock, getItemMock): host = opencue.wrappers.host.Host( @@ -1314,13 +1345,14 @@ class ProcActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.proc_actions = cuegui.MenuActions.ProcActions( self.widgetMock, mock.Mock(), None, None) - @mock.patch('PySide2.QtGui.qApp') @mock.patch('opencue.api.findJob') - def test_view(self, findJobMock, qAppMock): + def test_view(self, findJobMock): + self.app.view_object = mock.Mock() jobName = 'arbitraryJobName' job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name=jobName)) proc = opencue.wrappers.proc.Proc(opencue.compiled_proto.host_pb2.Proc(job_name=jobName)) @@ -1328,7 +1360,7 @@ def test_view(self, findJobMock, qAppMock): self.proc_actions.view(rpcObjects=[opencue.wrappers.layer.Layer, proc]) - qAppMock.view_object.emit.assert_called_once_with(job) + self.app.view_object.emit.assert_called_once_with(job) @mock.patch('cuegui.Utils.questionBoxYesNo', new=mock.Mock(return_value=True)) def test_kill(self): @@ -1363,6 +1395,7 @@ class DependenciesActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.dep_actions = cuegui.MenuActions.DependenciesActions( self.widgetMock, mock.Mock(), None, None) @@ -1389,11 +1422,12 @@ class FilterActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.filter_actions = 
cuegui.MenuActions.FilterActions( self.widgetMock, mock.Mock(), None, None) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_rename(self, getTextMock): filter_wrapper = opencue.wrappers.filter.Filter(opencue.compiled_proto.filter_pb2.Filter()) filter_wrapper.setName = mock.MagicMock() @@ -1445,7 +1479,7 @@ def test_orderLast(self): filter_wrapper.orderLast.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getInt') + @mock.patch('qtpy.QtWidgets.QInputDialog.getInt') def test_setOrder(self, getTextMock): filter_wrapper = opencue.wrappers.filter.Filter(opencue.compiled_proto.filter_pb2.Filter()) filter_wrapper.setOrder = mock.MagicMock() @@ -1462,6 +1496,7 @@ class MatcherActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.matcher_actions = cuegui.MenuActions.MatcherActions( self.widgetMock, mock.Mock(), None, None) @@ -1475,7 +1510,7 @@ def test_delete(self): matcher.delete.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_setValue(self, getTextMock): matcher = opencue.wrappers.filter.Matcher(opencue.compiled_proto.filter_pb2.Matcher()) matcher.setValue = mock.MagicMock() @@ -1492,6 +1527,7 @@ class ActionActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.action_actions = cuegui.MenuActions.ActionActions( self.widgetMock, mock.Mock(), None, None) @@ -1511,11 +1547,12 @@ class TaskActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.task_actions = cuegui.MenuActions.TaskActions( self.widgetMock, 
mock.Mock(), None, None) - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_setMinCores(self, getDoubleMock): task = opencue.wrappers.task.Task(opencue.compiled_proto.task_pb2.Task(min_cores=10)) task.setMinCores = mock.MagicMock() @@ -1549,12 +1586,13 @@ class LimitActionsTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.limit_actions = cuegui.MenuActions.LimitActions( self.widgetMock, mock.Mock(), None, None) @mock.patch('opencue.api.createLimit') - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_create(self, getTextMock, createLimitMock): limitName = 'newLimitName' getTextMock.return_value = ('%s \t ' % limitName, True) @@ -1572,7 +1610,7 @@ def test_delete(self): limit.delete.assert_called() - @mock.patch('PySide2.QtWidgets.QInputDialog.getDouble') + @mock.patch('qtpy.QtWidgets.QInputDialog.getDouble') def test_editMaxValue(self, getDoubleMock): limit = opencue.wrappers.limit.Limit(opencue.compiled_proto.limit_pb2.Limit(max_value=920)) limit.setMaxValue = mock.MagicMock() @@ -1584,7 +1622,7 @@ def test_editMaxValue(self, getDoubleMock): limit.setMaxValue.assert_called_with(newMaxValue) - @mock.patch('PySide2.QtWidgets.QInputDialog.getText') + @mock.patch('qtpy.QtWidgets.QInputDialog.getText') def test_rename(self, getTextMock): limit = opencue.wrappers.limit.Limit(opencue.compiled_proto.limit_pb2.Limit()) limit.rename = mock.MagicMock() @@ -1599,6 +1637,7 @@ def test_rename(self, getTextMock): @mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) class MenuActionsTests(unittest.TestCase): def setUp(self): + self.app = test_utils.createApplication() self.widgetMock = mock.Mock() self.args = [self.widgetMock, lambda: None, lambda: None, lambda: None] self.menuActions = 
cuegui.MenuActions.MenuActions(*self.args) diff --git a/cuegui/tests/Redirect_tests.py b/cuegui/tests/Redirect_tests.py index d1e7ce5dd..ecfffcc48 100644 --- a/cuegui/tests/Redirect_tests.py +++ b/cuegui/tests/Redirect_tests.py @@ -19,8 +19,8 @@ import unittest import mock -import PySide2.QtCore -import PySide2.QtGui +import qtpy.QtCore +import qtpy.QtGui import opencue.compiled_proto.show_pb2 import opencue.wrappers.show @@ -36,8 +36,8 @@ class RedirectTests(unittest.TestCase): @mock.patch('opencue.cuebot.Cuebot.getStub') def setUp(self, getStubMock): - test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() cuegui.Style.init() getStubMock.return_value.GetActiveShows.return_value = \ diff --git a/cuegui/tests/UnbookDialog_tests.py b/cuegui/tests/UnbookDialog_tests.py new file mode 100644 index 000000000..bc5c348d8 --- /dev/null +++ b/cuegui/tests/UnbookDialog_tests.py @@ -0,0 +1,288 @@ +# Copyright (c) OpenCue Project Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +"""Tests for cuegui.UnbookDialog.""" + + +import unittest + +import mock + +import qtpy.QtCore +import qtpy.QtGui + +import opencue.compiled_proto.criterion_pb2 +import opencue.compiled_proto.host_pb2 +import opencue.compiled_proto.job_pb2 +import opencue.compiled_proto.show_pb2 +import opencue.compiled_proto.subscription_pb2 +import opencue.wrappers.group +import opencue.wrappers.job +import opencue.wrappers.proc +import opencue.wrappers.show +import opencue.wrappers.subscription + +import cuegui.Style +import cuegui.UnbookDialog + +from . import test_utils + + +@mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) +class UnbookDialogTests(unittest.TestCase): + + @mock.patch('opencue.api.findShow') + @mock.patch('opencue.cuebot.Cuebot.getStub') + def setUp(self, get_stub_mock, find_show_mock): + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() + cuegui.Style.init() + + show_name = 'showname' + self.job_names = [ + '%s-shotname-username_job1' % show_name, '%s-shotname-username_job2' % show_name] + self.jobs = [ + opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name=self.job_names[0])), + opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name=self.job_names[1]))] + show = opencue.wrappers.show.Show(opencue.compiled_proto.show_pb2.Show(name=show_name)) + self.tag_names = ['general', 'desktop'] + subscriptions = [ + opencue.wrappers.subscription.Subscription( + opencue.compiled_proto.subscription_pb2.Subscription( + name='local.%s.%s' % (self.tag_names[0], show_name))), + opencue.wrappers.subscription.Subscription( + opencue.compiled_proto.subscription_pb2.Subscription( + name='local.%s.%s' % (self.tag_names[1], show_name))), + ] + show.getSubscriptions = mock.Mock() + show.getSubscriptions.return_value = subscriptions + find_show_mock.return_value = show + + self.dialog = cuegui.UnbookDialog.UnbookDialog(self.jobs) + + def test__should_show_all_jobs_and_subscriptions(self): + self.dialog.open() + + 
jobs_shown = self.dialog._UnbookDialog__jobList.toPlainText().split() + self.assertEqual(self.job_names, jobs_shown) + subscription_matrix = self.dialog._UnbookDialog__matrix + subscriptions_shown = [ + checkbox.text() + for checkbox in subscription_matrix._CheckBoxSelectionMatrix__checkBoxes] + subscriptions_checked = subscription_matrix.checkedOptions() + self.assertEqual(self.tag_names, subscriptions_shown) + self.assertEqual(self.tag_names, subscriptions_checked) + + @mock.patch('qtpy.QtWidgets.QMessageBox', new=mock.Mock()) + @mock.patch('opencue.api.getProcs') + def test__should_unbook_procs(self, get_procs_mock): + num_procs = 17 + min_mem = 56 + max_mem = 143 + expected_proc_search = opencue.search.ProcSearch( + allocs=self.tag_names, jobs=self.job_names, maxResults=[num_procs], + memoryRange=[opencue.compiled_proto.criterion_pb2.InRangeIntegerSearchCriterion( + min=min_mem*1024, max=max_mem*1024)]) + returned_proc1 = opencue.wrappers.proc.Proc() + returned_proc1.unbook = mock.Mock() + returned_proc2 = opencue.wrappers.proc.Proc() + returned_proc2.unbook = mock.Mock() + get_procs_mock.return_value = [returned_proc1, returned_proc2] + + self.dialog.open() + self.dialog._UnbookDialog__amount.setValue(num_procs) + self.dialog._UnbookDialog__memoryRangeBox._RangeBox__group.setChecked(True) + self.dialog._UnbookDialog__memoryRangeBox._RangeBox__range.setChecked(True) + self.dialog._UnbookDialog__memoryRangeBox._RangeBox__min.setValue(min_mem) + self.dialog._UnbookDialog__memoryRangeBox._RangeBox__max.setValue(max_mem) + self.dialog.accept() + + get_procs_mock.assert_called_with(**expected_proc_search.options) + returned_proc1.unbook.assert_called() + returned_proc2.unbook.assert_called() + + @mock.patch('cuegui.UnbookDialog.KillConfirmationDialog') + def test__should_show_kill_confirmation_dialog(self, kill_dialog_mock): + num_procs = 2 + min_runtime = 90 + max_runtime = 105 + expected_proc_search = opencue.search.ProcSearch( + allocs=self.tag_names, 
jobs=self.job_names, maxResults=[num_procs], + durationRange=[opencue.compiled_proto.criterion_pb2.InRangeIntegerSearchCriterion( + min=min_runtime*60, max=max_runtime*60)]) + kill_dialog_mock.return_value.result.return_value = True + + self.dialog.open() + self.dialog._UnbookDialog__amount.setValue(num_procs) + self.dialog._UnbookDialog__kill.setChecked(True) + self.dialog._UnbookDialog__runtimeRangeBox._RangeBox__group.setChecked(True) + self.dialog._UnbookDialog__runtimeRangeBox._RangeBox__range.setChecked(True) + self.dialog._UnbookDialog__runtimeRangeBox._RangeBox__min.setValue(min_runtime) + self.dialog._UnbookDialog__runtimeRangeBox._RangeBox__max.setValue(max_runtime) + self.dialog.accept() + + kill_dialog_mock.assert_called_with(expected_proc_search, mock.ANY) + + @mock.patch('qtpy.QtWidgets.QMessageBox', new=mock.Mock()) + @mock.patch('opencue.api.getProcs') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') + @mock.patch('opencue.api.getActiveShows') + def test__should_redirect_proc_to_group( + self, get_active_shows_mock, get_item_mock, get_procs_mock): + num_procs = 50 + other_show_name = 'some-other-show' + group_name = 'group-to-redirect-to' + show = opencue.wrappers.show.Show( + opencue.compiled_proto.show_pb2.Show(name=other_show_name)) + group = opencue.wrappers.group.Group(opencue.compiled_proto.job_pb2.Group(name=group_name)) + show.getGroups = mock.Mock() + show.getGroups.return_value = [group] + get_active_shows_mock.return_value = [show] + get_item_mock.side_effect = [(other_show_name, True), ('Group', True), (group_name, True)] + expected_proc_search = opencue.search.ProcSearch( + allocs=self.tag_names, jobs=self.job_names, maxResults=[num_procs]) + proc_to_redirect = opencue.wrappers.proc.Proc() + proc_to_redirect.redirectToGroup = mock.Mock() + get_procs_mock.return_value = [proc_to_redirect] + + self.dialog.open() + self.dialog._UnbookDialog__amount.setValue(num_procs) + self.dialog._UnbookDialog__redirect.setChecked(True) + 
self.dialog.accept() + + get_procs_mock.assert_called_with(**expected_proc_search.options) + proc_to_redirect.redirectToGroup.assert_called_with(group, False) + + @mock.patch('qtpy.QtWidgets.QMessageBox', new=mock.Mock()) + @mock.patch('opencue.api.getProcs') + @mock.patch('cuegui.UnbookDialog.SelectItemsWithSearchDialog') + @mock.patch('opencue.api.getJobs') + @mock.patch('qtpy.QtWidgets.QInputDialog.getItem') + @mock.patch('opencue.api.getActiveShows') + def test__should_redirect_proc_to_job( + self, get_active_shows_mock, get_item_mock, get_jobs_mock, select_job_mock, + get_procs_mock): + num_procs = 50 + other_show_name = 'some-other-show' + job_name = 'job-to-redirect-to' + show = opencue.wrappers.show.Show( + opencue.compiled_proto.show_pb2.Show(name=other_show_name)) + job = opencue.wrappers.job.Job(opencue.compiled_proto.job_pb2.Job(name=job_name)) + get_active_shows_mock.return_value = [show] + get_item_mock.side_effect = [(other_show_name, True), ('Job', True)] + get_jobs_mock.return_value = [job] + select_job_mock.return_value.selected.return_value = [job_name] + expected_proc_search = opencue.search.ProcSearch( + allocs=self.tag_names, jobs=self.job_names, maxResults=[num_procs]) + proc_to_redirect = opencue.wrappers.proc.Proc() + proc_to_redirect.redirectToJob = mock.Mock() + get_procs_mock.return_value = [proc_to_redirect] + + self.dialog.open() + self.dialog._UnbookDialog__amount.setValue(num_procs) + self.dialog._UnbookDialog__redirect.setChecked(True) + self.dialog.accept() + + get_procs_mock.assert_called_with(**expected_proc_search.options) + proc_to_redirect.redirectToJob.assert_called_with(job, False) + + +class SelectItemsWithSearchDialogTests(unittest.TestCase): + + def setUp(self): + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() + cuegui.Style.init() + + def test__should_display_all_items(self): + items_to_be_shown = ['item1', 'item2', 'item3'] + + dialog = cuegui.UnbookDialog.SelectItemsWithSearchDialog(None, 
'header', items_to_be_shown) + dialog.open() + + item_list = dialog._SelectItemsWithSearchDialog__widget._SelectItemsWithSearchWidget__list + all_items_shown = [item_list.item(x).text() for x in range(item_list.count())] + self.assertEqual(items_to_be_shown, all_items_shown) + + def test__should_filter_items(self): + initial_item_list = ['item1', 'itemSubstr2', 'item3', 'itemsubstr4'] + + dialog = cuegui.UnbookDialog.SelectItemsWithSearchDialog(None, 'header', initial_item_list) + dialog.open() + dialog._SelectItemsWithSearchDialog__widget._SelectItemsWithSearchWidget__filter.setText( + 'substr') + + item_list = dialog._SelectItemsWithSearchDialog__widget._SelectItemsWithSearchWidget__list + items_shown = [item_list.item(x).text() for x in range(item_list.count())] + self.assertEqual(['itemSubstr2', 'itemsubstr4'], items_shown) + + def test__should_return_selected_items(self): + initial_item_list = ['item1', 'item2', 'item3', 'item4'] + + dialog = cuegui.UnbookDialog.SelectItemsWithSearchDialog(None, 'header', initial_item_list) + dialog.open() + item_list = dialog._SelectItemsWithSearchDialog__widget._SelectItemsWithSearchWidget__list + item_list.item(1).setSelected(True) + item_list.item(2).setSelected(True) + dialog.accept() + + self.assertEqual(['item2', 'item3'], dialog.selected()) + + +@mock.patch('opencue.cuebot.Cuebot.getStub', new=mock.Mock()) +class KillConfirmationDialogTests(unittest.TestCase): + + def setUp(self): + app = test_utils.createApplication() + app.settings = qtpy.QtCore.QSettings() + cuegui.Style.init() + + @mock.patch('qtpy.QtWidgets.QMessageBox.information', new=mock.Mock()) + @mock.patch('opencue.api.getProcs') + def test__should_kill_procs(self, get_procs_mock): + proc_search = opencue.search.ProcSearch( + allocs=['tag1', 'tag2'], jobs=['someJob', 'anotherJob'], maxResults=[57]) + proc1 = opencue.wrappers.proc.Proc( + opencue.compiled_proto.host_pb2.Proc(job_name='someJob', frame_name='0002')) + proc1.kill = mock.Mock() + proc2 = 
opencue.wrappers.proc.Proc( + opencue.compiled_proto.host_pb2.Proc(job_name='anotherJob', frame_name='2847')) + proc2.kill = mock.Mock() + get_procs_mock.return_value = [proc1, proc2] + + dialog = cuegui.UnbookDialog.KillConfirmationDialog(proc_search) + dialog.accept() + + proc1.kill.assert_called() + proc2.kill.assert_called() + + @mock.patch('qtpy.QtWidgets.QMessageBox.information', new=mock.Mock()) + @mock.patch('opencue.api.getProcs') + def test__should_cancel_kill(self, get_procs_mock): + proc_search = opencue.search.ProcSearch( + allocs=['tag1', 'tag2'], jobs=['someJob', 'anotherJob'], maxResults=[57]) + proc1 = opencue.wrappers.proc.Proc( + opencue.compiled_proto.host_pb2.Proc(job_name='someJob', frame_name='0002')) + proc1.kill = mock.Mock() + proc2 = opencue.wrappers.proc.Proc( + opencue.compiled_proto.host_pb2.Proc(job_name='anotherJob', frame_name='2847')) + proc2.kill = mock.Mock() + get_procs_mock.return_value = [proc1, proc2] + + dialog = cuegui.UnbookDialog.KillConfirmationDialog(proc_search) + dialog.reject() + + proc1.kill.assert_not_called() + proc2.kill.assert_not_called() diff --git a/cuegui/tests/Utils_tests.py b/cuegui/tests/Utils_tests.py index dddf46423..e2f4a69b0 100644 --- a/cuegui/tests/Utils_tests.py +++ b/cuegui/tests/Utils_tests.py @@ -69,6 +69,18 @@ def test_shouldSwallowExceptionAndReturnNone(self): self.assertIsNone(cuegui.Utils.findJob(jobName)) + def test_shouldReturnResourceLimitsFromYaml(self): + result = cuegui.Utils.getResourceConfig() + + self.assertEqual({ + 'max_cores': 32, + 'max_gpu_memory': 128, + 'max_gpus': 8, + 'max_memory': 128, + 'max_proc_hour_cutoff': 30, + 'redirect_wasted_cores_threshold': 100, + }, result) + if __name__ == '__main__': unittest.main() diff --git a/cuegui/tests/plugins/LogViewPlugin_tests.py b/cuegui/tests/plugins/LogViewPlugin_tests.py index 9ceb4a5b6..62752c2f3 100644 --- a/cuegui/tests/plugins/LogViewPlugin_tests.py +++ b/cuegui/tests/plugins/LogViewPlugin_tests.py @@ -22,10 +22,10 @@ import 
mock import pyfakefs.fake_filesystem_unittest -import PySide2.QtCore -import PySide2.QtGui -import PySide2.QtTest -import PySide2.QtWidgets +import qtpy.QtCore +import qtpy.QtGui +import qtpy.QtTest +import qtpy.QtWidgets import cuegui.Main import cuegui.plugins.LogViewPlugin @@ -64,28 +64,29 @@ def setUp(self): self.fs.create_file(self.logPath2, contents=_LOG_TEXT_2) test_utils.createApplication() - PySide2.QtGui.qApp.settings = PySide2.QtCore.QSettings() + cuegui.app().settings = qtpy.QtCore.QSettings() cuegui.Style.init() - self.parentWidget = PySide2.QtWidgets.QMainWindow() + self.parentWidget = qtpy.QtWidgets.QMainWindow() self.logViewPlugin = cuegui.plugins.LogViewPlugin.LogViewPlugin(self.parentWidget) def test_shouldDisplayFirstLogFile(self): - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) - + cuegui.app().display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(_LOG_TEXT_1, 0) self.assertEqual(_LOG_TEXT_1, self.logViewPlugin.logview_widget._content_box.toPlainText()) def test_shouldUpdateLogFile(self): - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) + cuegui.app().display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(_LOG_TEXT_1, 0) new_contents = _LOG_TEXT_1 + '\nanother line at the end' self.log1.set_contents(new_contents) - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) - + cuegui.app().display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(new_contents, 0) self.assertEqual(new_contents, self.logViewPlugin.logview_widget._content_box.toPlainText()) def test_shouldHighlightAllSearchResults(self): - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(_LOG_TEXT_1, 0) 
self.logViewPlugin.logview_widget._case_stv_checkbox.setCheckState( - PySide2.QtCore.Qt.CheckState.Unchecked) + qtpy.QtCore.Qt.CheckState.Unchecked) self.logViewPlugin.logview_widget._search_box.setText('lorem') self.logViewPlugin.logview_widget._search_button.click() @@ -100,9 +101,9 @@ def test_shouldHighlightAllSearchResults(self): self.logViewPlugin.logview_widget._content_box, matches[1][0], matches[1][1])) def test_shouldMoveCursorToSecondSearchResult(self): - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(_LOG_TEXT_1, 0) self.logViewPlugin.logview_widget._case_stv_checkbox.setCheckState( - PySide2.QtCore.Qt.CheckState.Unchecked) + qtpy.QtCore.Qt.CheckState.Unchecked) self.logViewPlugin.logview_widget._search_box.setText('lorem') self.logViewPlugin.logview_widget._search_button.click() @@ -114,9 +115,9 @@ def test_shouldMoveCursorToSecondSearchResult(self): self.assertEqual(132, self.logViewPlugin.logview_widget._cursor.position()) def test_shouldMoveCursorLastSearchResult(self): - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(_LOG_TEXT_1, 0) self.logViewPlugin.logview_widget._case_stv_checkbox.setCheckState( - PySide2.QtCore.Qt.CheckState.Unchecked) + qtpy.QtCore.Qt.CheckState.Unchecked) self.logViewPlugin.logview_widget._search_box.setText('lorem') self.logViewPlugin.logview_widget._search_button.click() @@ -128,10 +129,9 @@ def test_shouldMoveCursorLastSearchResult(self): self.assertEqual(132, self.logViewPlugin.logview_widget._cursor.position()) def test_shouldPerformCaseInsensitiveSearch(self): - PySide2.QtGui.qApp.display_log_file_content.emit([self.logPath1, self.logPath2]) + self.logViewPlugin.logview_widget._receive_log_results(_LOG_TEXT_1, 0) self.logViewPlugin.logview_widget._case_stv_checkbox.setCheckState( - PySide2.QtCore.Qt.CheckState.Checked) - + 
qtpy.QtCore.Qt.CheckState.Checked) self.logViewPlugin.logview_widget._search_box.setText('lorem') self.logViewPlugin.logview_widget._search_button.click() matches = self.logViewPlugin.logview_widget._matches @@ -143,12 +143,12 @@ def test_shouldPerformCaseInsensitiveSearch(self): @staticmethod def __isHighlighted(textBox, startPosition, selectionLength): - cursor = textBox.cursorForPosition(PySide2.QtCore.QPoint(0, 0)) + cursor = textBox.cursorForPosition(qtpy.QtCore.QPoint(0, 0)) cursor.setPosition(startPosition) - cursor.movePosition(PySide2.QtGui.QTextCursor.Right, - PySide2.QtGui.QTextCursor.KeepAnchor, + cursor.movePosition(qtpy.QtGui.QTextCursor.Right, + qtpy.QtGui.QTextCursor.KeepAnchor, selectionLength) - return cursor.charFormat().background() == PySide2.QtCore.Qt.red + return cursor.charFormat().background() == qtpy.QtCore.Qt.red if __name__ == '__main__': diff --git a/cuegui/tests/test_utils.py b/cuegui/tests/test_utils.py index 81f22f6cc..dd07840d4 100644 --- a/cuegui/tests/test_utils.py +++ b/cuegui/tests/test_utils.py @@ -16,14 +16,8 @@ """Common utility functions for CueGUI test code.""" -import cuegui.Main +import cuegui -__QAPPLICATION_SINGLETON = None - - -# pylint: disable=global-statement def createApplication(): - global __QAPPLICATION_SINGLETON - if __QAPPLICATION_SINGLETON is None: - __QAPPLICATION_SINGLETON = cuegui.Main.CueGuiApplication() + return cuegui.create_app([]) diff --git a/cuesubmit/Dockerfile b/cuesubmit/Dockerfile index 252936fa9..6f97d671f 100644 --- a/cuesubmit/Dockerfile +++ b/cuesubmit/Dockerfile @@ -1,10 +1,4 @@ -# ----------------- -# BUILD -# ----------------- -FROM centos:7 as build - -# First line after FROM should be unique to avoid --cache-from weirdness. 
-RUN echo "CueSubmit build stage" +FROM --platform=linux/x86_64 centos:7 WORKDIR /src @@ -15,22 +9,17 @@ RUN yum -y install \ python-devel RUN yum -y install \ - python-pip \ python36 \ python36-devel \ python36-pip -RUN python -m pip install --upgrade 'pip<21' RUN python3.6 -m pip install --upgrade pip - -RUN python -m pip install --upgrade 'setuptools<45' RUN python3.6 -m pip install --upgrade setuptools COPY LICENSE ./ COPY requirements.txt ./ COPY requirements_gui.txt ./ -RUN python -m pip install -r requirements.txt -r requirements_gui.txt RUN python3.6 -m pip install -r requirements.txt -r requirements_gui.txt COPY proto/ ./proto @@ -39,7 +28,7 @@ COPY pycue/setup.py ./pycue/ COPY pycue/opencue ./pycue/opencue COPY pycue/FileSequence ./pycue/FileSequence -RUN python -m grpc_tools.protoc \ +RUN python3.6 -m grpc_tools.protoc \ -I=./proto \ --python_out=./pycue/opencue/compiled_proto \ --grpc_python_out=./pycue/opencue/compiled_proto \ @@ -52,7 +41,6 @@ RUN 2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py COPY pyoutline/README.md ./pyoutline/ COPY pyoutline/setup.py ./pyoutline/ COPY pyoutline/bin ./pyoutline/bin -COPY pyoutline/etc ./pyoutline/etc COPY pyoutline/wrappers ./pyoutline/wrappers COPY pyoutline/outline ./pyoutline/outline @@ -65,36 +53,14 @@ COPY cuesubmit/cuesubmit ./cuesubmit/cuesubmit COPY VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION -RUN cd pycue && python setup.py install - RUN cd pycue && python3.6 setup.py install - -RUN cd pyoutline && python setup.py install - RUN cd pyoutline && python3.6 setup.py install - -# TODO(bcipriano) Lint the code here. 
(Issue #78) - -RUN cd cuesubmit && python setup.py test - RUN cd cuesubmit && python3.6 setup.py test RUN cp LICENSE requirements.txt requirements_gui.txt VERSION cuesubmit/ RUN versioned_name="cuesubmit-$(cat ./VERSION)-all" \ - && mv cuesubmit $versioned_name \ - && tar -cvzf $versioned_name.tar.gz $versioned_name/* - - -# ----------------- -# RUN -# ----------------- -FROM centos:7 - -# First line after FROM should be unique to avoid --cache-from weirdness. -RUN echo "CueSubmit runtime stage" - -WORKDIR /opt/opencue - -COPY --from=build /src/cuesubmit-*-all.tar.gz ./ - + && mv cuesubmit "${versioned_name}" \ + && tar -cvzf "${versioned_name}.tar.gz" ${versioned_name}/* \ + && mkdir -p /opt/opencue \ + && cp "${versioned_name}.tar.gz" /opt/opencue/ diff --git a/cuesubmit/cuesubmit/Config.py b/cuesubmit/cuesubmit/Config.py index 99f76242c..d657f5402 100644 --- a/cuesubmit/cuesubmit/Config.py +++ b/cuesubmit/cuesubmit/Config.py @@ -27,6 +27,8 @@ import os import yaml +import opencue.config + CONFIG_FILE_ENV_VAR = 'CUESUBMIT_CONFIG_FILE' @@ -35,7 +37,9 @@ def getConfigValues(): """Reads the config file from disk and returns the values it defines.""" configData = {} configFile = os.environ.get(CONFIG_FILE_ENV_VAR) - if configFile and os.path.exists(configFile): + if not configFile: + configFile = os.path.join(opencue.config.config_base_directory(), 'cuesubmit.yaml') + if os.path.exists(configFile): with open(configFile, 'r') as data: try: configData = yaml.load(data, Loader=yaml.SafeLoader) diff --git a/cuesubmit/cuesubmit/Constants.py b/cuesubmit/cuesubmit/Constants.py index 123f32723..b4f82f13f 100644 --- a/cuesubmit/cuesubmit/Constants.py +++ b/cuesubmit/cuesubmit/Constants.py @@ -39,6 +39,19 @@ BLENDER_RENDER_CMD = config.get('BLENDER_RENDER_CMD', 'blender') FRAME_TOKEN = config.get('FRAME_TOKEN', '#IFRAME#') +# Tokens are replaced by cuebot during dispatch with their computed value. 
+# see: cuebot/src/main/java/com/imageworks/spcue/dispatcher/DispatchSupportService.java +# Update this file when updating tokens in cuebot, they will appear in the cuesubmit tooltip popup. +COMMAND_TOKENS = {'#ZFRAME#': 'Current frame with a padding of 4', + '#IFRAME#': 'Current frame', + '#FRAME_START#': 'First frame of chunk', + '#FRAME_END#': 'Last frame of chunk', + '#FRAME_CHUNK#': 'Chunk size', + '#FRAMESPEC#': 'Full frame range', + '#LAYER#': 'Name of the Layer', + '#JOB#': 'Name of the Job', + '#FRAME#': 'Name of the Frame' + } BLENDER_FORMATS = ['', 'AVIJPEG', 'AVIRAW', 'BMP', 'CINEON', 'DPX', 'EXR', 'HDR', 'IRIS', 'IRIZ', 'JP2', 'JPEG', 'MPEG', 'MULTILAYER', 'PNG', 'RAWTGA', 'TGA', 'TIFF'] BLENDER_OUTPUT_OPTIONS_URL = \ diff --git a/cuesubmit/cuesubmit/__main__.py b/cuesubmit/cuesubmit/__main__.py index 50c111ab0..3adecc770 100644 --- a/cuesubmit/cuesubmit/__main__.py +++ b/cuesubmit/cuesubmit/__main__.py @@ -23,6 +23,7 @@ from __future__ import absolute_import import sys +from PySide2 import QtGui from PySide2 import QtWidgets from cuesubmit import Constants @@ -65,6 +66,7 @@ def __init__(self, name, *args, **kwargs): def main(): """Entrypoint for the CueSubmit application.""" app = CueSubmitApp(sys.argv) + QtGui.qApp = app app.startup() app.exec_() diff --git a/cuesubmit/cuesubmit/ui/Command.py b/cuesubmit/cuesubmit/ui/Command.py index 6377f2ce3..c1d144b24 100644 --- a/cuesubmit/cuesubmit/ui/Command.py +++ b/cuesubmit/cuesubmit/ui/Command.py @@ -23,6 +23,7 @@ from PySide2 import QtCore, QtWidgets from cuesubmit.ui import Widgets +from cuesubmit import Constants class CueCommandWidget(Widgets.CueHelpWidget): @@ -39,7 +40,7 @@ def __init__(self, parent=None): def setupConnections(self): """Sets up widget signals.""" - self.commandTextBox.commandBox.textChanged.connect(self.textChanged.emit) + self.commandTextBox.commandBox.textChanged.connect(self.textChanged.emit) # pylint: disable=no-member def setText(self, text): """Set the given text to the command 
box @@ -69,11 +70,10 @@ def __init__(self, *args, **kwargs): self.commandBox.setAccessibleName('commandBox') self.horizontalLine = Widgets.CueHLine() self.setFixedHeight(120) + tokensToolTip = '\n'.join([' {0} -- {1}'.format(token, info) + for token, info in Constants.COMMAND_TOKENS.items()]) self.commandBox.setToolTip('Enter the command to be run. Valid replacement tokens are:\n' - ' #IFRAME# -- frame number\n' - ' #LAYER# -- layer name\n' - ' #JOB# -- job name\n' - ' #FRAME# -- frame name') + + tokensToolTip) self.setupUi() def setupUi(self): diff --git a/cuesubmit/cuesubmit/ui/Job.py b/cuesubmit/cuesubmit/ui/Job.py index c9ac15bca..557b972fd 100644 --- a/cuesubmit/cuesubmit/ui/Job.py +++ b/cuesubmit/cuesubmit/ui/Job.py @@ -73,10 +73,12 @@ def setupUi(self): def setupConnections(self): """Sets up widget signals.""" self.table.selectionModel().selectionChanged.connect(self.updateSelection) + # pylint: disable=no-member self.addLayerButton.clicked.connect(self.newLayer) self.deleteLayerButton.clicked.connect(self.removeRow) self.upButton.clicked.connect(self.moveUp) self.downButton.clicked.connect(self.moveDown) + # pylint: enable=no-member def setupButtons(self): """Creates buttons working with job layers.""" diff --git a/cuesubmit/cuesubmit/ui/SettingsWidgets.py b/cuesubmit/cuesubmit/ui/SettingsWidgets.py index 43b7ed7dd..01601265a 100644 --- a/cuesubmit/cuesubmit/ui/SettingsWidgets.py +++ b/cuesubmit/cuesubmit/ui/SettingsWidgets.py @@ -92,7 +92,7 @@ def setupUi(self): def setupConnections(self): """Sets up widget signals.""" - self.mayaFileInput.lineEdit.textChanged.connect(self.dataChanged.emit) + self.mayaFileInput.lineEdit.textChanged.connect(self.dataChanged.emit) # pylint: disable=no-member def setCommandData(self, commandData): self.mayaFileInput.setText(commandData.get('mayaFile', '')) @@ -149,7 +149,7 @@ def setupUi(self): def setupConnections(self): """Sets up widget signals.""" - self.fileInput.lineEdit.textChanged.connect(self.dataChanged.emit) + 
self.fileInput.lineEdit.textChanged.connect(self.dataChanged.emit) # pylint: disable=no-member def setCommandData(self, commandData): self.fileInput.setText(commandData.get('nukeFile', '')) @@ -214,8 +214,10 @@ def setupUi(self): def setupConnections(self): """Sets up widget signals.""" + # pylint: disable=no-member self.fileInput.lineEdit.textChanged.connect(self.dataChanged.emit) self.outputPath.lineEdit.textChanged.connect(self.dataChanged.emit) + # pylint: enable=no-member def setCommandData(self, commandData): self.fileInput.setText(commandData.get('nukeFile', '')) diff --git a/cuesubmit/cuesubmit/ui/Submit.py b/cuesubmit/cuesubmit/ui/Submit.py index fb0c6de9e..1e0246d17 100644 --- a/cuesubmit/cuesubmit/ui/Submit.py +++ b/cuesubmit/cuesubmit/ui/Submit.py @@ -58,8 +58,10 @@ def __init__(self, parent=None): def setupConnections(self): """Sets up widget signals.""" + # pylint: disable=no-member self.submitButton.pressed.connect(self.submitPressed) self.cancelButton.pressed.connect(self.cancelPressed) + # pylint: enable=no-member def submitPressed(self): """Handler for when submit button has been pressed.""" @@ -222,6 +224,7 @@ def showEvent(self, event): def setupConnections(self): """Sets up widget signals.""" + # pylint: disable=no-member self.submitButtons.cancelled.connect(self.cancel) self.submitButtons.submitted.connect(self.submit) self.jobTreeWidget.selectionChanged.connect(self.jobLayerSelectionChanged) @@ -235,6 +238,7 @@ def setupConnections(self): self.coresInput.lineEdit.textChanged.connect(self.jobDataChanged) self.chunkInput.lineEdit.textChanged.connect(self.jobDataChanged) self.dependSelector.optionsMenu.triggered.connect(self.dependencyChanged) + # pylint: enable=no-member def setupUi(self): """Creates the widget layout.""" diff --git a/cuesubmit/cuesubmit/ui/Widgets.py b/cuesubmit/cuesubmit/ui/Widgets.py index b1b6f0b54..1bcf9b951 100644 --- a/cuesubmit/cuesubmit/ui/Widgets.py +++ b/cuesubmit/cuesubmit/ui/Widgets.py @@ -69,8 +69,10 @@ def 
setupUi(self): def setupConnections(self): """Sets up widget signals.""" + # pylint: disable=no-member self.lineEdit.textChanged.connect(self.validateText) self.lineEdit.focusChange.connect(self.textFocusChange) + # pylint: enable=no-member def setText(self, text): """Set the text to the given value. @@ -198,7 +200,7 @@ def setupUi(self): def setupConnections(self): """Sets up widget signals.""" - self.optionsMenu.triggered.connect(self.updateLabel) + self.optionsMenu.triggered.connect(self.updateLabel) # pylint: disable=no-member def setOptions(self, options): """Add options to the menu options. @@ -303,12 +305,14 @@ def setupUi(self): def setupConnections(self): """Sets up widget signals.""" + # pylint: disable=no-member self.toggle.valueChanged.connect(self.valueChanged.emit) self.toggle.sliderPressed.connect(self.sliderPressed.emit) self.toggle.sliderMoved.connect(self.sliderMoved.emit) self.toggle.sliderReleased.connect(self.sliderReleased.emit) self.toggle.actionTriggered.connect(self.actionTriggered.emit) self.toggle.rangeChanged.connect(self.rangeChanged.emit) + # pylint: enable=no-member class CueToggle(QtWidgets.QSlider): @@ -326,8 +330,10 @@ def __init__(self, *args, **kwargs): def setupConnections(self): """Sets up widget signals.""" + # pylint: disable=no-member self.valueChanged.connect(self.change) self.sliderPressed.connect(self.toggle) + # pylint: enable=no-member def change(self): """Action when the toggle is dragged.""" @@ -377,7 +383,7 @@ def __init__(self, parent=None): def setupHelpConnections(self): """Sets up widget signal for the help button.""" - self.helpButton.clicked.connect(self.toggleHelp) + self.helpButton.clicked.connect(self.toggleHelp) # pylint: disable=no-member def setHelpText(self): """Set the help text to the widget.""" diff --git a/cuesubmit/setup.py b/cuesubmit/setup.py index dd4f8b462..e9b82d3b4 100644 --- a/cuesubmit/setup.py +++ b/cuesubmit/setup.py @@ -43,6 +43,11 @@ 'Programming Language :: Python :: 2', 'Programming 
Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], packages=find_packages(), package_data={ diff --git a/cuesubmit/tests/Config_tests.py b/cuesubmit/tests/Config_tests.py index 12ce31fba..749d3e098 100644 --- a/cuesubmit/tests/Config_tests.py +++ b/cuesubmit/tests/Config_tests.py @@ -21,9 +21,11 @@ from __future__ import absolute_import import os -import tempfile import unittest +import mock +import pyfakefs.fake_filesystem_unittest + import cuesubmit.Config @@ -36,28 +38,47 @@ CONFIG_YAML_INVALID = b' " some text in an unclosed quote' -class ConfigTests(unittest.TestCase): +class ConfigTests(pyfakefs.fake_filesystem_unittest.TestCase): + def setUp(self): + self.setUpPyfakefs() + if 'CUESUBMIT_CONFIG_FILE' in os.environ: + del os.environ['CUESUBMIT_CONFIG_FILE'] + + def test__should_skip_missing_files_without_error(self): + configData = cuesubmit.Config.getConfigValues() + + self.assertDictEqual({}, configData) + + def test__should_load_config_from_env_var(self): + config_file_path = '/path/to/config.yaml' + self.fs.create_file(config_file_path, contents=CONFIG_YAML) + os.environ['CUESUBMIT_CONFIG_FILE'] = config_file_path + + configData = cuesubmit.Config.getConfigValues() + + self.assertEqual('OPENCUESUBMIT', configData.get('UI_NAME')) + self.assertEqual('OpenCue Submit', configData.get('SUBMIT_APP_WINDOW_TITLE')) + self.assertEqual(None, configData.get('SOME_UNKNOWN_SETTING')) - def testGetConfigValues(self): - with tempfile.NamedTemporaryFile() as fp: - fp.write(CONFIG_YAML) - fp.flush() - os.environ[cuesubmit.Config.CONFIG_FILE_ENV_VAR] = fp.name + @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) + @mock.patch('os.path.expanduser', new=mock.Mock(return_value='/home/username')) + def test__should_load_config_from_user_profile(self): + config_file_path = 
'/home/username/.config/opencue/cuesubmit.yaml' + self.fs.create_file(config_file_path, contents=CONFIG_YAML) - configData = cuesubmit.Config.getConfigValues() + configData = cuesubmit.Config.getConfigValues() - self.assertEqual('OPENCUESUBMIT', configData.get('UI_NAME')) - self.assertEqual('OpenCue Submit', configData.get('SUBMIT_APP_WINDOW_TITLE')) - self.assertEqual(None, configData.get('SOME_UNKNOWN_SETTING')) + self.assertEqual('OPENCUESUBMIT', configData.get('UI_NAME')) + self.assertEqual('OpenCue Submit', configData.get('SUBMIT_APP_WINDOW_TITLE')) + self.assertEqual(None, configData.get('SOME_UNKNOWN_SETTING')) - def testFailOnInvalidYaml(self): - with tempfile.NamedTemporaryFile() as fp: - fp.write(CONFIG_YAML_INVALID) - fp.flush() - os.environ[cuesubmit.Config.CONFIG_FILE_ENV_VAR] = fp.name + def test__should_fail_on_invalid_yaml(self): + config_file_path = '/path/to/config.yaml' + self.fs.create_file(config_file_path, contents=CONFIG_YAML_INVALID) + os.environ['CUESUBMIT_CONFIG_FILE'] = config_file_path - with self.assertRaises(cuesubmit.Config.CuesubmitConfigError): - cuesubmit.Config.getConfigValues() + with self.assertRaises(cuesubmit.Config.CuesubmitConfigError): + cuesubmit.Config.getConfigValues() if __name__ == '__main__': diff --git a/sandbox/docker-compose.yml b/docker-compose.yml similarity index 83% rename from sandbox/docker-compose.yml rename to docker-compose.yml index 09bdf7eb7..656dec4bc 100644 --- a/sandbox/docker-compose.yml +++ b/docker-compose.yml @@ -2,10 +2,10 @@ version: '3' services: db: - image: postgres + image: postgres:15.1 environment: - POSTGRES_USER=cuebot - - POSTGRES_PASSWORD=$POSTGRES_PASSWORD + - POSTGRES_PASSWORD=cuebot_password - POSTGRES_DB=cuebot ports: - "5432:5432" @@ -22,7 +22,7 @@ services: - db environment: - PGUSER=cuebot - - PGPASSWORD=$POSTGRES_PASSWORD + - PGPASSWORD=cuebot_password - PGDATABASE=cuebot - PGHOST=db - PGPORT=5432 @@ -39,8 +39,8 @@ services: - flyway restart: always environment: - - 
CUE_FRAME_LOG_DIR=$CUE_FRAME_LOG_DIR - command: --datasource.cue-data-source.jdbc-url=jdbc:postgresql://db/cuebot --datasource.cue-data-source.username=cuebot --datasource.cue-data-source.password=$POSTGRES_PASSWORD + - CUE_FRAME_LOG_DIR=/tmp/rqd/logs + command: --datasource.cue-data-source.jdbc-url=jdbc:postgresql://db/cuebot --datasource.cue-data-source.username=cuebot --datasource.cue-data-source.password=cuebot_password rqd: image: opencue/rqd diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000..a485625d4 --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1 @@ +/_build diff --git a/docs/_static/images/aswf_aqua.png b/docs/_static/images/aswf_aqua.png new file mode 100644 index 000000000..52bf2be87 Binary files /dev/null and b/docs/_static/images/aswf_aqua.png differ diff --git a/docs/_templates/footer.html b/docs/_templates/footer.html index 6ce7a2807..674845d7e 100644 --- a/docs/_templates/footer.html +++ b/docs/_templates/footer.html @@ -3,7 +3,7 @@ {% block extrafooter %}

-
+
Documentation Distributed under CC BY 4.0
LF Projects, LLC uses various trademarks. For our trademark, privacy and antitrust policies, code of conduct, @@ -21,4 +21,4 @@
{{ super() }} -{% endblock %} \ No newline at end of file +{% endblock %} diff --git a/docs/conf.py b/docs/conf.py index ba0826c30..230855ca1 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -25,9 +25,9 @@ author = u'Academy Software Foundation' # The short X.Y version -version = u'0.3.6' +version = u'built from source' # The full version, including alpha/beta/rc tags -release = u'0.3.6' +release = u'built from source' # -- General configuration --------------------------------------------------- @@ -67,7 +67,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. @@ -87,14 +87,13 @@ html_theme = 'sphinx_rtd_theme' # Logo (optional) html_logo = "images/opencue_logo_navbar.png" - +html_baseurl = 'https://www.opencue.io/' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { - 'canonical_url': 'https://www.opencue.io/', 'logo_only': True, 'display_version': True, 'prev_next_buttons_location': 'bottom', diff --git a/docs/requirements.txt b/docs/requirements.txt index 607db205d..1f3307af5 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,2 +1,2 @@ -sphinx==3.0.3 -sphinx_rtd_theme==0.4.3 +sphinx==5.0.0 +sphinx-rtd-theme==1.0.0 diff --git a/proto/README.md b/proto/README.md index 154a1d5d7..0b039b462 100644 --- a/proto/README.md +++ b/proto/README.md @@ -14,12 +14,15 @@ To generate: ```sh python -m grpc_tools.protoc -I=. --python_out=../rqd/rqd/compiled_proto --grpc_python_out=../rqd/rqd/compiled_proto ./*.proto +2to3 -wn -f import ../rqd/rqd/compiled_proto/*_pb2*.py ``` For Windows (Powershell): ```powershell python -m grpc_tools.protoc --proto_path=. 
--python_out=../rqd/rqd/compiled_proto --grpc_python_out=../rqd/rqd/compiled_proto (ls *.proto).Name +cd ..\rqd\rqd\compiled_proto\ +2to3 -wn -f import (ls *_pb2*.py).Name ``` @@ -29,12 +32,15 @@ To generate: ```sh python -m grpc_tools.protoc -I=. --python_out=../pycue/opencue/compiled_proto --grpc_python_out=../pycue/opencue/compiled_proto ./*.proto +2to3 -wn -f import ../pycue/opencue/compiled_proto/*_pb2*.py ``` For Windows (Powershell): ```powershell python -m grpc_tools.protoc --proto_path=. --python_out=../pycue/opencue/compiled_proto --grpc_python_out=../pycue/opencue/compiled_proto (ls *.proto).Name +cd ..\pycue\opencue\compiled_proto\ +2to3 -wn -f import (ls *_pb2*.py).Name ``` diff --git a/proto/facility.proto b/proto/facility.proto index 1b427c6ca..ae7f3d4b8 100644 --- a/proto/facility.proto +++ b/proto/facility.proto @@ -62,6 +62,12 @@ service AllocationInterface { // Set the allocation tag. Setting this will re-tag all the hosts in this allocation. rpc SetTag(AllocSetTagRequest) returns (AllocSetTagResponse); + + // Return the default allocation. + rpc GetDefault(AllocGetDefaultRequest) returns (AllocGetDefaultResponse); + + // Set the default allocation. 
+ rpc SetDefault(AllocSetDefaultRequest) returns (AllocSetDefaultResponse); } @@ -99,6 +105,11 @@ message AllocationStats { int32 hosts = 6; int32 locked_hosts = 7; int32 down_hosts = 8; + float gpus = 9; + float available_gpus = 10; + float idle_gpus = 11; + float running_gpus = 12; + float locked_gpus = 13; } @@ -245,4 +256,18 @@ message AllocSetTagRequest { string tag = 2; } -message AllocSetTagResponse {} // +message AllocSetTagResponse {} // Empty + +// GetDefault +message AllocGetDefaultRequest {} // Empty + +message AllocGetDefaultResponse { + Allocation allocation = 1; +} + +// SetDefault +message AllocSetDefaultRequest { + Allocation allocation = 1; +} + +message AllocSetDefaultResponse {} // Empty diff --git a/proto/filter.proto b/proto/filter.proto index fdfae12e6..184097782 100644 --- a/proto/filter.proto +++ b/proto/filter.proto @@ -96,10 +96,15 @@ enum ActionType { SET_ALL_RENDER_LAYER_TAGS = 6; // Sets all layer minimum memory for any layer with the type "Render" SET_ALL_RENDER_LAYER_MEMORY = 7; - // Sets all min cores for any layer with the type "Render" - SET_ALL_RENDER_LAYER_CORES = 8; + // This field is deprecated, use SET_ALL_RENDER_LAYER_MIN_CORES and + // SET_ALL_RENDER_LAYER_MAX_CORES instead. + SET_ALL_RENDER_LAYER_CORES = 8 [deprecated = true]; // Set memory optimizer SET_MEMORY_OPTIMIZER = 9; + // Sets all min cores for any layer with the type "Render" + SET_ALL_RENDER_LAYER_MIN_CORES = 10; + // Sets all max cores for any layer with the type "Render" + SET_ALL_RENDER_LAYER_MAX_CORES = 11; }; enum ActionValueType { diff --git a/proto/host.proto b/proto/host.proto index 7d715a4de..9a7e30917 100644 --- a/proto/host.proto +++ b/proto/host.proto @@ -22,12 +22,6 @@ service DeedInterface { // Returns the owner for these settings. rpc GetOwner(DeedGetOwnerRequest) returns (DeedGetOwnerResponse); - - // Sets a blackout time for the host. 
- rpc SetBlackoutTime(DeedSetBlackoutTimeRequest) returns (DeedSetBlackoutTimeResponse); - - // Enable/Disable blackout time without changing the times. - rpc SetBlackoutTimeEnabled(DeedSetBlackoutTimeEnabledRequest) returns (DeedSetBlackoutTimeEnabledResponse); } service HostInterface { @@ -235,9 +229,6 @@ message Deed { string host = 2; string owner = 3; string show = 4; - bool blackout = 5; - int32 blackout_start_time = 6; - int32 blackout_stop_time = 7; } message DeedSeq { @@ -258,16 +249,16 @@ message Host { float idle_cores = 7; int64 memory = 8; int64 idle_memory = 9; - int64 gpu = 10; - int64 idle_gpu = 11; + int64 gpu_memory = 10; + int64 idle_gpu_memory = 11; int64 total_swap = 12; int64 total_memory = 13; - int64 total_gpu = 14; + int64 total_gpu_memory = 14; int64 total_mcp = 15; int64 free_swap = 16; int64 free_memory = 17; int64 free_mcp = 18; - int64 free_gpu = 19; + int64 free_gpu_memory = 19; int32 load = 20; int32 boot_time = 21; int32 ping_time = 22; @@ -276,6 +267,8 @@ message Host { HardwareState state = 25; LockState lock_state = 26; ThreadMode thread_mode = 27; + float gpus = 28; + float idle_gpus = 29; } message HostSearchCriteria { @@ -301,16 +294,16 @@ message NestedHost { float idle_cores = 7; int64 memory = 8; int64 idle_memory = 9; - int64 gpu = 10; - int64 idle_gpu = 11; + int64 gpu_memory = 10; + int64 idle_gpu_memory = 11; int64 total_swap = 12; int64 total_memory = 13; - int64 total_gpu = 14; + int64 total_gpu_memory = 14; int64 total_mcp = 15; int64 free_swap = 16; int64 free_memory = 17; int64 free_mcp = 18; - int64 free_gpu = 19; + int64 free_gpu_memory = 19; int32 load = 20; int32 boot_time = 21; int32 ping_time = 22; @@ -320,6 +313,8 @@ message NestedHost { LockState lock_state = 26; ThreadMode thread_mode = 27; NestedProcSeq procs = 28; + float gpus = 29; + float idle_gpus = 30; } message NestedHostSeq { @@ -337,7 +332,7 @@ message NestedProc { int32 bookedTime = 8; int32 dispatch_time = 9; int64 reserved_memory = 10; - int64 
reserverd_gpu = 11; + int64 reserved_gpu_memory = 11; int64 used_memory = 12; float reserved_cores = 13; bool unbooked = 14; @@ -345,6 +340,8 @@ message NestedProc { string redirect_target = 16; repeated string services = 17; NestedHost parent = 18; + int64 used_gpu_memory = 19; + float reserved_gpus = 20; } message NestedProcSeq { @@ -369,13 +366,16 @@ message Proc { int32 bookedTime = 8; int32 dispatch_time = 9; int64 reserved_memory = 10; - int64 reserved_gpu = 11; + int64 reserved_gpu_memory = 11; int64 used_memory = 12; float reserved_cores = 13; bool unbooked = 14; string log_path = 15; string redirect_target = 16; repeated string services = 17; + int64 used_gpu_memory = 18; + float reserved_gpus = 19; + bytes child_processes = 20; } message ProcSearchCriteria { @@ -385,7 +385,7 @@ message ProcSearchCriteria { // An array of job names to match. repeated string jobs = 2; - // An arra of layer names to match. + // An array of layer names to match. repeated string layers = 3; // An array of show names to match. 
@@ -440,23 +440,6 @@ message DeedGetOwnerResponse { Owner owner = 1; } -// SetBlackoutTimeEnabled -message DeedSetBlackoutTimeEnabledRequest { - Deed deed = 1; - bool enabled = 2; -} - -message DeedSetBlackoutTimeEnabledResponse {} // Empty - -// SetBlackoutTime -message DeedSetBlackoutTimeRequest { - Deed deed = 1; - int32 start_time = 2; - int32 stop_time = 3; -} - -message DeedSetBlackoutTimeResponse {} // Empty - // HOST ---- // AddComment message HostAddCommentRequest { diff --git a/proto/job.proto b/proto/job.proto index 4a74f3aa8..79a0dc234 100644 --- a/proto/job.proto +++ b/proto/job.proto @@ -100,6 +100,12 @@ service GroupInterface { // Set the Default Job Min Core values to all in the provided group rpc SetDefaultJobMinCores(GroupSetDefJobMinCoresRequest) returns (GroupSetDefJobMinCoresResponse); + // Set the Default Job Max Gpu values to all in the provided group + rpc SetDefaultJobMaxGpus(GroupSetDefJobMaxGpusRequest) returns (GroupSetDefJobMaxGpusResponse); + + // Set the Default Job Min Gpu values to all in the provided group + rpc SetDefaultJobMinGpus(GroupSetDefJobMinGpusRequest) returns (GroupSetDefJobMinGpusResponse); + // Set the Default Job Priority values to all in the provided group rpc SetDefaultJobPriority(GroupSetDefJobPriorityRequest) returns (GroupSetDefJobPriorityResponse); @@ -115,6 +121,12 @@ service GroupInterface { // Set the groups Min Cores values rpc SetMinCores(GroupSetMinCoresRequest) returns (GroupSetMinCoresResponse); + // Set the group's Max Gpu value + rpc SetMaxGpus(GroupSetMaxGpusRequest) returns (GroupSetMaxGpusResponse); + + // Set the groups Min Gpu values + rpc SetMinGpus(GroupSetMinGpusRequest) returns (GroupSetMinGpusResponse); + // Set the groups name rpc SetName(GroupSetNameRequest) returns (GroupSetNameResponse); } @@ -248,9 +260,19 @@ service JobInterface { // Sets the minimum number of procs that can run on this job rpc SetMinCores(JobSetMinCoresRequest) returns (JobSetMinCoresResponse); + // Sets the maximum 
number of Gpu that can run on this job + rpc SetMaxGpus(JobSetMaxGpusRequest) returns (JobSetMaxGpusResponse); + + // Sets the minimum number of Gpu that can run on this job + rpc SetMinGpus(JobSetMinGpusRequest) returns (JobSetMinGpusResponse); + // Sets the job priority rpc SetPriority(JobSetPriorityRequest) returns (JobSetPriorityResponse); + // Shutdown the job if it is completed. This is a workaround for when + // Cuebot failed to shutdown a job due to database access error. + rpc ShutdownIfCompleted(JobShutdownIfCompletedRequest) returns (JobShutdownIfCompletedResponse); + // Staggers the specified frame range rpc StaggerFrames(JobStaggerFramesRequest) returns (JobStaggerFramesResponse); } @@ -331,7 +353,16 @@ service LayerInterface { // Set the Min Cores for this layer rpc SetMinCores(LayerSetMinCoresRequest) returns (LayerSetMinCoresResponse); - // Set the Min gpu value for the layer + // The maximum number of Gpu to run on a given frame within this layer. + rpc SetMaxGpus(LayerSetMaxGpusRequest) returns (LayerSetMaxGpusResponse); + + // Set the Min Gpus for this layer + rpc SetMinGpus(LayerSetMinGpusRequest) returns (LayerSetMinGpusResponse); + + // Set the Min gpu memory value for the layer + rpc SetMinGpuMemory(LayerSetMinGpuMemoryRequest) returns (LayerSetMinGpuMemoryResponse); + + // [Deprecated] Set the Min gpu memory value for the layer rpc SetMinGpu(LayerSetMinGpuRequest) returns (LayerSetMinGpuResponse); // Set the Min Memory value for the layer @@ -369,8 +400,8 @@ enum CheckpointState { enum FrameExitStatus { // The frame was a success SUCCESS = 0; - // The frame should not be retried - NO_RETRY = 256; + // The frame should be automatically retried + FAILED_LAUNCH = 256; // Retries should not be incremented SKIP_RETRY = 286; } @@ -450,12 +481,15 @@ message Frame { int64 max_rss = 11; int64 used_memory = 12; int64 reserved_memory = 13; - int64 reserved_gpu = 14; + int64 reserved_gpu_memory = 14; string last_resource = 15; CheckpointState 
checkpoint_state = 16; int32 checkpoint_count = 17; int32 total_core_time = 18; int32 llu_time = 19; + int32 total_gpu_time = 20; + int64 max_gpu_memory = 21; + int64 used_gpu_memory = 22; } // Object for frame searching @@ -499,6 +533,8 @@ message UpdatedFrame { int64 used_memory = 8; string last_resource = 9; int32 llu_time = 10; + int64 max_gpu_memory = 11; + int64 used_gpu_memory = 12; } message UpdatedFrameSeq { @@ -528,6 +564,10 @@ message Group { int32 level = 9; string parent_id = 10; GroupStats group_stats = 11; + float default_job_min_gpus = 12; + float default_job_max_gpus = 13; + float min_gpus = 14; + float max_gpus = 15; } message GroupSeq { @@ -541,6 +581,7 @@ message GroupStats { int32 waiting_frames = 4; int32 pending_jobs = 5; float reserved_cores = 6; + float reserved_gpus = 7; } // JOB ---- @@ -567,6 +608,8 @@ message Job { int32 start_time = 18; int32 stop_time = 19; JobStats job_stats = 20; + float min_gpus = 21; + float max_gpus = 22; } // Use to filter the job search. 
Please note that by searching for non-pending jobs, the output is limited to 200 jobs @@ -606,6 +649,11 @@ message JobStats { int64 failed_core_sec = 18; int64 max_rss = 19; float reserved_cores = 20; + int64 total_gpu_sec = 21; + int64 rendered_gpu_sec = 22; + int64 failed_gpu_sec = 23; + float reserved_gpus = 24; + int64 max_gpu_memory = 25; } // LAYER ---- @@ -618,7 +666,7 @@ message Layer { float max_cores = 6; bool is_threadable = 7; int64 min_memory = 8; - int64 min_gpu = 9; + int64 min_gpu_memory = 9; int32 chunk_size = 10; int32 dispatch_order = 11; LayerType type = 12; @@ -631,6 +679,8 @@ message Layer { repeated string limits = 17; int32 timeout = 18; int32 timeout_llu = 19; + float min_gpus = 20; + float max_gpus = 21; } message LayerSeq { @@ -658,6 +708,11 @@ message LayerStats { int64 failed_core_sec = 18; int64 max_rss = 19; float reserved_cores = 20; + int64 total_gpu_sec = 21; + int64 rendered_gpu_sec = 22; + int64 failed_gpu_sec = 23; + float reserved_gpus = 24; + int64 max_gpu_memory = 25; } // NestedGroup --- @@ -675,6 +730,10 @@ message NestedGroup { NestedGroupSeq groups = 11; repeated string jobs = 12; GroupStats stats = 13; + float default_job_min_gpus = 14; + float default_job_max_gpus = 15; + float min_gpus = 16; + float max_gpus = 17; } message NestedGroupSeq { @@ -706,6 +765,8 @@ message NestedJob { int32 stop_time = 19; NestedGroup parent = 20; JobStats stats = 21; + float min_gpus = 22; + float max_gpus = 23; } @@ -719,8 +780,9 @@ message FrameAddRenderPartitionRequest { int32 threads = 3; int32 max_cores = 4; int64 max_memory = 5; - int64 max_gpu = 6; + int64 max_gpu_memory = 6; string username = 7; + int32 max_gpus = 8; } message FrameAddRenderPartitionResponse { @@ -944,6 +1006,22 @@ message GroupSetDefJobMinCoresRequest { message GroupSetDefJobMinCoresResponse {} // Empty +// SetDefaultJobMaxGpus +message GroupSetDefJobMaxGpusRequest { + Group group = 1; + int32 max_gpus = 2; +} + +message GroupSetDefJobMaxGpusResponse {} // Empty + 
+// SetDefaultJobMinGpus +message GroupSetDefJobMinGpusRequest { + Group group = 1; + int32 min_gpus = 2; +} + +message GroupSetDefJobMinGpusResponse {} // Empty + // SetDefJobPriority message GroupSetDefJobPriorityRequest { Group group = 1; @@ -984,6 +1062,22 @@ message GroupSetMinCoresRequest { message GroupSetMinCoresResponse {} // Empty +// SetMaxGpus +message GroupSetMaxGpusRequest { + Group group = 1; + int32 max_gpus = 2; +} + +message GroupSetMaxGpusResponse {} // Empty + +// SetMinGpus +message GroupSetMinGpusRequest { + Group group = 1; + int32 min_gpus = 2; +} + +message GroupSetMinGpusResponse {} // Empty + // SetName message GroupSetNameRequest { Group group = 1; @@ -1008,8 +1102,9 @@ message JobAddRenderPartRequest { int32 threads = 3; int32 max_cores = 4; int64 max_memory = 5; - int64 max_gpu = 6; + int64 max_gpu_memory = 6; string username = 7; + int32 max_gpus = 8; } message JobAddRenderPartResponse { @@ -1296,6 +1391,14 @@ message JobSetMaxCoresRequest { message JobSetMaxCoresResponse {} // Empty +// SetMaxGpus +message JobSetMaxGpusRequest { + Job job = 1; + int32 val = 2; +} + +message JobSetMaxGpusResponse {} // Empty + // SetMaxRetries message JobSetMaxRetriesRequest { Job job = 1; @@ -1312,6 +1415,14 @@ message JobSetMinCoresRequest { message JobSetMinCoresResponse {} // Empty +// SetMinGpus +message JobSetMinGpusRequest { + Job job = 1; + int32 val = 2; +} + +message JobSetMinGpusResponse {} // Empty + // SetPriority message JobSetPriorityRequest { Job job = 1; @@ -1320,6 +1431,13 @@ message JobSetPriorityRequest { message JobSetPriorityResponse {} // Empty +// ShutdownIfCompleted +message JobShutdownIfCompletedRequest { + Job job = 1; +} + +message JobShutdownIfCompletedResponse {} // Empty + // StaggerFrames message JobStaggerFramesRequest { Job job = 1; @@ -1346,8 +1464,9 @@ message LayerAddRenderPartitionRequest { int32 threads = 3; int32 max_cores = 4; int64 max_memory = 5; - int64 max_gpu = 6; + int64 max_gpu_memory = 6; string 
username = 7; + int32 max_gpus = 8; } message LayerAddRenderPartitionResponse { @@ -1545,14 +1664,39 @@ message LayerSetMinCoresRequest { message LayerSetMinCoresResponse {} // Empty -// SetMinGpu +// [Deprecated] SetMinGpu message LayerSetMinGpuRequest { - Layer layer = 1; - int64 gpu = 2; + Layer layer = 1 [deprecated=true]; + int64 gpu = 2 [deprecated=true]; } +// [Deprecated] message LayerSetMinGpuResponse {} // Empty +// SetMaxGpus +message LayerSetMaxGpusRequest { + Layer layer = 1; + int32 max_gpus = 2; +} + +message LayerSetMaxGpusResponse {} // Empty + +// SetMinGpus +message LayerSetMinGpusRequest { + Layer layer = 1; + int32 min_gpus = 2; +} + +message LayerSetMinGpusResponse {} // Empty + +// SetMinGpuMemory +message LayerSetMinGpuMemoryRequest { + Layer layer = 1; + int64 gpu_memory = 2; +} + +message LayerSetMinGpuMemoryResponse {} // Empty + // SetMinMemory message LayerSetMinMemoryRequest { Layer layer = 1; diff --git a/proto/renderPartition.proto b/proto/renderPartition.proto index 9d7b554f3..c8553ed3c 100644 --- a/proto/renderPartition.proto +++ b/proto/renderPartition.proto @@ -40,8 +40,11 @@ message RenderPartition { int64 memory = 8; int32 max_cores = 9; int64 max_memory = 10; - int64 max_gpu = 11; + int64 max_gpu_memory = 11; int32 threads = 12; + int32 gpus = 13; + int32 max_gpus = 14; + int64 gpu_memory = 15; } message RenderPartitionSeq { @@ -61,7 +64,8 @@ message RenderPartSetMaxResourcesRequest { RenderPartition render_partition = 1; int32 cores = 2; int64 memory = 3; - int64 gpu = 4; + int64 gpu_memory = 4; + int32 gpus = 5; } message RenderPartSetMaxResourcesResponse {} // Empty diff --git a/proto/report.proto b/proto/report.proto index 7a1fffdb2..441f810c3 100644 --- a/proto/report.proto +++ b/proto/report.proto @@ -36,6 +36,13 @@ message CoreDetail { int32 idle_cores = 2; int32 locked_cores = 3; int32 booked_cores = 4; + //map + map reserved_cores = 5; +} + +message CoreId { + repeated int64 coreid = 1; + } message FrameCompleteReport 
{ @@ -59,17 +66,20 @@ message RenderHost { string facility= 4; // The name of the facility that the host is in int32 num_procs = 5; // the number of physical procs on this machine int32 cores_per_proc = 6; // the number of cores per proc - int32 total_swap = 7; // the total size of the swap in kB - int32 total_mem = 8; // the total size of the main memory pool in kB - int32 total_mcp = 9; // the total size of MCP in kB - int32 free_swap = 10; // the current amount of free swap in kB - int32 free_mem = 11; // the current amount of free memory in kB - int32 free_mcp = 12; // the current amount of free MCP in kB + int64 total_swap = 7; // the total size of the swap in kB + int64 total_mem = 8; // the total size of the main memory pool in kB + int64 total_mcp = 9; // the total size of MCP in kB + int64 free_swap = 10; // the current amount of free swap in kB + int64 free_mem = 11; // the current amount of free memory in kB + int64 free_mcp = 12; // the current amount of free MCP in kB int32 load = 13; // the current load on the proc int32 boot_time = 14; // the time the proc was booted repeated string tags = 15; // an array of default tags that are added to the host record host.HardwareState state = 16; // hardware state for the host map attributes = 17; // additional data can be provided about the host + int32 num_gpus = 18; // the number of physical GPU's + int64 free_gpu_mem = 19; // the current amount of free gpu memory in kB + int64 total_gpu_mem = 20; // the total size of gpu memory in kB }; message RunningFrameInfo { @@ -87,11 +97,65 @@ message RunningFrameInfo { int64 vsize = 12; // kB map attributes = 13; //additional data can be provided about the running frame int64 llu_time = 14; + int32 num_gpus = 15; + int64 max_used_gpu_memory = 16; // kB + int64 used_gpu_memory = 17; // kB + ChildrenProcStats children = 18; //additional data about the running frame's child processes }; +message ChildrenProcStats { + repeated ProcStats children = 2; +} +message ProcStats 
{ + Stat stat = 1; + Statm statm = 2; + Status status = 3; + string cmdline = 4; + string start_time = 5; +} + +message Stat { + int64 rss = 1; + int64 vsize = 2; + string state = 3; + string name = 4; + string pid = 5; +} + +message Statm { + int64 rss = 1; + int64 size = 2; +} + +message Status { + int64 vrss = 1; + int64 vsize = 2; +} // -------- Requests & Responses --------] +message ChildrenProcStatsRequest { + string frame_id = 1; + repeated ProcStats children = 2; +} + +message ChildrenProcStatsResponse { + ChildrenProcStats children_procs_stats = 1; +} + +message ProcStatsRequest { + string name = 1; + string stat = 2; + string statm = 3; + string cmdline = 4; + string state = 5; + string frame_id = 6; +} + +message ProcStatsResponse { + ProcStats proc_stats = 1; +} + // ReportRqdStartup message RqdReportRqdStartupRequest { BootReport boot_report = 1; diff --git a/proto/rqd.proto b/proto/rqd.proto index a4b83f0c3..6d82ccaab 100644 --- a/proto/rqd.proto +++ b/proto/rqd.proto @@ -110,6 +110,8 @@ message RunFrame { bool ignore_nimby = 20; map environment = 21; map attributes = 22; + int32 num_gpus = 23; + report.ChildrenProcStats children = 24; } message RunFrameSeq { diff --git a/proto/service.proto b/proto/service.proto index 8b554b388..57110bdaf 100644 --- a/proto/service.proto +++ b/proto/service.proto @@ -46,11 +46,14 @@ message Service { bool threadable = 3; int32 min_cores = 4; int32 max_cores = 5; - int32 min_memory = 6; - int32 min_gpu = 7; + int64 min_memory = 6; + int64 min_gpu_memory = 7; repeated string tags = 8; int32 timeout = 9; int32 timeout_llu = 10; + int32 min_gpus = 11; + int32 max_gpus = 12; + int32 min_memory_increase = 13; } message ServiceSeq { diff --git a/proto/show.proto b/proto/show.proto index ca237cc54..f0fbf34b4 100644 --- a/proto/show.proto +++ b/proto/show.proto @@ -99,6 +99,12 @@ service ShowInterface { // sets a show's default min procs rpc SetDefaultMinCores(ShowSetDefaultMinCoresRequest) returns 
(ShowSetDefaultMinCoresResponse); + + // sets a show's default max Gpus + rpc SetDefaultMaxGpus(ShowSetDefaultMaxGpusRequest) returns (ShowSetDefaultMaxGpusResponse); + + // sets a show's default min Gpus + rpc SetDefaultMinGpus(ShowSetDefaultMinGpusRequest) returns (ShowSetDefaultMinGpusResponse); } @@ -114,6 +120,8 @@ message Show { bool dispatch_enabled = 7; bool active = 8; ShowStats show_stats = 9; + float default_min_gpus = 10; + float default_max_gpus = 11; } message ShowSeq { @@ -130,6 +138,7 @@ message ShowStats { int64 rendered_frame_count = 7; int64 failed_frame_count = 8; float reserved_cores = 9; + float reserved_gpus = 10; } @@ -375,3 +384,19 @@ message ShowSetDefaultMinCoresRequest { } message ShowSetDefaultMinCoresResponse {} // Empty + +// SetDefaultMaxGpus +message ShowSetDefaultMaxGpusRequest { + Show show = 1; + int32 max_gpus = 2; +} + +message ShowSetDefaultMaxGpusResponse {} // Empty + +// SetDefaultMinGpus +message ShowSetDefaultMinGpusRequest { + Show show = 1; + int32 min_gpus = 2; +} + +message ShowSetDefaultMinGpusResponse {} // Empty diff --git a/proto/subscription.proto b/proto/subscription.proto index 8c7817da4..8ac100e7e 100644 --- a/proto/subscription.proto +++ b/proto/subscription.proto @@ -41,6 +41,7 @@ message Subscription { int32 size = 6; int32 burst = 7; int32 reserved_cores = 8; + int32 reserved_gpus = 9; } message SubscriptionSeq { diff --git a/pycue/Dockerfile b/pycue/Dockerfile index 54f8a2dc2..e93f9526a 100644 --- a/pycue/Dockerfile +++ b/pycue/Dockerfile @@ -1,35 +1,14 @@ -# ----------------- -# BUILD -# ----------------- -FROM centos:7 as build - -# First line after FROM should be unique to avoid --cache-from weirdness. 
-RUN echo "PyCue build stage" +FROM python:3.9.14 WORKDIR /src -RUN yum -y install \ - epel-release \ - gcc \ - python-devel - -RUN yum -y install \ - python-pip \ - python36 \ - python36-devel \ - python36-pip - -RUN python -m pip install --upgrade 'pip<21' -RUN python3.6 -m pip install --upgrade pip - -RUN python -m pip install --upgrade 'setuptools<45' -RUN python3.6 -m pip install --upgrade setuptools +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools COPY LICENSE ./ COPY requirements.txt ./ -RUN python -m pip install -r requirements.txt -RUN python3.6 -m pip install -r requirements.txt +RUN python3 -m pip install -r requirements.txt COPY proto/ ./proto COPY pycue/README.md ./pycue/ @@ -38,7 +17,7 @@ COPY pycue/FileSequence ./pycue/FileSequence COPY pycue/tests/ ./pycue/tests COPY pycue/opencue ./pycue/opencue -RUN python -m grpc_tools.protoc \ +RUN python3 -m grpc_tools.protoc \ -I=./proto \ --python_out=./pycue/opencue/compiled_proto \ --grpc_python_out=./pycue/opencue/compiled_proto \ @@ -48,30 +27,15 @@ RUN python -m grpc_tools.protoc \ # for more info. RUN 2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py -# TODO(bcipriano) Lint the code here. (Issue #78) - COPY VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION -RUN cd pycue && python setup.py test - -RUN cd pycue && python3.6 setup.py test +RUN cd pycue && python3 setup.py test RUN cp LICENSE requirements.txt VERSION pycue/ RUN versioned_name="pycue-$(cat ./VERSION)-all" \ - && mv pycue $versioned_name \ - && tar -cvzf $versioned_name.tar.gz $versioned_name/* - - -# ----------------- -# RUN -# ----------------- -FROM centos:7 - -# First line after FROM should be unique to avoid --cache-from weirdness. 
-RUN echo "PyCue runtime stage" - -WORKDIR /opt/opencue - -COPY --from=build /src/pycue-*-all.tar.gz ./ + && mv pycue "${versioned_name}" \ + && tar -cvzf "${versioned_name}.tar.gz" ${versioned_name}/* \ + && mkdir /opt/opencue \ + && cp "${versioned_name}.tar.gz" /opt/opencue/ diff --git a/pycue/opencue/api.py b/pycue/opencue/api.py index 10dbed4cc..d43575496 100644 --- a/pycue/opencue/api.py +++ b/pycue/opencue/api.py @@ -325,6 +325,7 @@ def getJobs(**options): - show: show names - list - shot: shot names - list - user: user names - list + - include_finished - bool :rtype: list :return: a list of Job objects @@ -457,7 +458,7 @@ def getFrame(uniq): @util.grpcExceptionParser def getFrames(job, **options): - """Finds frames in a job that match the search critieria. + """Finds frames in a job that match the search criteria. :type job: str :param job: the job name @@ -644,6 +645,28 @@ def deleteAllocation(alloc): facility_pb2.AllocDeleteRequest(allocation=alloc), timeout=Cuebot.Timeout) +@util.grpcExceptionParser +def getDefaultAllocation(): + """Get the default allocation. + + :rtype: Allocation + :return: an Allocation object""" + return Allocation(Cuebot.getStub('allocation').GetDefault( + facility_pb2.AllocGetDefaultRequest(), timeout=Cuebot.Timeout).allocation) + + +@util.grpcExceptionParser +def setDefaultAllocation(alloc): + """Set the default allocation. + + :type alloc: facility_pb2.Allocation + :param alloc: allocation to set default + :rtype: facility_pb2.AllocSetDefaultResponse + :return: empty response""" + return Cuebot.getStub('allocation').SetDefault( + facility_pb2.AllocSetDefaultRequest(allocation=alloc), timeout=Cuebot.Timeout) + + @util.grpcExceptionParser def allocSetBillable(alloc, is_billable): """Sets an allocation billable or not. 
@@ -742,7 +765,7 @@ def getProcs(**options): - "lt5" is less than 5 hours - "5-10" is range of 5 to 10 hours - :rtype: list + :rtype: list[opencue.wrapper.proc.Proc] :return: a list of Proc objects""" procSeq = search.ProcSearch.byOptions(**options).procs return [Proc(p) for p in procSeq.procs] @@ -772,3 +795,14 @@ def getLimits(): :return: a list of Limit objects""" return [Limit(limit) for limit in Cuebot.getStub('limit').GetAll( limit_pb2.LimitGetAllRequest(), timeout=Cuebot.Timeout).limits] + +@util.grpcExceptionParser +def findLimit(name): + """Returns the Limit object that matches the name. + + :type name: str + :param name: a string that represents a limit to return + :rtype: Limit + :return: the matching Limit object""" + return Limit(Cuebot.getStub('limit').Find( + limit_pb2.LimitFindRequest(name=name), timeout=Cuebot.Timeout).limit) diff --git a/pycue/opencue/config.py b/pycue/opencue/config.py new file mode 100644 index 000000000..7d1040a87 --- /dev/null +++ b/pycue/opencue/config.py @@ -0,0 +1,96 @@ +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""OpenCue configuration.""" + +import logging +import os +import platform + +import yaml + + +logger = logging.getLogger("opencue") + + +# Config file from which default settings are loaded. This file is distributed with the +# opencue Python library. 
+__DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__), 'default.yaml') + +# Environment variables which can be used to define a custom config file. Any settings +# defined in this file will be used instead of the defaults. +__CONFIG_FILE_ENV_VARS = [ + # OPENCUE_CONFIG_FILE is the preferred setting to use. + 'OPENCUE_CONFIG_FILE', + # OPENCUE_CONF is deprecated, but kept for now for backwards compatibility. + 'OPENCUE_CONF', +] + + +def config_base_directory(): + """Returns the OpenCue config base directory. + + This platform-dependent directory, stored within your user profile, is used by + OpenCue components as the default location for various configuration files. Typically + if you store your config files in this location, there is no need to set environment + variables to indicate where your config files are located -- OpenCue should recognize + them automatically. + + NOTE: This work is ongoing. Over time more OpenCue components will start using this + base directory. See https://github.com/AcademySoftwareFoundation/OpenCue/issues/785. + + :rtype: str + :return: config file base directory + """ + if platform.system() == 'Windows': + return os.path.join(os.path.expandvars('%APPDATA%'), 'opencue') + return os.path.join(os.path.expanduser('~'), '.config', 'opencue') + + +def load_config_from_file(): + """Loads configuration settings from config file on the local system. + + Default settings are read from default.yaml which is distributed with the opencue library. + User-provided config is then read from disk, in order of preference: + - Path defined by the OPENCUE_CONFIG_FILE environment variable. + - Path defined by the OPENCUE_CONF environment variable. + - Path within the config base directory (i.e. 
~/.config/opencue/opencue.yaml) + + :rtype: dict + :return: config settings + """ + with open(__DEFAULT_CONFIG_FILE) as file_object: + config = yaml.load(file_object, Loader=yaml.SafeLoader) + + user_config_file = None + + for config_file_env_var in __CONFIG_FILE_ENV_VARS: + logger.debug('Checking for opencue config file path in %s', config_file_env_var) + config_file_from_env = os.environ.get(config_file_env_var) + if config_file_from_env and os.path.exists(config_file_from_env): + user_config_file = config_file_from_env + break + + if not user_config_file: + config_from_user_profile = os.path.join(config_base_directory(), 'opencue.yaml') + logger.debug('Checking for opencue config at %s', config_from_user_profile) + if os.path.exists(config_from_user_profile): + user_config_file = config_from_user_profile + + if user_config_file: + logger.info('Loading opencue config from %s', user_config_file) + with open(user_config_file) as file_object: + config.update(yaml.load(file_object, Loader=yaml.SafeLoader)) + + return config diff --git a/pycue/opencue/cuebot.py b/pycue/opencue/cuebot.py index 1211c9fe4..764d5428e 100644 --- a/pycue/opencue/cuebot.py +++ b/pycue/opencue/cuebot.py @@ -20,13 +20,16 @@ from builtins import object from random import shuffle +import abc +import time import atexit import logging import os -import yaml +import platform import grpc +import opencue.config from opencue.compiled_proto import comment_pb2 from opencue.compiled_proto import comment_pb2_grpc from opencue.compiled_proto import criterion_pb2 @@ -64,19 +67,14 @@ logger = logging.getLogger("opencue") -default_config = os.path.join(os.path.dirname(__file__), 'default.yaml') -with open(default_config) as file_object: - config = yaml.load(file_object, Loader=yaml.SafeLoader) - -# check for facility specific configurations. 
-fcnf = os.environ.get('OPENCUE_CONF', '') -if os.path.exists(fcnf): - with open(fcnf) as file_object: - config.update(yaml.load(file_object, Loader=yaml.SafeLoader)) DEFAULT_MAX_MESSAGE_BYTES = 1024 ** 2 * 10 DEFAULT_GRPC_PORT = 8443 +if platform.system() != 'Darwin': + # Avoid spamming users with epoll fork warning messages + os.environ["GRPC_POLL_STRATEGY"] = "epoll1" + class Cuebot(object): """Used to manage the connection to the Cuebot. Normally the connection to the Cuebot is made automatically as needed so you don't have to explicitly @@ -89,7 +87,8 @@ class Cuebot(object): RpcChannel = None Hosts = [] Stubs = {} - Timeout = config.get('cuebot.timeout', 10000) + Config = opencue.config.load_config_from_file() + Timeout = Config.get('cuebot.timeout', 10000) PROTO_MAP = { 'action': filter_pb2, @@ -137,19 +136,27 @@ class Cuebot(object): 'proc': host_pb2_grpc.ProcInterfaceStub, 'renderPartition': renderPartition_pb2_grpc.RenderPartitionInterfaceStub, 'service': service_pb2_grpc.ServiceInterfaceStub, + 'serviceOverride': service_pb2_grpc.ServiceOverrideInterfaceStub, 'show': show_pb2_grpc.ShowInterfaceStub, 'subscription': subscription_pb2_grpc.SubscriptionInterfaceStub, 'task': task_pb2_grpc.TaskInterfaceStub } @staticmethod - def init(): + def init(config=None): """Main init method for setting up the Cuebot object. - Sets the communication channel and hosts.""" + Sets the communication channel and hosts. + + :type config: dict + :param config: config dictionary, this will override the config read from disk + """ + if config: + Cuebot.Config = config + Cuebot.Timeout = config.get('cuebot.timeout', Cuebot.Timeout) if os.getenv("CUEBOT_HOSTS"): Cuebot.setHosts(os.getenv("CUEBOT_HOSTS").split(",")) else: - facility_default = config.get("cuebot.facility_default") + facility_default = Cuebot.Config.get("cuebot.facility_default") Cuebot.setFacility(facility_default) if Cuebot.Hosts is None: raise CueException('Cuebot host not set. 
Please ensure CUEBOT_HOSTS is set ' + @@ -162,18 +169,35 @@ def setChannel(): # gRPC must specify a single host. Randomize host list to balance load across cuebots. hosts = list(Cuebot.Hosts) shuffle(hosts) - maxMessageBytes = config.get('cuebot.max_message_bytes', DEFAULT_MAX_MESSAGE_BYTES) + maxMessageBytes = Cuebot.Config.get('cuebot.max_message_bytes', DEFAULT_MAX_MESSAGE_BYTES) + + # create interceptors + interceptors = ( + RetryOnRpcErrorClientInterceptor( + max_attempts=4, + sleeping_policy=ExponentialBackoff(init_backoff_ms=100, + max_backoff_ms=1600, + multiplier=2), + status_for_retry=(grpc.StatusCode.UNAVAILABLE,), + ), + ) + for host in hosts: if ':' in host: connectStr = host else: - connectStr = '%s:%s' % (host, config.get('cuebot.grpc_port', DEFAULT_GRPC_PORT)) - logger.debug('connecting to gRPC at %s', connectStr) + connectStr = '%s:%s' % ( + host, Cuebot.Config.get('cuebot.grpc_port', DEFAULT_GRPC_PORT)) + # pylint: disable=logging-not-lazy + logger.debug('connecting to gRPC at %s' % connectStr) + # pylint: enable=logging-not-lazy # TODO(bcipriano) Configure gRPC TLS. 
(Issue #150) try: - Cuebot.RpcChannel = grpc.insecure_channel(connectStr, options=[ - ('grpc.max_send_message_length', maxMessageBytes), - ('grpc.max_receive_message_length', maxMessageBytes)]) + Cuebot.RpcChannel = grpc.intercept_channel( + grpc.insecure_channel(connectStr, options=[ + ('grpc.max_send_message_length', maxMessageBytes), + ('grpc.max_receive_message_length', maxMessageBytes)]), + *interceptors) # Test the connection Cuebot.getStub('cue').GetSystemStats( cue_pb2.CueGetSystemStatsRequest(), timeout=Cuebot.Timeout) @@ -207,12 +231,12 @@ def setFacility(facility): :type facility: str :param facility: a facility named in the config file""" - if facility not in list(config.get("cuebot.facility").keys()): - default = config.get("cuebot.facility_default") + if facility not in list(Cuebot.Config.get("cuebot.facility").keys()): + default = Cuebot.Config.get("cuebot.facility_default") logger.warning("The facility '%s' does not exist, defaulting to %s", facility, default) facility = default logger.debug("setting facility to: %s", facility) - hosts = config.get("cuebot.facility")[facility] + hosts = Cuebot.Config.get("cuebot.facility")[facility] Cuebot.setHosts(hosts) @staticmethod @@ -269,4 +293,95 @@ def getStub(cls, name): @staticmethod def getConfig(): """Gets the Cuebot config object, originally read in from the config file on disk.""" - return config + return Cuebot.Config + + +# Python 2/3 compatible implementation of ABC +ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + + +class SleepingPolicy(ABC): + """ + Implement policy for sleeping between API retries + """ + @abc.abstractmethod + def sleep(self, attempt): + """ + How long to sleep in milliseconds. 
+ :param attempt: the number of attempt (starting from zero) + """ + assert attempt >= 0 + + +class ExponentialBackoff(SleepingPolicy): + """ + Implement policy that will increase retry period by exponentially in every try + """ + def __init__(self, + init_backoff_ms, + max_backoff_ms, + multiplier=2): + """ + inputs in ms + """ + self._init_backoff = init_backoff_ms + self._max_backoff = max_backoff_ms + self._multiplier = multiplier + + def sleep(self, attempt): + sleep_time_ms = min( + self._init_backoff * self._multiplier ** attempt, + self._max_backoff + ) + time.sleep(sleep_time_ms / 1000.0) + + +class RetryOnRpcErrorClientInterceptor( + grpc.UnaryUnaryClientInterceptor, + grpc.StreamUnaryClientInterceptor +): + """ + Implement Client/Stream interceptors for GRPC channels to retry + calls that failed with retry-able states. This is required for + handling server interruptions that are not automatically handled + by grpc.insecure_channel + """ + def __init__(self, + max_attempts, + sleeping_policy, + status_for_retry=None): + self._max_attempts = max_attempts + self._sleeping_policy = sleeping_policy + self._retry_statuses = status_for_retry + + def _intercept_call(self, continuation, client_call_details, + request_or_iterator): + for attempt in range(self._max_attempts): + try: + return continuation(client_call_details, + request_or_iterator) + except grpc.RpcError as response: + # Return if it was last attempt + if attempt == (self._max_attempts - 1): + return response + + # If status code is not in retryable status codes + # pylint: disable=no-member + if self._retry_statuses \ + and hasattr(response, 'code') \ + and response.code() \ + not in self._retry_statuses: + return response + + self._sleeping_policy.sleep(attempt) + + def intercept_unary_unary(self, continuation, client_call_details, + request): + return self._intercept_call(continuation, client_call_details, + request) + + def intercept_stream_unary( + self, continuation, client_call_details, 
request_iterator + ): + return self._intercept_call(continuation, client_call_details, + request_iterator) diff --git a/pycue/opencue/search.py b/pycue/opencue/search.py index 995cf6e51..1a42f696a 100644 --- a/pycue/opencue/search.py +++ b/pycue/opencue/search.py @@ -77,6 +77,11 @@ class BaseSearch(object): def __init__(self, **options): self.options = options + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + return self.options == other.options + def search(self): """Executes the search using the options provided at initiation.""" return self.byOptions(**self.options) diff --git a/pycue/opencue/util.py b/pycue/opencue/util.py index 1eb704696..7183036c3 100644 --- a/pycue/opencue/util.py +++ b/pycue/opencue/util.py @@ -118,7 +118,7 @@ def _proxies(entities): def rep(entity): """rep(entity) - Extracts a string repesentation of a opencue entity""" + Extracts a string representation of a opencue entity""" try: return entity.name # pylint: disable=bare-except diff --git a/pycue/opencue/wrappers/allocation.py b/pycue/opencue/wrappers/allocation.py index 8539dbf4e..bbbb9b2d4 100644 --- a/pycue/opencue/wrappers/allocation.py +++ b/pycue/opencue/wrappers/allocation.py @@ -167,6 +167,46 @@ def totalLockedCores(self): """ return self.data.stats.locked_cores + def totalGpus(self): + """Returns the total number of gpus in the allocation. + + :rtype: float + :return: total number of gpus in the allocation + """ + return self.data.stats.gpus + + def totalAvailableGpus(self): + """Returns the total number of gpus available for booking in the allocation. + + :rtype: float + :return: total number of gpus in the allocation + """ + return self.data.stats.available_gpus + + def totalIdleGpus(self): + """Returns the total number of idle gpus in the allocation. 
+ + :rtype: float + :return: total number of idle gpus in the allocation + """ + return self.data.stats.idle_gpus + + def totalRunningGpus(self): + """Returns the total number of running gpus in the allocation. + + :rtype: float + :return: total number of running gpus in the allocation + """ + return self.data.stats.running_gpus + + def totalLockedGpus(self): + """Returns the total number of locked gpus in the allocation. + + :rtype: float + :return: total number of locked gpus in the allocation + """ + return self.data.stats.locked_gpus + def totalHosts(self): """Returns the total number of hosts in the allocation. diff --git a/pycue/opencue/wrappers/deed.py b/pycue/opencue/wrappers/deed.py index d29ec8854..bdd0933cd 100644 --- a/pycue/opencue/wrappers/deed.py +++ b/pycue/opencue/wrappers/deed.py @@ -52,29 +52,6 @@ def getOwner(self): self.stub.GetOwner(host_pb2.DeedGetOwnerRequest(deed=self.data), timeout=Cuebot.Timeout).owner) - def setBlackoutTime(self, startTime, stopTime): - """Sets a blackout time for the host. - - :type startTime: int - :param startTime: blackout start time as an epoch - :type stopTime: int - :param stopTime: blackout stop time as an epoch - """ - self.stub.SetBlackoutTime( - host_pb2.DeedSetBlackoutTimeRequest( - deed=self.data, start_time=startTime, stop_time=stopTime), - timeout=Cuebot.Timeout) - - def setBlackoutTimeEnabled(self, enabled): - """Enable/disable the host blackout time without changing the times. - - :type enabled: bool - :param enabled: enable/disable blackout time - """ - self.stub.SetBlackoutTimeEnabled( - host_pb2.DeedSetBlackoutTimeEnabledRequest(deed=self.data, enabled=enabled), - timeout=Cuebot.Timeout) - def id(self): """Returns the id of the deed. @@ -106,27 +83,3 @@ def show(self): :return: name of the deed show """ return self.data.show - - def blackout(self): - """Returns whether the blackout time is enabled. 
- - :rtype: bool - :return: whether the blackout is enabled - """ - return self.data.blackout - - def blackoutStartTime(self): - """Returns the blackout start time as an epoch. - - :rtype: int - :return: blackout start time as an epoch - """ - return self.data.blackout_start_time - - def blackoutStopTime(self): - """Returns the blackout end time as an epoch. - - :rtype: int - :return: blackout end time as an epoch - """ - return self.data.blackout_stop_time diff --git a/pycue/opencue/wrappers/filter.py b/pycue/opencue/wrappers/filter.py index 45e3949ca..e18194a1c 100644 --- a/pycue/opencue/wrappers/filter.py +++ b/pycue/opencue/wrappers/filter.py @@ -270,7 +270,8 @@ class ActionType(enum.IntEnum): SET_JOB_PRIORITY = filter_pb2.SET_JOB_PRIORITY SET_ALL_RENDER_LAYER_TAGS = filter_pb2.SET_ALL_RENDER_LAYER_TAGS SET_ALL_RENDER_LAYER_MEMORY = filter_pb2.SET_ALL_RENDER_LAYER_MEMORY - SET_ALL_RENDER_LAYER_CORES = filter_pb2.SET_ALL_RENDER_LAYER_CORES + SET_ALL_RENDER_LAYER_MIN_CORES = filter_pb2.SET_ALL_RENDER_LAYER_MIN_CORES + SET_ALL_RENDER_LAYER_MAX_CORES = filter_pb2.SET_ALL_RENDER_LAYER_MAX_CORES SET_MEMORY_OPTIMIZER = filter_pb2.SET_MEMORY_OPTIMIZER class ActionValueType(enum.IntEnum): @@ -390,7 +391,8 @@ def setTypeAndValue(self, actionType, value): elif actionType in (filter_pb2.SET_JOB_MIN_CORES, filter_pb2.SET_JOB_MAX_CORES, - filter_pb2.SET_ALL_RENDER_LAYER_CORES): + filter_pb2.SET_ALL_RENDER_LAYER_MIN_CORES, + filter_pb2.SET_ALL_RENDER_LAYER_MAX_CORES): self.data.float_value = float(value) self.data.value_type = filter_pb2.FLOAT_TYPE diff --git a/pycue/opencue/wrappers/frame.py b/pycue/opencue/wrappers/frame.py index 0455cbd7c..5aa528b5d 100644 --- a/pycue/opencue/wrappers/frame.py +++ b/pycue/opencue/wrappers/frame.py @@ -36,7 +36,7 @@ class CheckpointState(enum.IntEnum): class FrameExitStatus(enum.IntEnum): """Possible frame exit statuses.""" SUCCESS = job_pb2.SUCCESS - NO_RETRY = job_pb2.NO_RETRY + FAILED_LAUNCH = job_pb2.FAILED_LAUNCH SKIP_RETRY = 
job_pb2.SKIP_RETRY class FrameState(enum.IntEnum): diff --git a/pycue/opencue/wrappers/group.py b/pycue/opencue/wrappers/group.py index 59685cc8b..1c8175e57 100644 --- a/pycue/opencue/wrappers/group.py +++ b/pycue/opencue/wrappers/group.py @@ -67,6 +67,24 @@ def setMinCores(self, value): self.stub.SetMinCores(job_pb2.GroupSetMinCoresRequest(group=self.data, min_cores=value), timeout=Cuebot.Timeout) + def setMaxGpus(self, value): + """Sets the maximum gpus of everything in the group. + + :type value: int + :param value: new maximum number of gpus + """ + self.stub.SetMaxGpus(job_pb2.GroupSetMaxGpusRequest(group=self.data, max_gpus=value), + timeout=Cuebot.Timeout) + + def setMinGpus(self, value): + """Sets the minimum gpus of everything the group. + + :type value: int + :param value: new minimum number of gpus + """ + self.stub.SetMinGpus(job_pb2.GroupSetMinGpusRequest(group=self.data, min_gpus=value), + timeout=Cuebot.Timeout) + def setDefaultJobPriority(self, value): """Sets the default job priority for everything in the group. @@ -97,6 +115,26 @@ def setDefaultJobMaxCores(self, value): job_pb2.GroupSetDefJobMaxCoresRequest(group=self.data, max_cores=value), timeout=Cuebot.Timeout) + def setDefaultJobMinGpus(self, value): + """Sets the default job minimum gpus for everything in the group. + + :type value: int + :param value: new default job minimum gpus + """ + self.stub.SetDefaultJobMinGpus( + job_pb2.GroupSetDefJobMinGpusRequest(group=self.data, min_gpus=value), + timeout=Cuebot.Timeout) + + def setDefaultJobMaxGpus(self, value): + """Sets the default job maximum gpus for everything in the group. + + :type value: int + :param value: new default job maximum gpus + """ + self.stub.SetDefaultJobMaxGpus( + job_pb2.GroupSetDefJobMaxGpusRequest(group=self.data, max_gpus=value), + timeout=Cuebot.Timeout) + def getGroups(self): """Returns child groups of this group. 
diff --git a/pycue/opencue/wrappers/host.py b/pycue/opencue/wrappers/host.py index 83dac252f..860847bb0 100644 --- a/pycue/opencue/wrappers/host.py +++ b/pycue/opencue/wrappers/host.py @@ -19,6 +19,7 @@ import time from opencue import Cuebot +from opencue import util from opencue.compiled_proto import comment_pb2 from opencue.compiled_proto import host_pb2 import opencue.wrappers.comment @@ -149,6 +150,7 @@ def renameTag(self, oldTag, newTag): host_pb2.HostRenameTagRequest(host=self.data, old_tag=oldTag, new_tag=newTag), timeout=Cuebot.Timeout) + @util.grpcExceptionParser def setAllocation(self, allocation): """Sets the host to the given allocation. @@ -279,7 +281,7 @@ def coresReserved(self): :rtype: float :return: number of cores reserved """ - return self.data.cores - self.data.idle_ores + return self.data.cores - self.data.idle_cores def coresIdle(self): """Returns the number of cores the host currently has idel. diff --git a/pycue/opencue/wrappers/job.py b/pycue/opencue/wrappers/job.py index eb977a280..cbb1869d9 100644 --- a/pycue/opencue/wrappers/job.py +++ b/pycue/opencue/wrappers/job.py @@ -126,6 +126,20 @@ def setMaxCores(self, maxCores): self.stub.SetMaxCores(job_pb2.JobSetMaxCoresRequest(job=self.data, val=maxCores), timeout=Cuebot.Timeout) + def setMinGpus(self, minGpus): + """Sets the minimum procs value + :type minGpus: int + :param minGpus: New minimum cores value""" + self.stub.SetMinGpus(job_pb2.JobSetMinGpusRequest(job=self.data, val=minGpus), + timeout=Cuebot.Timeout) + + def setMaxGpus(self, maxGpus): + """Sets the maximum procs value + :type maxGpus: int + :param maxGpus: New maximum cores value""" + self.stub.SetMaxGpus(job_pb2.JobSetMaxGpusRequest(job=self.data, val=maxGpus), + timeout=Cuebot.Timeout) + def setPriority(self, priority): """Sets the job priority. 
@@ -211,7 +225,7 @@ def setAutoEating(self, value): self.stub.SetAutoEat(job_pb2.JobSetAutoEatRequest(job=self.data, value=value), timeout=Cuebot.Timeout) - def addRenderPartition(self, hostname, threads, max_cores, num_mem, max_gpu): + def addRenderPartition(self, hostname, threads, max_cores, num_mem, max_gpus, max_gpu_memory): """Adds a render partition to the job. :type hostname: str @@ -222,8 +236,10 @@ def addRenderPartition(self, hostname, threads, max_cores, num_mem, max_gpu): :param max_cores: max cores enabled for the partition :type num_mem: int :param num_mem: amount of memory reserved for the partition - :type max_gpu: int - :param max_gpu: max gpu cores enabled for the partition + :type max_gpus: int + :param max_gpus: max gpu cores enabled for the partition + :type max_gpu_memory: int + :param max_gpu_memory: amount of gpu memory reserved for the partition """ self.stub.AddRenderPartition( job_pb2.JobAddRenderPartRequest(job=self.data, @@ -231,7 +247,8 @@ def addRenderPartition(self, hostname, threads, max_cores, num_mem, max_gpu): threads=threads, max_cores=max_cores, max_memory=num_mem, - max_gpu=max_gpu, + max_gpus=max_gpus, + max_gpu_memory=max_gpu_memory, username=os.getenv("USER", "unknown"))) def getWhatDependsOnThis(self): @@ -492,6 +509,20 @@ def maxCores(self): """ return self.data.max_cores + def minGpus(self): + """Returns the minimum number of gpus the job needs. + :rtype: int + :return: job's min gpus + """ + return self.data.min_gpus + + def maxGpus(self): + """Returns the maximum number of gpus the job will use. + :rtype: int + :return: job's max gpus + """ + return self.data.max_gpus + def os(self): """Returns the job's operating system. 
@@ -742,6 +773,11 @@ def maxRss(self): :return: most memory used by any frame in kB""" return self.data.job_stats.max_rss + def shutdownIfCompleted(self): + """Shutdown the job if it is completed.""" + self.stub.ShutdownIfCompleted(job_pb2.JobShutdownIfCompletedRequest(job=self.data), + timeout=Cuebot.Timeout) + class NestedJob(Job): """This class contains information and actions related to a nested job.""" @@ -823,6 +859,18 @@ def setMaxCores(self, maxCores): """ self.asJob().setMaxCores(maxCores) + def setMinGpus(self, minGpus): + """Sets the minimum gpus value + :type minGpus: int + :param minGpus: New minimum gpus value""" + self.asJob().setMinGpus(minGpus) + + def setMaxGpus(self, maxGpus): + """Sets the maximum gpus value + :type maxGpus: int + :param maxGpus: New maximum gpus value""" + self.asJob().setMaxGpus(maxGpus) + def setPriority(self, priority): """Sets the job priority. diff --git a/pycue/opencue/wrappers/layer.py b/pycue/opencue/wrappers/layer.py index 7c33b68e7..0e34985bf 100644 --- a/pycue/opencue/wrappers/layer.py +++ b/pycue/opencue/wrappers/layer.py @@ -140,14 +140,30 @@ def setMinCores(self, cores): job_pb2.LayerSetMinCoresRequest(layer=self.data, cores=cores/100.0), timeout=Cuebot.Timeout) - def setMinGpu(self, gpu): + def setMaxGpus(self, max_gpus): + """Sets the maximum number of gpus that this layer requires. + :type max_gpus: int + :param max_gpus: gpu cores""" + return self.stub.SetMaxGpus( + job_pb2.LayerSetMaxGpusRequest(layer=self.data, max_gpus=max_gpus), + timeout=Cuebot.Timeout) + + def setMinGpus(self, min_gpus): + """Sets the minimum number of gpus that this layer requires. + :type min_gpus: int + :param min_gpus: gou cores""" + return self.stub.SetMinGpus( + job_pb2.LayerSetMinGpusRequest(layer=self.data, min_gpus=min_gpus), + timeout=Cuebot.Timeout) + + def setMinGpuMemory(self, gpu_memory): """Sets the minimum number of gpu memory that this layer requires. 
- :type gpu: int - :param gpu: gpu value + :type gpu_memory: int + :param gpu_memory: gpu_memory value """ - return self.stub.SetMinGpu( - job_pb2.LayerSetMinGpuRequest(layer=self.data, gpu=gpu), + return self.stub.SetMinGpuMemory( + job_pb2.LayerSetMinGpuMemoryRequest(layer=self.data, gpu_memory=gpu_memory), timeout=Cuebot.Timeout) def setMinMemory(self, memory): @@ -401,6 +417,12 @@ def coresReserved(self): """ return self.data.layer_stats.reserved_cores + def gpusReserved(self): + """Returns the number of gpus reserved on this layer + :rtype: float + :return: gpus reserved""" + return self.data.layer_stats.reserved_gpus + def minCores(self): """Returns the minimum number of cores that frames in this layer require. @@ -409,6 +431,12 @@ def minCores(self): """ return self.data.min_cores + def minGpus(self): + """Returns the minimum number of gpus that frames in this layer require + :rtype: int + :return: Minimum number of gpus required""" + return self.data.min_gpus + def minMemory(self): """Returns the minimum amount of memory that frames in this layer require. diff --git a/pycue/opencue/wrappers/proc.py b/pycue/opencue/wrappers/proc.py index da8ab2fae..f0ab1bc96 100644 --- a/pycue/opencue/wrappers/proc.py +++ b/pycue/opencue/wrappers/proc.py @@ -59,6 +59,30 @@ def unbook(self, kill=False): host_pb2.ProcUnbookRequest(proc=self.data, kill=kill), timeout=Cuebot.Timeout) return response + def redirectToJob(self, job, kill=False): + """Unbooks the current frame from this proc and redirects the proc to a specific job. + + :type job: opencue.wrappers.job.Job + :param job: job which the proc should be booked to + :type kill: bool + :param kill: if true, the frame will be immediately killed + """ + self.stub.RedirectToJob( + host_pb2.ProcRedirectToJobRequest(proc=self.data, job_id=job.data.id, kill=kill), + timeout=Cuebot.Timeout) + + def redirectToGroup(self, group, kill=False): + """Unbooks the current frame from this proc and redirects the proc to another group. 
+ + :type group: opencue.wrappers.group.Group + :param group: group which the proc should be booked to + :type kill: bool + :param kill: if true, the frame will be immediately killed + """ + self.stub.RedirectToGroup( + host_pb2.ProcRedirectToGroupRequest(proc=self.data, group_id=group.data.id, kill=kill), + timeout=Cuebot.Timeout) + def getHost(self): """Returns the host this proc is allocated from. diff --git a/pycue/opencue/wrappers/service.py b/pycue/opencue/wrappers/service.py index 1a941772b..92cafe7fc 100644 --- a/pycue/opencue/wrappers/service.py +++ b/pycue/opencue/wrappers/service.py @@ -29,6 +29,10 @@ def __init__(self, service=None): def create(self): """Creates a service in the database using the current instance data.""" + # min_memory_increase has to be greater than 0. + if self.data.min_memory_increase <= 0: + raise ValueError("Minimum memory increase must be > 0") + response = self.stub.CreateService( service_pb2.ServiceCreateServiceRequest(data=self.data), timeout=Cuebot.Timeout) @@ -69,6 +73,10 @@ def getService(name): def update(self): """Updates the service database record with the current instance data.""" + # min_memory_increase has to be greater than 0. + if self.data.min_memory_increase <= 0: + raise ValueError("Minimum memory increase must be > 0") + return self.stub.Update( service_pb2.ServiceUpdateRequest(service=self.data), timeout=Cuebot.Timeout) @@ -161,21 +169,53 @@ def setMinMemory(self, minMemory): """ self.data.min_memory = minMemory - def minGpu(self): - """Returns the minimum gpu of the service. + def minGpus(self): + """Returns the minimum gpus of the service. + + :rtype: int + :return: min gpus + """ + return self.data.min_gpus + + def setMinGpus(self, minGpus): + """Sets the minimum gpus of the service. + + :type: int + :param: new min gpus + """ + self.data.min_gpus = minGpus + + def maxGpus(self): + """Returns the maximum gpus of the service. 
+ + :rtype: int + :return: max gpus + """ + return self.data.max_gpus + + def setMaxGpus(self, maxGpus): + """Sets the maximum gpus of the service. + + :type: int + :param: new max gpus + """ + self.data.max_gpus = maxGpus + + def minGpuMemory(self): + """Returns the minimum gpu memory of the service. :rtype: int - :return: min gpu + :return: min gpu memory """ - return self.data.min_gpu + return self.data.min_gpu_memory - def setMinGpu(self, minGpu): - """Sets the minimum gpu of the service. + def setMinGpuMemory(self, minGpuMemory): + """Sets the minimum gpu memory of the service. :type: int - :param: new min gpu + :param: new min gpu memory """ - self.data.min_gpu = minGpu + self.data.min_gpu_memory = minGpuMemory def tags(self): """Returns the list of tags for the service. @@ -208,3 +248,37 @@ def timeoutLLU(self): def setTimeoutLLU(self, timeout_llu): """Sets the default service LLU timeout.""" self.data.timeout_llu = timeout_llu + + def minMemoryIncrease(self): + """Gets the default service minimum memory increment""" + return self.data.min_memory_increase + + def setMinMemoryIncrease(self, min_memory_increase): + """Sets the default service minimum memory increment""" + if min_memory_increase > 0: + self.data.min_memory_increase = min_memory_increase + else: + raise ValueError("Minimum memory increase must be > 0") + +class ServiceOverride(object): + def __init__(self, serviceOverride=None): + if serviceOverride: + self.id = serviceOverride.id + self.data = serviceOverride.data or service_pb2.Service().data + else: + defaultServiceOverride = service_pb2.ServiceOverride() + self.id = defaultServiceOverride.id + self.data = defaultServiceOverride.data + + self.stub = Cuebot.getStub("serviceOverride") + + def delete(self): + self.stub.Delete( + service_pb2.ServiceOverrideDeleteRequest(service=self.data), + timeout=Cuebot.Timeout) + + def update(self): + """Commit a ServiceOverride change to the database""" + self.stub.Update( + 
service_pb2.ServiceOverrideUpdateRequest(service=self.data), + timeout=Cuebot.Timeout) diff --git a/pycue/opencue/wrappers/show.py b/pycue/opencue/wrappers/show.py index 750d05645..c7594aaa3 100644 --- a/pycue/opencue/wrappers/show.py +++ b/pycue/opencue/wrappers/show.py @@ -19,6 +19,7 @@ import opencue.wrappers.filter import opencue.wrappers.group import opencue.wrappers.subscription +from opencue.wrappers.service import ServiceOverride class Show(object): @@ -66,25 +67,29 @@ def delete(self): def createServiceOverride(self, data): """Creates a Service Override at the show level. - - :type data: service_pb2.Service - :param data: service data, typically from opencue.wrappers.service.Service.data + :type data: opencue.wrapper.service.Service + :param data: Service.data object """ - self.stub.CreateServiceOverride( - show_pb2.ShowCreateServiceOverrideRequest(show=self.data, service=data), - timeout=Cuebot.Timeout) + + # min_memory_increase has to be greater than 0. + if data.min_memory_increase <= 0: + raise ValueError("Minimum memory increase must be > 0") + + self.stub.CreateServiceOverride(show_pb2.ShowCreateServiceOverrideRequest( + show=self.data, service=data), + timeout=Cuebot.Timeout) def getServiceOverride(self, serviceName): - """Returns a service override for a show. + """ + Returns a service override for a show - :type serviceName: str :param serviceName: name of the service for the show - :rtype: service_pb2.ServiceOverride :return: service override object """ - return self.stub.GetServiceOverride( - show_pb2.ShowGetServiceOverrideRequest(show=self.data, name=serviceName), - timeout=Cuebot.Timeout).service_override + serviceOverride = self.stub.GetServiceOverride(show_pb2.ShowGetServiceOverrideRequest( + show=self.data, name=serviceName), + timeout=Cuebot.Timeout).service_override + return ServiceOverride(serviceOverride) def getServiceOverrides(self): """Returns a list of service overrides on the show. 
@@ -95,7 +100,7 @@ def getServiceOverrides(self): serviceOverrideSeq = self.stub.GetServiceOverrides( show_pb2.ShowGetServiceOverridesRequest(show=self.data), timeout=Cuebot.Timeout).service_overrides - return serviceOverrideSeq.service_overrides + return [ServiceOverride(override) for override in serviceOverrideSeq.service_overrides] def getSubscriptions(self): """Returns a list of all subscriptions the show has. @@ -167,6 +172,32 @@ def setDefaultMinCores(self, mincores): timeout=Cuebot.Timeout) return response + def setDefaultMaxGpus(self, maxgpus): + """Sets the default maximum number of gpus + that new jobs are launched with. + :type: float + :param: value to set maxGpu to + :rtype: show_pb2.ShowSetDefaultMaxGpuResponse + :return: response is empty + """ + response = self.stub.SetDefaultMaxGpus(show_pb2.ShowSetDefaultMaxGpusRequest( + show=self.data, max_gpus=maxgpus), + timeout=Cuebot.Timeout) + return response + + def setDefaultMinGpus(self, mingpus): + """Sets the default minimum number of gpus + all new jobs are launched with. + :type: float + :param: value to set minGpus to + :rtype: show_pb2.ShowSetDefaultMinGpusResponse + :return: response is empty + """ + response = self.stub.SetDefaultMinGpus(show_pb2.ShowSetDefaultMinGpusRequest( + show=self.data, min_gpus=mingpus), + timeout=Cuebot.Timeout) + return response + def findFilter(self, name): """Finds a filter by name. diff --git a/pycue/opencue/wrappers/subscription.py b/pycue/opencue/wrappers/subscription.py index 057067e3c..5334ae413 100644 --- a/pycue/opencue/wrappers/subscription.py +++ b/pycue/opencue/wrappers/subscription.py @@ -94,6 +94,12 @@ def id(self): def name(self): """Returns the name of the subscription. + Subscription names follow the format `.`, which can also be + read as `..` due to the way allocations are named. For + example the subscription name `local.general.testing` would indicate the show `testing` + has a subscription to the `local.general` allocation. 
The `local.general` allocation + indicates all hosts in the facility `local` containing the `general` tag. + :rtype: str :return: name of the subscription """ diff --git a/pycue/setup.py b/pycue/setup.py index 0906e9351..0750ab016 100644 --- a/pycue/setup.py +++ b/pycue/setup.py @@ -46,6 +46,8 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], packages=find_packages(exclude=['tests']), package_data={ diff --git a/pycue/tests/api_test.py b/pycue/tests/api_test.py index 0d05246c4..759c09da1 100644 --- a/pycue/tests/api_test.py +++ b/pycue/tests/api_test.py @@ -357,11 +357,13 @@ class ServiceTests(unittest.TestCase): testThreadable = False testMinCores = 1000 testMaxCores = 2000 + minMemoryIncrease = 2068378 def setUp(self): self.service = service_pb2.Service( name=self.testName, threadable=self.testThreadable, min_cores=self.testMinCores, - max_cores=self.testMaxCores, tags=self.testTags) + max_cores=self.testMaxCores, tags=self.testTags, + min_memory_increase=self.minMemoryIncrease) @mock.patch('opencue.cuebot.Cuebot.getStub') def testCreate(self, getStubMock): @@ -719,6 +721,32 @@ def testDeleteAlloc(self, getStubMock): stubMock.Delete.assert_called_with( facility_pb2.AllocDeleteRequest(allocation=allocToDelete), timeout=mock.ANY) + @mock.patch('opencue.cuebot.Cuebot.getStub') + def testGetDefaultAlloc(self, getStubMock): + arbitraryId = '00000000-0000-0000-0000-012345678980' + stubMock = mock.Mock() + stubMock.GetDefault.return_value = facility_pb2.AllocGetDefaultResponse( + allocation=facility_pb2.Allocation(id=arbitraryId)) + getStubMock.return_value = stubMock + + alloc = opencue.api.getDefaultAllocation() + + stubMock.GetDefault.assert_called_with( + facility_pb2.AllocGetDefaultRequest(), timeout=mock.ANY) + self.assertEqual(arbitraryId, alloc.id()) + + @mock.patch('opencue.cuebot.Cuebot.getStub') + 
def testSetDefaultAlloc(self, getStubMock): + alloc = facility_pb2.Allocation(name=TEST_ALLOC_NAME) + stubMock = mock.Mock() + stubMock.SetDefault.return_value = facility_pb2.AllocSetDefaultResponse() + getStubMock.return_value = stubMock + + opencue.api.setDefaultAllocation(alloc) + + stubMock.SetDefault.assert_called_with( + facility_pb2.AllocSetDefaultRequest(allocation=alloc), timeout=mock.ANY) + @mock.patch('opencue.cuebot.Cuebot.getStub') def testAllocSetBillable(self, getStubMock): alloc = facility_pb2.Allocation(name=TEST_ALLOC_NAME) @@ -806,6 +834,21 @@ def testGetLimits(self, getStubMock): self.assertEqual(len(limits), 1) self.assertEqual(limits[0].name(), TEST_LIMIT_NAME) + @mock.patch('opencue.cuebot.Cuebot.getStub') + def testFindLimit(self, getStubMock): + stubMock = mock.Mock() + stubMock.Find.return_value = limit_pb2.LimitFindResponse( + limit=limit_pb2.Limit(name=TEST_LIMIT_NAME, max_value=42)) + getStubMock.return_value = stubMock + + limit = opencue.api.findLimit(TEST_LIMIT_NAME) + self.assertEqual(TEST_LIMIT_NAME, limit.name()) + self.assertEqual(42, limit.maxValue()) + + stubMock.Find.assert_called_with( + limit_pb2.LimitFindRequest(name=TEST_LIMIT_NAME), + timeout=mock.ANY) + if __name__ == '__main__': unittest.main() diff --git a/pycue/tests/config_test.py b/pycue/tests/config_test.py new file mode 100644 index 000000000..15672cb04 --- /dev/null +++ b/pycue/tests/config_test.py @@ -0,0 +1,145 @@ +#!/usr/bin/env python + +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for `opencue.config`.""" + +import os +import unittest + +import mock +import pyfakefs.fake_filesystem_unittest + +import opencue.config + + +EXPECTED_DEFAULT_CONFIG = { + 'logger.format': '%(levelname)-9s %(module)-10s %(message)s', + 'logger.level': 'WARNING', + 'cuebot.protocol': 'tcp', + 'cuebot.grpc_port': 8443, + 'cuebot.timeout': 10000, + 'cuebot.max_message_bytes': 104857600, + 'cuebot.exception_retries': 3, + 'cuebot.facility_default': 'local', + 'cuebot.facility': { + 'local': ['localhost:8443'], + 'dev': ['cuetest02-vm.example.com:8443'], + 'cloud': [ + 'cuebot1.example.com:8443', + 'cuebot2.example.com:8443', + 'cuebot3.example.com:8443' + ], + }, +} + +USER_CONFIG = """ +cuebot.facility_default: fake-facility-01 +cuebot.facility: + fake-facility-01: + - fake-cuebot-01:1234 + fake-facility-02: + - fake-cuebot-02:5678 + - fake-cuebot-03:9012 +""" + + +class ConfigTests(pyfakefs.fake_filesystem_unittest.TestCase): + def setUp(self): + self.setUpPyfakefs() + self.fs.add_real_file( + os.path.join(os.path.dirname(opencue.__file__), 'default.yaml'), read_only=True) + if 'OPENCUE_CONFIG_FILE' in os.environ: + del os.environ['OPENCUE_CONFIG_FILE'] + if 'OPENCUE_CONF' in os.environ: + del os.environ['OPENCUE_CONF'] + + @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) + @mock.patch('os.path.expanduser', new=mock.Mock(return_value='/home/username')) + def test__should_return_config_dir_unix(self): + self.assertEqual('/home/username/.config/opencue', opencue.config.config_base_directory()) + + @mock.patch('platform.system', new=mock.Mock(return_value='Windows')) + @mock.patch( + 'os.path.expandvars', new=mock.Mock(return_value='C:/Users/username/AppData/Roaming')) + def test__should_return_config_dir_windows(self): + self.assertEqual( + 'C:/Users/username/AppData/Roaming/opencue', opencue.config.config_base_directory()) + + def 
test__should_load_default_config(self): + self.assertIsNone(os.environ.get('OPENCUE_CONFIG_FILE')) + self.assertIsNone(os.environ.get('OPENCUE_CONF')) + + config = opencue.config.load_config_from_file() + + self.assertEqual(EXPECTED_DEFAULT_CONFIG, config) + + def test__should_load_user_config(self): + config_file_path = '/path/to/config.yaml' + self.fs.create_file(config_file_path, contents=USER_CONFIG) + os.environ['OPENCUE_CONFIG_FILE'] = config_file_path + # Define some invalid config using the old setting name, this ensures the old env var + # will be ignored if the new one is set. + config_file_path_legacy = '/path/to/legacy/config.yaml' + self.fs.create_file(config_file_path_legacy, contents='invalid yaml') + os.environ['OPENCUE_CONF'] = config_file_path_legacy + + config = opencue.config.load_config_from_file() + + self.assertEqual('fake-facility-01', config['cuebot.facility_default']) + self.assertEqual(['fake-cuebot-01:1234'], config['cuebot.facility']['fake-facility-01']) + self.assertEqual( + ['fake-cuebot-02:5678', 'fake-cuebot-03:9012'], + config['cuebot.facility']['fake-facility-02']) + # Settings not defined in user config should still have default values. + self.assertEqual(10000, config['cuebot.timeout']) + self.assertEqual(3, config['cuebot.exception_retries']) + + def test__should_load_user_config_from_legacy_var(self): + config_file_path = '/path/to/legacy/config.yaml' + self.fs.create_file(config_file_path, contents=USER_CONFIG) + os.environ['OPENCUE_CONF'] = config_file_path + + config = opencue.config.load_config_from_file() + + self.assertEqual('fake-facility-01', config['cuebot.facility_default']) + self.assertEqual(['fake-cuebot-01:1234'], config['cuebot.facility']['fake-facility-01']) + self.assertEqual( + ['fake-cuebot-02:5678', 'fake-cuebot-03:9012'], + config['cuebot.facility']['fake-facility-02']) + # Settings not defined in user config should still have default values. 
+ self.assertEqual(10000, config['cuebot.timeout']) + self.assertEqual(3, config['cuebot.exception_retries']) + + @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) + @mock.patch('os.path.expanduser', new=mock.Mock(return_value='/home/username')) + def test__should_load_user_config_from_user_profile(self): + config_file_path = '/home/username/.config/opencue/opencue.yaml' + self.fs.create_file(config_file_path, contents=USER_CONFIG) + + config = opencue.config.load_config_from_file() + + self.assertEqual('fake-facility-01', config['cuebot.facility_default']) + self.assertEqual(['fake-cuebot-01:1234'], config['cuebot.facility']['fake-facility-01']) + self.assertEqual( + ['fake-cuebot-02:5678', 'fake-cuebot-03:9012'], + config['cuebot.facility']['fake-facility-02']) + # Settings not defined in user config should still have default values. + self.assertEqual(10000, config['cuebot.timeout']) + self.assertEqual(3, config['cuebot.exception_retries']) + + +if __name__ == '__main__': + unittest.main() diff --git a/pycue/tests/cuebot_test.py b/pycue/tests/cuebot_test.py new file mode 100644 index 000000000..78e20fb97 --- /dev/null +++ b/pycue/tests/cuebot_test.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python + +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for `opencue.cuebot`.""" + +import unittest + +import mock + +import opencue + + +TESTING_CONFIG = { + "cuebot.facility_default": "fake-facility-01", + "cuebot.facility": { + "fake-facility-01": [ + "fake-cuebot-01", + ], + "fake-facility-02": [ + "fake-cuebot-02", + "fake-cuebot-03", + ], + }, +} + + +class CuebotTests(unittest.TestCase): + def setUp(self): + self.cuebot = opencue.Cuebot() + # Mocking the cue service ensures the initial healthcheck request made to Cuebot + # will succeed. + self.cuebot.SERVICE_MAP['cue'] = mock.Mock() + + def test__should_set_hosts_and_channel(self): + healthcheck_mock = mock.Mock() + self.cuebot.SERVICE_MAP['cue'] = healthcheck_mock + + self.cuebot.init(config=TESTING_CONFIG) + + self.assertEqual(["fake-cuebot-01"], self.cuebot.Hosts) + self.assertIsNotNone(self.cuebot.RpcChannel) + healthcheck_mock.assert_called_with(self.cuebot.RpcChannel) + + def test__should_set_known_facility(self): + self.cuebot.init(config=TESTING_CONFIG) + + self.cuebot.setFacility('fake-facility-02') + + self.assertEqual(['fake-cuebot-02', 'fake-cuebot-03'], self.cuebot.Hosts) + + def test__should_ignore_unknown_facility(self): + self.cuebot.init(config=TESTING_CONFIG) + + self.cuebot.setFacility('unknown-facility') + + self.assertEqual(['fake-cuebot-01'], self.cuebot.Hosts) + + +if __name__ == '__main__': + unittest.main() diff --git a/pycue/tests/wrappers/deed_test.py b/pycue/tests/wrappers/deed_test.py index 8fd0833f6..6d762fe4b 100644 --- a/pycue/tests/wrappers/deed_test.py +++ b/pycue/tests/wrappers/deed_test.py @@ -72,36 +72,6 @@ def testGetOwner(self, getStubMock): host_pb2.DeedGetOwnerRequest(deed=deed.data), timeout=mock.ANY) self.assertEqual(owner.name(), TEST_DEED_OWNER) - def testSetBlackoutTime(self, getStubMock): - stubMock = mock.Mock() - stubMock.SetBlackoutTime.return_value = host_pb2.DeedSetBlackoutTimeResponse() - getStubMock.return_value = stubMock - - testStartTime = 100 - testStopTime = 200 - deed = 
opencue.wrappers.deed.Deed(host_pb2.Deed(id=TEST_DEED_ID)) - deed.setBlackoutTime(testStartTime, testStopTime) - - stubMock.SetBlackoutTime.assert_called_with( - host_pb2.DeedSetBlackoutTimeRequest(deed=deed.data, - start_time=testStartTime, - stop_time=testStopTime), - timeout=mock.ANY) - - def testSetBlackoutTimeEnabled(self, getStubMock): - stubMock = mock.Mock() - stubMock.SetBlackoutTimeEnabled.return_value = host_pb2.DeedSetBlackoutTimeEnabledResponse() - getStubMock.return_value = stubMock - - testBlackoutEnabled = True - deed = opencue.wrappers.deed.Deed(host_pb2.Deed(id=TEST_DEED_ID)) - deed.setBlackoutTimeEnabled(testBlackoutEnabled) - - stubMock.SetBlackoutTimeEnabled.assert_called_with( - host_pb2.DeedSetBlackoutTimeEnabledRequest(deed=deed.data, - enabled=testBlackoutEnabled), - timeout=mock.ANY) - if __name__ == '__main__': unittest.main() diff --git a/pycue/tests/wrappers/frame_test.py b/pycue/tests/wrappers/frame_test.py index 8be9ff0fd..d0e7c7ba6 100644 --- a/pycue/tests/wrappers/frame_test.py +++ b/pycue/tests/wrappers/frame_test.py @@ -206,9 +206,9 @@ def testCheckpointState(self): self.assertEqual(opencue.api.Frame.CheckpointState.DISABLED, 0) def testFrameExitStatus(self): - self.assertEqual(opencue.api.Frame.FrameExitStatus.NO_RETRY, - opencue.compiled_proto.job_pb2.NO_RETRY) - self.assertEqual(opencue.api.Frame.FrameExitStatus.NO_RETRY, 256) + self.assertEqual(opencue.api.Frame.FrameExitStatus.FAILED_LAUNCH, + opencue.compiled_proto.job_pb2.FAILED_LAUNCH) + self.assertEqual(opencue.api.Frame.FrameExitStatus.FAILED_LAUNCH, 256) def testFrameState(self): self.assertEqual(opencue.api.Frame.FrameState.RUNNING, diff --git a/pycue/tests/wrappers/layer_test.py b/pycue/tests/wrappers/layer_test.py index cf8fb0c33..4f5578681 100644 --- a/pycue/tests/wrappers/layer_test.py +++ b/pycue/tests/wrappers/layer_test.py @@ -201,18 +201,18 @@ def testSetMaxCores(self, getStubMock): job_pb2.LayerSetMaxCoresRequest(layer=layer.data, cores=testCoresActual), 
timeout=mock.ANY) - def testSetMinGpu(self, getStubMock): + def testSetMinGpuMemory(self, getStubMock): stubMock = mock.Mock() - stubMock.SetMinGpu.return_value = job_pb2.LayerSetMinGpuResponse() + stubMock.SetMinGpuMemory.return_value = job_pb2.LayerSetMinGpuResponse() getStubMock.return_value = stubMock testCores = 100 layer = opencue.wrappers.layer.Layer( job_pb2.Layer(name=TEST_LAYER_NAME)) - layer.setMinGpu(testCores) + layer.setMinGpuMemory(testCores) - stubMock.SetMinGpu.assert_called_with( - job_pb2.LayerSetMinGpuRequest(layer=layer.data, gpu=testCores), + stubMock.SetMinGpuMemory.assert_called_with( + job_pb2.LayerSetMinGpuMemoryRequest(layer=layer.data, gpu_memory=testCores), timeout=mock.ANY) def testSetMinMemory(self, getStubMock): diff --git a/pycue/tests/wrappers/service_test.py b/pycue/tests/wrappers/service_test.py index 92b2e3b2c..3500eec5c 100644 --- a/pycue/tests/wrappers/service_test.py +++ b/pycue/tests/wrappers/service_test.py @@ -28,6 +28,9 @@ TEST_SERVICE_NAME = 'testService' +TEST_MIN_GPUS = 2 +TEST_MAX_GPUS = 7 +TEST_MIN_GPU_MEMORY = 8 * 1024 * 1024 * 1024 @mock.patch('opencue.cuebot.Cuebot.getStub') @@ -48,16 +51,23 @@ def testDelete(self, getStubMock): def testCreateService(self, getStubMock): stubMock = mock.Mock() stubMock.CreateService.return_value = service_pb2.ServiceCreateServiceResponse( - service=service_pb2.Service(name=TEST_SERVICE_NAME)) + service=service_pb2.Service(name=TEST_SERVICE_NAME, min_memory_increase=2097152)) getStubMock.return_value = stubMock wrapper = opencue.wrappers.service.Service( - service_pb2.Service(name=TEST_SERVICE_NAME)) + service_pb2.Service(name=TEST_SERVICE_NAME, min_memory_increase=2097152)) service = wrapper.create() stubMock.CreateService.assert_called_with( service_pb2.ServiceCreateServiceRequest(data=wrapper.data), timeout=mock.ANY) self.assertEqual(wrapper.name(), service.name()) + self.assertEqual(wrapper.minMemoryIncrease(), service.minMemoryIncrease()) + + def testCreateServiceMemError(self, 
getStubMock): + service = opencue.wrappers.service.Service(service_pb2.Service( + name=TEST_SERVICE_NAME)) + + self.assertRaises(ValueError, service.create) def testGetDefaultServices(self, getStubMock): stubMock = mock.Mock() @@ -93,12 +103,51 @@ def testUpdate(self, getStubMock): getStubMock.return_value = stubMock wrapper = opencue.wrappers.service.Service(service=service_pb2.Service( - name=TEST_SERVICE_NAME)) + name=TEST_SERVICE_NAME, min_memory_increase=302)) wrapper.update() stubMock.Update.assert_called_with( service_pb2.ServiceUpdateRequest(service=wrapper.data), timeout=mock.ANY) + def testGpus(self, getStubMock): + stubMock = mock.Mock() + stubMock.GetService.return_value = service_pb2.ServiceGetServiceResponse( + service=service_pb2.Service(name=TEST_SERVICE_NAME)) + getStubMock.return_value = stubMock + + wrapper = opencue.wrappers.service.Service() + service = wrapper.getService(name=TEST_SERVICE_NAME) + self.assertEqual(service.minGpus(), 0) + self.assertEqual(service.maxGpus(), 0) + self.assertEqual(service.minGpuMemory(), 0) + service.setMinGpus(TEST_MIN_GPUS) + service.setMaxGpus(TEST_MAX_GPUS) + service.setMinGpuMemory(TEST_MIN_GPU_MEMORY) + self.assertEqual(service.minGpus(), TEST_MIN_GPUS) + self.assertEqual(service.maxGpus(), TEST_MAX_GPUS) + self.assertEqual(service.minGpuMemory(), TEST_MIN_GPU_MEMORY) + + stubMock.GetService.assert_called_with( + service_pb2.ServiceGetServiceRequest(name=TEST_SERVICE_NAME), timeout=mock.ANY) + self.assertEqual(service.name(), TEST_SERVICE_NAME) + + def testUpdateMemError(self, getStubMock): + service = opencue.wrappers.service.Service(service=service_pb2.Service( + name=TEST_SERVICE_NAME)) + + self.assertRaises(ValueError, service.update) + + def testSetMinMemIncrease(self, getStubMock): + service = opencue.wrappers.service.Service( + service_pb2.Service(name=TEST_SERVICE_NAME, + min_memory_increase=2097152)) + + self.assertRaises(ValueError, service.setMinMemoryIncrease, -1) + self.assertRaises(ValueError, 
service.setMinMemoryIncrease, 0) + + service.setMinMemoryIncrease(12345678) + self.assertEqual(service.minMemoryIncrease(), 12345678) + if __name__ == '__main__': unittest.main() diff --git a/pycue/tests/wrappers/show_test.py b/pycue/tests/wrappers/show_test.py index b7cc3dd9a..cb73a4b5d 100644 --- a/pycue/tests/wrappers/show_test.py +++ b/pycue/tests/wrappers/show_test.py @@ -44,6 +44,8 @@ TEST_SUBSCRIPTION_BURST = 1200 TEST_MIN_CORES = 42 TEST_MAX_CORES = 47 +TEST_MIN_GPUS = 2 +TEST_MAX_GPUS = 7 TEST_ENABLE_VALUE = False TEST_GROUP_NAME = 'group' TEST_GROUP_DEPT = 'lighting' @@ -197,6 +199,30 @@ def testSetDefaultMinCores(self, getStubMock): show_pb2.ShowSetDefaultMinCoresRequest(show=show.data, min_cores=TEST_MIN_CORES), timeout=mock.ANY) + def testSetDefaultMaxGpus(self, getStubMock): + stubMock = mock.Mock() + stubMock.SetDefaultMaxGpus.return_value = show_pb2.ShowSetDefaultMaxGpusResponse() + getStubMock.return_value = stubMock + + show = opencue.wrappers.show.Show(show_pb2.Show(name=TEST_SHOW_NAME)) + show.setDefaultMaxGpus(TEST_MAX_GPUS) + + stubMock.SetDefaultMaxGpus.assert_called_with( + show_pb2.ShowSetDefaultMaxGpusRequest(show=show.data, max_gpus=TEST_MAX_GPUS), + timeout=mock.ANY) + + def testSetDefaultMinGpus(self, getStubMock): + stubMock = mock.Mock() + stubMock.SetDefaultMinGpus.return_value = show_pb2.ShowSetDefaultMinGpusResponse() + getStubMock.return_value = stubMock + + show = opencue.wrappers.show.Show(show_pb2.Show(name=TEST_SHOW_NAME)) + show.setDefaultMinGpus(TEST_MIN_GPUS) + + stubMock.SetDefaultMinGpus.assert_called_with( + show_pb2.ShowSetDefaultMinGpusRequest(show=show.data, min_gpus=TEST_MIN_GPUS), + timeout=mock.ANY) + def testFindFilter(self, getStubMock): stubMock = mock.Mock() stubMock.FindFilter.return_value = show_pb2.ShowFindFilterResponse( @@ -298,6 +324,12 @@ def testEnableDispatching(self, getStubMock): show_pb2.ShowEnableDispatchingRequest(show=show.data, enabled=TEST_ENABLE_VALUE), timeout=mock.ANY) + def 
testCreateServiceOverrideMemError(self, getStubMock): + service = service_pb2.Service(name=TEST_SERVICE_NAME) + show = opencue.wrappers.show.Show(show_pb2.Show(name=TEST_SHOW_NAME)) + + self.assertRaises(ValueError, show.createServiceOverride, service) + if __name__ == '__main__': unittest.main() diff --git a/pyoutline/Dockerfile b/pyoutline/Dockerfile index fb24d1136..02f954c23 100644 --- a/pyoutline/Dockerfile +++ b/pyoutline/Dockerfile @@ -1,35 +1,14 @@ -# ----------------- -# BUILD -# ----------------- -FROM centos:7 as build - -# First line after FROM should be unique to avoid --cache-from weirdness. -RUN echo "PyCue build stage" +FROM python:3.9.14 WORKDIR /src -RUN yum -y install \ - epel-release \ - gcc \ - python-devel - -RUN yum -y install \ - python-pip \ - python36 \ - python36-devel \ - python36-pip - -RUN python -m pip install --upgrade 'pip<21' -RUN python3.6 -m pip install --upgrade pip - -RUN python -m pip install --upgrade 'setuptools<45' -RUN python3.6 -m pip install --upgrade setuptools +RUN python3 -m pip install --upgrade pip +RUN python3 -m pip install --upgrade setuptools COPY LICENSE ./ COPY requirements.txt ./ -RUN python -m pip install -r requirements.txt -RUN python3.6 -m pip install -r requirements.txt +RUN python3 -m pip install -r requirements.txt COPY proto/ ./proto COPY pycue/README.md ./pycue/ @@ -37,7 +16,7 @@ COPY pycue/setup.py ./pycue/ COPY pycue/opencue ./pycue/opencue COPY pycue/FileSequence ./pycue/FileSequence -RUN python -m grpc_tools.protoc \ +RUN python3 -m grpc_tools.protoc \ -I=./proto \ --python_out=./pycue/opencue/compiled_proto \ --grpc_python_out=./pycue/opencue/compiled_proto \ @@ -50,7 +29,6 @@ RUN 2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py COPY pyoutline/README.md ./pyoutline/ COPY pyoutline/setup.py ./pyoutline/ COPY pyoutline/bin ./pyoutline/bin -COPY pyoutline/etc ./pyoutline/etc COPY pyoutline/tests/ ./pyoutline/tests COPY pyoutline/wrappers ./pyoutline/wrappers COPY pyoutline/outline 
./pyoutline/outline @@ -58,28 +36,13 @@ COPY pyoutline/outline ./pyoutline/outline COPY VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION -RUN cd pycue && python setup.py install -RUN cd pycue && python3.6 setup.py install - -RUN cd pyoutline && python setup.py test -RUN cd pyoutline && python3.6 setup.py test +RUN cd pycue && python3 setup.py install +RUN cd pyoutline && python3 setup.py test RUN cp LICENSE requirements.txt VERSION pyoutline/ RUN versioned_name="pyoutline-$(cat ./VERSION)-all" \ - && mv pyoutline $versioned_name \ - && tar -cvzf $versioned_name.tar.gz $versioned_name/* - - -# ----------------- -# RUN -# ----------------- -FROM centos:7 - -# First line after FROM should be unique to avoid --cache-from weirdness. -RUN echo "PyCue runtime stage" - -WORKDIR /opt/opencue - -COPY --from=build /src/pyoutline-*-all.tar.gz ./ - + && mv pyoutline "${versioned_name}" \ + && tar -cvzf "${versioned_name}.tar.gz" ${versioned_name}/* \ + && mkdir -p /opt/opencue \ + && cp "${versioned_name}.tar.gz" /opt/opencue/ diff --git a/pyoutline/bin/cuerunbase.py b/pyoutline/bin/cuerunbase.py index 89da42b6e..7839305b2 100644 --- a/pyoutline/bin/cuerunbase.py +++ b/pyoutline/bin/cuerunbase.py @@ -68,7 +68,7 @@ def handle_core_arguments(): --repos = specify an alternate outline repository All of these options need to be handled to setup the right - environement to imported the versioned cuerun/outline code. + environment to imported the versioned cuerun/outline code. 
""" repos = None diff --git a/pyoutline/outline/backend/cue.py b/pyoutline/outline/backend/cue.py index ea13b41f0..5d9a7147e 100644 --- a/pyoutline/outline/backend/cue.py +++ b/pyoutline/outline/backend/cue.py @@ -258,6 +258,16 @@ def _serialize(launcher, use_pycuerun): elif launcher.get("priority"): _warning_spec_version(spec_version, "priority") sub_element(j, "maxretries", str(launcher.get("maxretries"))) + if spec_version >= Version("1.13"): + if ol.get_maxcores(): + sub_element(j, "maxcores", str(ol.get_maxcores())) + if ol.get_maxgpus(): + sub_element(j, "maxgpus", str(ol.get_maxgpus())) + else: + if ol.get_maxcores(): + _warning_spec_version(spec_version, "maxcores") + if ol.get_maxgpus(): + _warning_spec_version(spec_version, "maxgpus") sub_element(j, "autoeat", str(launcher.get("autoeat"))) if ol.get_arg("localbook"): @@ -305,7 +315,12 @@ def _serialize(launcher, use_pycuerun): sub_element(spec_layer, "chunk", str(layer.get_chunk_size())) # opencue specific options - if layer.get_arg("threads"): + # Keeping 'threads' for backward compatibility + if layer.get_arg("cores"): + if layer.get_arg("threads"): + logger.warning("%s has both cores and threads. 
Use cores.", layer.get_name()) + sub_element(spec_layer, "cores", "%0.1f" % (layer.get_arg("cores"))) + elif layer.get_arg("threads"): sub_element(spec_layer, "cores", "%0.1f" % (layer.get_arg("threads"))) if layer.is_arg_set("threadable"): @@ -315,6 +330,30 @@ def _serialize(launcher, use_pycuerun): if layer.get_arg("memory"): sub_element(spec_layer, "memory", "%s" % (layer.get_arg("memory"))) + gpus = None + if layer.get_arg("gpus"): + if spec_version >= Version("1.12"): + gpus = layer.get_arg("gpus") + else: + _warning_spec_version(spec_version, "gpus") + + gpu_memory = None + if layer.get_arg("gpu_memory"): + if spec_version >= Version("1.12"): + gpu_memory = layer.get_arg("gpu_memory") + else: + _warning_spec_version(spec_version, "gpu_memory") + + if gpus or gpu_memory: + # Cuebot expects non-zero positive value on gpus and gpu_memory + if gpus is None: + gpus = 1 + if gpu_memory is None: + gpu_memory = "1g" + + sub_element(spec_layer, "gpus", "%d" % gpus) + sub_element(spec_layer, "gpu_memory", "%s" % gpu_memory) + if layer.get_arg("timeout"): if spec_version >= Version("1.10"): sub_element(spec_layer, "timeout", "%s" % (layer.get_arg("timeout"))) diff --git a/pyoutline/outline/config.py b/pyoutline/outline/config.py index 371793a3c..8cf298f62 100644 --- a/pyoutline/outline/config.py +++ b/pyoutline/outline/config.py @@ -28,8 +28,10 @@ # pylint: enable=wrong-import-position import getpass +import logging import os import pathlib +import platform import tempfile import six @@ -40,27 +42,92 @@ else: ConfigParser = configparser.ConfigParser - -__all__ = ["config"] +__all__ = ['config', 'read_config_from_disk'] __file_path__ = pathlib.Path(__file__) -PYOUTLINE_ROOT_DIR = __file_path__.parent.parent -DEFAULT_USER_DIR = pathlib.Path(tempfile.gettempdir()) / 'opencue' / 'outline' / getpass.getuser() +# Environment variables which can be used to define a custom config file. +__CONFIG_FILE_ENV_VARS = [ + # OUTLINE_CONFIG_FILE is the preferred setting to use. 
+ 'OUTLINE_CONFIG_FILE', + # OL_CONFIG is deprecated, but kept for now for backwards compatibility. + 'OL_CONFIG', +] + + +logger = logging.getLogger("outline.config") + + +def config_base_directory(): + """Returns the OpenCue config base directory. + + This platform-dependent directory, stored within your user profile, is used by + OpenCue components as the default location for various configuration files. Typically + if you store your config files in this location, there is no need to set environment + variables to indicate where your config files are located -- OpenCue should recognize + them automatically. + + NOTE: This work is ongoing. Over time more OpenCue components will start using this + base directory. See https://github.com/AcademySoftwareFoundation/OpenCue/issues/785. + + :rtype: str + :return: config file base directory + """ + if platform.system() == 'Windows': + return os.path.join(os.path.expandvars('%APPDATA%'), 'opencue') + return os.path.join(os.path.expanduser('~'), '.config', 'opencue') + + +def read_config_from_disk(): + """Loads configuration settings from config file on the local system. + + The configuration file used is, in order of preference: + - Path defined by the OUTLINE_CONFIG_FILE environment variable. + - Path defined by the OL_CONFIG environment variable. + - Path within the config base directory (i.e. ~/.config/opencue/outline.cfg) + - The default outline.cfg file which is distributed with the outline library. 
+ + :rtype: ConfigParser + :return: config settings + """ + pyoutline_root_dir = __file_path__.parent.parent + default_user_dir = pathlib.Path( + tempfile.gettempdir()) / 'opencue' / 'outline' / getpass.getuser() + + _config = ConfigParser() + config_file = None + + for config_file_env_var in __CONFIG_FILE_ENV_VARS: + logger.debug('Checking for outline config file path in %s', config_file_env_var) + config_file_from_env = os.environ.get(config_file_env_var) + if config_file_from_env and os.path.exists(config_file_from_env): + config_file = config_file_from_env + break + + if not config_file: + config_from_user_profile = os.path.join(config_base_directory(), 'outline.cfg') + logger.debug('Checking for outline config at %s', config_from_user_profile) + if os.path.exists(config_from_user_profile): + config_file = config_from_user_profile + + if not config_file: + default_config_path = __file_path__.parent / 'outline.cfg' + logger.info('Loading default outline config from %s', default_config_path) + if default_config_path.exists(): + config_file = default_config_path + + if not config_file: + raise FileNotFoundError('outline config file was not found') + + _config.read(str(config_file)) -config = ConfigParser() + # Add defaults to the config,if they were not specified. + if not _config.get('outline', 'home'): + _config.set('outline', 'home', str(pyoutline_root_dir)) -default_config_paths = [__file_path__.parent.parent.parent / 'etc' / 'outline.cfg', - __file_path__.parent.parent / 'etc' / 'outline.cfg'] -default_config_path = None -for default_config_path in default_config_paths: - if default_config_path.exists(): - break + if not _config.get('outline', 'user_dir'): + _config.set('outline', 'user_dir', str(default_user_dir)) -config.read(os.environ.get("OL_CONFIG", str(default_config_path))) + return _config -# Add defaults to the config,if they were not specified. 
-if not config.get('outline', 'home'): - config.set('outline', 'home', str(PYOUTLINE_ROOT_DIR)) -if not config.get('outline', 'user_dir'): - config.set('outline', 'user_dir', str(DEFAULT_USER_DIR)) +config = read_config_from_disk() diff --git a/pyoutline/outline/cuerun.py b/pyoutline/outline/cuerun.py index 7f40364f8..2abf7352d 100644 --- a/pyoutline/outline/cuerun.py +++ b/pyoutline/outline/cuerun.py @@ -74,7 +74,7 @@ def get_launch_facility(): def launch(ol, use_pycuerun=True, **args): """ - A simple convinience method for launching an outline script with + A simple convenience method for launching an outline script with the most common options. If you need additional options, use the OutlineLauncher class. @@ -181,7 +181,7 @@ def setup(self): if self.get("range_default"): fully_baked = True for layer in self.__outline.get_layers(): - # Frames dont' have a range by default. + # Frames don't have a range by default. if isinstance(layer, Frame): continue if not layer.get_arg("range"): diff --git a/pyoutline/outline/depend.py b/pyoutline/outline/depend.py index 9caffab85..cb83d576b 100644 --- a/pyoutline/outline/depend.py +++ b/pyoutline/outline/depend.py @@ -108,18 +108,18 @@ def get_type(self): """ return self.__type - def is_propigated(self): + def is_propagated(self): """ - A propigated dependency is propigated to to others layers + A propagated dependency is propagated to to others layers automatically. For example when a L{Layer} A depends on L{Layer} B through - a propigated dependency, then setting up a dependency from L{Layer} C to + a propagated dependency, then setting up a dependency from L{Layer} C to L{Layer} A would automatically create a depend from L{Layer} C to L{Layer} B. Depends that are automatically setup between L{LayerPreProcess} and - a L{Layer} are propigated dependencies. + a L{Layer} are propagated dependencies. :rtype: boolean - :return: True if he depend is propigated, false if it is not. 
+ :return: True if the depend is propagated, false if it is not. """ return self.__propigate diff --git a/pyoutline/outline/io.py b/pyoutline/outline/io.py index e127a65fc..67d4b1138 100644 --- a/pyoutline/outline/io.py +++ b/pyoutline/outline/io.py @@ -361,11 +361,11 @@ def get_res(self): def get_rep(self): """ - Return the repesentation. The repesentation is + Return the representation. The representation is the oav_resolution_colorspace. :rtype: string - :return: the repesentation. + :return: the representation. """ return self.__fs.getDirname().rsplit("/", 2)[1] diff --git a/pyoutline/outline/layer.py b/pyoutline/outline/layer.py index 9fcdea1be..b0e43bde0 100644 --- a/pyoutline/outline/layer.py +++ b/pyoutline/outline/layer.py @@ -416,17 +416,17 @@ def execute(self, frame): # Check for the existance of required inputs. self.check_input(frames) - # Set all post set shot environement variables. + # Set all post set shot environment variables. for env_k, env_v in self.__outline.get_env().items(): if not env_v[1]: - logger.info("Setting post-set shot environement var: %s %s", + logger.info("Setting post-set shot environment var: %s %s", env_k, env_v[0]) os.environ[env_k] = env_v[0] # Set all layer specific post set shot env variables try: for env_k, env_v in self.__env.items(): - logger.info("Setting post-set shot environement var: %s %s", + logger.info("Setting post-set shot environment var: %s %s", env_k, env_v) os.environ[str(env_k)] = str(env_v) except AttributeError: @@ -628,7 +628,7 @@ def put_file(self, src, rename=None): def get_file(self, name, check=True, new=False): """ - Retrieve the sesion path path to the given file. The + Retrieve the session path to the given file. The file does not have to exist. :type name: str @@ -741,7 +741,7 @@ def get_local_frame_set(self, start_frame): # Remove the duplicates out of our frame range.
# frame_range = FileSequence.FrameSet(self.get_frame_range()) - frame_set = outline.util.deaggregate_frame_set(frame_range) + frame_set = outline.util.disaggregate_frame_set(frame_range) # # Now find the index for the current frame and start @@ -871,10 +871,10 @@ def depend_on(self, on_layer, depend_type=outline.depend.DependType.FrameByFrame # C, which means layer D must now also depend on layer B. # # Currently this creates a depend-all (LayerOnLayer) between - # the propigated depends. + # the propagated depends. # for depend in on_layer.get_depends(): - if depend.is_propigated(): + if depend.is_propagated(): for my_depend in self.get_depends(): dependant = my_depend.get_depend_on_layer() logger.info( diff --git a/pyoutline/outline/loader.py b/pyoutline/outline/loader.py index d043e4a32..ca30b7410 100644 --- a/pyoutline/outline/loader.py +++ b/pyoutline/outline/loader.py @@ -25,7 +25,6 @@ import os import logging import json -from past.builtins import execfile import time import uuid import yaml @@ -97,7 +96,7 @@ def load_outline(path): def load_json(json_str): """ - Parse a json repesentation of an outline file. + Parse a json representation of an outline file. :type json_str: str :param json_str: A json string. 
@@ -124,6 +123,12 @@ def decode_layer(layer): if "name" in data: ol.set_name(data["name"]) + if "facility" in data: + ol.set_facility(data["facility"]) + if "maxcores" in data: + ol.set_maxcores(data["maxcores"]) + if "maxgpus" in data: + ol.set_maxgpus(data["maxgpus"]) if "range" in data: ol.set_frame_range(data["range"]) @@ -167,7 +172,9 @@ def parse_outline_script(path): """ try: logger.info("parsing outline file %s", path) - execfile(path, {}) + with open(path) as fp: + code = compile(fp.read(), path, 'exec') + exec(code) # pylint: disable=exec-used except Exception as exp: logger.warning("failed to parse as python file, %s", exp) raise outline.exception.OutlineException( @@ -217,7 +224,8 @@ class Outline(object): def __init__(self, name=None, frame_range=None, path=None, serialize=True, name_unique=False, current=False, - shot=None, show=None, user=None, facility=None): + shot=None, show=None, user=None, facility=None, + maxcores=None, maxgpus=None): """ :type name: string :param name: A name for the outline instance. This will become @@ -252,6 +260,11 @@ def __init__(self, name=None, frame_range=None, path=None, :param facility: The launch facility to be used. If not specified the RENDER_TO and FACILITY environment variables will be checked. + :type maxcores: int + :param maxcores: The maximum number of CPU cores for the job. + + :type maxgpus: int + :param maxgpus: The maximum number of GPU units for the job. """ object.__init__(self) @@ -296,7 +309,7 @@ def __init__(self, name=None, frame_range=None, path=None, self.__args = {} # - # See contsants for the description of outline modes + # See constants for the description of outline modes # self.__mode = outline.constants.OUTLINE_MODE_INIT @@ -307,7 +320,7 @@ def __init__(self, name=None, frame_range=None, path=None, self.__layers = [] # - # A hash of environement variables that are passed up + # A hash of environment variables that are passed up # to opencue and then set before each frame is run. 
# These are set "pre" setshot, so they can be used # to modify setshot behavior. @@ -319,6 +332,16 @@ def __init__(self, name=None, frame_range=None, path=None, # self.__facility = facility + # + # The maximum number of CPU cores to use, or None. + # + self.__maxcores = maxcores + + # + # The maximum number of GPU units to use, or None. + # + self.__maxgpus = maxgpus + # # The outline session. The session is setup during the setup # phase. Every job has a unique session which maps to a @@ -628,6 +651,30 @@ def set_facility(self, facility): """ self.__facility = facility + def get_maxcores(self): + """Return the maximum number of CPU cores for this outline.""" + return self.__maxcores + + def set_maxcores(self, maxcores): + """Set the maximum number of CPU cores for this outline instance. + + :type maxcores: int + :param maxcores: The maximum number of CPU cores to set. + """ + self.__maxcores = maxcores + + def get_maxgpus(self): + """Return the maximum number of GPU units for this outline.""" + return self.__maxgpus + + def set_maxgpus(self, maxgpus): + """Set the maximum number of GPU units for this outline instance. + + :type maxgpus: int + :param maxgpus: The maximum number of GPU units to set. + """ + self.__maxgpus = maxgpus + def get_mode(self): """Return the current mode of this outline object. @@ -680,11 +727,11 @@ def get_frame_range(self): def set_env(self, key, value): """ - Set an environment variable that is propigated to + Set an environment variable that is propagated to every frame. :type key: str - :param key: Name of environement variable. + :param key: Name of environment variable. :type value: str :param value: Value to associate with the name. @@ -705,11 +752,11 @@ def get_env(self, key=None): """ - Return the environement hash setup using set_env. + Return the environment hash setup using set_env.
:rtype: dict - :return: the dictionary of values that will be propigated into - every frame's environement on the cue. + :return: the dictionary of values that will be propagated into + every frame's environment on the cue. """ if key: return self.__env[key][0] @@ -742,7 +789,7 @@ def set_arg(self, key, value): def get_arg(self, key, default=None): """ - Return the value assoiciated with the given key. Throw an + Return the value associated with the given key. Throw an OutlineException if the key does not exist. If a default value is provided then that value is returned instead of throwing an OutlineException. @@ -782,7 +829,7 @@ def put_file(self, src, rename=None): def get_file(self, name, check=True, new=False): """ - Retrieve the sesion path path to the given file. The + Retrieve the session path path to the given file. The file does not have to exist. :type name: str diff --git a/pyoutline/outline/modules/shell.py b/pyoutline/outline/modules/shell.py index e564e2162..482fcda4a 100644 --- a/pyoutline/outline/modules/shell.py +++ b/pyoutline/outline/modules/shell.py @@ -22,7 +22,6 @@ import logging import os -from past.builtins import execfile import outline.layer import outline.util @@ -56,7 +55,10 @@ def _setup(self): self.__code = None def _execute(self, frames): - execfile(self.get_file("script")) + path = self.get_file("script") + with open(path) as fp: + code = compile(fp.read(), path, 'exec') + exec(code) # pylint: disable=exec-used class Shell(outline.layer.Layer): diff --git a/pyoutline/etc/outline.cfg b/pyoutline/outline/outline.cfg similarity index 93% rename from pyoutline/etc/outline.cfg rename to pyoutline/outline/outline.cfg index 6d0f129a4..6347825f9 100644 --- a/pyoutline/etc/outline.cfg +++ b/pyoutline/outline/outline.cfg @@ -5,7 +5,7 @@ wrapper_dir = %(home)s/wrappers user_dir = bin_dir = %(home)s/bin backend = cue -spec_version = 1.11 +spec_version = 1.13 facility = local domain = example.com maxretries = 2 diff --git 
a/pyoutline/outline/util.py b/pyoutline/outline/util.py index 82f7c1153..c5edd0f2d 100644 --- a/pyoutline/outline/util.py +++ b/pyoutline/outline/util.py @@ -31,16 +31,16 @@ from .config import config -def deaggregate_frame_set(frameset): - """Deaggregates a FileSequence.FrameSet into its individual frames +def disaggregate_frame_set(frameset): + """Disaggregates a FileSequence.FrameSet into its individual frames and removes duplicates. FrameSet objects can have duplicates if the user specifies duplicates, which they tend to do even though they don't want duplicates. :type frameset: FileSequence.FrameSet - :param frameset: The frameset to deaggregate + :param frameset: The frameset to disaggregate :rtype: List - :return: The list of deaggregated frames. + :return: The list of disaggregated frames. """ # This is not a Set because sets are unordered. diff --git a/pyoutline/setup.py b/pyoutline/setup.py index 5663a5fa0..0ecc79e0c 100644 --- a/pyoutline/setup.py +++ b/pyoutline/setup.py @@ -46,12 +46,18 @@ 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], packages=find_packages(exclude=['tests']), data_files=[ ('bin', ['bin/cuerunbase.py', 'bin/pycuerun', 'bin/util_qc_job_layer.py']), - ('etc', ['etc/outline.cfg']), - ('wrappers', ['wrappers/opencue_wrap_frame', 'wrappers/opencue_wrap_frame_no_ss', 'wrappers/local_wrap_frame']), + ('wrappers', [ + 'wrappers/opencue_wrap_frame', 'wrappers/opencue_wrap_frame_no_ss', + 'wrappers/local_wrap_frame']), ], + package_data={ + 'outline': ['outline.cfg'], + }, test_suite='tests', ) diff --git a/pyoutline/tests/backend/cue_test.py b/pyoutline/tests/backend/cue_test.py index 029a1ec3f..89c7bcac7 100644 --- a/pyoutline/tests/backend/cue_test.py +++ b/pyoutline/tests/backend/cue_test.py @@ -92,6 +92,42 @@ def testSerializeShellOutline(self): self.assertEqual(0, 
len(list(outlineXml.find('depends')))) +class CoresTest(unittest.TestCase): + def setUp(self): + # Ensure to reset current + outline.Outline.current = None + + def create(self): + ol = outline.Outline() + layer = outline.Layer("test") + ol.add_layer(layer) + return ol, layer + + def assertCores(self, ol, v): + launcher = outline.cuerun.OutlineLauncher(ol, user=TEST_USER) + outlineXml = ET.fromstring(outline.backend.cue.serialize(launcher)) + job = outlineXml.find('job') + layer = job.find('layers').find('layer') + self.assertEqual(v, layer.find('cores').text) + + def testCores(self): + ol, layer = self.create() + layer.set_arg("cores", 42) + self.assertCores(ol, "42.0") + + def testThreads(self): + ol, layer = self.create() + layer.set_arg("threads", 4) + self.assertCores(ol, "4.0") + + def testCoresAndThreads(self): + ol, layer = self.create() + layer.set_arg("cores", 8) + layer.set_arg("threads", 4) + # cores overrides threads + self.assertCores(ol, "8.0") + + class BuildCommandTest(unittest.TestCase): def setUp(self): path = os.path.join(SCRIPTS_DIR, 'shell.outline') diff --git a/pyoutline/tests/config_test.py b/pyoutline/tests/config_test.py new file mode 100644 index 000000000..a7e089e39 --- /dev/null +++ b/pyoutline/tests/config_test.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python + +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for the outline.config module.""" + +import getpass +import os.path +import unittest + +import mock +import pyfakefs.fake_filesystem_unittest + +import opencue +import outline +# The local import is necessary as `outline.config` will point to the ConfigParser after the +# first import. +from outline.config import read_config_from_disk + + +USER_CONFIG = ''' +[outline] +home = /some/users/home/dir +session_dir = {HOME}/.opencue/sessions +wrapper_dir = %(home)s/wrappers +user_dir = /arbitrary/user/dir +spec_version = 1.9 +facility = cloud + +[plugin:local] +module=outline.plugins.local +enable=1 +''' + + +class ConfigTest(pyfakefs.fake_filesystem_unittest.TestCase): + def setUp(self): + self.setUpPyfakefs() + self.fs.add_real_file( + os.path.join(os.path.dirname(opencue.__file__), 'default.yaml'), read_only=True) + if 'OL_CONFIG' in os.environ: + del os.environ['OL_CONFIG'] + if 'OUTLINE_CONFIG_FILE' in os.environ: + del os.environ['OUTLINE_CONFIG_FILE'] + + @mock.patch('tempfile.gettempdir', new=mock.Mock(return_value='/path/to/tmp/dir')) + def test__should_load_default_values(self): + self.assertIsNone(os.environ.get('OL_CONF')) + self.assertIsNone(os.environ.get('OUTLINE_CONFIG_FILE')) + self.fs.add_real_file( + os.path.join(os.path.dirname(outline.__file__), 'outline.cfg'), + read_only=True) + + config = read_config_from_disk() + + default_home = os.path.dirname(os.path.dirname(__file__)) + self.assertEqual(default_home, config.get('outline', 'home')) + self.assertEqual('{HOME}/.opencue/sessions', config.get('outline', 'session_dir')) + self.assertEqual( + os.path.join(default_home, 'wrappers'), config.get('outline', 'wrapper_dir')) + self.assertEqual( + '/path/to/tmp/dir/opencue/outline/%s' % getpass.getuser(), + config.get('outline', 'user_dir')) + self.assertEqual( + os.path.join(default_home, 'bin'), config.get('outline', 'bin_dir')) + self.assertEqual('cue', config.get('outline', 'backend')) + self.assertEqual('local', config.get('outline', 
'facility')) + self.assertEqual('example.com', config.get('outline', 'domain')) + self.assertEqual('2', config.get('outline', 'maxretries')) + self.assertEqual('testing', config.get('outline', 'default_show')) + self.assertEqual('default', config.get('outline', 'default_shot')) + self.assertEqual('outline.plugins.local', config.get('plugin:local', 'module')) + self.assertEqual('1', config.get('plugin:local', 'enable')) + + def test__should_load_user_config_from_env_var(self): + config_file_path = '/path/to/outline.cfg' + self.fs.create_file(config_file_path, contents=USER_CONFIG) + os.environ['OUTLINE_CONFIG_FILE'] = config_file_path + + config = read_config_from_disk() + + custom_home = '/some/users/home/dir' + self.assertEqual(custom_home, config.get('outline', 'home')) + self.assertEqual('{HOME}/.opencue/sessions', config.get('outline', 'session_dir')) + self.assertEqual( + os.path.join(custom_home, 'wrappers'), config.get('outline', 'wrapper_dir')) + self.assertEqual('/arbitrary/user/dir', config.get('outline', 'user_dir')) + self.assertEqual('1.9', config.get('outline', 'spec_version')) + self.assertEqual('cloud', config.get('outline', 'facility')) + + def test__should_load_user_config_from_legacy_env_var(self): + config_file_path = '/path/to/outline.cfg' + self.fs.create_file(config_file_path, contents=USER_CONFIG) + os.environ['OL_CONFIG'] = config_file_path + + config = read_config_from_disk() + + custom_home = '/some/users/home/dir' + self.assertEqual(custom_home, config.get('outline', 'home')) + self.assertEqual('{HOME}/.opencue/sessions', config.get('outline', 'session_dir')) + self.assertEqual( + os.path.join(custom_home, 'wrappers'), config.get('outline', 'wrapper_dir')) + self.assertEqual('/arbitrary/user/dir', config.get('outline', 'user_dir')) + self.assertEqual('1.9', config.get('outline', 'spec_version')) + self.assertEqual('cloud', config.get('outline', 'facility')) + + @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) + 
@mock.patch('os.path.expanduser', new=mock.Mock(return_value='/home/username')) + def test__should_load_user_config_from_user_profile(self): + config_file_path = '/home/username/.config/opencue/outline.cfg' + self.fs.create_file(config_file_path, contents=USER_CONFIG) + os.environ['OL_CONFIG'] = config_file_path + + config = read_config_from_disk() + + custom_home = '/some/users/home/dir' + self.assertEqual(custom_home, config.get('outline', 'home')) + self.assertEqual('{HOME}/.opencue/sessions', config.get('outline', 'session_dir')) + self.assertEqual( + os.path.join(custom_home, 'wrappers'), config.get('outline', 'wrapper_dir')) + self.assertEqual('/arbitrary/user/dir', config.get('outline', 'user_dir')) + self.assertEqual('1.9', config.get('outline', 'spec_version')) + self.assertEqual('cloud', config.get('outline', 'facility')) + + +if __name__ == '__main__': + unittest.main() diff --git a/pyoutline/tests/json/facility.json b/pyoutline/tests/json/facility.json new file mode 100644 index 000000000..0876b6872 --- /dev/null +++ b/pyoutline/tests/json/facility.json @@ -0,0 +1,12 @@ +{ + "name": "shell_command", + "facility": "test_facility", + "range": "1", + "layers": [ + { + "name": "shell_layer", + "module": "outline.modules.shell.Shell", + "command": ["/bin/ls"] + } + ] +} diff --git a/pyoutline/tests/json_test.py b/pyoutline/tests/json_test.py index dc0fe48fa..551beec8a 100644 --- a/pyoutline/tests/json_test.py +++ b/pyoutline/tests/json_test.py @@ -77,6 +77,12 @@ def testJsonFile(self, systemMock): systemMock.assert_has_calls([mock.call(['/bin/ls'], frame=1000)]) self.assertEqual('LAYER_VALUE', os.environ['LAYER_KEY']) + def testFacility(self): + """Test facility from JSON""" + with open(os.path.join(JSON_DIR, 'facility.json')) as fp: + ol = outline.load_json(fp.read()) + self.assertEqual('test_facility', ol.get_facility()) + if __name__ == '__main__': unittest.main() diff --git a/pyoutline/tests/specver_test.py b/pyoutline/tests/specver_test.py index 
24d92df07..d04d65bb4 100644 --- a/pyoutline/tests/specver_test.py +++ b/pyoutline/tests/specver_test.py @@ -32,6 +32,8 @@ class SpecVersiondTest(unittest.TestCase): def _makeSpec(self): + # Ensure to reset current + outline.Outline.current = None ol = outline.Outline(name="spec_version_test") layer = outline.modules.shell.Shell("test_layer", command=["/bin/ls"]) layer.set_arg("timeout", 420) @@ -61,3 +63,43 @@ def test_1_11(self): self.assertEqual(root.find("job/layers/layer/timeout").text, "420") self.assertEqual(root.find("job/layers/layer/timeout_llu").text, "4200") self.assertEqual(root.find("job/priority").text, "42") + + def _makeGpuSpec(self): + ol = outline.Outline(name="spec_version_test") + layer = outline.modules.shell.Shell("test_layer", command=["/bin/ls"]) + layer.set_arg("gpus", 4) + layer.set_arg("gpu_memory", 8 * 1024 * 1024) + ol.add_layer(layer) + l = outline.cuerun.OutlineLauncher(ol) + return Et.fromstring(l.serialize()) + + def test_gpu_1_11(self): + outline.config.set("outline", "spec_version", "1.11") + root = self._makeGpuSpec() + self.assertIsNone(root.find("job/layers/layer/gpus")) + self.assertIsNone(root.find("job/layers/layer/gpus_memory")) + + def test_gpu_1_12(self): + outline.config.set("outline", "spec_version", "1.12") + root = self._makeGpuSpec() + self.assertEqual(root.find("job/layers/layer/gpus").text, "4") + self.assertEqual(root.find("job/layers/layer/gpu_memory").text, "8388608") + + def _makeMaxCoresGpusSpec(self): + ol = outline.Outline(name="override_max_cores_and_gpus", maxcores=8, maxgpus=7) + layer = outline.modules.shell.Shell("test_layer", command=["/bin/ls"]) + ol.add_layer(layer) + l = outline.cuerun.OutlineLauncher(ol) + return Et.fromstring(l.serialize()) + + def test_max_cores_gpus_1_12(self): + outline.config.set("outline", "spec_version", "1.12") + root = self._makeMaxCoresGpusSpec() + self.assertIsNone(root.find("job/maxcores")) + self.assertIsNone(root.find("job/maxgpus")) + + def 
test_max_cores_gpus_1_13(self): + outline.config.set("outline", "spec_version", "1.13") + root = self._makeMaxCoresGpusSpec() + self.assertEqual(root.find("job/maxcores").text, "8") + self.assertEqual(root.find("job/maxgpus").text, "7") diff --git a/requirements.txt b/requirements.txt index f5c9a537f..262f681f9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,14 +1,20 @@ 2to3==1.0 enum34==1.1.6 -future==0.17.1 +evdev==1.4.0;python_version<"3.0" and "linux" in sys_platform +future==0.18.3 futures==3.2.0;python_version<"3.0" -grpcio==1.16.0 -grpcio-tools==1.16.0 +grpcio==1.26.0;python_version<"3.0" +grpcio-tools==1.26.0;python_version<"3.0" +grpcio==1.47.0;python_version>="3.0" +grpcio-tools==1.47.0;python_version>="3.0" mock==2.0.0 packaging==20.9 pathlib==1.0.1;python_version<"3.4" -psutil==5.6.6 -pyfakefs==3.6 +protobuf==3.17.3;python_version<"3.0" +psutil==5.6.7 +pyfakefs==3.6;python_version<"3.7" +pyfakefs==5.2.3;python_version>="3.7" pylint==2.6.0;python_version>="3.7" +pynput==1.7.6 PyYAML==5.1 six==1.11.0 diff --git a/requirements_gui.txt b/requirements_gui.txt index 6f35031ad..b7ff2d6b0 100644 --- a/requirements_gui.txt +++ b/requirements_gui.txt @@ -1 +1,3 @@ -PySide2==5.11.2 +PySide2==5.15.2.1 +QtPy==1.11.3;python_version<"3.7" +QtPy==2.3.0;python_version>="3.7" diff --git a/rqd/Dockerfile b/rqd/Dockerfile index fd772c59e..93b222aa7 100644 --- a/rqd/Dockerfile +++ b/rqd/Dockerfile @@ -1,4 +1,6 @@ -FROM centos:7 +FROM --platform=linux/x86_64 centos:7 + +WORKDIR /opt/opencue RUN yum -y install \ epel-release \ @@ -7,23 +9,16 @@ RUN yum -y install \ time RUN yum -y install \ - python-pip \ python36 \ python36-devel \ python36-pip -RUN python -m pip install --upgrade 'pip<21' RUN python3.6 -m pip install --upgrade pip - -RUN python -m pip install --upgrade 'setuptools<45' RUN python3.6 -m pip install --upgrade setuptools -WORKDIR /opt/opencue - COPY LICENSE ./ COPY requirements.txt ./ -RUN python -m pip install -r requirements.txt RUN python3.6 -m 
pip install -r requirements.txt COPY proto/ ./proto @@ -43,13 +38,9 @@ RUN python3.6 -m grpc_tools.protoc \ # for more info. RUN 2to3 -wn -f import rqd/rqd/compiled_proto/*_pb2*.py -# TODO(bcipriano) Lint the code here. (Issue #78) - COPY VERSION.in VERSIO[N] ./ RUN test -e VERSION || echo "$(cat VERSION.in)-custom" | tee VERSION -# Test in Python 2 and 3, but only install in the Python 3 environment. -RUN cd rqd && python setup.py test RUN cd rqd && python3.6 setup.py test RUN cd rqd && python3.6 setup.py install @@ -61,6 +52,10 @@ RUN versioned_name="rqd-$(cat ./VERSION)-all" \ && tar -cvzf $versioned_name.tar.gz $versioned_name/* \ && ln -s $versioned_name rqd +RUN mkdir -p /etc/opencue +RUN echo "[Override]" > /etc/opencue/rqd.conf +RUN echo "USE_NIMBY_PYNPUT=false" >> /etc/opencue/rqd.conf + # RQD gRPC server EXPOSE 8444 diff --git a/rqd/deploy/rqd3_init.d b/rqd/deploy/rqd3_init.d index babc1e1f1..b146780f3 100644 --- a/rqd/deploy/rqd3_init.d +++ b/rqd/deploy/rqd3_init.d @@ -3,7 +3,7 @@ # RQD3: Start/stop rqd3 services # # chkconfig: 345 98 02 -# description: RQD for opencue +# description: Opencue RQD agent # # Source function library. 
@@ -18,7 +18,7 @@ RQD=${RQD_PATH}rqd.py start() { [ -f /usr/local/etc/sweatbox.csh ] && echo "Refusing to start RQD3 on a sweatbox" && exit 0 - echo -n $"Starting rqd3 services:" + echo -n $"Starting openrqd services:" cd ${RQD_PATH} daemon "${RQD}" -d echo "" @@ -26,20 +26,18 @@ start() idle_restart() { - echo -n "Requesting idle restart of rqd3 services:" + echo -n "Requesting idle restart of openrqd services:" cd ${RQD_PATH} - daemon "./cuerqd.py" -restart + daemon "rqd/cuerqd.py --restart &>/dev/null || :" echo "" } stop() { - echo -n "Stopping rqd3 services:" + echo -n "Stopping openrqd services:" cd ${RQD_PATH} - daemon "./cuerqd.py" -exit_now - sleep 2 - killproc ${RQD} >/dev/null 2>&1 || : - echo "" + daemon "rqd/cuerqd.py" --exit_now + echo "Stop Request completed" } case "$1" in diff --git a/rqd/rqd/__main__.py b/rqd/rqd/__main__.py index 4c0a46bfe..605f72f22 100755 --- a/rqd/rqd/__main__.py +++ b/rqd/rqd/__main__.py @@ -28,7 +28,7 @@ Optional configuration file: ---------------------------- -in /etc/rqd3/rqd3.conf: +In /etc/opencue/rqd.conf (on Linux) or %LOCALAPPDATA%/OpenCue/rqd.conf (on Windows): [Override] OVERRIDE_CORES = 2 OVERRIDE_PROCS = 3 @@ -61,15 +61,13 @@ def setupLogging(): """Sets up the logging for RQD. + Logs to /var/log/messages""" - Logs to /var/log/messages""" - # TODO(bcipriano) These should be config based. 
(Issue #72) - consoleFormat = '%(asctime)s %(levelname)-9s rqd3-%(module)-10s %(message)s' - consoleLevel = logging.DEBUG - fileFormat = '%(asctime)s %(levelname)-9s rqd3-%(module)-10s %(message)s' - fileLevel = logging.WARNING # Equal to or greater than the consoleLevel + consolehandler = logging.StreamHandler() + consolehandler.setLevel(rqd.rqconstants.CONSOLE_LOG_LEVEL) + consolehandler.setFormatter(logging.Formatter(rqd.rqconstants.LOG_FORMAT)) + logging.getLogger('').addHandler(consolehandler) - logging.basicConfig(level=consoleLevel, format=consoleFormat) if platform.system() in ('Linux', 'Darwin'): if platform.system() == 'Linux': syslogAddress = '/dev/log' @@ -83,21 +81,25 @@ def setupLogging(): logfile = logging.FileHandler(os.path.expandvars('%TEMP%/openrqd.log')) else: logfile = logging.handlers.SysLogHandler() - logfile.setLevel(fileLevel) - logfile.setFormatter(logging.Formatter(fileFormat)) + logfile.setLevel(rqd.rqconstants.FILE_LOG_LEVEL) + logfile.setFormatter(logging.Formatter(rqd.rqconstants.LOG_FORMAT)) logging.getLogger('').addHandler(logfile) + logging.getLogger('').setLevel(logging.DEBUG) def usage(): """Prints command line syntax""" - s = sys.stderr - print("SYNOPSIS", file=s) - print(" ", sys.argv[0], "[options]\n", file=s) - print(" -d | --daemon => Run as daemon", file=s) - print(" --nimbyoff => Disables nimby activation", file=s) - print(" -c => Provide an alternate config file", file=s) - print(" Defaults to /etc/rqd3/rqd3.conf", file=s) - print(" Config file is optional", file=s) + usage_msg = f"""SYNOPSIS + {sys.argv[0]} [options] + + -d | --daemon => Run as daemon + --nimbyoff => Disables nimby activation + -c => Provide an alternate config file + On Linux: defaults to /etc/opencue/rqd.conf + On Windows: Defaults to %LOCALAPPDATA%/OpenCue/rqd.conf + Config file is optional +""" + print(usage_msg, file=sys.stderr) def main(): diff --git a/rqd/rqd/cuerqd.py b/rqd/rqd/cuerqd.py index 005c92d4f..f7aee88ef 100755 --- a/rqd/rqd/cuerqd.py 
+++ b/rqd/rqd/cuerqd.py @@ -60,13 +60,13 @@ def getRunningFrame(self, frameId): def nimbyOff(self): """Disables Nimby on the host.""" - print(self.rqdHost, "Turning off Nimby") + log.info("%s Turning off Nimby", self.rqdHost) log.info("rqd nimbyoff by %s", os.environ.get("USER")) self.stub.NimbyOff(rqd.compiled_proto.rqd_pb2.RqdStaticNimbyOffRequest()) def nimbyOn(self): """Enables Nimby on the host.""" - print(self.rqdHost, "Turning on Nimby") + log.info("%s Turning on Nimby", self.rqdHost) log.info("rqd nimbyon by %s", os.environ.get("USER")) self.stub.NimbyOn(rqd.compiled_proto.rqd_pb2.RqdStaticNimbyOnRequest()) @@ -100,7 +100,13 @@ def shutdownRqdIdle(self): def shutdownRqdNow(self): """Shuts down the host now.""" print(self.rqdHost, "Sending shutdownRqdNow command") - self.stub.ShutdownRqdNow(rqd.compiled_proto.rqd_pb2.RqdStaticShutdownNowRequest()) + try: + self.stub.ShutdownRqdNow(rqd.compiled_proto.rqd_pb2.RqdStaticShutdownNowRequest()) + # pylint: disable=broad-except + except Exception: + # Shutting down the service from inside means this request will receive + # a connection error response + pass def restartRqdIdle(self): """Restarts RQD on the host when idle.""" diff --git a/rqd/rqd/rqconstants.py b/rqd/rqd/rqconstants.py index 07a621456..1daf2f993 100644 --- a/rqd/rqd/rqconstants.py +++ b/rqd/rqd/rqconstants.py @@ -66,9 +66,14 @@ RQD_RETRY_CRITICAL_REPORT_DELAY = 30 RQD_USE_IP_AS_HOSTNAME = True RQD_USE_IPV6_AS_HOSTNAME = False + +# Use the PATH environment variable from the RQD host.
+RQD_USE_PATH_ENV_VAR = False + RQD_BECOME_JOB_USER = True RQD_CREATE_USER_IF_NOT_EXISTS = True RQD_TAGS = '' +RQD_PREPEND_TIMESTAMP = False KILL_SIGNAL = 9 if platform.system() == 'Linux': @@ -99,12 +104,19 @@ PATH_LOADAVG = "/proc/loadavg" PATH_STAT = "/proc/stat" PATH_MEMINFO = "/proc/meminfo" +# stat and statm are inaccurate because of kernel internal scalability optimization +# stat/statm/status are inaccurate values, true values are in smaps +# but the RQD user can't read smaps and gets: +# [Errno 13] Permission denied: '/proc/166289/smaps' +PATH_PROC_PID_STAT = "/proc/{0}/stat" +PATH_PROC_PID_STATM = "/proc/{0}/statm" +PATH_PROC_PID_CMDLINE = "/proc/{0}/cmdline" if platform.system() == 'Linux': SYS_HERTZ = os.sysconf('SC_CLK_TCK') if platform.system() == 'Windows': - CONFIG_FILE = os.path.expandvars('$LOCALAPPDATA/OpenCue/rqd.conf') + CONFIG_FILE = os.path.expandvars('%LOCALAPPDATA%/OpenCue/rqd.conf') else: CONFIG_FILE = '/etc/opencue/rqd.conf' @@ -116,10 +128,15 @@ OVERRIDE_PROCS = None # number of physical cpus.
ex: None or 2 OVERRIDE_MEMORY = None # in Kb OVERRIDE_NIMBY = None # True to turn on, False to turn off +USE_NIMBY_PYNPUT = True # True pynput, False select OVERRIDE_HOSTNAME = None # Force to use this hostname ALLOW_GPU = False LOAD_MODIFIER = 0 # amount to add/subtract from load +LOG_FORMAT = '%(asctime)s %(levelname)-9s openrqd-%(module)-10s %(message)s' +CONSOLE_LOG_LEVEL = logging.DEBUG +FILE_LOG_LEVEL = logging.WARNING # Equal to or greater than the consoleLevel + if subprocess.getoutput('/bin/su --help').find('session-command') != -1: SU_ARGUMENT = '--session-command' else: @@ -138,8 +155,8 @@ else: ConfigParser = configparser.RawConfigParser config = ConfigParser() - logging.info('Loading config %s', CONFIG_FILE) config.read(CONFIG_FILE) + logging.warning('Loading config %s', CONFIG_FILE) if config.has_option(__section, "RQD_GRPC_PORT"): RQD_GRPC_PORT = config.getint(__section, "RQD_GRPC_PORT") @@ -155,6 +172,8 @@ CUEBOT_HOSTNAME = config.get(__section, "OVERRIDE_CUEBOT") if config.has_option(__section, "OVERRIDE_NIMBY"): OVERRIDE_NIMBY = config.getboolean(__section, "OVERRIDE_NIMBY") + if config.has_option(__section, "USE_NIMBY_PYNPUT"): + USE_NIMBY_PYNPUT = config.getboolean(__section, "USE_NIMBY_PYNPUT") if config.has_option(__section, "OVERRIDE_HOSTNAME"): OVERRIDE_HOSTNAME = config.get(__section, "OVERRIDE_HOSTNAME") if config.has_option(__section, "GPU"): @@ -165,6 +184,8 @@ RQD_USE_IP_AS_HOSTNAME = config.getboolean(__section, "RQD_USE_IP_AS_HOSTNAME") if config.has_option(__section, "RQD_USE_IPV6_AS_HOSTNAME"): RQD_USE_IPV6_AS_HOSTNAME = config.getboolean(__section, "RQD_USE_IPV6_AS_HOSTNAME") + if config.has_option(__section, "RQD_USE_PATH_ENV_VAR"): + RQD_USE_PATH_ENV_VAR = config.getboolean(__section, "RQD_USE_PATH_ENV_VAR") if config.has_option(__section, "RQD_BECOME_JOB_USER"): RQD_BECOME_JOB_USER = config.getboolean(__section, "RQD_BECOME_JOB_USER") if config.has_option(__section, "RQD_TAGS"): @@ -173,8 +194,18 @@ DEFAULT_FACILITY = 
config.get(__section, "DEFAULT_FACILITY") if config.has_option(__section, "LAUNCH_FRAME_USER_GID"): LAUNCH_FRAME_USER_GID = config.getint(__section, "LAUNCH_FRAME_USER_GID") + if config.has_option(__section, "CONSOLE_LOG_LEVEL"): + level = config.get(__section, "CONSOLE_LOG_LEVEL") + CONSOLE_LOG_LEVEL = logging.getLevelName(level) + if config.has_option(__section, "FILE_LOG_LEVEL"): + level = config.get(__section, "FILE_LOG_LEVEL") + FILE_LOG_LEVEL = logging.getLevelName(level) + if config.has_option(__section, "RQD_PREPEND_TIMESTAMP"): + RQD_PREPEND_TIMESTAMP = config.getboolean(__section, "RQD_PREPEND_TIMESTAMP") # pylint: disable=broad-except except Exception as e: logging.warning( "Failed to read values from config file %s due to %s at %s", CONFIG_FILE, e, traceback.extract_tb(sys.exc_info()[2])) + +logging.warning("CUEBOT_HOSTNAME: %s", CUEBOT_HOSTNAME) diff --git a/rqd/rqd/rqcore.py b/rqd/rqd/rqcore.py index dd58e35a6..9391a2126 100644 --- a/rqd/rqd/rqcore.py +++ b/rqd/rqd/rqcore.py @@ -22,7 +22,8 @@ from builtins import str from builtins import object -import logging as log +import datetime +import logging import os import platform import random @@ -44,6 +45,11 @@ import rqd.rqutil +INT32_MAX = 2147483647 +INT32_MIN = -2147483648 +log = logging.getLogger(__name__) + + class FrameAttendantThread(threading.Thread): """Once a frame has been received and checked by RQD, this class handles the launching, waiting on, and cleanup work related to running the @@ -77,8 +83,6 @@ def __createEnvVariables(self): self.frameEnv["TZ"] = self.rqCore.machine.getTimezone() self.frameEnv["USER"] = self.runFrame.user_name self.frameEnv["LOGNAME"] = self.runFrame.user_name - self.frameEnv["MAIL"] = "/usr/mail/%s" % self.runFrame.user_name - self.frameEnv["HOME"] = "/net/homedirs/%s" % self.runFrame.user_name self.frameEnv["mcp"] = "1" self.frameEnv["show"] = self.runFrame.show self.frameEnv["shot"] = self.runFrame.shot @@ -90,11 +94,22 @@ def __createEnvVariables(self): 
self.frameEnv["maxframetime"] = "0" self.frameEnv["minspace"] = "200" self.frameEnv["CUE3"] = "True" - self.frameEnv["CUE_GPU_MEMORY"] = str(self.rqCore.machine.getGpuMemory()) + self.frameEnv["CUE_GPU_MEMORY"] = str(self.rqCore.machine.getGpuMemoryFree()) self.frameEnv["SP_NOMYCSHRC"] = "1" - for key in self.runFrame.environment: - self.frameEnv[key] = self.runFrame.environment[key] + if platform.system() in ("Linux", "Darwin"): + self.frameEnv["MAIL"] = "/usr/mail/%s" % self.runFrame.user_name + self.frameEnv["HOME"] = "/net/homedirs/%s" % self.runFrame.user_name + elif platform.system() == "Windows": + for variable in ["SYSTEMROOT", "APPDATA", "TMP", "COMMONPROGRAMFILES", "SYSTEMDRIVE"]: + if variable in os.environ: + self.frameEnv[variable] = os.environ[variable] + + for key, value in self.runFrame.environment.items(): + if key == 'PATH': + self.frameEnv[key] += os.pathsep + value + else: + self.frameEnv[key] = value # Add threads to use all assigned hyper-threading cores if 'CPU_LIST' in self.runFrame.attributes and 'CUE_THREADS' in self.frameEnv: @@ -103,6 +118,10 @@ def __createEnvVariables(self): len(self.runFrame.attributes['CPU_LIST'].split(',')))) self.frameEnv['CUE_HT'] = "True" + # Add GPU's to use all assigned GPU cores + if 'GPU_LIST' in self.runFrame.attributes: + self.frameEnv['CUE_GPU_CORES'] = self.runFrame.attributes['GPU_LIST'] + def _createCommandFile(self, command): """Creates a file that subprocess. Popen then executes. 
@type command: string @@ -118,6 +137,11 @@ def _createCommandFile(self, command): except OSError: pass # okay, already exists + # Windows Batch needs some characters escaped: + command = command.replace('%', '%%') + for char in '^&<>|': + command = command.replace(char, '^' + char) + commandFile = os.path.join( rqd_tmp_dir, 'cmd-%s-%s.bat' % (self.runFrame.frame_id, time.time())) @@ -176,7 +200,8 @@ def __writeFooter(self): self.endTime = time.time() self.frameInfo.runTime = int(self.endTime - self.startTime) try: - print("\n", "="*59, file=self.rqlog) + print("", file=self.rqlog) + print("="*59, file=self.rqlog) print("RenderQ Job Complete\n", file=self.rqlog) print("%-20s%s" % ("exitStatus", self.frameInfo.exitStatus), file=self.rqlog) print("%-20s%s" % ("exitSignal", self.frameInfo.exitSignal), file=self.rqlog) @@ -187,9 +212,21 @@ def __writeFooter(self): print("%-20s%s" % ("endTime", time.ctime(self.endTime)), file=self.rqlog) print("%-20s%s" % ("maxrss", self.frameInfo.maxRss), file=self.rqlog) + print("%-20s%s" % ("maxUsedGpuMemory", + self.frameInfo.maxUsedGpuMemory), file=self.rqlog) print("%-20s%s" % ("utime", self.frameInfo.utime), file=self.rqlog) print("%-20s%s" % ("stime", self.frameInfo.stime), file=self.rqlog) print("%-20s%s" % ("renderhost", self.rqCore.machine.getHostname()), file=self.rqlog) + + print("%-20s%s" % ("maxrss (KB)", self.frameInfo.maxRss), file=self.rqlog) + for child in sorted(self.frameInfo.childrenProcs.items(), + key=lambda item: item[1]['start_time']): + print("\t%-20s%s" % (child[1]['name'], child[1]['rss']), file=self.rqlog) + print("\t%-20s%s" % ("start_time", + datetime.timedelta(seconds=child[1]["start_time"])), + file=self.rqlog) + print("\t%-20s%s" % ("cmdline", " ".join(child[1]["cmd_line"])), file=self.rqlog) + print("="*59, file=self.rqlog) # pylint: disable=broad-except @@ -278,14 +315,17 @@ def runLinux(self): else: tempCommand += [self._createCommandFile(runFrame.command)] - # Actual cwd is set by 
/shots/SHOW/home/perl/etc/qwrap.cuerun + if rqd.rqconstants.RQD_PREPEND_TIMESTAMP: + file_descriptor = subprocess.PIPE + else: + file_descriptor = self.rqlog # pylint: disable=subprocess-popen-preexec-fn frameInfo.forkedCommand = subprocess.Popen(tempCommand, env=self.frameEnv, cwd=self.rqCore.machine.getTempPath(), stdin=subprocess.PIPE, - stdout=self.rqlog, - stderr=self.rqlog, + stdout=file_descriptor, + stderr=file_descriptor, close_fds=True, preexec_fn=os.setsid) finally: @@ -293,11 +333,13 @@ def runLinux(self): frameInfo.pid = frameInfo.forkedCommand.pid - if not self.rqCore.updateRssThread.isAlive(): + if not self.rqCore.updateRssThread.is_alive(): self.rqCore.updateRssThread = threading.Timer(rqd.rqconstants.RSS_UPDATE_INTERVAL, self.rqCore.updateRss) self.rqCore.updateRssThread.start() + if rqd.rqconstants.RQD_PREPEND_TIMESTAMP: + pipe_to_file(frameInfo.forkedCommand.stdout, frameInfo.forkedCommand.stderr, self.rqlog) returncode = frameInfo.forkedCommand.wait() # Find exitStatus and exitSignal @@ -335,6 +377,7 @@ def runWindows(self): tempCommand = [self._createCommandFile(runFrame.command)] frameInfo.forkedCommand = subprocess.Popen(tempCommand, + env=self.frameEnv, stdin=subprocess.PIPE, stdout=self.rqlog, stderr=self.rqlog) @@ -346,7 +389,7 @@ def runWindows(self): frameInfo.pid = frameInfo.forkedCommand.pid - if not self.rqCore.updateRssThread.isAlive(): + if not self.rqCore.updateRssThread.is_alive(): self.rqCore.updateRssThread = threading.Timer(rqd.rqconstants.RSS_UPDATE_INTERVAL, self.rqCore.updateRss) self.rqCore.updateRssThread.start() @@ -355,6 +398,10 @@ def runWindows(self): # Find exitStatus and exitSignal returncode = frameInfo.forkedCommand.returncode + if returncode < INT32_MIN: + returncode = 303 + if returncode > INT32_MAX: + returncode = 304 frameInfo.exitStatus = returncode frameInfo.exitSignal = returncode @@ -390,7 +437,7 @@ def runDarwin(self): frameInfo.pid = frameInfo.forkedCommand.pid - if not 
self.rqCore.updateRssThread.isAlive(): + if not self.rqCore.updateRssThread.is_alive(): self.rqCore.updateRssThread = threading.Timer(rqd.rqconstants.RSS_UPDATE_INTERVAL, self.rqCore.updateRss) self.rqCore.updateRssThread.start() @@ -493,7 +540,7 @@ def run(self): else: raise RuntimeError(err) try: - self.rqlog = open(runFrame.log_dir_file, "w", 1) + self.rqlog = open(runFrame.log_dir_file, "w+", 1) self.waitForFile(runFrame.log_dir_file) # pylint: disable=broad-except except Exception as e: @@ -531,7 +578,9 @@ def run(self): # Delay keeps the cuebot from spamming failing booking requests time.sleep(10) finally: - self.rqCore.releaseCores(self.runFrame.num_cores, runFrame.attributes.get('CPU_LIST')) + self.rqCore.releaseCores(self.runFrame.num_cores, runFrame.attributes.get('CPU_LIST'), + runFrame.attributes.get('GPU_LIST') + if 'GPU_LIST' in self.runFrame.attributes else None) self.rqCore.deleteFrame(self.runFrame.frame_id) @@ -564,9 +613,10 @@ def __init__(self, optNimbyoff=False): idle_cores=0, locked_cores=0, booked_cores=0, + reserved_cores=[], ) - self.nimby = rqd.rqnimby.Nimby(self) + self.nimby = rqd.rqnimby.NimbyFactory.getNimby(self) self.machine = rqd.rqmachine.Machine(self, self.cores) @@ -598,6 +648,7 @@ def start(self): log.warning('OVERRIDE_NIMBY is False, Nimby startup has been disabled') else: self.nimbyOn() + self.onNimbyLock() elif rqd.rqconstants.OVERRIDE_NIMBY: log.warning('Nimby startup has been triggered by OVERRIDE_NIMBY') self.nimbyOn() @@ -702,6 +753,14 @@ def deleteFrame(self, frameId): try: if frameId in self.__cache: del self.__cache[frameId] + # pylint: disable=no-member + if not self.__cache and self.cores.reserved_cores: + # pylint: disable=no-member + log.error( + 'No running frames but reserved_cores is not empty: %s', + self.cores.reserved_cores) + # pylint: disable=no-member + self.cores.reserved_cores.clear() finally: self.__threadLock.release() @@ -733,7 +792,7 @@ def killAllFrame(self, reason): pass time.sleep(1) - def 
releaseCores(self, reqRelease, releaseHT=None): + def releaseCores(self, reqRelease, releaseHT=None, releaseGpus=None): """The requested number of cores are released @type reqRelease: int @param reqRelease: Number of cores to release, 100 = 1 physical core""" @@ -753,6 +812,9 @@ def releaseCores(self, reqRelease, releaseHT=None): if releaseHT: self.machine.releaseHT(releaseHT) + if releaseGpus: + self.machine.releaseGpus(releaseGpus) + finally: self.__threadLock.release() @@ -784,15 +846,18 @@ def shutdown(self): log.warning("Rebooting machine by request") self.machine.reboot() else: - log.warning("Shutting down RQD by request") + log.warning("Shutting down RQD by request. pid(%s)", os.getpid()) + self.network.stopGrpc() + # Using sys.exit would raise SystemExit, giving exception handlers a chance + # to block this + # pylint: disable=protected-access + os._exit(0) def handleExit(self, signalnum, flag): """Shutdown threads and exit RQD.""" del signalnum del flag self.shutdown() - self.network.stopGrpc() - sys.exit() def launchFrame(self, runFrame): """This will setup for the launch the frame specified in the arguments. 
@@ -817,7 +882,12 @@ def launchFrame(self, runFrame): raise rqd.rqexceptions.CoreReservationFailureException(err) if self.nimby.locked and not runFrame.ignore_nimby: - err = "Not launching, rqd is lockNimby" + err = "Not launching, rqd is lockNimby and not Ignore Nimby" + log.info(err) + raise rqd.rqexceptions.CoreReservationFailureException(err) + + if rqd.rqconstants.OVERRIDE_NIMBY and self.nimby.isNimbyActive(): + err = "Not launching, rqd is lockNimby and User is Active" log.info(err) raise rqd.rqexceptions.CoreReservationFailureException(err) @@ -851,6 +921,11 @@ def launchFrame(self, runFrame): if reserveHT: runFrame.attributes['CPU_LIST'] = reserveHT + if runFrame.num_gpus: + reserveGpus = self.machine.reserveGpus(runFrame.num_gpus) + if reserveGpus: + runFrame.attributes['GPU_LIST'] = reserveGpus + # They must be available at this point, reserve them # pylint: disable=no-member self.cores.idle_cores -= runFrame.num_cores @@ -882,13 +957,18 @@ def reportStatus(self): def shutdownRqdNow(self): """Kill all running frames and shutdown RQD""" self.machine.state = rqd.compiled_proto.host_pb2.DOWN - self.lockAll() - self.killAllFrame("shutdownRqdNow Command") + try: + self.lockAll() + self.killAllFrame("shutdownRqdNow Command") + # pylint: disable=broad-except + except Exception: + log.exception("Failed to kill frames, stopping service anyways") if not self.__cache: self.shutdown() def shutdownRqdIdle(self): """When machine is idle, shutdown RQD""" + log.info("shutdownRqdIdle") self.lockAll() self.__whenIdle = True self.sendStatusReport() @@ -897,11 +977,13 @@ def shutdownRqdIdle(self): def restartRqdNow(self): """Kill all running frames and restart RQD""" + log.info("RestartRqdNow") self.__respawn = True self.shutdownRqdNow() def restartRqdIdle(self): """When machine is idle, restart RQD""" + log.info("RestartRqdIdle") self.lockAll() self.__whenIdle = True self.__respawn = True @@ -934,14 +1016,12 @@ def rebootIdle(self): def nimbyOn(self): """Activates nimby, 
does not kill any running frames until next nimby event. Also does not unlock until sufficient idle time is reached.""" - if platform.system() != "Windows" and os.getuid() != 0: - log.warning("Not starting nimby, not running as root") - return - if not self.nimby.active: + if self.nimby and not self.nimby.active: try: self.nimby.run() - log.info("Nimby has been activated") - except: + log.warning("Nimby has been activated") + # pylint: disable=broad-except + except Exception: self.nimby.locked = False err = "Nimby is in the process of shutting down" log.exception(err) @@ -961,7 +1041,7 @@ def onNimbyLock(self): self.sendStatusReport() def onNimbyUnlock(self, asOf=None): - """This is called by nimby when it unlocks the machine due to sufficent + """This is called by nimby when it unlocks the machine due to sufficient idle. A new report is sent to the cuebot. @param asOf: Time when idle state began, if known.""" del asOf @@ -1086,3 +1166,98 @@ def sendStatusReport(self): def isWaitingForIdle(self): """Returns whether the host is waiting until idle to take some action.""" return self.__whenIdle + +def pipe_to_file(stdout, stderr, outfile): + """ + Prepend entries on stdout and stderr with a timestamp and write to outfile. + + The logic to poll stdout/stderr is inspired by the Popen.communicate implementation. 
+ This feature is linux specific + """ + # Importing packages internally to avoid compatibility issues with Windows + + if stdout is None or stderr is None: + return + outfile.flush() + os.fsync(outfile) + + # pylint: disable=import-outside-toplevel + import select + import errno + # pylint: enable=import-outside-toplevel + + fd2file = {} + fd2output = {} + + poller = select.poll() + + def register_and_append(file_ojb, eventmask): + poller.register(file_ojb, eventmask) + fd2file[file_ojb.fileno()] = file_ojb + + def close_and_unregister_and_remove(fd, close=False): + poller.unregister(fd) + if close: + fd2file[fd].close() + fd2file.pop(fd) + + def print_and_flush_ln(fd, last_timestamp): + txt = ''.join(fd2output[fd]) + lines = txt.split('\n') + next_line_timestamp = None + + # Save the timestamp of the first break + if last_timestamp is None: + curr_line_timestamp = datetime.datetime.now().strftime("%H:%M:%S") + else: + curr_line_timestamp = last_timestamp + + # There are no line breaks + if len(lines) < 2: + return curr_line_timestamp + next_line_timestamp = datetime.datetime.now().strftime("%H:%M:%S") + + remainder = lines[-1] + for line in lines[0:-1]: + print("[%s] %s" % (curr_line_timestamp, line), file=outfile) + outfile.flush() + os.fsync(outfile) + fd2output[fd] = [remainder] + + if next_line_timestamp is None: + return curr_line_timestamp + return next_line_timestamp + + def translate_newlines(data): + data = data.decode("utf-8", "ignore") + return data.replace("\r\n", "\n").replace("\r", "\n") + + select_POLLIN_POLLPRI = select.POLLIN | select.POLLPRI + # stdout + register_and_append(stdout, select_POLLIN_POLLPRI) + fd2output[stdout.fileno()] = [] + + # stderr + register_and_append(stderr, select_POLLIN_POLLPRI) + fd2output[stderr.fileno()] = [] + + while fd2file: + try: + ready = poller.poll() + except select.error as e: + if e.args[0] == errno.EINTR: + continue + raise + + first_chunk_timestamp = None + for fd, mode in ready: + if mode & 
select_POLLIN_POLLPRI: + data = os.read(fd, 4096) + if not data: + close_and_unregister_and_remove(fd) + if not isinstance(data, str): + data = translate_newlines(data) + fd2output[fd].append(data) + first_chunk_timestamp = print_and_flush_ln(fd, first_chunk_timestamp) + else: + close_and_unregister_and_remove(fd) diff --git a/rqd/rqd/rqdservicers.py b/rqd/rqd/rqdservicers.py index a56fdf442..b736ef43b 100644 --- a/rqd/rqd/rqdservicers.py +++ b/rqd/rqd/rqdservicers.py @@ -20,7 +20,7 @@ from __future__ import print_function from __future__ import division -import logging as log +import logging import grpc @@ -28,6 +28,9 @@ import rqd.compiled_proto.rqd_pb2_grpc +log = logging.getLogger(__name__) + + class RqdInterfaceServicer(rqd.compiled_proto.rqd_pb2_grpc.RqdInterfaceServicer): """Service interface for RqdStatic gRPC definition.""" @@ -64,6 +67,8 @@ def KillRunningFrame(self, request, context): frame = self.rqCore.getRunningFrame(request.frame_id) if frame: frame.kill(message=request.message) + else: + log.warning("Wasn't able to find frame(%s) to kill", request.frame_id) return rqd.compiled_proto.rqd_pb2.RqdStaticKillRunningFrameResponse() def ShutdownRqdNow(self, request, context): diff --git a/rqd/rqd/rqmachine.py b/rqd/rqd/rqmachine.py index 915c13afc..65061ef82 100644 --- a/rqd/rqd/rqmachine.py +++ b/rqd/rqd/rqmachine.py @@ -31,7 +31,7 @@ import ctypes import errno -import logging as log +import logging import math import os import platform @@ -45,8 +45,13 @@ # pylint: disable=import-error,wrong-import-position if platform.system() in ('Linux', 'Darwin'): import resource -elif platform.system() == "win32": - import win32api +elif platform.system() == "Windows": + winpsIsAvailable = False + try: + import winps + winpsIsAvailable = True + except ImportError: + pass # pylint: enable=import-error,wrong-import-position import psutil @@ -59,6 +64,7 @@ import rqd.rqutil +log = logging.getLogger(__name__) KILOBYTE = 1024 @@ -73,7 +79,15 @@ def __init__(self, rqCore, 
coreInfo): """ self.__rqCore = rqCore self.__coreInfo = coreInfo - self.__tasksets = set() + self.__gpusets = set() + + # A dictionary built from /proc/cpuinfo containing + # { : { : set([, , ...]), ... }, ... } + self.__procs_by_physid_and_coreid = {} + + # A reverse mapping of the above. + # { : (, ), ... } + self.__physid_and_coreid_by_proc = {} if platform.system() == 'Linux': self.__vmstat = rqd.rqswap.VmStat() @@ -96,7 +110,8 @@ def __init__(self, rqCore, coreInfo): self.__pidHistory = {} - self.setupHT() + self.setupGpu() + self.setupTaskset() def isNimbySafeToRunJobs(self): """Returns False if nimby should be triggered due to resource limits""" @@ -186,8 +201,46 @@ def isUserLoggedIn(self): return True return False + def __updateGpuAndLlu(self, frame): + if 'GPU_LIST' in frame.runFrame.attributes: + usedGpuMemory = 0 + for unitId in frame.runFrame.attributes.get('GPU_LIST').split(','): + usedGpuMemory += self.getGpuMemoryUsed(unitId) + + frame.usedGpuMemory = usedGpuMemory + frame.maxUsedGpuMemory = max(usedGpuMemory, frame.maxUsedGpuMemory) + + if os.path.exists(frame.runFrame.log_dir_file): + stat = os.stat(frame.runFrame.log_dir_file).st_mtime + frame.lluTime = int(stat) + + def _getStatFields(self, pidFilePath): + with open(pidFilePath, "r") as statFile: + stats = statFile.read().split() + stats[1] = stats[1].strip('()') + return stats + def rssUpdate(self, frames): """Updates the rss and maxrss for all running frames""" + if platform.system() == 'Windows' and winpsIsAvailable: + values = list(frames.values()) + pids = [frame.pid for frame in list( + filter(lambda frame: frame.pid > 0, values) + )] + # pylint: disable=no-member + stats = winps.update(pids) + # pylint: enable=no-member + for frame in values: + self.__updateGpuAndLlu(frame) + if frame.pid > 0 and frame.pid in stats: + stat = stats[frame.pid] + frame.rss = stat["rss"] // 1024 + frame.maxRss = max(frame.rss, frame.maxRss) + frame.runFrame.attributes["pcpu"] = str( + stat["pcpu"] * 
self.__coreInfo.total_cores + ) + return + if platform.system() != 'Linux': return @@ -195,13 +248,15 @@ def rssUpdate(self, frames): for pid in os.listdir("/proc"): if pid.isdigit(): try: - with open("/proc/%s/stat" % pid, "r") as statFile: - statFields = statFile.read().split() - - # See "man proc" + statFields = self._getStatFields(rqd.rqconstants.PATH_PROC_PID_STAT + .format(pid)) pids[pid] = { + "name": statFields[1], + "state": statFields[2], + "pgrp": statFields[4], "session": statFields[5], - "vsize": statFields[22], + # virtual memory size is in bytes convert to kb + "vsize": int(statFields[22]), "rss": statFields[23], # These are needed to compute the cpu used "utime": statFields[13], @@ -212,10 +267,27 @@ def rssUpdate(self, frames): # after system boot. "start_time": statFields[21], } + # cmdline: + p = psutil.Process(int(pid)) + pids[pid]["cmd_line"] = p.cmdline() + + # 2. Collect Statm file: /proc/[pid]/statm (same as status vsize in kb) + # - size: "total program size" + # - rss: inaccurate, similar to VmRss in /proc/[pid]/status + child_statm_fields = self._getStatFields( + rqd.rqconstants.PATH_PROC_PID_STATM.format(pid)) + pids[pid]['statm_size'] = \ + int(re.search(r"\d+", child_statm_fields[0]).group()) \ + if re.search(r"\d+", child_statm_fields[0]) else -1 + pids[pid]['statm_rss'] = \ + int(re.search(r"\d+", child_statm_fields[1]).group()) \ + if re.search(r"\d+", child_statm_fields[1]) else -1 # pylint: disable=broad-except - except Exception: - log.exception('failed to read stat file for pid %s', pid) + except (OSError, IOError): + # Many Linux processes are ephemeral and will disappear before we're able + # to read them. This is not typically indicative of a problem. 
+ log.debug('Failed to read stat/statm file for pid %s', pid) # pylint: disable=too-many-nested-blocks try: @@ -224,13 +296,13 @@ def rssUpdate(self, frames): bootTime = self.getBootTime() values = list(frames.values()) - for frame in values: if frame.pid > 0: session = str(frame.pid) rss = 0 vsize = 0 pcpu = 0 + # children pids share the same session id for pid, data in pids.items(): if data["session"] == session: try: @@ -263,13 +335,42 @@ def rssUpdate(self, frames): pidPcpu = totalTime / seconds pcpu += pidPcpu pidData[pid] = totalTime, seconds, pidPcpu + # only keep the highest recorded rss value + if pid in frame.childrenProcs: + childRss = (int(data["rss"]) * resource.getpagesize()) // 1024 + if childRss > frame.childrenProcs[pid]['rss']: + frame.childrenProcs[pid]['rss_page'] = int(data["rss"]) + frame.childrenProcs[pid]['rss'] = childRss + frame.childrenProcs[pid]['vsize'] = \ + int(data["vsize"]) // 1024 + frame.childrenProcs[pid]['statm_rss'] = \ + (int(data["statm_rss"]) \ + * resource.getpagesize()) // 1024 + frame.childrenProcs[pid]['statm_size'] = \ + (int(data["statm_size"]) * \ + resource.getpagesize()) // 1024 + else: + frame.childrenProcs[pid] = \ + {'name': data['name'], + 'rss_page': int(data["rss"]), + 'rss': (int(data["rss"]) * resource.getpagesize()) // 1024, + 'vsize': int(data["vsize"]) // 1024, + 'state': data['state'], + # statm reports in pages (~ 4kB) + # same as VmRss in /proc/[pid]/status (in KB) + 'statm_rss': (int(data["statm_rss"]) * \ + resource.getpagesize()) // 1024, + 'statm_size': (int(data["statm_size"]) * \ + resource.getpagesize()) // 1024, + 'cmd_line': data["cmd_line"], + 'start_time': seconds} # pylint: disable=broad-except except Exception as e: log.warning( 'Failure with pid rss update due to: %s at %s', e, traceback.extract_tb(sys.exc_info()[2])) - + # convert bytes to KB rss = (rss * resource.getpagesize()) // 1024 vsize = int(vsize/1024) @@ -285,6 +386,8 @@ def rssUpdate(self, frames): 
frame.runFrame.attributes["pcpu"] = str(pcpu) + self.__updateGpuAndLlu(frame) + # Store the current data for the next check self.__pidHistory = pidData @@ -299,7 +402,7 @@ def getLoadAvg(self): loadAvgFile = open(rqd.rqconstants.PATH_LOADAVG, "r") loadAvg = int(float(loadAvgFile.read().split()[0]) * 100) if self.__enabledHT(): - loadAvg = loadAvg // 2 + loadAvg = loadAvg // self.__getHyperthreadingMultiplier() loadAvg = loadAvg + rqd.rqconstants.LOAD_MODIFIER loadAvg = max(loadAvg, 0) return loadAvg @@ -315,44 +418,71 @@ def getBootTime(self): return int(line.split()[1]) return 0 + @rqd.rqutil.Memoize + def getGpuCount(self): + """Returns the total gpu's on the machine""" + return self.__getGpuValues()['count'] + @rqd.rqutil.Memoize def getGpuMemoryTotal(self): """Returns the total gpu memory in kb for CUE_GPU_MEMORY""" return self.__getGpuValues()['total'] - def getGpuMemory(self): + def getGpuMemoryFree(self): """Returns the available gpu memory in kb for CUE_GPU_MEMORY""" return self.__getGpuValues()['free'] + def getGpuMemoryUsed(self, unitId): + """Returns the available gpu memory in kb for CUE_GPU_MEMORY""" + usedMemory = self.__getGpuValues()['used'] + return usedMemory[unitId] if unitId in usedMemory else 0 + # pylint: disable=attribute-defined-outside-init + def __resetGpuResults(self): + self.gpuResults = {'count': 0, 'total': 0, 'free': 0, 'used': {}, 'updated': 0} + def __getGpuValues(self): if not hasattr(self, 'gpuNotSupported'): if not hasattr(self, 'gpuResults'): - self.gpuResults = {'total': 0, 'free': 0, 'updated': 0} + self.__resetGpuResults() if not rqd.rqconstants.ALLOW_GPU: self.gpuNotSupported = True return self.gpuResults - if self.gpuResults['updated'] > time.time() - 60: + if self.gpuResults['updated'] > int(time.time()) - 60: return self.gpuResults try: - # /shots/spi/home/bin/spinux1/cudaInfo - # /shots/spi/home/bin/rhel7/cudaInfo - cudaInfo = subprocess.getoutput('/usr/local/spi/rqd3/cudaInfo') - if 'There is no device supporting CUDA' 
in cudaInfo: - self.gpuNotSupported = True - else: - results = cudaInfo.splitlines()[-1].split() - # TotalMem 1023 Mb FreeMem 968 Mb - # The int(math.ceil(int(x) / 32.0) * 32) rounds up to the next multiple of 32 - self.gpuResults['total'] = ( - int(math.ceil(int(results[1]) / 32.0) * 32) * KILOBYTE) - self.gpuResults['free'] = int(results[4]) * KILOBYTE - self.gpuResults['updated'] = time.time() + nvidia_smi = subprocess.getoutput( + 'nvidia-smi --query-gpu=memory.total,memory.free,count' + ' --format=csv,noheader') + total = 0 + free = 0 + count = 0 + unitId = 0 + for line in nvidia_smi.splitlines(): + # Example "16130 MiB, 16103 MiB, 8" + # 1 MiB = 1048.576 KB + l = line.split() + unitTotal = math.ceil(int(l[0]) * 1048.576) + unitFree = math.ceil(int(l[2]) * 1048.576) + total += unitTotal + free += unitFree + count = int(l[-1]) + self.gpuResults['used'][str(unitId)] = unitTotal - unitFree + unitId += 1 + + self.gpuResults['total'] = int(total) + self.gpuResults['free'] = int(free) + self.gpuResults['count'] = count + self.gpuResults['updated'] = int(time.time()) # pylint: disable=broad-except except Exception as e: + self.gpuNotSupported = True + self.__resetGpuResults() log.warning( - 'Failed to get FreeMem from cudaInfo due to: %s at %s', + 'Failed to query nvidia-smi due to: %s at %s', e, traceback.extract_tb(sys.exc_info()[2])) + else: + self.__resetGpuResults() return self.gpuResults def __getSwapout(self): @@ -379,15 +509,17 @@ def getHostname(self): @rqd.rqutil.Memoize def getPathEnv(self): """Returns the correct path environment for the given machine""" + if rqd.rqconstants.RQD_USE_PATH_ENV_VAR: + return os.getenv('PATH') if platform.system() == 'Linux': return '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' + if platform.system() == 'Windows': + return 'C:/Windows/system32;C:/Windows;C:/Windows/System32/Wbem' return '' @rqd.rqutil.Memoize def getTempPath(self): """Returns the correct mcp path for the given machine""" - if 
platform.system() == "win32": - return win32api.GetTempPath() if os.path.isdir("/mcp/"): return "/mcp/" return '%s/' % tempfile.gettempdir() @@ -441,33 +573,50 @@ def __initMachineStats(self, pathCpuInfo=None): mcpStat = os.statvfs(self.getTempPath()) self.__renderHost.total_mcp = mcpStat.f_blocks * mcpStat.f_frsize // KILOBYTE + # Reset mappings + self.__procs_by_physid_and_coreid = {} + self.__physid_and_coreid_by_proc = {} + # Reads static information from /proc/cpuinfo with open(pathCpuInfo or rqd.rqconstants.PATH_CPUINFO, "r") as cpuinfoFile: - singleCore = {} + currCore = {} procsFound = [] for line in cpuinfoFile: - lineList = line.strip().replace("\t","").split(": ") + lineList = line.strip().replace("\t", "").split(": ") # A normal entry added to the singleCore dictionary if len(lineList) >= 2: - singleCore[lineList[0]] = lineList[1] + currCore[lineList[0]] = lineList[1] # The end of a processor block elif lineList == ['']: # Check for hyper-threading - hyperthreadingMultiplier = (int(singleCore.get('siblings', '1')) - // int(singleCore.get('cpu cores', '1'))) + hyperthreadingMultiplier = (int(currCore.get('siblings', '1')) + // int(currCore.get('cpu cores', '1'))) __totalCores += rqd.rqconstants.CORE_VALUE - if "core id" in singleCore \ - and "physical id" in singleCore \ - and not singleCore["physical id"] in procsFound: - procsFound.append(singleCore["physical id"]) + if "core id" in currCore \ + and "physical id" in currCore \ + and not currCore["physical id"] in procsFound: + procsFound.append(currCore["physical id"]) __numProcs += 1 - elif "core id" not in singleCore: + elif "core id" not in currCore: __numProcs += 1 - singleCore = {} + + if 'physical id' in currCore and 'core id' in currCore: + # Keep track of what processors are on which core on + # which physical socket. 
+ procid, physid, coreid = ( + currCore['processor'], + currCore['physical id'], + currCore['core id']) + self.__procs_by_physid_and_coreid \ + .setdefault(physid, {}) \ + .setdefault(coreid, set()).add(procid) + self.__physid_and_coreid_by_proc[procid] = physid, coreid + currCore = {} + # An entry without data elif len(lineList) == 1: - singleCore[lineList[0]] = "" + currCore[lineList[0]] = "" else: hyperthreadingMultiplier = 1 @@ -512,7 +661,7 @@ def __initMachineStats(self, pathCpuInfo=None): self.__renderHost.num_procs = __numProcs self.__renderHost.cores_per_proc = __totalCores // __numProcs - if hyperthreadingMultiplier > 1: + if hyperthreadingMultiplier >= 1: self.__renderHost.attributes['hyperthreadingMultiplier'] = str(hyperthreadingMultiplier) def getWindowsMemory(self): @@ -592,7 +741,10 @@ def updateMachineStats(self): self.__renderHost.free_swap = freeSwapMem self.__renderHost.free_mem = freeMem + cachedMem - self.__renderHost.attributes['freeGpu'] = str(self.getGpuMemory()) + self.__renderHost.num_gpus = self.getGpuCount() + self.__renderHost.total_gpu_mem = self.getGpuMemoryTotal() + self.__renderHost.free_gpu_mem = self.getGpuMemoryFree() + self.__renderHost.attributes['swapout'] = self.__getSwapout() elif platform.system() == 'Darwin': @@ -604,6 +756,9 @@ def updateMachineStats(self): self.__renderHost.free_mcp = TEMP_DEFAULT self.__renderHost.free_swap = int(stats.ullAvailPageFile / 1024) self.__renderHost.free_mem = int(stats.ullAvailPhys / 1024) + self.__renderHost.num_gpus = self.getGpuCount() + self.__renderHost.total_gpu_mem = self.getGpuMemoryTotal() + self.__renderHost.free_gpu_mem = self.getGpuMemoryFree() # Updates dynamic information self.__renderHost.load = self.getLoadAvg() @@ -641,44 +796,79 @@ def getBootReport(self): def __enabledHT(self): return 'hyperthreadingMultiplier' in self.__renderHost.attributes - def setupHT(self): + def __getHyperthreadingMultiplier(self): + return 
int(self.__renderHost.attributes['hyperthreadingMultiplier']) + + def setupTaskset(self): """ Setup rqd for hyper-threading """ + self.__coreInfo.reserved_cores.clear() - if self.__enabledHT(): - self.__tasksets = set(range(self.__coreInfo.total_cores // 100)) + def setupGpu(self): + """ Setup rqd for Gpus """ + self.__gpusets = set(range(self.getGpuCount())) - def reserveHT(self, reservedCores): + def reserveHT(self, frameCores): """ Reserve cores for use by taskset taskset -c 0,1,8,9 COMMAND Not thread save, use with locking. - @type reservedCores: int - @param reservedCores: The total physical cores reserved by the frame. + @type frameCores: int + @param frameCores: The total physical cores reserved by the frame. @rtype: string @return: The cpu-list for taskset -c """ - if not self.__enabledHT(): - return None - - if reservedCores % 100: - log.debug('Taskset: Can not reserveHT with fractional cores') + if frameCores % 100: + log.warning('Taskset: Can not reserveHT with fractional cores') return None + log.warning('Taskset: Requesting reserve of %d', (frameCores // 100)) + + # Look for the most idle physical cpu. + # Prefer to assign cores from the same physical cpu. + # Spread different frames around on different physical cpus. 
+ avail_cores = {} + avail_cores_count = 0 + reserved_cores = self.__coreInfo.reserved_cores + + for physid, cores in self.__procs_by_physid_and_coreid.items(): + for coreid in cores.keys(): + if int(physid) in reserved_cores and \ + int(coreid) in reserved_cores[int(physid)].coreid: + continue + avail_cores.setdefault(physid, set()).add(coreid) + avail_cores_count += 1 - log.debug('Taskset: Requesting reserve of %d', (reservedCores // 100)) + remaining_cores = frameCores / 100 - if len(self.__tasksets) < reservedCores // 100: + if avail_cores_count < remaining_cores: err = ('Not launching, insufficient hyperthreading cores to reserve ' - 'based on reservedCores') + 'based on frameCores (%s < %s)') \ + % (avail_cores_count, remaining_cores) log.critical(err) raise rqd.rqexceptions.CoreReservationFailureException(err) tasksets = [] - for _ in range(reservedCores // 100): - core = self.__tasksets.pop() - tasksets.append(str(core)) - tasksets.append(str(core + self.__coreInfo.total_cores // 100)) - log.debug('Taskset: Reserving cores - %s', ','.join(tasksets)) + for physid, cores in sorted( + avail_cores.items(), + # Return the physical socket that has + # the most idle cores first. + key=lambda tup: len(tup[1]), + reverse=True): + + while remaining_cores > 0 and len(cores) > 0: + coreid = cores.pop() + # Give all the hyperthreads on this core. + # This counts as one core. + reserved_cores[int(physid)].coreid.extend([int(coreid)]) + remaining_cores -= 1 + + for procid in self.__procs_by_physid_and_coreid[physid][coreid]: + tasksets.append(procid) + + if remaining_cores == 0: + break + + log.warning('Taskset: Reserving procs - %s', ','.join(tasksets)) return ','.join(tasksets) @@ -691,10 +881,47 @@ def releaseHT(self, reservedHT): @param: The cpu-list used for taskset to release. ex: '0,8,1,9' """ - if not self.__enabledHT(): - return None - log.debug('Taskset: Releasing cores - %s', reservedHT) + + # Remove these cores from the reserved set. 
+ # Silently ignore any that weren't really reserved or + # aren't valid core identities. + reserved_cores = self.__coreInfo.reserved_cores for core in reservedHT.split(','): - if int(core) < self.__coreInfo.total_cores // 100: - self.__tasksets.add(int(core)) + physical_id_str, core_id_str = self.__physid_and_coreid_by_proc.get(core) + physical_id = int(physical_id_str) + core_id = int(core_id_str) + + if physical_id in reserved_cores and core_id in reserved_cores[physical_id].coreid: + reserved_cores[physical_id].coreid.remove(core_id) + if len(reserved_cores[physical_id].coreid) == 0: + del reserved_cores[physical_id] + + def reserveGpus(self, reservedGpus): + """ Reserve gpus + @type reservedGpus: int + @param reservedGpus: The total gpus reserved by the frame. + @rtype: string + @return: The gpu-list. ex: '0,1,8,9' + """ + if len(self.__gpusets) < reservedGpus: + err = 'Not launching, insufficient GPUs to reserve based on reservedGpus' + log.critical(err) + raise rqd.rqexceptions.CoreReservationFailureException(err) + + gpusets = [] + for _ in range(reservedGpus): + gpu = self.__gpusets.pop() + gpusets.append(str(gpu)) + + return ','.join(gpusets) + + def releaseGpus(self, reservedGpus): + """ Release gpus + @type: string + @param: The gpu-list to release. 
ex: '0,1,8,9' + """ + log.debug('GPU set: Releasing gpu - %s', reservedGpus) + for gpu in reservedGpus.split(','): + if int(gpu) < self.getGpuCount(): + self.__gpusets.add(int(gpu)) diff --git a/rqd/rqd/rqnetwork.py b/rqd/rqd/rqnetwork.py index da5bee5cc..6bb26cfce 100644 --- a/rqd/rqd/rqnetwork.py +++ b/rqd/rqd/rqnetwork.py @@ -23,8 +23,10 @@ from builtins import object from concurrent import futures from random import shuffle +import abc import atexit -import logging as log +import datetime +import logging import os import platform import subprocess @@ -36,10 +38,14 @@ import rqd.compiled_proto.report_pb2_grpc import rqd.compiled_proto.rqd_pb2_grpc import rqd.rqconstants +import rqd.rqexceptions import rqd.rqdservicers import rqd.rqutil +log = logging.getLogger(__name__) + + class RunningFrame(object): """Represents a running frame.""" @@ -62,11 +68,15 @@ def __init__(self, rqCore, runFrame): self.vsize = 0 self.maxVsize = 0 + self.usedGpuMemory = 0 + self.maxUsedGpuMemory = 0 + self.realtime = 0 self.utime = 0 self.stime = 0 self.lluTime = 0 + self.childrenProcs = {} def runningFrameInfo(self): """Returns the RunningFrameInfo object""" @@ -84,23 +94,63 @@ def runningFrameInfo(self): max_vsize=self.maxVsize, vsize=self.vsize, attributes=self.runFrame.attributes, - llu_time=self.lluTime + llu_time=self.lluTime, + num_gpus=self.runFrame.num_gpus, + max_used_gpu_memory=self.maxUsedGpuMemory, + used_gpu_memory=self.usedGpuMemory, + children=self._serializeChildrenProcs() ) return runningFrameInfo + def _serializeChildrenProcs(self): + """ Collect and serialize children proc stats for protobuf + Convert to Kilobytes: + * RSS (Resident set size) measured in pages + * Statm size measured in pages + * Stat size measured in bytes + + :param data: dictionary + :return: serialized children proc host stats + :rtype: rqd.compiled_proto.report_pb2.ChildrenProcStats + """ + childrenProc = rqd.compiled_proto.report_pb2.ChildrenProcStats() + for proc, values in 
self.childrenProcs.items(): + procStats = rqd.compiled_proto.report_pb2.ProcStats() + procStatFile = rqd.compiled_proto.report_pb2.Stat() + procStatmFile = rqd.compiled_proto.report_pb2.Statm() + + procStatFile.pid = proc + procStatFile.name = values["name"] if values["name"] else "" + procStatFile.state = values["state"] + procStatFile.vsize = values["vsize"] + procStatFile.rss = values["rss"] + + procStatmFile.size = values["statm_size"] + procStatmFile.rss = values["statm_rss"] + # pylint: disable=no-member + procStats.stat.CopyFrom(procStatFile) + procStats.statm.CopyFrom(procStatmFile) + procStats.cmdline = " ".join(values["cmd_line"]) + + startTime = datetime.datetime.now() - datetime.timedelta(seconds=values["start_time"]) + procStats.start_time = startTime.strftime("%Y-%m-%d %H:%M%S") + childrenProc.children.extend([procStats]) + # pylint: enable=no-member + return childrenProc + def status(self): """Returns the status of the frame""" return self.runningFrameInfo() def kill(self, message=""): """Kills the frame""" - log.info("Request recieved: kill") + log.info("Request received: kill") if self.frameAttendantThread is None: log.warning( "Kill requested before frameAttendantThread is created for: %s", self.frameId) - elif self.frameAttendantThread.isAlive() and self.pid is None: + elif self.frameAttendantThread.is_alive() and self.pid is None: log.warning("Kill requested before pid is available for: %s", self.frameId) - elif self.frameAttendantThread.isAlive(): + elif self.frameAttendantThread.is_alive(): # pylint: disable=broad-except try: if not self.killMessage and message: @@ -112,10 +162,13 @@ def kill(self, message=""): else: os.killpg(self.pid, rqd.rqconstants.KILL_SIGNAL) finally: + log.warning( + "kill() successfully killed frameId=%s pid=%s", self.frameId, self.pid) rqd.rqutil.permissionsLow() except OSError as e: log.warning( "kill() tried to kill a non-existant pid for: %s Error: %s", self.frameId, e) + # pylint: disable=broad-except except 
Exception as e: log.warning("kill() encountered an unknown error: %s", e) else: @@ -177,8 +230,8 @@ def serveForever(self): def shutdown(self): """Stops the gRPC server.""" - log.info('Stopping grpc server.') - self.server.stop(0) + log.warning('Stopping grpc server.') + self.server.stop(10) def stayAlive(self): """Runs forever until killed.""" @@ -204,8 +257,9 @@ def start_grpc(self): def stopGrpc(self): """Stops the gRPC server.""" - self.grpcServer.shutdown() - del self.grpcServer + if self.grpcServer: + self.grpcServer.shutdown() + del self.grpcServer def closeChannel(self): """Closes the gRPC channel.""" @@ -216,11 +270,24 @@ def closeChannel(self): def __getChannel(self): # TODO(bcipriano) Add support for the facility nameserver or drop this concept? (Issue #152) if self.channel is None: - cuebots = rqd.rqconstants.CUEBOT_HOSTNAME.split() + # create interceptors + interceptors = ( + RetryOnRpcErrorClientInterceptor( + max_attempts=4, + sleeping_policy=ExponentialBackoff(init_backoff_ms=100, + max_backoff_ms=1600, + multiplier=2), + status_for_retry=(grpc.StatusCode.UNAVAILABLE,), + ), + ) + + cuebots = rqd.rqconstants.CUEBOT_HOSTNAME.strip().split() + if len(cuebots) == 0: + raise rqd.rqexceptions.RqdException("CUEBOT_HOSTNAME is empty") shuffle(cuebots) - for cuebotHostname in cuebots: - self.channel = grpc.insecure_channel('%s:%s' % (cuebotHostname, - rqd.rqconstants.CUEBOT_GRPC_PORT)) + self.channel = grpc.insecure_channel('%s:%s' % (cuebots[0], + rqd.rqconstants.CUEBOT_GRPC_PORT)) + self.channel = grpc.intercept_channel(self.channel, *interceptors) atexit.register(self.closeChannel) def __getReportStub(self): @@ -246,3 +313,94 @@ def reportRunningFrameCompletion(self, report): request = rqd.compiled_proto.report_pb2.RqdReportRunningFrameCompletionRequest( frame_complete_report=report) stub.ReportRunningFrameCompletion(request, timeout=rqd.rqconstants.RQD_TIMEOUT) + + +# Python 2/3 compatible implementation of ABC +ABC = abc.ABCMeta('ABC', (object,), 
{'__slots__': ()}) + + +class SleepingPolicy(ABC): + """ + Implement policy for sleeping between API retries + """ + @abc.abstractmethod + def sleep(self, attempt): + """ + How long to sleep in milliseconds. + :param attempt: the number of attempt (starting from zero) + """ + assert attempt >= 0 + + +class ExponentialBackoff(SleepingPolicy): + """ + Implement policy that will increase retry period by exponentially in every try + """ + def __init__(self, + init_backoff_ms, + max_backoff_ms, + multiplier=2): + """ + inputs in ms + """ + self._init_backoff = init_backoff_ms + self._max_backoff = max_backoff_ms + self._multiplier = multiplier + + def sleep(self, attempt): + sleep_time_ms = min( + self._init_backoff * self._multiplier ** attempt, + self._max_backoff + ) + time.sleep(sleep_time_ms / 1000.0) + + +class RetryOnRpcErrorClientInterceptor( + grpc.UnaryUnaryClientInterceptor, + grpc.StreamUnaryClientInterceptor +): + """ + Implement Client/Stream interceptors for GRPC channels to retry + calls that failed with retry-able states. 
This is required for + handling server interruptions that are not automatically handled + by grpc.insecure_channel + """ + def __init__(self, + max_attempts, + sleeping_policy, + status_for_retry=None): + self._max_attempts = max_attempts + self._sleeping_policy = sleeping_policy + self._retry_statuses = status_for_retry + + def _intercept_call(self, continuation, client_call_details, + request_or_iterator): + for attempt in range(self._max_attempts): + try: + return continuation(client_call_details, + request_or_iterator) + except grpc.RpcError as response: + # Return if it was last attempt + if attempt == (self._max_attempts - 1): + return response + + # If status code is not in retryable status codes + # pylint: disable=no-member + if self._retry_statuses \ + and hasattr(response, 'code') \ + and response.code() \ + not in self._retry_statuses: + return response + + self._sleeping_policy.sleep(attempt) + + def intercept_unary_unary(self, continuation, client_call_details, + request): + return self._intercept_call(continuation, client_call_details, + request) + + def intercept_stream_unary( + self, continuation, client_call_details, request_iterator + ): + return self._intercept_call(continuation, client_call_details, + request_iterator) diff --git a/rqd/rqd/rqnimby.py b/rqd/rqd/rqnimby.py index ad006fca5..4784ba90f 100644 --- a/rqd/rqd/rqnimby.py +++ b/rqd/rqd/rqnimby.py @@ -20,18 +20,53 @@ from __future__ import print_function from __future__ import division +from abc import abstractmethod +import abc import os import select -import time import signal import threading -import logging as log +import time +import logging import rqd.rqconstants import rqd.rqutil -class Nimby(threading.Thread): +log = logging.getLogger(__name__) + +# compatible with Python 2 and 3: +ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + + +class NimbyFactory(object): + """ Factory to handle Linux/Windows platforms """ + @staticmethod + def getNimby(rqCore): + """ assign platform 
dependent Nimby instance """ + if rqd.rqconstants.USE_NIMBY_PYNPUT: + try: + # DISPLAY is required to import pynput internals + # and it's not automatically set depending on the + # environment rqd is running in + if "DISPLAY" not in os.environ: + os.environ['DISPLAY'] = ":0" + # pylint: disable=unused-import, import-error, unused-variable, import-outside-toplevel + import pynput + # pylint: disable=broad-except + except Exception: + # Ideally ImportError could be used here, but pynput + # can throw other kinds of exception while trying to + # access runpy components + log.exception("Failed to import pynput, falling back to Select module") + # Still enabling the application start as hosts can be manually locked + # using the API/GUI + return NimbyNop(rqCore) + return NimbyPynput(rqCore) + return NimbySelect(rqCore) + + +class Nimby(threading.Thread, ABC): """Nimby == Not In My Back Yard. If enabled, nimby will lock and kill all frames running on the host if keyboard or mouse activity is detected. 
If sufficient idle time has @@ -45,7 +80,6 @@ def __init__(self, rqCore): threading.Thread.__init__(self) self.rqCore = rqCore - self.locked = False self.active = False @@ -54,6 +88,8 @@ def __init__(self, rqCore): self.thread = None + self.interaction_detected = False + signal.signal(signal.SIGINT, self.signalHandler) def signalHandler(self, sig, frame): @@ -66,7 +102,7 @@ def lockNimby(self): """Activates the nimby lock, calls lockNimby() in rqcore""" if self.active and not self.locked: self.locked = True - log.info("Locked nimby") + log.warning("Locked nimby") self.rqCore.onNimbyLock() def unlockNimby(self, asOf=None): @@ -74,52 +110,101 @@ def unlockNimby(self, asOf=None): @param asOf: Time when idle state began, if known.""" if self.locked: self.locked = False - log.info("Unlocked nimby") + log.warning("Unlocked nimby") self.rqCore.onNimbyUnlock(asOf=asOf) - def _openEvents(self): - """Opens the /dev/input/event* files so nimby can monitor them""" - self._closeEvents() + def run(self): + """Starts the Nimby thread""" + self.active = True + self.startListener() + self.unlockedIdle() - rqd.rqutil.permissionsHigh() - try: - for device in os.listdir("/dev/input/"): - if device.startswith("event") or device.startswith("mice"): - log.debug("Found device: %s", device) - try: - self.fileObjList.append(open("/dev/input/%s" % device, "rb")) - except IOError as e: - # Bad device found - log.debug("IOError: Failed to open %s, %s", "/dev/input/%s" % device, e) - finally: - rqd.rqutil.permissionsLow() + def stop(self): + """Stops the Nimby thread""" + log.warning("Stop Nimby") + if self.thread: + self.thread.cancel() + self.active = False + self.stopListener() + self.unlockNimby() - def _closeEvents(self): - """Closes the /dev/input/event* files""" - log.debug("_closeEvents") - if self.fileObjList: - for fileObj in self.fileObjList: - try: - fileObj.close() - # pylint: disable=broad-except - except Exception: - pass - self.fileObjList = [] + @abstractmethod + def 
startListener(self): + """ start listening """ + + @abstractmethod + def stopListener(self): + """ stop listening """ + + @abstractmethod + def lockedInUse(self): + """Nimby State: Machine is in use, host is locked, + waiting for sufficient idle time""" + + @abstractmethod + def lockedIdle(self): + """Nimby State: Machine is idle, + waiting for sufficient idle time to unlock""" + + @abstractmethod + def unlockedIdle(self): + """Nimby State: Machine is idle, host is unlocked, + waiting for user activity""" + + @abstractmethod + def isNimbyActive(self): + """ Check if user is active + :return: boolean if events are logged and Nimby is active + """ + + +class NimbySelect(Nimby): + """ Nimby Linux """ + def startListener(self): + """ start listening """ + + def stopListener(self): + self.closeEvents() def lockedInUse(self): """Nimby State: Machine is in use, host is locked, waiting for sufficient idle time""" - log.debug("lockedInUse") - self._openEvents() + log.warning("lockedInUse") + self.openEvents() try: self.results = select.select(self.fileObjList, [], [], 5) # pylint: disable=broad-except - except Exception: - pass + except Exception as e: + log.warning(e) if self.active and self.results[0] == []: self.lockedIdle() elif self.active: - self._closeEvents() + self.closeEvents() + self.thread = threading.Timer(rqd.rqconstants.CHECK_INTERVAL_LOCKED, + self.lockedInUse) + self.thread.start() + + def unlockedIdle(self): + """Nimby State: Machine is idle, host is unlocked, + waiting for user activity""" + log.warning("UnlockedIdle Nimby") + while self.active and \ + self.results[0] == [] and \ + self.rqCore.machine.isNimbySafeToRunJobs(): + try: + self.openEvents() + self.results = select.select(self.fileObjList, [], [], 5) + # pylint: disable=broad-except + except Exception: + log.exception("failed to execute nimby check event") + if not self.rqCore.machine.isNimbySafeToRunJobs(): + log.warning("memory threshold has been exceeded, locking nimby") + self.active = True 
+ + if self.active: + log.warning("Is active, locking Nimby") + self.closeEvents() + self.lockNimby() self.thread = threading.Timer(rqd.rqconstants.CHECK_INTERVAL_LOCKED, self.lockedInUse) self.thread.start() @@ -127,21 +212,99 @@ def lockedInUse(self): def lockedIdle(self): """Nimby State: Machine is idle, waiting for sufficient idle time to unlock""" - self._openEvents() + log.warning("lockedIdle") + self.openEvents() waitStartTime = time.time() try: self.results = select.select(self.fileObjList, [], [], rqd.rqconstants.MINIMUM_IDLE) # pylint: disable=broad-except - except Exception: - pass + except Exception as e: + log.warning(e) if self.active and self.results[0] == [] and \ - self.rqCore.machine.isNimbySafeToUnlock(): - self._closeEvents() + self.rqCore.machine.isNimbySafeToUnlock(): + self.closeEvents() self.unlockNimby(asOf=waitStartTime) self.unlockedIdle() elif self.active: - self._closeEvents() + self.closeEvents() + self.thread = threading.Timer(rqd.rqconstants.CHECK_INTERVAL_LOCKED, + self.lockedInUse) + self.thread.start() + + def openEvents(self): + """Opens the /dev/input/event* files so nimby can monitor them""" + self.closeEvents() + + rqd.rqutil.permissionsHigh() + try: + for device in os.listdir("/dev/input/"): + if device.startswith("event") or device.startswith("mice"): + try: + self.fileObjList.append(open("/dev/input/%s" % device, "rb")) + except IOError: + # Bad device found + log.exception("IOError: Failed to open /dev/input/%s", device) + finally: + rqd.rqutil.permissionsLow() + + def closeEvents(self): + """Closes the /dev/input/event* files""" + log.info("closeEvents") + if self.fileObjList: + for fileObj in self.fileObjList: + try: + fileObj.close() + # pylint: disable=broad-except + except Exception: + pass + self.fileObjList = [] + + def isNimbyActive(self): + """ Check if user is active + :return: boolean if events are logged and Nimby is active + """ + return self.active and self.results[0] == [] + + +class NimbyPynput(Nimby): + 
""" Nimby using pynput """ + def __init__(self, rqCore): + Nimby.__init__(self, rqCore) + + # pylint: disable=unused-import, import-error, import-outside-toplevel + import pynput + self.mouse_listener = pynput.mouse.Listener( + on_move=self.on_interaction, + on_click=self.on_interaction, + on_scroll=self.on_interaction) + self.keyboard_listener = pynput.keyboard.Listener(on_press=self.on_interaction) + + # pylint: disable=unused-argument + def on_interaction(self, *args): + """ interaction detected """ + self.interaction_detected = True + + def startListener(self): + """ start listening """ + self.mouse_listener.start() + self.keyboard_listener.start() + + def stopListener(self): + """ stop listening """ + self.mouse_listener.stop() + self.keyboard_listener.stop() + + def lockedInUse(self): + """Nimby State: Machine is in use, host is locked, + waiting for sufficient idle time""" + self.interaction_detected = False + + time.sleep(5) + if self.active and not self.interaction_detected: + self.lockedIdle() + elif self.active: + self.thread = threading.Timer(rqd.rqconstants.CHECK_INTERVAL_LOCKED, self.lockedInUse) self.thread.start() @@ -149,35 +312,74 @@ def lockedIdle(self): def unlockedIdle(self): """Nimby State: Machine is idle, host is unlocked, waiting for user activity""" + log.warning("unlockedIdle") while self.active and \ - self.results[0] == [] and \ - self.rqCore.machine.isNimbySafeToRunJobs(): - try: - self._openEvents() - self.results = select.select(self.fileObjList, [], [], 5) - # pylint: disable=broad-except - except Exception: - pass + not self.interaction_detected and \ + self.rqCore.machine.isNimbySafeToRunJobs(): + + time.sleep(5) + if not self.rqCore.machine.isNimbySafeToRunJobs(): log.warning("memory threshold has been exceeded, locking nimby") self.active = True if self.active: - self._closeEvents() + log.warning("Is active, lock Nimby") self.lockNimby() self.thread = threading.Timer(rqd.rqconstants.CHECK_INTERVAL_LOCKED, self.lockedInUse) 
self.thread.start() - def run(self): - """Starts the Nimby thread""" - self.active = True - self.unlockedIdle() + def lockedIdle(self): + """Nimby State: Machine is idle, + waiting for sufficient idle time to unlock""" + wait_start_time = time.time() - def stop(self): - """Stops the Nimby thread""" - if self.thread: - self.thread.cancel() - self.active = False - self._closeEvents() - self.unlockNimby() + time.sleep(rqd.rqconstants.MINIMUM_IDLE) + + if self.active and not self.interaction_detected and \ + self.rqCore.machine.isNimbySafeToUnlock(): + log.warning("Start wait time: %s", wait_start_time) + self.unlockNimby(asOf=wait_start_time) + self.unlockedIdle() + elif self.active: + + self.thread = threading.Timer(rqd.rqconstants.CHECK_INTERVAL_LOCKED, + self.lockedInUse) + self.thread.start() + + def isNimbyActive(self): + """ Check if user is active + :return: boolean if events are logged and Nimby is active + """ + return not self.active and self.interaction_detected + + +class NimbyNop(Nimby): + """Nimby option for when no option is available""" + def __init__(self, rqCore): + Nimby.__init__(self, rqCore) + self.warning_msg() + + @staticmethod + def warning_msg(): + """Just a helper to avoid duplication""" + log.warning("Using Nimby nop! 
Something went wrong on nimby's initialization.") + + def startListener(self): + self.warning_msg() + + def stopListener(self): + self.warning_msg() + + def lockedInUse(self): + self.warning_msg() + + def unlockedIdle(self): + self.warning_msg() + + def lockedIdle(self): + self.warning_msg() + + def isNimbyActive(self): + return False diff --git a/rqd/rqd/rqswap.py b/rqd/rqd/rqswap.py index eae7259dd..733750702 100644 --- a/rqd/rqd/rqswap.py +++ b/rqd/rqd/rqswap.py @@ -24,12 +24,13 @@ from builtins import str from builtins import range from builtins import object -import logging as log +import logging import re import threading import time +log = logging.getLogger(__name__) PGPGOUT_RE = re.compile(r"^pgpgout (\d+)") diff --git a/rqd/rqd/rqutil.py b/rqd/rqd/rqutil.py index 0253dcff0..da627a2d5 100644 --- a/rqd/rqd/rqutil.py +++ b/rqd/rqd/rqutil.py @@ -24,7 +24,7 @@ from builtins import str from builtins import object import functools -import logging as log +import logging import os import platform import socket @@ -40,6 +40,7 @@ PERMISSIONS = threading.Lock() HIGH_PERMISSION_GROUPS = os.getgroups() +log = logging.getLogger(__name__) class Memoize(object): diff --git a/rqd/setup.py b/rqd/setup.py index f3151326f..c1f7b9cc4 100644 --- a/rqd/setup.py +++ b/rqd/setup.py @@ -43,6 +43,11 @@ 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', + 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', ], packages=find_packages(), entry_points={ diff --git a/rqd/tests/cuerqd_tests.py b/rqd/tests/cuerqd_tests.py index d1ef540a1..7fc574704 100644 --- a/rqd/tests/cuerqd_tests.py +++ b/rqd/tests/cuerqd_tests.py @@ -179,6 +179,7 @@ def test_launchFrame(self, stubMock, frameStubMock): runFrame.frame_id = "FD1S3I154O646UGSNN" runFrameRequest = 
rqd.compiled_proto.rqd_pb2.RqdStaticLaunchFrameRequest(run_frame=runFrame) rqdHost = rqd.cuerqd.RqdHost(RQD_HOSTNAME) + rqdHost.active = False rqdHost.launchFrame(runFrame) diff --git a/rqd/tests/rqconstants_tests.py b/rqd/tests/rqconstants_tests.py index 0df71790c..46da55f39 100644 --- a/rqd/tests/rqconstants_tests.py +++ b/rqd/tests/rqconstants_tests.py @@ -41,7 +41,6 @@ from .rqmachine_tests import ( CPUINFO, - CUDAINFO, LOADAVG_LOW_USAGE, MEMINFO_MODERATE_USAGE, PROC_STAT, @@ -78,7 +77,7 @@ def decorator(*args, **kwargs): return decorator -@mock.patch("subprocess.getoutput", new=mock.MagicMock(return_value=CUDAINFO)) +@mock.patch("subprocess.getoutput", new=mock.MagicMock(return_value="")) @mock.patch.object( rqd.rqutil.Memoize, "isCached", new=mock.MagicMock(return_value=False) ) diff --git a/rqd/tests/rqcore_tests.py b/rqd/tests/rqcore_tests.py index 90ef4618b..256b96b2b 100644 --- a/rqd/tests/rqcore_tests.py +++ b/rqd/tests/rqcore_tests.py @@ -40,7 +40,7 @@ class RqCoreTests(unittest.TestCase): - @mock.patch('rqd.rqnimby.Nimby', autospec=True) + @mock.patch('rqd.rqnimby.NimbySelect', autospec=True) @mock.patch('rqd.rqnetwork.Network', autospec=True) @mock.patch('rqd.rqmachine.Machine', autospec=True) def setUp(self, machineMock, networkMock, nimbyMock): @@ -206,7 +206,7 @@ def test_deleteFrame(self): def test_killAllFrame(self): frameAttendantThread = mock.MagicMock() - frameAttendantThread.isAlive.return_value = False + frameAttendantThread.is_alive.return_value = False frame1Id = 'frame1' frame2Id = 'frame2' frame3Id = 'frame3' @@ -230,7 +230,7 @@ def test_killAllFrame(self): def test_killAllFrameIgnoreNimby(self): frameAttendantThread = mock.MagicMock() - frameAttendantThread.isAlive.return_value = False + frameAttendantThread.is_alive.return_value = False frame1Id = 'frame1' frame2Id = 'frame2' frame1 = rqd.rqnetwork.RunningFrame( @@ -286,6 +286,7 @@ def test_launchFrame(self, frameThreadMock): self.machineMock.return_value.state = 
rqd.compiled_proto.host_pb2.UP self.nimbyMock.return_value.locked = False frame = rqd.compiled_proto.rqd_pb2.RunFrame(uid=22, num_cores=10) + rqd.rqconstants.OVERRIDE_NIMBY = None self.rqcore.launchFrame(frame) @@ -314,7 +315,7 @@ def test_launchFrameOnNimbyHost(self, frameThreadMock): frame = rqd.compiled_proto.rqd_pb2.RunFrame(uid=22, num_cores=10) frameIgnoreNimby = rqd.compiled_proto.rqd_pb2.RunFrame( uid=22, num_cores=10, ignore_nimby=True) - self.rqcore.nimby = mock.create_autospec(rqd.rqnimby.Nimby) + self.rqcore.nimby = mock.create_autospec(rqd.rqnimby.NimbySelect) self.rqcore.nimby.locked = True with self.assertRaises(rqd.rqexceptions.CoreReservationFailureException): @@ -331,6 +332,7 @@ def test_launchDuplicateFrame(self): frameId = 'arbitrary-frame-id' self.rqcore.storeFrame(frameId, rqd.compiled_proto.rqd_pb2.RunFrame(frame_id=frameId)) frameToLaunch = rqd.compiled_proto.rqd_pb2.RunFrame(frame_id=frameId) + rqd.rqconstants.OVERRIDE_NIMBY = None with self.assertRaises(rqd.rqexceptions.DuplicateFrameViolationException): self.rqcore.launchFrame(frameToLaunch) @@ -565,6 +567,7 @@ def setUp(self): @mock.patch('platform.system', new=mock.Mock(return_value='Linux')) @mock.patch('tempfile.gettempdir') + @mock.patch('rqd.rqcore.pipe_to_file', new=mock.MagicMock()) def test_runLinux(self, getTempDirMock, permsUser, timeMock, popenMock): # mkdirMock, openMock, # given currentTime = 1568070634.3 @@ -593,6 +596,7 @@ def test_runLinux(self, getTempDirMock, permsUser, timeMock, popenMock): # mkdir rqCore.machine.isDesktop.return_value = True rqCore.machine.getHostInfo.return_value = renderHost rqCore.nimby.locked = False + children = rqd.compiled_proto.report_pb2.ChildrenProcStats() runFrame = rqd.compiled_proto.rqd_pb2.RunFrame( frame_id=frameId, @@ -600,7 +604,8 @@ def test_runLinux(self, getTempDirMock, permsUser, timeMock, popenMock): # mkdir frame_name=frameName, uid=frameUid, user_name=frameUsername, - log_dir=logDir) + log_dir=logDir, + children=children) 
frameInfo = rqd.rqnetwork.RunningFrame(rqCore, runFrame) # when @@ -628,14 +633,12 @@ def test_runLinux(self, getTempDirMock, permsUser, timeMock, popenMock): # mkdir self.assertTrue(os.path.exists(logDir)) self.assertTrue(os.path.isfile(logFile)) _, kwargs = popenMock.call_args - self.assertEqual(logFile, kwargs['stdout'].name) - self.assertEqual(logFile, kwargs['stderr'].name) rqCore.network.reportRunningFrameCompletion.assert_called_with( rqd.compiled_proto.report_pb2.FrameCompleteReport( host=renderHost, frame=rqd.compiled_proto.report_pb2.RunningFrameInfo( - job_name=jobName, frame_id=frameId, frame_name=frameName), + job_name=jobName, frame_id=frameId, frame_name=frameName, children=children), exit_status=returnCode)) # TODO(bcipriano) Re-enable this test once Windows is supported. The main sticking point here @@ -665,6 +668,7 @@ def disabled__test_runWindows(self, permsUser, timeMock, popenMock): rqCore.machine.isDesktop.return_value = True rqCore.machine.getHostInfo.return_value = renderHost rqCore.nimby.locked = False + children = rqd.compiled_proto.report_pb2.ChildrenProcStats() runFrame = rqd.compiled_proto.rqd_pb2.RunFrame( frame_id=frameId, @@ -674,6 +678,7 @@ def disabled__test_runWindows(self, permsUser, timeMock, popenMock): uid=frameUid, user_name=frameUsername, log_dir=logDir, + children=children, environment={'CUE_IFRAME': '2000'}) frameInfo = rqd.rqnetwork.RunningFrame(rqCore, runFrame) @@ -693,7 +698,7 @@ def disabled__test_runWindows(self, permsUser, timeMock, popenMock): rqd.compiled_proto.report_pb2.FrameCompleteReport( host=renderHost, frame=rqd.compiled_proto.report_pb2.RunningFrameInfo( - job_name=jobName, frame_id=frameId, frame_name=frameName), + job_name=jobName, frame_id=frameId, frame_name=frameName, children=children), exit_status=returnCode)) @mock.patch('platform.system', new=mock.Mock(return_value='Darwin')) @@ -726,6 +731,7 @@ def test_runDarwin(self, getTempDirMock, permsUser, timeMock, popenMock): 
rqCore.machine.isDesktop.return_value = True rqCore.machine.getHostInfo.return_value = renderHost rqCore.nimby.locked = False + children = rqd.compiled_proto.report_pb2.ChildrenProcStats() runFrame = rqd.compiled_proto.rqd_pb2.RunFrame( frame_id=frameId, @@ -733,7 +739,8 @@ def test_runDarwin(self, getTempDirMock, permsUser, timeMock, popenMock): frame_name=frameName, uid=frameUid, user_name=frameUsername, - log_dir=logDir) + log_dir=logDir, + children=children) frameInfo = rqd.rqnetwork.RunningFrame(rqCore, runFrame) # when @@ -765,7 +772,7 @@ def test_runDarwin(self, getTempDirMock, permsUser, timeMock, popenMock): rqd.compiled_proto.report_pb2.FrameCompleteReport( host=renderHost, frame=rqd.compiled_proto.report_pb2.RunningFrameInfo( - job_name=jobName, frame_id=frameId, frame_name=frameName), + job_name=jobName, frame_id=frameId, frame_name=frameName, children=children), exit_status=returnCode)) diff --git a/rqd/tests/rqmachine_tests.py b/rqd/tests/rqmachine_tests.py index 7c7a650e1..1b1bdaf4a 100644 --- a/rqd/tests/rqmachine_tests.py +++ b/rqd/tests/rqmachine_tests.py @@ -148,22 +148,24 @@ softirq 10802040 0 3958368 410 1972314 394501 0 1 3631586 0 844860 ''' -PROC_PID_STAT = ('105 (time) S 7 105 105 0 -1 4210688 317 0 1 0 31 13 0 0 20 0 1 0 17385159 ' - '4460544 154 18446744073709551615 4194304 4204692 140725890735264 0 0 0 0 ' - '16781318 0 0 0 0 17 4 0 0 0 0 0 6303248 6304296 23932928 140725890743234 ' - '140725890743420 140725890743420 140725890744298 0') +PROC_STAT_SUFFIX = (' S 7 105 105 0 -1 4210688 317 0 1 0 31 13 0 0 20 0 1 0 17385159 ' + '4460544 154 18446744073709551615 4194304 4204692 140725890735264 0 0 0 0 ' + '16781318 0 0 0 0 17 4 0 0 0 0 0 6303248 6304296 23932928 140725890743234 ' + '140725890743420 140725890743420 140725890744298 0') +PROC_PID_STAT = '105 (time)' + PROC_STAT_SUFFIX +PROC_PID_STAT_WITH_SPACES = '105 (test space)' + PROC_STAT_SUFFIX +PROC_PID_STAT_WITH_BRACKETS = '105 (test) (brackets)' + PROC_STAT_SUFFIX -CUDAINFO = ' 
TotalMem 1023 Mb FreeMem 968 Mb' +PROC_PID_STATM = '152510 14585 7032 9343 0 65453 0' +PROC_PID_CMDLINE = ' sleep 20' -@mock.patch('subprocess.getoutput', new=mock.MagicMock(return_value=CUDAINFO)) @mock.patch.object(rqd.rqutil.Memoize, 'isCached', new=mock.MagicMock(return_value=False)) @mock.patch('platform.system', new=mock.MagicMock(return_value='Linux')) @mock.patch('os.statvfs', new=mock.MagicMock()) @mock.patch('rqd.rqutil.getHostname', new=mock.MagicMock(return_value='arbitrary-hostname')) class MachineTests(pyfakefs.fake_filesystem_unittest.TestCase): - @mock.patch('subprocess.getoutput', new=mock.MagicMock(return_value=CUDAINFO)) @mock.patch('os.statvfs', new=mock.MagicMock()) @mock.patch('platform.system', new=mock.MagicMock(return_value='Linux')) def setUp(self): @@ -174,7 +176,7 @@ def setUp(self): self.meminfo = self.fs.create_file('/proc/meminfo', contents=MEMINFO_MODERATE_USAGE) self.rqCore = mock.MagicMock(spec=rqd.rqcore.RqCore) - self.nimby = mock.MagicMock(spec=rqd.rqnimby.Nimby) + self.nimby = mock.MagicMock(spec=rqd.rqnimby.NimbySelect) self.rqCore.nimby = self.nimby self.nimby.active = False self.nimby.locked = False @@ -278,12 +280,13 @@ def test_isUserLoggedInWithNoDisplayOrProcess(self, processIterMock): self.assertFalse(self.machine.isUserLoggedIn()) - @mock.patch('time.time', new=mock.MagicMock(return_value=1570057887.61)) - def test_rssUpdate(self): + def _test_rssUpdate(self, proc_stat): rqd.rqconstants.SYS_HERTZ = 100 pid = 105 frameId = 'unused-frame-id' - self.fs.create_file('/proc/%d/stat' % pid, contents=PROC_PID_STAT) + self.fs.create_file('/proc/%d/stat' % pid, contents=proc_stat) + self.fs.create_file('/proc/%s/cmdline' % pid, contents=PROC_PID_CMDLINE) + self.fs.create_file('/proc/%s/statm' % pid, contents=PROC_PID_STATM) runningFrame = rqd.rqnetwork.RunningFrame(self.rqCore, rqd.compiled_proto.rqd_pb2.RunFrame()) runningFrame.pid = pid @@ -299,6 +302,18 @@ def test_rssUpdate(self): self.assertEqual(4356, 
updatedFrameInfo.vsize) self.assertAlmostEqual(0.034444696691, float(updatedFrameInfo.attributes['pcpu'])) + @mock.patch('time.time', new=mock.MagicMock(return_value=1570057887.61)) + def test_rssUpdate(self): + self._test_rssUpdate(PROC_PID_STAT) + + @mock.patch('time.time', new=mock.MagicMock(return_value=1570057887.61)) + def test_rssUpdateWithSpaces(self): + self._test_rssUpdate(PROC_PID_STAT_WITH_SPACES) + + @mock.patch('time.time', new=mock.MagicMock(return_value=1570057887.61)) + def test_rssUpdateWithBrackets(self): + self._test_rssUpdate(PROC_PID_STAT_WITH_BRACKETS) + @mock.patch.object( rqd.rqmachine.Machine, '_Machine__enabledHT', new=mock.MagicMock(return_value=False)) def test_getLoadAvg(self): @@ -308,6 +323,9 @@ def test_getLoadAvg(self): @mock.patch.object( rqd.rqmachine.Machine, '_Machine__enabledHT', new=mock.MagicMock(return_value=True)) + @mock.patch.object( + rqd.rqmachine.Machine, '_Machine__getHyperthreadingMultiplier', + new=mock.MagicMock(return_value=2)) def test_getLoadAvgHT(self): self.loadavg.set_contents(LOADAVG_HIGH_USAGE) @@ -318,29 +336,39 @@ def test_getBootTime(self): self.assertEqual(1569882758, self.machine.getBootTime()) - @mock.patch( - 'subprocess.getoutput', - new=mock.MagicMock(return_value=' TotalMem 1023 Mb FreeMem 968 Mb')) - def test_getGpuMemoryTotal(self): + def _resetGpuStat(self): if hasattr(self.machine, 'gpuNotSupported'): delattr(self.machine, 'gpuNotSupported') if hasattr(self.machine, 'gpuResults'): delattr(self.machine, 'gpuResults') - rqd.rqconstants.ALLOW_GPU = True - self.assertEqual(1048576, self.machine.getGpuMemoryTotal()) - - @mock.patch( - 'subprocess.getoutput', - new=mock.MagicMock(return_value=' TotalMem 1023 Mb FreeMem 968 Mb')) - def test_getGpuMemory(self): - if hasattr(self.machine, 'gpuNotSupported'): - delattr(self.machine, 'gpuNotSupported') - if hasattr(self.machine, 'gpuResults'): - delattr(self.machine, 'gpuResults') - rqd.rqconstants.ALLOW_GPU = True + @mock.patch.object( + 
rqd.rqconstants, 'ALLOW_GPU', new=mock.MagicMock(return_value=True)) + @mock.patch('subprocess.getoutput', + new=mock.MagicMock(return_value='16130 MiB, 16119 MiB, 1')) + def test_getGpuStat(self): + self._resetGpuStat() + self.assertEqual(1, self.machine.getGpuCount()) + self.assertEqual(16913531, self.machine.getGpuMemoryTotal()) + self.assertEqual(16901997, self.machine.getGpuMemoryFree()) - self.assertEqual(991232, self.machine.getGpuMemory()) + @mock.patch.object( + rqd.rqconstants, 'ALLOW_GPU', new=mock.MagicMock(return_value=True)) + @mock.patch('subprocess.getoutput', + new=mock.MagicMock(return_value="""\ +16130 MiB, 16103 MiB, 8 +16130 MiB, 16119 MiB, 8 +16130 MiB, 16119 MiB, 8 +16130 MiB, 16119 MiB, 8 +16130 MiB, 4200 MiB, 8 +16130 MiB, 16119 MiB, 8 +16130 MiB, 16119 MiB, 8 +16130 MiB, 16119 MiB, 8""")) + def test_multipleGpus(self): + self._resetGpuStat() + self.assertEqual(8, self.machine.getGpuCount()) + self.assertEqual(135308248, self.machine.getGpuMemoryTotal()) + self.assertEqual(122701222, self.machine.getGpuMemoryFree()) def test_getPathEnv(self): self.assertEqual( @@ -365,16 +393,12 @@ def test_reboot(self, popenMock): popenMock.assert_called_with(['/usr/bin/sudo', '/sbin/reboot', '-f']) - @mock.patch( - 'subprocess.getoutput', - new=mock.MagicMock(return_value=' TotalMem 1023 Mb FreeMem 968 Mb')) def test_getHostInfo(self): # pylint: disable=no-member hostInfo = self.machine.getHostInfo() self.assertEqual(4105212, hostInfo.free_swap) self.assertEqual(25699176, hostInfo.free_mem) - self.assertEqual('991232', hostInfo.attributes['freeGpu']) self.assertEqual('0', hostInfo.attributes['swapout']) self.assertEqual(25, hostInfo.load) self.assertEqual(False, hostInfo.nimby_enabled) @@ -422,18 +446,103 @@ def test_getBootReport(self): self.assertEqual(25699176, bootReport.host.free_mem) def test_reserveHT(self): + """ + Total 2 physical(ph) processors with 4 cores each with 2 threads each + step1 - taskset1: Reserve 3 cores (ph1) + step2 - taskset0: 
Reserve 4 cores (ph0) + step3 - Release cores on taskset0 + step4 - taskset3: Reserve 2 cores (ph0) + step5 - taskset4: 3 remaining, Reserve 3 cores (ph0+ph1) + step5 - taskset5: No more cores + """ cpuInfo = os.path.join(os.path.dirname(__file__), 'cpuinfo', '_cpuinfo_shark_ht_8-4-2-2') self.fs.add_real_file(cpuInfo) self.machine.testInitMachineStats(cpuInfo) - self.machine.setupHT() - tasksets = self.machine.reserveHT(300) + self.machine.setupTaskset() + + # ------------------------step1------------------------- + # phys_id 1 + # - core_id 0 + # - process_id 4 + # - process_id 12 + # - core_id 1 + # - process_id 5 + # - process_id 13 + # - core_id 3 + # - process_id 7 + # - process_id 15 + tasksets1 = self.machine.reserveHT(300) + # pylint: disable=no-member + self.assertItemsEqual(['4', '5', '7', '12', '13', '15'], sorted(tasksets1.split(','))) + + # ------------------------step2------------------------- + # phys_id 0 + # - core_id 0 + # - process_id 0 + # - process_id 8 + # - core_id 1 + # - process_id 1 + # - process_id 9 + # - core_id 2 + # - process_id 2 + # - process_id 10 + # - core_id 3 + # - process_id 3 + # - process_id 11 + tasksets0 = self.machine.reserveHT(400) + # pylint: disable=no-member + self.assertItemsEqual(['0', '1', '2', '3', '8', '9', '10', '11'], + sorted(tasksets0.split(','))) - self.assertEqual('0,8,1,9,2,10', tasksets) + # reserved cores got updated properly + # pylint: disable=no-member + self.assertItemsEqual([0, 1, 2, 3], self.coreDetail.reserved_cores[0].coreid) + + # Make sure tastsets don't overlap + self.assertTrue(set(tasksets0.split(',')).isdisjoint(tasksets1.split(','))) + + # ------------------------step3------------------------- + # Releasing a physcore shouldn't impact other physcores + self.machine.releaseHT(tasksets0) + # pylint: disable=no-member + self.assertTrue(1 in self.coreDetail.reserved_cores) + # pylint: disable=no-member + self.assertItemsEqual([0, 1, 3], self.coreDetail.reserved_cores[1].coreid) + + # 
------------------------step4------------------------- + # phys_id 0 + # - core_id 0 + # - process_id 0 + # - process_id 8 + # - core_id 1 + # - process_id 1 + # - process_id 9 + tasksets3 = self.machine.reserveHT(200) + # pylint: disable=no-member + self.assertItemsEqual(['0', '1', '8', '9'], sorted(tasksets3.split(','))) + + # ------------------------step5------------------------- + # phys_id 0 + # - core_id 2 + # - process_id 2 + # - process_id 10 + # - core_id 3 + # - process_id 3 + # - process_id 11 + # phys_id 1 + # - core_id 2 + # - process_id 6 + # - process_id 14 + tasksets4 = self.machine.reserveHT(300) + # pylint: disable=no-member + self.assertItemsEqual(['2', '10', '3', '11', '6', '14'], sorted(tasksets4.split(','))) - self.machine.releaseHT(tasksets) + # ------------------------step6------------------------- + # No cores available + with self.assertRaises(rqd.rqexceptions.CoreReservationFailureException): + self.machine.reserveHT(300) - self.assertEqual({0, 1, 2, 3, 4, 5, 6, 7}, self.machine._Machine__tasksets) def test_tags(self): tags = ["test1", "test2", "test3"] diff --git a/rqd/tests/rqnimby_tests.py b/rqd/tests/rqnimby_tests.py index aa6846f44..04cf3e765 100644 --- a/rqd/tests/rqnimby_tests.py +++ b/rqd/tests/rqnimby_tests.py @@ -41,10 +41,10 @@ def setUp(self): self.rqMachine = mock.MagicMock(spec=rqd.rqmachine.Machine) self.rqCore = mock.MagicMock(spec=rqd.rqcore.RqCore) self.rqCore.machine = self.rqMachine - self.nimby = rqd.rqnimby.Nimby(self.rqCore) + self.nimby = rqd.rqnimby.NimbyFactory.getNimby(self.rqCore) self.nimby.daemon = True - @mock.patch.object(rqd.rqnimby.Nimby, 'unlockedIdle') + @mock.patch.object(rqd.rqnimby.NimbySelect, 'unlockedIdle') def test_initialState(self, unlockedIdleMock): self.nimby.daemon = True @@ -70,7 +70,7 @@ def test_unlockedIdle(self, timerMock): timerMock.return_value.start.assert_called() @mock.patch('select.select', new=mock.MagicMock(return_value=[[], [], []])) - @mock.patch.object(rqd.rqnimby.Nimby, 
'unlockedIdle') + @mock.patch.object(rqd.rqnimby.NimbySelect, 'unlockedIdle') @mock.patch('threading.Timer') def test_lockedIdleWhenIdle(self, timerMock, unlockedIdleMock): self.nimby.active = True @@ -96,7 +96,7 @@ def test_lockedIdleWhenInUse(self, timerMock): timerMock.return_value.start.assert_called() @mock.patch('select.select', new=mock.MagicMock(return_value=[[], [], []])) - @mock.patch.object(rqd.rqnimby.Nimby, 'lockedIdle') + @mock.patch.object(rqd.rqnimby.NimbySelect, 'lockedIdle') @mock.patch('threading.Timer') def test_lockedInUseWhenIdle(self, timerMock, lockedIdleMock): self.nimby.active = True diff --git a/rqd/winps/setup.py b/rqd/winps/setup.py new file mode 100644 index 000000000..5db5c7ad4 --- /dev/null +++ b/rqd/winps/setup.py @@ -0,0 +1,14 @@ +from distutils.core import setup, Extension + +winps = Extension('winps', + define_macros = [('MAJOR_VERSION', '1'), + ('MINOR_VERSION', '0')], + include_dirs = [], + libraries = [], + library_dirs = [], + sources = ['winps.cpp']) + +setup (name = 'winps', + version = '1.0', + description = 'Windows ps for RQD', + ext_modules = [winps]) diff --git a/rqd/winps/winps.cpp b/rqd/winps/winps.cpp new file mode 100644 index 000000000..a7c058324 --- /dev/null +++ b/rqd/winps/winps.cpp @@ -0,0 +1,228 @@ +/* + * Copyright Contributors to the OpenCue Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#define PY_SSIZE_T_CLEAN +#include + +#include +// TlHelp32.h should be included after Windows.h +#include +#include + +#include +#include +#include +#include + +namespace { + +// Process stat history +struct Snapshot { + uint64_t creationTimeInFiletime; + uint64_t totalTimeInFiletime; + uint64_t wallTimeInFiletime; + double pidPcpu; +}; +std::map history; + +// FILETIME -> uint64_t, in 100-nanosecond unit +uint64_t convertFiletime(const FILETIME& ft) { + union TimeUnion { + FILETIME ft; + ULARGE_INTEGER ul; + }; + TimeUnion tu; + tu.ft = ft; + return tu.ul.QuadPart; +} + +void traverse( + const std::map>& parentChildrenMap, + DWORD pid, + uint64_t& rss, + double& pcpu) { + HANDLE hProcess = + OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, false, pid); + if (hProcess != nullptr) { + // RSS + PROCESS_MEMORY_COUNTERS pmc; + if (GetProcessMemoryInfo(hProcess, &pmc, sizeof(pmc))) { + rss += pmc.WorkingSetSize; + } + + // %CPU + FILETIME creationTime; + FILETIME exitTime; + FILETIME kernelTime; + FILETIME userTime; + if (GetProcessTimes( + hProcess, &creationTime, &exitTime, &kernelTime, &userTime)) { + // Current time in FILETIME + FILETIME now; + GetSystemTimeAsFileTime(&now); + + // Process start time + uint64_t creationTimeInFiletime = convertFiletime(creationTime); + + // Total time of kernel and user mode on this process + uint64_t totalTimeInFiletime = + convertFiletime(kernelTime) + convertFiletime(userTime); + + // Walltime of this process + uint64_t wallTimeInFiletime = + convertFiletime(now) - creationTimeInFiletime; + + if (wallTimeInFiletime > 0) { + auto it = history.find(pid); + if (it != history.end() && + it->second.creationTimeInFiletime == creationTimeInFiletime) { + // Percent cpu using decaying average, 50% from 10 seconds ago, + // 50% from last 10 seconds: + const auto& last = it->second; + double pidPcpu = static_cast( + totalTimeInFiletime - last.totalTimeInFiletime) / + static_cast(wallTimeInFiletime - 
last.wallTimeInFiletime); + pcpu += (last.pidPcpu + pidPcpu) / 2.0; // %cpu + history[pid] = Snapshot{ + creationTimeInFiletime, + totalTimeInFiletime, + wallTimeInFiletime, + pidPcpu}; + } else { + double pidPcpu = static_cast(totalTimeInFiletime) / + static_cast(wallTimeInFiletime); + pcpu += pidPcpu; + + history[pid] = Snapshot{ + creationTimeInFiletime, + totalTimeInFiletime, + wallTimeInFiletime, + pidPcpu}; + } + } + } + } + + const auto it = parentChildrenMap.find(pid); + if (it != parentChildrenMap.end()) { + for (const auto childPid : it->second) { + traverse(parentChildrenMap, childPid, rss, pcpu); + } + } +} + +PyObject* winpsUpdate(PyObject* self, PyObject* args) { + /* + * :param list pids: a list of pid(int) to look into + * :return: RSS and %CPU dict, or None if invalid inputs or error occurred + * :rtype: dict (key=pid, value={rss:uint64_t, pcpu:double}) + */ + PyObject* list; + if (!PyArg_ParseTuple(args, "O!", &PyList_Type, &list)) { + return nullptr; + } + + // Take a snapshot of all processes and create parent-children process map + std::map> parentChildrenMap; + HANDLE snapshotHandle = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0); + PROCESSENTRY32 processEntry = {}; + processEntry.dwSize = sizeof(PROCESSENTRY32); + if (Process32First(snapshotHandle, &processEntry)) { + do { + if (processEntry.th32ProcessID != 0) { + parentChildrenMap[processEntry.th32ParentProcessID].push_back( + processEntry.th32ProcessID); + } + } while (Process32Next(snapshotHandle, &processEntry)); + } + CloseHandle(snapshotHandle); + + // output = {} + PyObject* output = PyDict_New(); + if (output == nullptr) { + return nullptr; + } + + // Iterate the pids list + Py_ssize_t listSize = PyList_Size(list); + for (Py_ssize_t i = 0; i < listSize; i++) { + PyObject* pidObject = PyList_GetItem(list, i); + if (pidObject == nullptr) { + return nullptr; + } + DWORD pid = PyLong_AsUnsignedLong(pidObject); + if (PyErr_Occurred()) { + return nullptr; + } + + // Traverse process tree 
to add up RSS and %CPU from the pid + uint64_t rss = 0; + double pcpu = 0; + traverse(parentChildrenMap, pid, rss, pcpu); + + // stat = {} + PyObject* stat = PyDict_New(); + if (stat == nullptr) { + return nullptr; + } + + // stat["rss"] = rss + PyObject* rssObject = PyLong_FromUnsignedLongLong(rss); + if (rssObject == nullptr) { + return nullptr; + } + if (PyDict_SetItemString(stat, "rss", rssObject) != 0) { + return nullptr; + } + + // stat["pcpu"] = pcpu + PyObject* pcpuObject = PyFloat_FromDouble(pcpu); + if (pcpuObject == nullptr) { + return nullptr; + } + if (PyDict_SetItemString(stat, "pcpu", pcpuObject) != 0) { + return nullptr; + } + + // output[pid] = stat + if (PyDict_SetItem(output, pidObject, stat) != 0) { + return nullptr; + } + } + + return output; +} + +PyMethodDef winpsMethods[] = { + {"update", + winpsUpdate, + METH_VARARGS, + "Updates internal state and returns rss and pcpu"}, + {NULL, NULL, 0, NULL}}; + +PyModuleDef winpsModule = { + PyModuleDef_HEAD_INIT, + "winps", + nullptr, + -1, + winpsMethods, +}; + +} // namespace + +PyMODINIT_FUNC PyInit_winps() { + return PyModule_Create(&winpsModule); +} diff --git a/samples/outline-files/hello.outline b/samples/outline-files/hello.outline deleted file mode 100644 index 6288ca30c..000000000 --- a/samples/outline-files/hello.outline +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python - -from outline.modules.shell import Shell - -Shell("hello", - command="echo 'Hello World!'") diff --git a/samples/outline-files/hello_frame.outline b/samples/outline-files/hello_frame.outline deleted file mode 100644 index c060719e1..000000000 --- a/samples/outline-files/hello_frame.outline +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python - -from outline.modules.shell import Shell - -Shell("hello", - command="echo 'Hello frame %{FRAME}!'") diff --git a/samples/pycue/wait_for_job.py b/samples/pycue/wait_for_job.py new file mode 100755 index 000000000..d66de1a44 --- /dev/null +++ b/samples/pycue/wait_for_job.py @@ -0,0 
+1,61 @@ +#!/usr/bin/env python +# Copyright Contributors to the OpenCue Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic script that waits for a job to complete.""" + +import argparse +import datetime +import logging +import sys +import time + +import opencue +from opencue.wrappers.job import Job + + +def wait_for_job(job_name, timeout_sec=None): + logging.basicConfig( + level=logging.INFO, + format='%(asctime)s %(levelname)s %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + logging.info('Waiting for job %s...', job_name) + start_time = datetime.datetime.now() + while True: + if (datetime.datetime.now() - start_time).seconds > timeout_sec: + logging.error('Timed out') + return False + jobs = opencue.api.getJobs(job=[job_name], include_finished=True) + if not jobs: + logging.error("Job %s not found", job_name) + return False + job = jobs[0] + logging.info('Job state = %s', Job.JobState(job.state()).name) + if job.state() == Job.JobState.FINISHED: + logging.info('Job succeeded') + return True + if job.deadFrames() > 0: + logging.error('Job is failing') + return False + time.sleep(5) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("job_name", help="name of the job to wait for") + parser.add_argument("--timeout", help="number of seconds to wait before timing out", type=int) + args = parser.parse_args() + result = wait_for_job(args.job_name, timeout_sec=args.timeout) + if not result: + sys.exit(1) diff --git 
a/samples/outline-modules/hellomodule.py b/samples/pyoutline/basic_job.py old mode 100644 new mode 100755 similarity index 56% rename from samples/outline-modules/hellomodule.py rename to samples/pyoutline/basic_job.py index 2e8f09ae1..7cae6eb19 --- a/samples/outline-modules/hellomodule.py +++ b/samples/pyoutline/basic_job.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - # Copyright Contributors to the OpenCue Project # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -14,11 +13,21 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Basic job structure with a single layer and five frames. + +The frames just print out the current frame number.""" + + +import getpass -from outline import Outline, cuerun -from outline.modules.tutorial import HelloModule +import outline +import outline.cuerun +import outline.modules.shell -ol = Outline("my_job") -ol.add_layer(HelloModule("my_layer")) -cuerun.launch(ol, range="1-10", pause=True) +ol = outline.Outline( + 'basic_job', shot='shot01', show='testing', user=getpass.getuser()) +layer = outline.modules.shell.Shell( + 'echo_frame', command=['echo', '#IFRAME#'], chunk=1, threads=1, range='1-5') +ol.add_layer(layer) +outline.cuerun.launch(ol, use_pycuerun=False) diff --git a/samples/outline-modules/example.py b/samples/pyoutline/example_module.py similarity index 100% rename from samples/outline-modules/example.py rename to samples/pyoutline/example_module.py diff --git a/samples/rqd/blender/Dockerfile b/samples/rqd/blender/Dockerfile new file mode 100644 index 000000000..bb2235613 --- /dev/null +++ b/samples/rqd/blender/Dockerfile @@ -0,0 +1,40 @@ +# Builds on the latest base image of RQD from Docker Hub +FROM opencue/rqd + +# Install dependencies to run Blender on the opencue/rqd image +RUN yum -y update +RUN yum -y install \ + bzip2 \ + libfreetype6 \ + libgl1-mesa-dev \ + libXi-devel \ + mesa-libGLU-devel \ + zlib-devel \ + libXinerama-devel \ + 
libXrandr-devel + +# Set Blender install directory +ARG BLENDER_INSTALL_DIR=/usr/local/blender + +# Set Blender download source +ARG BLENDER_DOWNLOAD_SRC=https://download.blender.org/release/Blender3.3/blender-3.3.3-linux-x64.tar.xz + +# Download and install Blender +RUN mkdir ${BLENDER_INSTALL_DIR} +RUN curl -SL ${BLENDER_DOWNLOAD_SRC} \ + -o blender.tar.xz + +RUN tar -xvf blender.tar.xz \ + -C ${BLENDER_INSTALL_DIR} \ + --strip-components=1 + +RUN rm blender.tar.xz + +# Add Blender path as environment variable +ENV PATH=$PATH:${BLENDER_INSTALL_DIR} + +# Allows RQD to read Blender install directory in PATH env. variable +RUN echo "RQD_USE_PATH_ENV_VAR=true" >> /etc/opencue/rqd.conf + +# Verify Blender installation +RUN blender --version diff --git a/samples/rqd/blender/blender2.79-docker/Dockerfile b/samples/rqd/blender/blender2.79-docker/Dockerfile deleted file mode 100644 index 2c1b908f7..000000000 --- a/samples/rqd/blender/blender2.79-docker/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -# Builds on the latest base image of RQD from Docker Hub -FROM opencue/rqd - -# Install dependencies to run Blender on the opencue/rqd image -RUN yum -y update -RUN yum -y install \ - bzip2 \ - libfreetype6 \ - libgl1-mesa-dev \ - libXi-devel \ - mesa-libGLU-devel \ - zlib-devel \ - libXinerama-devel \ - libXrandr-devel - -# Download and install Blender 2.79 -RUN mkdir /usr/local/blender -RUN curl -SL https://download.blender.org/release/Blender2.79/blender-2.79-linux-glibc219-x86_64.tar.bz2 \ - -o blender.tar.bz2 - -RUN tar -jxvf blender.tar.bz2 \ - -C /usr/local/blender \ - --strip-components=1 - -RUN rm blender.tar.bz2 diff --git a/sandbox/README.md b/sandbox/README.md index 8fcc71fa8..e2cadb902 100644 --- a/sandbox/README.md +++ b/sandbox/README.md @@ -1,31 +1,16 @@ # OpenCue sandbox environment -The sandbox environment provides a way to run a test OpenCue deployment. You -can use the sandbox environment to run small tests or development work. 
The sandbox -environment runs OpenCue components in separate Docker containers on your local -machine. +The sandbox environment provides a way to run a test OpenCue deployment. You can use the sandbox +environment to run small tests or development work. The sandbox environment runs OpenCue components +in separate Docker containers on your local machine. To learn how to run the sandbox environment, see https://www.opencue.io/docs/quick-starts/. ## Monitoring -To get started with monitoring there is also an additional docker-compose which sets up +To get started with monitoring there is also an additional Docker compose file which sets up monitoring for key services. -This can be started from the OpenCue root directory with: -```bash -docker-compose --project-directory . -f sandbox/docker-compose.yml -f sandbox/docker-compose.monitoring.yml up -``` - -Spins up a monitoring stack - -http://localhost:3000/ - -login: admin -pass: admin - -### Loki logging - -Too use loki to store logs requires installing the docker drivers. see: -https://grafana.com/docs/loki/latest/clients/docker-driver/ \ No newline at end of file +To learn how to run the sandbox environment with monitoring, +see https://www.opencue.io/docs/other-guides/monitoring-with-prometheus-loki-and-grafana/. 
diff --git a/sandbox/docker-compose.monitoring.yml b/sandbox/docker-compose.monitoring.yml index 3f6e451f3..2b5ab78ee 100644 --- a/sandbox/docker-compose.monitoring.yml +++ b/sandbox/docker-compose.monitoring.yml @@ -13,7 +13,7 @@ services: environment: - DATA_SOURCE_URI=db:5432/postgres?sslmode=disable - DATA_SOURCE_USER=cuebot - - DATA_SOURCE_PASS=$POSTGRES_PASSWORD + - DATA_SOURCE_PASS=cuebot_password - PG_EXPORTER_AUTO_DISCOVER_DATABASES=true ports: - 9187:9187 diff --git a/sandbox/flyway.Dockerfile b/sandbox/flyway.Dockerfile index a7c6ebf45..fffcbd3c2 100644 --- a/sandbox/flyway.Dockerfile +++ b/sandbox/flyway.Dockerfile @@ -1,18 +1,20 @@ -FROM centos +FROM almalinux:8.7 + +ARG FLYWAY_VERSION=9.11.0 # Get flyway -RUN ["curl", "-O", "https://repo1.maven.org/maven2/org/flywaydb/flyway-commandline/6.0.0/flyway-commandline-6.0.0-linux-x64.tar.gz"] -RUN ["yum", "install", "-y", "tar", "java-1.8.0-openjdk", "postgresql-jdbc", "nc", "postgresql"] -RUN ["tar", "-xzf", "flyway-commandline-6.0.0-linux-x64.tar.gz"] +RUN yum install -y tar java-1.8.0-openjdk postgresql-jdbc nc postgresql +RUN curl -O https://repo1.maven.org/maven2/org/flywaydb/flyway-commandline/${FLYWAY_VERSION}/flyway-commandline-${FLYWAY_VERSION}.tar.gz +RUN tar -xzf flyway-commandline-${FLYWAY_VERSION}.tar.gz -WORKDIR flyway-6.0.0 +WORKDIR flyway-${FLYWAY_VERSION} # Copy the postgres driver to its required location -RUN ["cp", "/usr/share/java/postgresql-jdbc.jar", "jars/"] -RUN ["mkdir", "/opt/migrations"] -RUN ["mkdir", "/opt/scripts"] +RUN cp /usr/share/java/postgresql-jdbc.jar jars/ +RUN mkdir /opt/migrations +RUN mkdir /opt/scripts COPY ./cuebot/src/main/resources/conf/ddl/postgres/migrations /opt/migrations -COPY ./cuebot/src/main/resources/conf/ddl/postgres/demo_data.sql /opt/scripts +COPY ./cuebot/src/main/resources/conf/ddl/postgres/seed_data.sql /opt/scripts COPY ./sandbox/migrate.sh /opt/scripts/ CMD ["/bin/bash"] diff --git a/sandbox/get-latest-release-tag.sh 
b/sandbox/get-latest-release-tag.sh new file mode 100755 index 000000000..0aa552efa --- /dev/null +++ b/sandbox/get-latest-release-tag.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +# Script for fetching the latest release version of OpenCue. +# - `curl` fetches all of the metadata for the latest release, in JSON format. +# - `grep` filters for just the `"tag_name": "v1.2.3"` line. +# - `cut` extracts the `v1.2.3` value from the `tag_name` line. +# - `tr` removes the `v` to leave us with the final version number e.g. `1.2.3`. + +curl -s https://api.github.com/repos/AcademySoftwareFoundation/OpenCue/releases/latest \ + | grep tag_name \ + | cut -d \" -f 4 \ + | tr -d v diff --git a/sandbox/install-client-archives.sh b/sandbox/install-client-archives.sh index 66dfe31f1..be82dd76b 100755 --- a/sandbox/install-client-archives.sh +++ b/sandbox/install-client-archives.sh @@ -38,6 +38,16 @@ for PACKAGE in "${CLIENT_PACKAGES[@]}"; do pip install -r ${REQUIREMENTS_GUI} fi cd ${PACKAGE}-${VERSION}-all + + # remove *.pyc files and __pycache__ folders contained on + # -all.tar.gz. As these files might be generated from + # a different operating system and/or python version than current host has + # `python setup.py install` may raise a `ValueError: bad marshal data` error. + # Removing these files before invoking `setup.py` prevent this error. + # NOTE: Temporary solution until pip distribution is ready. + find . -path '*/__pycache__*' -delete + find . -name '*.pyc' -type f -delete + python setup.py install cd .. done diff --git a/sandbox/install-client-sources.sh b/sandbox/install-client-sources.sh index 1030f492b..7e15ed018 100755 --- a/sandbox/install-client-sources.sh +++ b/sandbox/install-client-sources.sh @@ -4,14 +4,13 @@ set -e pip install -r requirements.txt -r requirements_gui.txt -# Compile the proto used to communicate with the Cuebot server +# Compile the proto used to communicate with the Cuebot server. cd proto python -m grpc_tools.protoc -I=. 
\ --python_out=../pycue/opencue/compiled_proto \ --grpc_python_out=../pycue/opencue/compiled_proto ./*.proto cd .. +2to3 -wn -f import pycue/opencue/compiled_proto/*_pb2*.py -# Install the OpenCue client packages -# You also need to set the OL_CONFIG environment variable -# to pyoutline/etc/outline.cfg to run Cuesubmit -pip install pycue/ pyoutline/ cuesubmit/ cuegui/ cueadmin/ +# Install all client packages. +pip install pycue/ pyoutline/ cueadmin/ cuesubmit/ cuegui/ diff --git a/sandbox/migrate.sh b/sandbox/migrate.sh index 57892b606..b7af1cdfd 100755 --- a/sandbox/migrate.sh +++ b/sandbox/migrate.sh @@ -1,5 +1,6 @@ #!/bin/bash +set -e until nc --send-only $PGHOST $PGPORT < /dev/null do @@ -13,10 +14,10 @@ do sleep 2 done -# Apply the flyway database migrations. +echo "Applying database migrations..." ./flyway migrate -user=${PGUSER} -password=${PGPASSWORD} -url="jdbc:postgresql://${PGHOST}:${PGPORT}/${PGDATABASE}" -locations='filesystem:/opt/migrations' # Check if a show exists, if not apply demo data if psql -c "select 1 from show"|grep "(0 rows)"; then - psql -a -f /opt/scripts/demo_data.sql + psql -a -f /opt/scripts/seed_data.sql fi diff --git a/sandbox/setup-database.sh b/sandbox/setup-database.sh index 56761a61f..50383ba95 100755 --- a/sandbox/setup-database.sh +++ b/sandbox/setup-database.sh @@ -149,7 +149,7 @@ read -n 1 -p "Select mode of population: " POPULATING_OPT if [[ $POPULATING_OPT -eq 1 ]] then wget ${BASE_URL}"${VERSION}"/schema-"${VERSION}".sql -P ./db-data/ - wget ${BASE_URL}"${VERSION}"/demo_data-"${VERSION}".sql -P ./db-data/ + wget ${BASE_URL}"${VERSION}"/seed_data-"${VERSION}".sql -P ./db-data/ echo "" echo "Populating the database schema and some initial data" @@ -164,7 +164,7 @@ elif [[ $POPULATING_OPT -eq 2 ]] then brew install flyway || flyway -url=jdbc:postgresql://$DB_HOST/$DB_NAME -user="$USER" -n -locations=filesystem:/cuebot/src/main/resources/conf/ddl/postgres/migrations migrate - psql -h $DB_HOST -f 
/cuebot/src/main/resources/conf/ddl/postgres/demo_data.sql $DB_NAME + psql -h $DB_HOST -f /cuebot/src/main/resources/conf/ddl/postgres/seed_data.sql $DB_NAME else echo "" echo "Invalid option!" diff --git a/tsc/meetings/2021-02-17.md b/tsc/meetings/2021-02-17.md new file mode 100644 index 000000000..ee95b60a1 --- /dev/null +++ b/tsc/meetings/2021-02-17.md @@ -0,0 +1,109 @@ +# OpenCue TSC Meeting Notes 17 Feb 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [ ] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [x] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [x] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * User survey + * Responses: https://www.surveymonkey.com/results/SM-8DS6VQ9K9/ + * New user UX + * Split demo_data.sql + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/891 + * Progress: https://github.com/AcademySoftwareFoundation/OpenCue/pull/910 + * Maybe just a rename for now? seed_data.sql + * Could add explanation of demo data and what each piece means + * Keep using Docker compose? + * Look at survey responses to see if this is a useful tool still. + * improvements + * move to toplevel? 
+ * Review Daniel's writeup for new action items + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/905 + * Document config options for RQD and Cuebot + * Make RQD gRPC port configurable (done) + * Rename/split demo_data.sql (in progress) + * Document how facilities/tags/allocations work + * This is in glossary, but deserves a dedicated doc page, perhaps a reference on how + an OpenCue deployment might be structured (also addresses point 6 in Daniel's + writeup) + * Could add explanation of demo data and what each piece means + * Clean hardcoded facilities from RQD source (done) + * Clean SPI-specific environment variables from RQD source, or convert them into fully + supported, documented ones + * Why does RQD limit facility names to three chars? + * Fix cuebot:latest tag for sandbox setup + * Published new release which will fix the problem for now. + * Need to address in more detail soon. + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Pro: master branch in repo will always match :latest tag in Docker Hub + * Pro: following deployment guide using combo of source checkout + docker images + will work fine + * Con: following deployment guide using combo of release artifacts + docker images + means images will be ahead of release artifacts (because guide assumes :latest tag + for images) + * This should be ok most of the time, as changes tend to be + backwards-compatible (e.g. old schema versions still exist in the system) + * We could change layout of deployment guide, split into two options: + * Deploy from a release (uses release artifacts and docker images + with : tag) + * Deploy from master (uses source code and docker images with :latest tag) + * This would also help simplify the docs, there are too many options for + deploying each component which is confusing for new users. 
+ * podman issue + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/898 + * This appears to be related to some Dockerfile syntax we're using which podman doesn't + support. + * Auto close github issues + * Docs refresh + * API reference + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/559 + * We are able to generate HTML, we just need to publish it somewhere + * No other progress yet. + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * Configuration guide for Cuebot + * Configuration guide for RQD + * dedicated doc page, a reference on how an OpenCue deployment might/should be structured, + with examples and pointers to seed data + * UX expert we can contact? + * CII badge updates + * Linting done. Brian working on resolving some additional SonarCloud issues. + * Brian to go through CII checklist and start to fill in more details. + * Drop Oracle support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/840 + * PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/895 + * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * GUI to add new shows + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/892 + * Some discussion but coding has not started + * Expand DCC plugins + * Houdini? + * Katana, needs followup + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved, needs to resolve some failing checks. + * CSP Terraform docs + * No progress yet. 
diff --git a/tsc/meetings/2021-03-03.md b/tsc/meetings/2021-03-03.md new file mode 100644 index 000000000..ad6b4e44e --- /dev/null +++ b/tsc/meetings/2021-03-03.md @@ -0,0 +1,91 @@ +# OpenCue TSC Meeting Notes 3 Mar 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [ ] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [x] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [x] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * User survey + * ASWF Graduation + * Brian working through CII checklist, should be completed soon. + * Brian also preparing proposal and presentation for next TAC meeting, will be circulated + soon. + * Quality gate failing due to test coverage, this should not block graduation but requires + some ongoing work to resolve ASAP. + * New user UX + * Rename demo_data.sql + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/891 + * Progress: https://github.com/AcademySoftwareFoundation/OpenCue/pull/910 + * No new updates. + * Review Daniel's writeup for new action items + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/905 + * See takeaways from last meeting. + * Brian to file bugs for each of these. + * Fix cuebot:latest tag for sandbox setup + * Published new release which will fix the problem for now. + * Need to address in more detail soon. + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * See pros/cons from last meeting. + * No progress implementing this yet. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. 
+ * API reference + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/559 + * We are able to generate HTML, we just need to publish it somewhere + * No other progress yet. + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for Cuebot + * No progress. + * Configuration guide for RQD + * No progress. + * Drop Oracle support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/840 + * PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/895 + * Ready for review? Yes + * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * New PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/924 + * Ready for review + * GUI to add new shows + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/892 + * Coding done, PR approved and ready to merge. + * Expand DCC plugins + * No progress. + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Blog post on this topic? + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * No progress. 
+* Other current work updates + * SPI several bug fixes, PRs coming soon, mostly GUI issues + * A few FRs to be filed as issues and discussed soon diff --git a/tsc/meetings/2021-03-31.md b/tsc/meetings/2021-03-31.md new file mode 100644 index 000000000..37829c441 --- /dev/null +++ b/tsc/meetings/2021-03-31.md @@ -0,0 +1,41 @@ +# OpenCue TSC Meeting Notes 31 Mar 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [ ] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [ ] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * User survey + * ASWF Graduation + * CII badge complete + * Proposal submitted + * Waiting on TAC for consideration + * New user UX + * Brian planning to start on Python distribution updates, publish to PyPI + * Docs refresh + * API reference + * Let's publish instructions for doing local build for now +* Other current work updates? + * SPI merged with OSS version, CueGUI and other bug fixes incoming +* User survey impressions + * PR/outreach, highlight new features/integrations + * Could make videos e.g. demonstrating CueGUI + * More windows support/docs diff --git a/tsc/meetings/2021-04-14.md b/tsc/meetings/2021-04-14.md new file mode 100644 index 000000000..48fa86ac5 --- /dev/null +++ b/tsc/meetings/2021-04-14.md @@ -0,0 +1,96 @@ +# OpenCue TSC Meeting Notes 14 Apr 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [x] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [x] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * ~~User survey~~ + * ASWF Graduation + * Waiting on TAC to discuss. 
OpenCue deep-dive session scheduled for next week's meeting, + was bumped from last week due to guest speakers. + * New user UX + * Rename demo_data.sql + * Main code change done. + * Docs updated. + * Still todo: updating release pipeline to publish seed_data artifact. + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Reviewed design doc, no issues, let's proceed. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: two PRs out to clean up this process, then we can circulate the doc page on + this: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for Cuebot + * No progress. + * Configuration guide for RQD + * No progress. + * PyPI design doc may cover this, design includes a review of all Python components' + config systems and requires documentation of each of them before publishing to PyPI. 
+ * Drop Oracle support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/840 + * PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/895 + * Ready to merge, needs rebase. + * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * In review, troubleshooting test failures. + * ~~GUI to add new shows~~ + * ~~Done.~~ + * Expand DCC plugins + * No progress. + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * No progress on this yet + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * No progress. +* Other current work updates? + * SPI grpc fork bug + * Seeing errors + like `Warn: Fork support is only compatible with the epoll1 and poll polling strategies` + * Fix: Set `os.environ["GRPC_POLL_STRATEGY"] = "epoll1"` + * Lars working on some docker compose updates. diff --git a/tsc/meetings/2021-04-28.md b/tsc/meetings/2021-04-28.md new file mode 100644 index 000000000..552c4151c --- /dev/null +++ b/tsc/meetings/2021-04-28.md @@ -0,0 +1,104 @@ +# OpenCue TSC Meeting Notes 28 Apr 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [x] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [x] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * ~~User survey~~ + * ASWF Graduation + * Approved by TAC, approved by board. Done! + * New user UX + * Rename demo_data.sql + * Main code change done. + * Docs updated. + * Still todo: updating release pipeline to publish seed_data artifact. 
+ * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Brian: this happened again, sent update to Quickstarts to instruct user to build local + Cuebot to work around this for now. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * ~~Drop Oracle support~~ + * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * In review, troubleshooting test failures. + * ~~GUI to add new shows~~ + * Expand DCC plugins + * No progress. 
+ * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * No progress on this yet + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * No progress. + * Improve booking logic + * Wasting resources while hostreport and booking threadpools fight for resources, work needs + to be rebooked + * Let's dig up the work that Matt Chambers was working on a while ago + * RQD needs to handle dynamic pool of cuebot hosts better + * Let's file an issue and continue discussion there +* Other current work updates? + * Brian + * Xvfb no longer supports Python 2 + * Issue: https://github.com/AcademySoftwareFoundation/OpenCue/issues/964 + * PR to disable CueGUI Python 2 + tests: https://github.com/AcademySoftwareFoundation/OpenCue/pull/965 + * Any other way we can work around this? Or do we just have to accept this is going to + happen gradually? Consensus: seems like the latter. + * Remove localbook? + * Still a useful feature, though it needs some fixing up. + * Let's keep it around for now, improve if needed. + diff --git a/tsc/meetings/2021-05-12.md b/tsc/meetings/2021-05-12.md new file mode 100644 index 000000000..71c4c3035 --- /dev/null +++ b/tsc/meetings/2021-05-12.md @@ -0,0 +1,94 @@ +# OpenCue TSC Meeting Notes 12 May 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [ ] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [ ] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * User survey + * ASWF Graduation + * New user UX + * Rename demo_data.sql + * Main code change done. + * Docs updated. + * Still todo: updating release pipeline to publish seed_data artifact. 
+ * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Brian has started work on config cleanup. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Brian: couldn't locate old docs, email out to Matt to see if he has any materials around. + * Need an owner for this work. Diego has volunteered. + * Drop Oracle support + * Done. 
+ * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/924 + * Test failures resolved, needs review. + * GUI to add new shows + * Done. + * Expand DCC plugins + * No progress. + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * No progress on this yet + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * No progress. +* Other current work updates + * SPI: shutdown of old cue3 system is complete. + diff --git a/tsc/meetings/2021-06-09.md b/tsc/meetings/2021-06-09.md new file mode 100644 index 000000000..1bef06ccf --- /dev/null +++ b/tsc/meetings/2021-06-09.md @@ -0,0 +1,115 @@ +# OpenCue TSC Meeting Notes 9 June 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [ ] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [x] Idris Miles + +Agenda/Notes: + +* Goals for 2021 + * ~~User survey~~ + * ~~ASWF Graduation~~ + * New user UX + * Rename demo_data.sql + * Main code change done. + * Docs updated. + * Still todo: updating release pipeline to publish seed_data artifact. + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done and ready for review. Tested on macOS and Windows. + * Next up will do config cleanup on other components. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. 
every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Mention tuning RQD connection cache size to number of expected hosts. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Brian: couldn't locate old docs, email out to Matt to see if he has any materials around. + * Need an owner for this work. Diego has volunteered. + * Haven't been able to reach Matt yet. + * Let's try to locate Matt's fork from a while back. + * Might speak to Weta for advice as well. + * ~~Drop Oracle support~~ + * ~~Done.~~ + * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/924 + * PR approved, ready to merge. Could use some production testing. + * ~~GUI to add new shows~~ + * ~~Done.~~ + * Expand DCC plugins + * No progress. 
+ * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * No progress on this yet + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * Proposal: + * Any issues in a Project have been identified as important and will be ignored. + * Other issues: + * If no update for 60 days, will get the label "stale" and a comment notifying of + auto-close. + * If no further update for 7 days, will be closed. + * Any issue update will reset the timer — remove the "stale" tag + * Action https://github.com/marketplace/actions/close-stale-issues will be used. + * May need another action to label issues if they are in a project, the above action doesn't + appear to have options for ignoring issues based on project membership. +* Other current work updates + * Bug where services would need restart + * cuebot -> rqd module, calls not getting responses + * needed to set deadline on all grpc calls, fixed issue + * cuebot threadpools able to self-heal now + * Cache of RQD connections + * need to tune cache size to about number of expected RQD hosts + * Set concurrency cache settings + * set to number of threads expected + * Issue about RQD hardcoded timezone diff --git a/tsc/meetings/2021-06-23.md b/tsc/meetings/2021-06-23.md new file mode 100644 index 000000000..669e7cbcc --- /dev/null +++ b/tsc/meetings/2021-06-23.md @@ -0,0 +1,120 @@ +# OpenCue TSC Meeting Notes 23 June 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [ ] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [ ] Idris Miles + +Agenda/Notes: + +* Completed 2021 goals + * User survey + * ASWF graduation + * Drop Oracle support + * GUI to add new shows +* Todo 2021 goals 
+ * New user UX + * Rename demo_data.sql + * Main code change done. + * Docs updated. + * Release pipeline updated. Done! + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done and ready for review. Tested on macOS and Windows. + * Next up will do config cleanup on other components. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. 
Let's continue the discussion there. + * GPU support + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/459 + * Starting PR in review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/924 + * PR merged. This is done for now pending further testing, after which we should improve + Windows support. + * Expand DCC plugins + * No progress. + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * No progress on this yet + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * Proposal: + * Any issues in a Project have been identified as important and will be ignored. + * Other issues: + * If no update for 60 days, will get the label "stale" and a comment notifying of + auto-close. + * If no further update for 7 days, will be closed. + * Any issue update will reset the timer. + * Action https://github.com/marketplace/actions/close-stale-issues will be used. + * May need another action to label issues if they are in a project, the above action doesn't + appear to have options for ignoring issues based on project membership. +* Other current work updates + * Brian: split lint into its own CI job to avoid confusion, reduce runtime. + * Diego + * grpc connections PR + * threadpools PR + * Discussion to revive cloud work/plugin + * GSoC Cloud Plugin + * Is very simple, provides ability to manually adjust instance group size. + * Need to finalize GSoC Azure support, Greg volunteered to test this. + * Need to add AWS support. + * Beyond that, we'd like to allow autoscaling based on the amount of work in the queue. + * We need to be very careful with this as bugs could cause user money to be burned on + cloud resources. We should provide the tools for users to build this themselves but + stop short of building this fully into Cuebot. +* Open Source Days 2021 + * Need rough idea and presenters list. 
+ * Brian will give short project update at beginning of session. + * Diego has volunteered to present some of SPI's work, Ben to assist. + * Let's start an email thread to discuss further. diff --git a/tsc/meetings/2021-07-21.md b/tsc/meetings/2021-07-21.md new file mode 100644 index 000000000..da316fb91 --- /dev/null +++ b/tsc/meetings/2021-07-21.md @@ -0,0 +1,116 @@ +# OpenCue TSC Meeting Notes 21 July 2021 + +Secretary: Brian Cipriano + +TSC Attendees: + +* [ ] Matt Chambers +* [x] Brian Cipriano +* [x] Greg Denton +* [x] Ben Dines +* [ ] Dave Fellows +* [x] Daniel Neilson +* [ ] Alex Schworer +* [ ] Erik Strauss +* [ ] Lars van der Bijl + +Committer Attendees: + +* [ ] Christian Smith +* [x] Diego Tavares +* [ ] Idris Miles + +Agenda/Notes: + +* Completed 2021 Goals + * User survey + * ASWF graduation + * Drop Oracle support + * Rename demo_data.sql + * Expanded GPU support +* Todo 2021 Goals + * Open Source Days 2021 + * OpenCue session Wednesday, August 4 at 10:55 AM - 11:35 AM PT + * Slides + * Google docs + template: https://docs.google.com/presentation/d/12GJ_6xSm0fwBlZON3VVGhYr5gQKRgu82YIVOvLH3U74/edit?usp=sharing + * Let's have a single master deck to avoid switching between presenters. Can start with + separate decks and merge later. Last OSD we used "gain control" so each speaker could + control their own slides. + * Discuss SPI portion of the session. + * Shotgun integration. Can we share? + * Sanitized Grafana boards? + * Diego/Ben will look into this. + * Brian: to start email checkin thread and plan a rehearsal meeting. + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done and ready for review. Tested on macOS and Windows. 
+ * Next up will do config cleanup on other components. + * No update here yet. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * Let's start github issue/PR for this + * Create `rfc` tag + * Expand DCC plugins + * No progress. + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * No progress on this yet + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * Proposal: + * Any issues in a Project have been identified as important and will be ignored. 
+ * Other issues: + * If no update for 60 days, will get the label "stale" and a comment notifying of + auto-close. + * If no further update for 7 days, will be closed. + * Any issue update will reset the timer. + * Action https://github.com/marketplace/actions/close-stale-issues will be used. + * May need another action to label issues if they are in a project, the above action doesn't + appear to have options for ignoring issues based on project membership. + * No progress yet. + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Greg: started to test / clean up azure support. Looking good so far. diff --git a/tsc/meetings/2021-09-15.md b/tsc/meetings/2021-09-15.md new file mode 100644 index 000000000..be57b6e10 --- /dev/null +++ b/tsc/meetings/2021-09-15.md @@ -0,0 +1,99 @@ +# OpenCue TSC Meeting Notes 15 September 2021 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2021 Goals + * User survey + * ASWF graduation + * Drop Oracle support + * Rename demo_data.sql + * Expanded GPU support +* Todo 2021 Goals + * Open Source Days 2021 + * Any takeaways/followups? + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done and ready for review. Tested on macOS and Windows. + * Next up will do config cleanup on other components. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. 
+ * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expand DCC plugins + * No progress. + * Logging solution + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/880 + * PR approved and merged. + * Proposal: write blog post on this topic + * Proposal accepted + * Tutorial is now published, presented at OSD 2021. + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * Proposal: + * Any issues in a Project have been identified as important and will be ignored. + * Other issues: + * If no update for 60 days, will get the label "stale" and a comment notifying of + auto-close. + * If no further update for 7 days, will be closed. + * Any issue update will reset the timer. + * Action https://github.com/marketplace/actions/close-stale-issues will be used. 
+ * May need another action to label issues if they are in a project, the above action doesn't + appear to have options for ignoring issues based on project membership. + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. +* Other ongoing work + * Brian + * Python 3.8, 3.9 support + * VFX ref platform CY2021 now in CI pipeline + * Ready for CY2022 now that Python 3.9 is supported + * Published tutorial on Grafana integration + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * Scheduling fixes + * Add thread pool properties https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * Schedule dispatch frame https://github.com/AcademySoftwareFoundation/OpenCue/pull/1012 + * Determine idle cores + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1021 + * Delete CORE_POINTS_RESERVED_MAX logic? 
+ * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1026 diff --git a/tsc/meetings/2021-09-29.md b/tsc/meetings/2021-09-29.md new file mode 100644 index 000000000..29a3be9d6 --- /dev/null +++ b/tsc/meetings/2021-09-29.md @@ -0,0 +1,96 @@ +# OpenCue TSC Meeting Notes 29 September 2021 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2021 Goals + * User survey + * ASWF graduation + * Drop Oracle support + * Rename demo_data.sql + * Expanded GPU support + * Open Source Days 2021 + * Logging/monitoring solution +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * Consistent issue + * MonitorCue refresh tweak, reduce API calls + * Collecting feedback on this now + * Custom garbage collection + * Haven't tested yet + * Scheduling fixes + * Add thread pool properties https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * Schedule dispatch frame https://github.com/AcademySoftwareFoundation/OpenCue/pull/1012 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * Tested >5 months + * User permissions + * default disable job options for jobs not owned by the current user + * let's start designing permission system, will make a github post to start gathering + requirements +* Todo 2021 Goals + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done and ready for review. Tested on macOS and Windows. + * Next up will do config cleanup on other components. + * pyoutline, rqd up next + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. 
+ * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expand DCC plugins + * No progress. + * CSP Terraform docs + * No progress yet. + * Auto-close Github issues + * Proposal: + * Any issues in a Project have been identified as important and will be ignored. + * Other issues: + * If no update for 60 days, will get the label "stale" and a comment notifying of + auto-close. + * If no further update for 7 days, will be closed. + * Any issue update will reset the timer. + * Action https://github.com/marketplace/actions/close-stale-issues will be used. + * May need another action to label issues if they are in a project, the above action doesn't + appear to have options for ignoring issues based on project membership. 
+ * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. diff --git a/tsc/meetings/2021-12-08.md b/tsc/meetings/2021-12-08.md new file mode 100644 index 000000000..30528ec63 --- /dev/null +++ b/tsc/meetings/2021-12-08.md @@ -0,0 +1,103 @@ +# OpenCue TSC Meeting Notes 8 December 2021 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2021 Goals + * User survey + * ASWF graduation + * Drop Oracle support + * Rename demo_data.sql + * Expanded GPU support + * Open Source Days 2021 + * Logging/monitoring solution +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * SPI: still researching and testing their fix. + * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * PRs need review. + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still + useful, if anything. + * Schedule dispatch frame + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1012 + * Adds database fields to eliminate multiple cuebots handing out the same frames. + * Might not want to merge while larger scheduler redesign is going on. + * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs +* Todo 2021 Goals + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done, tested on macOS and Windows, merged. 
+ * pyoutline, cuesubmit config cleanup in review. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Other PRs with improvements — see ongoing work above. + * Some feedback on discussion thread that needs review. + * SPI experimenting with redis to handle RQD host reports, but may make a good candidate for + the scheduler as well. PR with some initial changes likely around end of year. + * Expand DCC plugins + * No progress. + * CSP Terraform docs + * No progress yet. 
+ * Auto-close Github issues + * Proposal: + * Any issues in a Project have been identified as important and will be ignored. + * Other issues: + * If no update for 60 days, will get the label "stale" and a comment notifying of + auto-close. + * If no further update for 7 days, will be closed. + * Any issue update will reset the timer. + * Action https://github.com/marketplace/actions/close-stale-issues will be used. + * May need another action to label issues if they are in a project, the above action doesn't + appear to have options for ignoring issues based on project membership. + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Let's send PR to add Kazuki (splhack) as committer + * Brian needs to catch up on importing meeting notes into the Github repo. + * Next meeting Dec 22, should probably cancel and hold next meeting Jan 5. diff --git a/tsc/meetings/2022-01-05.md b/tsc/meetings/2022-01-05.md new file mode 100644 index 000000000..47636676b --- /dev/null +++ b/tsc/meetings/2022-01-05.md @@ -0,0 +1,113 @@ +# OpenCue TSC Meeting Notes 5 January 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2022 Goals + * Cleared section for the new year. Nothing to be added here, yet. +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * SPI: still researching and testing their fix. + * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * PRs need review. + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still + useful, if anything. 
+ * Review of 1035 is almost complete, code looking good with a few minor suggestions. + * RQD change, adding child proc info into RQD logs as well as database for viewing in CueGUI + * For understanding memory usage of complex jobs. + * Still in development. + * log4j update + * PR needs another update. + * Let's post an update to the user group to let everyone know to update. +* Goals from 2021 + * Subsections here should be moved to the 2022 section or removed. + * New user UX + * Keep for 2022 + * Docs refresh + * Keep for 2022 + * Improve scheduler logic + * Keep for 2022 + * Expand DCC plugins + * No progress in 2021. Keep for 2022? + * We should keep it, but lower priority. Important for wider user adoption. + * High priority: Blender, Houdini + * Worth writing to the user group to see what folks have already? + * CSP Terraform docs + * No progress yet. + * Let's look at the Azure version of this. + * Let's roll this into the more general Cloud goal. + * Auto-close Github issues + * No need to keep this as its own goal — it's just a single piece of work. "Improving + development workflow" would be a more suitable goal but doesn't appear to be necessary at + the moment. + * Expanded Cloud functionality + * Keep for 2022. +* Todo 2022 Goals + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done, tested on macOS and Windows, merged. + * pyoutline, cuesubmit config cleanup review done, ready to merge. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. 
+ * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guide for client-side python + * In progress, will be done as part of PyPI work. + * Configuration guide for Cuebot + * In progress, will be done as part of PyPI work. + * Configuration guide for RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Let's write up a github issue to discuss requirements + * User permissions + * Need a github issue for discussion. + * Expand DCC plugins + * No progress. 
+ * High priority: Blender, Houdini + * Important for wider user adoption + * Worth writing to the user group to see what folks have already? + diff --git a/tsc/meetings/2022-01-19.md b/tsc/meetings/2022-01-19.md new file mode 100644 index 000000000..6d0cd0ab6 --- /dev/null +++ b/tsc/meetings/2022-01-19.md @@ -0,0 +1,94 @@ +# OpenCue TSC Meeting Notes 19 January 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2022 Goals + * Nothing yet. +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * SPI: still researching and testing their fix. + * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * PRs need review. + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still + useful, if anything. + * Schedule dispatch frame + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1012 + * Adds database fields to eliminate multiple cuebots handing out the same frames. + * Might not want to merge while larger scheduler redesign is going on. + * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs + * Entering testing soon. + * Replaced embedded Postgres server. + * log4j update + * Was blocked on embedded Postgres server, now good to resume. + * Should post announcement to opencue-users before merging. +* 2022 Goals + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done, tested on macOS and Windows, merged. + * pyoutline, cuesubmit config cleanup review done, ready to merge. 
+ * CueGUI/RQD still to do. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guides for Python/Cuebot/RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Let's write up a github issue to discuss requirements + * User permissions + * Need a github issue for discussion. 
+ * Added initial + thoughts: https://github.com/AcademySoftwareFoundation/OpenCue/issues/218#issuecomment-1016904933 + * Need more research into integration with Active Directory. + * Will try to find an example. + * Expand DCC plugins + * No progress. + * High priority: Blender, Houdini + * Important for wider user adoption + * Worth writing to the user group to see what folks have already? diff --git a/tsc/meetings/2022-02-16.md b/tsc/meetings/2022-02-16.md new file mode 100644 index 000000000..ebe212c29 --- /dev/null +++ b/tsc/meetings/2022-02-16.md @@ -0,0 +1,102 @@ +# OpenCue TSC Meeting Notes 16 February 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2022 Goals + * Nothing yet. +* ASWF open source forum +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * What we know: bug in job monitor tree creates unnecessary refreshing. Probably present in + all tree plugins, but most apparent when monitoring 100s/1000s of jobs. + * SPI: researching and testing their fix. + * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still + useful, if anything. + * Initial review of 1035 done. Pretty close, a few last things to wrap up. + * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs. + * Testing ongoing, feedback has been positive so far. + * Avoiding complex merges from upstream. + * Problem: users will maintain their own OpenCue forks with many preferred customizations. + When it comes time to merge in changes from upstream, a complex merge is needed. This is + time consuming and results in long gaps between upstream merges. + * How do others solve this? 
+ * Suggestion: let's identify the various areas needing customization, and discuss how we can + generalize a solution by creating options/plugins/hooks so customization can live outside + the main codebase. + * SPI to start compiling a list of these locations. +* 2022 Goals + * New user UX + * Publish PyPI packages + * Design + doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue done, tested on macOS and Windows, merged. + * pyoutline, cuesubmit config cleanup review done, ready to merge. + * CueGUI/RQD still to do. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to + master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch + version. + * Docs refresh + * Tech writing help + * Discussing with John Mertic at LF and OCIO reps who are also interested. + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to + build and publish this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guides for Python/Cuebot/RQD + * In progress, will be done as part of PyPI work. 
+ * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. + * High-level prototype details using + Redis: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Let's write up a github issue to discuss requirements + * https://github.com/Azure/Avere/tree/main/src/terraform/examples/vfxt/opencue + * User permissions + * Need a github issue for discussion. + * Added initial + thoughts: https://github.com/AcademySoftwareFoundation/OpenCue/issues/218#issuecomment-1016904933 + * Need more research into integration with Active Directory. + * Permissions model: proposal to be written up. + * Expand DCC plugins + * High priority: Blender, Houdini + * Important for wider user adoption. + * Worth writing to the user group to see what folks have already. + * No further progress. diff --git a/tsc/meetings/2022-03-02.md b/tsc/meetings/2022-03-02.md new file mode 100644 index 000000000..4f5e54d2e --- /dev/null +++ b/tsc/meetings/2022-03-02.md @@ -0,0 +1,114 @@ +# OpenCue TSC Meeting Notes 2 March 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2022 Goals + * ASWF Open Source Forum + * Skip boring stuff like CL/issue stats, highlight items that show why it's good to be an ASWF member. 
+ * Ideas: + * Extra resources provided through ASWF/LF: + * Tech writing resources + * CI system + * New features that benefit from TSC with varied perspectives/experience: + * Scheduler rewrite + * Cloud functionality + * Users/permissions + * Windows support, feedback + * Visibility as a member, for new projects +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * What we know: bug in job monitor tree creates unnecessary refreshing. Probably present in all tree plugins, + but most apparent when monitoring 100s/1000s of jobs. + * SPI: researching and testing their fix. + * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still useful, if anything. + * Initial review of 1035 done. Pretty close, a few last things to wrap up. + * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs. + * Testing ongoing, feedback has been positive so far. + * Linux only right now. + * Avoiding complex merges from upstream. + * Problem: users will maintain their own OpenCue forks with many preferred customizations. When it comes time to + merge in changes from upstream, a complex merge is needed. This is time consuming and results in long gaps + between upstream merges. + * How do others solve this? + * Suggestion: let's identify the various areas needing customization, and discuss how we can generalize a + solution by creating options/plugins/hooks so customization can live outside the main codebase. + * SPI to start compiling a list of these locations. + * List in progress, will report soon. 
+ * OS-dependent log root + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1096 +* 2022 Goals + * New user UX + * Publish PyPI packages + * Design doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue, pyoutline, cuesubmit done. + * CueGUI in progress. + * RQD still todo. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch version. + * Docs refresh + * Tech writing help + * Discussing with John Mertic at LF and OCIO reps who are also interested. + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to build and publish + this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guides for Python/Cuebot/RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. 
+ * High-level prototype details using Redis: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Existing Azure work on this: https://github.com/Azure/Avere/tree/main/src/terraform/examples/vfxt/opencue + * Issue for discussion: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1097 + * Use Kubernetes for rqd? Kubernetes adds some overhead. A very small amount but some people care. + * A Helm chart could be a good option. Easy to use and get started. + * Let's review the Github issue, leave notes. Short, rough draft of design doc for auto-scaling would be good + for further discussion. + * User permissions + * Need a github issue for discussion. + * Added initial + thoughts: https://github.com/AcademySoftwareFoundation/OpenCue/issues/218#issuecomment-1016904933 + * Need more research into integration with Active Directory. + * Permissions model: proposal to be written up. + * Expand DCC plugins + * High priority: Blender, Houdini + * Important for wider user adoption. + * Worth writing to the user group to see what folks have already. + * No further progress. 
diff --git a/tsc/meetings/2022-03-30.md b/tsc/meetings/2022-03-30.md new file mode 100644 index 000000000..798fbbf89 --- /dev/null +++ b/tsc/meetings/2022-03-30.md @@ -0,0 +1,121 @@ +# OpenCue TSC Meeting Notes 30 March 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2022 Goals + * +* New business + * Opencue Discord channel + * seems like a good alternative to slack/mailing lists + * let's look into free version limitations and consider it + * Nimby changes: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1106 + * supersedes older windows nimby PR, let's close out the old one with a note + * Stuck frame plugin: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1109 + * Dynamic facility / OS: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1108 + * some pipelines need to be able to have complex logic to determine these settings + * some concerns around the custom module loading + * let's post discussion to the github PR to settle on a solution + * Sandbox fix: need to have scheduled tests for some things, to account for upstream changes. + * New dispatcher query modes: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1103 +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * What we know: bug in job monitor tree creates unnecessary refreshing. Probably present in all tree plugins, + but most apparent when monitoring 100s/1000s of jobs. + * SPI: researching and testing their fix. + * some improvements were successful, still needs some more work. problems redrawing when more threads are + running + * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still useful, if anything. + * 1035 merged. Ready to proceed to 1008. 
+ * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs. + * Linux-only. + * Testing ongoing, feedback has been positive so far. + * feedback still good, making some minor improvements in response to user feedback + * Avoiding complex merges from upstream. + * Problem: users will maintain their own OpenCue forks with many preferred customizations. When it comes time to + merge in changes from upstream, a complex merge is needed. This is time consuming and results in long gaps + between upstream merges. + * How do others solve this? + * Suggestion: let's identify the various areas needing customization, and discuss how we can generalize a + solution by creating options/plugins/hooks so customization can live outside the main codebase. + * SPI to start compiling a list of these locations. + * List in progress, will report soon. + * OS-dependent log root + * Issue: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1096 + * PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1102 + * Almost ready to merge. +* 2022 Goals + * New user UX + * Publish PyPI packages + * Design doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue, pyoutline, cuesubmit done. + * CueGUI in progress. + * RQD still todo. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch version. + * Docs refresh + * Tech writing help + * Discussing with John Mertic at LF and OCIO reps who are also interested. 
+ * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to build and publish + this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guides for Python/Cuebot/RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. + * High-level prototype details using Redis: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Existing Azure work on this: https://github.com/Azure/Avere/tree/main/src/terraform/examples/vfxt/opencue + * Issue for discussion: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1097 + * Use Kubernetes for rqd? Kubernetes adds some overhead. A very small amount but some people care. + * A Helm chart could be a good option. Easy to use and get started. + * Let's review the Github issue, leave notes. 
Short, rough draft of design doc for auto-scaling would be good + for further discussion. + * User permissions + * Need a github issue for discussion. + * Added initial + thoughts: https://github.com/AcademySoftwareFoundation/OpenCue/issues/218#issuecomment-1016904933 + * Need more research into integration with Active Directory. + * Permissions model: proposal to be written up. + * Expand DCC plugins + * High priority: Blender, Houdini + * Important for wider user adoption. + * Worth writing to the user group to see what folks have already. + * No further progress. diff --git a/tsc/meetings/2022-05-11.md b/tsc/meetings/2022-05-11.md new file mode 100644 index 000000000..4d633af9a --- /dev/null +++ b/tsc/meetings/2022-05-11.md @@ -0,0 +1,123 @@ +# OpenCue TSC Meeting Notes 11 May 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Completed 2022 Goals +* New business + * Kill dependent https://github.com/AcademySoftwareFoundation/OpenCue/pull/1115 + * AllowDeeding: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1114 + * Job monitor filter behavior: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1139 + * open comment, need to resolve before merging? + * Nimby in new thread: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1063 + * not reproducible? needs input + * SQL query adjustments: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1078 + * ready to merge? + * Sentry logging in CueGUI: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1127 + * looks ok, looking for more detail +* Ongoing work + * Footage for ASWF reel + * https://www.youtube.com/user/ImageworksVFX/videos + * Brian to follow up with ASWF + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * What we know: bug in job monitor tree creates unnecessary refreshing. Probably present in all tree plugins, + but most apparent when monitoring 100s/1000s of jobs. + * SPI: researching and testing their fix. 
+ * Scheduling fixes + * Thread pool properties + * Two versions: + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1008 + * SPI version https://github.com/AcademySoftwareFoundation/OpenCue/pull/1035 + * Let's start with 1035, then merge 1008 to fix conflicts to see what in 1008 is still useful, if anything. + * 1035 merged. Ready to proceed to 1008. + * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs. + * Linux-only. + * Testing ongoing, feedback has been positive so far. + * Avoiding complex merges from upstream. + * Problem: users will maintain their own OpenCue forks with many preferred customizations. When it comes time to + merge in changes from upstream, a complex merge is needed. This is time consuming and results in long gaps + between upstream merges. + * How do others solve this? + * Suggestion: let's identify the various areas needing customization, and discuss how we can generalize a + solution by creating options/plugins/hooks so customization can live outside the main codebase. + * SPI to start compiling a list of these locations. + * List in progress, will report soon. + * Some PRs to resolve this, will get update from Diego next time + * OS-dependent log root + * Issue: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1096 + * PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1133 + * PR merged. + * New PR to fix log paths on the CueGUI side: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1138 +* 2022 Goals + * New user UX + * Publish PyPI packages + * Design doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue, pyoutline, cuesubmit done. + * CueGUI in progress. 
+ * constants file in particular needs attention + * RQD still todo. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch version. + * Docs refresh + * Tech writing help + * Discussing with John Mertic at LF and OCIO reps who are also interested. + * Need to check in with ASWF + * Let's ping Sharif to see what his plan was for publishing API reference docs + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to build and publish + this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. + * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guides for Python/Cuebot/RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. + * High-level prototype details using Redis: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. 
+ * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Existing Azure work on this: https://github.com/Azure/Avere/tree/main/src/terraform/examples/vfxt/opencue + * Issue for discussion: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1097 + * Use Kubernetes for rqd? Kubernetes adds some overhead. A very small amount but some people care. + * A Helm chart could be a good option. Easy to use and get started. + * Let's review the Github issue, leave notes. Short, rough draft of design doc for auto-scaling would be good + for further discussion. + * User permissions + * Need a github issue for discussion. + * Added initial + thoughts: https://github.com/AcademySoftwareFoundation/OpenCue/issues/218#issuecomment-1016904933 + * Need more research into integration with Active Directory. + * Permissions model: proposal to be written up. + * Expand DCC plugins + * High priority: Blender, Houdini + * Important for wider user adoption. + * Worth writing to the user group to see what folks have already. + * No further progress. diff --git a/tsc/meetings/2022-07-06.md b/tsc/meetings/2022-07-06.md new file mode 100644 index 000000000..9b52bfb9b --- /dev/null +++ b/tsc/meetings/2022-07-06.md @@ -0,0 +1,125 @@ +# OpenCue TSC Meeting Notes 6 July 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* New business + * API changes to fail on incorrect parameters + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1165/files + * no issues, good to merge when code/tests are ready + * Make frames and layers readonly after job finishes + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1164 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1163 + * solution seems ok, but should be a cuebot config flag for now + * Siggraph/Open Source Days + * Few folks attending Siggraph likely. Brian will be OOO and unable to attend. 
+ * Will hybrid in-person/virtual sessions be supported? + * How to design the session? + * Brian to do some research and start email thread. + * Integration tests + * Integration test script being developed at SPI. + * We will want to look at turning it into a github action. + * This will help speed up the release process a lot. Most of the time spent making a release is QAing it. + * SPI to update soon. +* Ongoing work + * CueGUI memory leak + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1025 + * What we know: bug in job monitor tree creates unnecessary refreshing. Probably present in all tree plugins, + but most apparent when monitoring 100s/1000s of jobs. + * SPI: researching and testing their fix. + * RQD change, adding child proc info into log, storing in database + * For understanding memory usage of complex jobs. + * Linux-only. + * Testing ongoing, feedback has been positive so far. + * Change was deployed to production, found some issues, fixed. + * Work is done. Has it been merged to github yet? + * Avoiding complex merges from upstream. + * Problem: users will maintain their own OpenCue forks with many preferred customizations. When it comes time to + merge in changes from upstream, a complex merge is needed. This is time consuming and results in long gaps + between upstream merges. + * How do others solve this? + * Suggestion: let's identify the various areas needing customization, and discuss how we can generalize a + solution by creating options/plugins/hooks so customization can live outside the main codebase. + * Most spots identified, PRs have been sent + * Upstream merge almost complete, will resume that soon. + * OS-dependent log root + * Issue: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1096 + * PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1102 + * PR merged. + * New PR to fix log paths on the CueGUI side: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1138 + * All PRs merged. 
Done now. + * Nimby in new thread: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1063 + * Not reproducible? We need input from SPI. + * Reproduced now. PR coming with several rqd/nimby fixes. + * pynput will become the default for detecting system activity. + * Sentry logging in CueGUI: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1127 + * Change looks ok, looking for more detail. + * Let's follow up on that PR again, review, figure out if we need to clone to new PR. +* 2022 Goals + * New user UX + * Publish PyPI packages + * Design doc: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/ + * Proposal for config file + standardization: https://docs.google.com/document/d/1pWjMNGzGmBvuHoBzXewQEzdwTKtrY6VOtaPi7ALwWg0/edit#heading=h.v85uo9klwqft + * Config cleanup for pycue, pyoutline, cuesubmit done. + * CueGUI in progress. + * RQD still todo. + * Fix cuebot:latest tag for sandbox setup + * Proposal: publish Docker images for every new patch version (i.e. every new commit to master) + * Proposal accepted. + * No progress implementing this yet. + * Related: for PyPI design we may want to also publish packages for every new patch version. + * Docs refresh + * Tech writing help + * Discussing with John Mertic at LF and OCIO reps who are also interested. + * User guide refresh + * Split into "install from latest release" and "install from master" + * Needed to assist transition to publish Docker images on all commits + * No progress. + * API reference + * We have the ability to generate HTML from any commit, but we need a new CI pipeline to build and publish + this on opencue.io for new releases. + * Proposal: for now, highlight existing docs for building reference locally + * Proposal accepted. 
+ * Brian: cleaned up local build process, sent email with doc + link: https://www.opencue.io/contributing/opencue/build-docs/ + * pyoutline examples + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/177 + * No progress yet. + * User guide for CueGUI + * No progress. + * Configuration guides for Python/Cuebot/RQD + * In progress, will be done as part of PyPI work. + * Improve scheduler logic + * Diego has volunteered to own this. + * Email reply from Matt, looped in others. + * Other PRs with improvements — see ongoing work above. + * Prototype in progress. + * High-level prototype details using Redis: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1001 + * Expanded Cloud functionality + * Finalize GSoC plugin + * Test Azure support + * Add AWS support + * Design expanded functionality e.g. autoscaling for work in the queue. + * Last update from Greg: started to test / clean up azure support. Looking good so far. + * Terraform scripts? + * Let's look at the Azure version of this + * Generic k8s setup and cuebot pool size suggestions + * Existing Azure work on this: https://github.com/Azure/Avere/tree/main/src/terraform/examples/vfxt/opencue + * Issue for discussion: https://github.com/AcademySoftwareFoundation/OpenCue/issues/1097 + * Use Kubernetes for rqd? Kubernetes adds some overhead. A very small amount but some people care. + * A Helm chart could be a good option. Easy to use and get started. + * Let's review the Github issue, leave notes. Short, rough draft of design doc for auto-scaling would be good + for further discussion. + * User permissions + * Need a github issue for discussion. + * Added initial + thoughts: https://github.com/AcademySoftwareFoundation/OpenCue/issues/218#issuecomment-1016904933 + * Need more research into integration with Active Directory. + * Permissions model: proposal to be written up. + * Expand DCC plugins + * High priority: Blender, Houdini + * Important for wider user adoption. 
+ * Worth writing to the user group to see what folks have already. + * No further progress. diff --git a/tsc/meetings/2022-07-20.md b/tsc/meetings/2022-07-20.md new file mode 100644 index 000000000..99037e579 --- /dev/null +++ b/tsc/meetings/2022-07-20.md @@ -0,0 +1,45 @@ +# OpenCue TSC Meeting Notes 20 July 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Open Source Days + * Sections + * New stuff + * Improved Windows support, including hybrid farms with multiple OSes (multiple log roots, path conversion + in CueGUI) + * Improved GPU support + * New scheduler modes + * Scitech investigations + * Github stats + * Last year 34 -> 41 unique contributors from 10+ organizations + * Grown committer pool to 8 committers + * 102 PRs merged = 2/week + * Issues + * Over past year, 27 unique people have submitted issues. + * Would be nice to count people who have contributed to issues as well, many folks will chime in but not + necessarily open their own issues. + * Any way to do this? + * Contributor pool significantly more diverse + * In the past year 50% of commits came from "founding" organizations. + * In past years / all time this was ~70%. + * Good to highlight project is not dependent on the original creators. + * SPI update + * Fixing things from migration + * Working on upstream merge issues + * When was oracle removed? Apr 2021 + * Potential good story: migration cue3 -> opencue -> opencue master branch + * Stats on shows that have used opencue + * Anything usable from scitech presentation? + * Q&A + * Let's collect potential warmup questions + * We could also maybe use the LF poll tool. For example to vote on feature requests. + * Brian to create new doc to polish this outline. + * Presenters + * Diego: in-person, run through the project update then open up Q&A + * Ben: virtual + * Greg: in-person + * Brian to email LF organizers with contact info. +* Record video on using opencue from wrangler perspective? From Noel? Let's check in after OSD event. 
+* Yaash: introductions. Waiting on a code review. diff --git a/tsc/meetings/2022-08-17.md b/tsc/meetings/2022-08-17.md new file mode 100644 index 000000000..af88fcbe5 --- /dev/null +++ b/tsc/meetings/2022-08-17.md @@ -0,0 +1,39 @@ +# OpenCue TSC Meeting Notes 17 Aug 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Siggraph/OSD debrief + * Event attendance ~20 in room, 50-60 total + * Q: who is using opencue? + * Should we change the TSC meeting time? To account for EU participants. + * OpenCue on GKE + * Some peculiarities there as both OpenCue and GKE act as schedulers, in different ways but with some + overlapping features. + * How can we help decide which pods can be killed? + * Could maybe use metrics/prometheus data as a source. + * We should add a helm chart for the repo. + * Both kubernetes and OpenCue want to do bin packing, maybe we could find a way to help them play better + together. + * PyPI packages still a need + * Look at "poetry"? + * Protobuf compiling is an obstacle as it breaks modularity. + * Can we make compiled proto its own package that other components depend on? + * Not getting consistent responses to issues / threads. + * Currently this is best effort. + * TSC should do an issue/PR cleanout. + * Let's try to make a standard process to keep up with it. + * Finish setting up stale issue closer to help with cleanout. + * Bring up with TAC to see if other projects have similar issues or thoughts. +* M1 issues + * Having some difficulties running OpenCue on a local M1 macbook, which is primary development machine for an + increasing number of people. + * Need to upgrade some dependencies like protoc. + * Docker build issues due to architecture mismatch. + * PySide2 might not be supported anymore. + * Has VFX reference platform dealt with this? +* Unit test failures + * Coming from code that hasn't changed + * Possibly due to changes in ref platform docker images? + * More investigation needed. 
diff --git a/tsc/meetings/2022-09-14.md b/tsc/meetings/2022-09-14.md new file mode 100644 index 000000000..5b4df0279 --- /dev/null +++ b/tsc/meetings/2022-09-14.md @@ -0,0 +1,54 @@ +# OpenCue TSC Meeting Notes 14 Sep 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Upgrade Cuebot dependencies + * M1 has issues with grpc and protoc. + * PR to upgrade, needs review: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1185 + * Will review soon. +* PySide qApp problem + * This is causing unit test / CI failures. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1192 + * Fix in progress https://github.com/AcademySoftwareFoundation/OpenCue/pull/1193 +* PySide6 + * PySide2 is deprecated, wheels are no longer published for newer architectures like M1. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1194 + * Fix in progress https://github.com/AcademySoftwareFoundation/OpenCue/pull/1195 + * Will need some help testing. + * VFX reference platform still uses Qt 5 and will until at least 2024. This may create an issue if we drop support + for PySide2 which depends on Qt 5. PySide6 uses Qt 6. + * Can the pyside2/6 versions coexist? + * We could also create a wrapper / compatibility layer. This will likely be useful in the future as PySide will + change again. + * OpenTimelineIO has dealt with the same issue. + * See Github issues: + * https://github.com/AcademySoftwareFoundation/OpenTimelineIO/issues/1215 + * https://github.com/AcademySoftwareFoundation/OpenTimelineIO/issues/1220 + * https://github.com/AcademySoftwareFoundation/OpenTimelineIO/issues/1261 + * Appears they've fixed it, fix looks very similar to our in-progress fix. + * Has the VFX reference platform dealt with architectures at all? arm is getting more popular, and the switch to arm + created/exposed these issues. +* Redis + * Considering adding Redis into the mix to help with Cuebot/database load and Cuebot/RQD communication issues. 
+ * Draft PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1187 + * Brian has left some initial comments on that PR. + * This will break the connection between RQD and a single Cuebot. Currently RQD must communicate with a single + Cuebot via gRPC and must be restarted to switch to a different one. + * Using Redis is highly experimental, not production tested. + * The PR must be updated to be completely optional, coexist with the existing code. We need to collect performance + data and decide whether we want to proceed with this project. + * Redis should probably be optional in perpetuity. Default install should not require it, but it can be used to + increase performance for larger deployments. +* Docker image issues + * Can't build on M1 mac. + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1196 + * Also slowing down development. +* Need to do a release + * Blocked on qApp issue as this causes CI failures, we can't use our release pipeline. + * Any update on integration test script? + * This would help speed up the release by automating the QA procedure. + * There was a basic script that had some issues, not much progress. + * Work may resume soon. + * If the in-progress script is sharable, that would save some time. diff --git a/tsc/meetings/2022-09-28.md b/tsc/meetings/2022-09-28.md new file mode 100644 index 000000000..f22833839 --- /dev/null +++ b/tsc/meetings/2022-09-28.md @@ -0,0 +1,31 @@ +# OpenCue TSC Meeting Notes 28 Sep 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Update on M1 issues / CI failures + * All issues now have proposed fixes. Working our way through review. +* Preparing for next release + * qApp PR needs review/merge. This is the highest priority to unbreak our CI pipelines. + * Logging flags. Dropping --log.frame-log-root is a breaking change. Continue supporting it for + now? Add a deprecation warning? 
+ * Possible solution: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1203 + * Could also just rename the new flags. + * Will follow up on the PR. + * Integration test script + * Existing script is very basic and SPI specific. Won't really save us time vs writing from + scratch. + * CueGUI is the challenge here. Automating API calls is trivial, automating GUI tests much + more involved. + * Script to test our sandbox setup is a good place to start. This is critical for ensuring + new users have a good experience. + * "testing" show deactivated? + * Reported in https://github.com/AcademySoftwareFoundation/OpenCue/issues/1205 + * Culprit PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1151 + * Does automated show cleanup, testing show is not in the allowlist. + * Quick fix: allowlist testing show in properties file. + * Permanent fix: change 30 day hardcoded limit to come from properties files. -1 means + feature is disabled. Disable this by default. +* Redis + * No update yet, work to resume later. diff --git a/tsc/meetings/2022-10-26.md b/tsc/meetings/2022-10-26.md new file mode 100644 index 000000000..d4c21d39f --- /dev/null +++ b/tsc/meetings/2022-10-26.md @@ -0,0 +1,24 @@ +# OpenCue TSC Meeting Notes 26 Oct 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Intros + * Holovisn.com + * Intro from team, discussion of OpenCue use case, demo. +* RQD completely ignores env on Windows + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1211 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1215 + * LGTM +* RQD doesn't Nimby with Remote users on machine + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/564 + * Diego: this is fixed by using pynput. PR for this has been merged, release is needed. +* Release blockers + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1193 + * Testing/review will commence shortly.
+ * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1203 + * Let's merge the PR. Not ideal but lesson is learned for the future. + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1199 + * Brian to try again to see if problem persists. + * Diego to ask DBA about the issue and potential performance issues created by the fix. diff --git a/tsc/meetings/2022-12-07.md b/tsc/meetings/2022-12-07.md new file mode 100644 index 000000000..6fc9286cf --- /dev/null +++ b/tsc/meetings/2022-12-07.md @@ -0,0 +1,51 @@ +# OpenCue TSC Meeting Notes 7 Dec 2022 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Postgres upgrade/fixes + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1199 + * PR updated with some new research. + * DBA contact has reproduced problem but doesn't support Docker deployments. Confirmed this is + embedded-postgres issue only. + * Filed a ticket with the upstream project. Got a few suggestions. + * Will be hard to verify performance issues until it's in production and hard to roll back. + * Conclusion: Mac+Docker for production deployments is uncommon, this is mostly for developers. + Let's work around this for now, and look into the suggestions from the upstream ticket. + Hopefully we can track down the problem and avoid having to merge this PR. +* Log4j update + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1080 + * Older PR, requested by user who tested and verified. + * Confirmed, we are good to merge this now. +* PySide6 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1195 + * SPI: difficult to production test, PySide6 does not fit nicely into packaging system. + * Conclusion: we'll add the compatibility layer now, test as best we can, and merge as an + experimental feature. Issues may still be present but we can fix these as we go, and it will + be a better situation than we currently have, where CueGUI is not available at all for users + who can't access PySide2. 
+* Postgres query issues + * New issue with very slow queries on the database side. + * Upgrade happened 3-4 months ago but symptoms didn't present until heavy production load. + * Debugged issue, found culprit is the `show` table, particularly a few stats columns. These + columns are updated multiple times per second under heavy load, and many other critical + queries join to the show table. This slows down the whole system. + * PR coming soon to separate these columns out into their own table/view. +* Removing dead code + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1178/files + * Can we ignore the version bump here? + * Diego will look into it offline. +* akim-ruslanov PRs need update + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1168 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1167 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1165 + * Diego will look into these offline. +* Blender update + * Blender has made progress on improved Python bindings. Should make it easier to avoid + compatibility issues in the future. + * They will support a build using VFX reference platform versions. + * March release for the initial implementation, this will be improved over the next few cycles. + * JT looking to hand this off soon. + * Nuwan is interested in working on the plugin as well. He should proceed, and we'll sync his + and JT's work if needed, or maybe JT will just use it as the base for his own work. diff --git a/tsc/meetings/2023-01-18.md b/tsc/meetings/2023-01-18.md new file mode 100644 index 000000000..1f34a4726 --- /dev/null +++ b/tsc/meetings/2023-01-18.md @@ -0,0 +1,67 @@ +# OpenCue TSC Meeting Notes 18 Jan 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* New release! + * CI / dev environment issues mostly resolved now, release unblocked. + * Getting back to our monthly release cycle. 
+* Postgres upgrade + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1199 + * Upstream: https://github.com/zonkyio/embedded-postgres/issues/99 + * Cuebot Docker image currently does not build on M1 Mac due to the old embedded-postgres + library. + * Newer embedded-postgres binaries produce weird results that fail tests. M1+Docker only. + * [Breakthrough!](https://github.com/zonkyio/embedded-postgres/issues/99#issuecomment-1378159242) + The issue was coming from an old Ubuntu version used for the embedded-postgres build. + * Brian sent https://github.com/zonkyio/embedded-postgres-binaries/pull/64 upstream with a + proposed fix, which upgrades their build process to use a newer Ubuntu version. + * Waiting for review, then waiting for new binaries to be published. But we are able to build + embedded-postgres locally now, and modify Cuebot to use those binaries rather than pull from + Maven. +* PySide6 + * New proposed PR: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1238 + * Use QtPy library as the compatibility layer, works in PySide2 and 6. + * Would like to get more testing in a PySide2 environment. + * This breaks the dependency chain slightly, as the code would now depend on QtPy but not + PySide. However we can specify whatever other dependencies we want in + setup.py/requirements.txt. + * Proposal: master branch will continue to specify PySide 2 as a dependency. Packaging/release + pipelines will also create a PySide 6 version. + * Idea: setup.py is a Python script, so it could use custom logic to specify the PySide6 + dependency if on an M1 mac. setup.py is executed not just at build/packaging time but at + install time as well. + * Will test more using Pyside2 before merging. +* Migrate stats columns + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1228 + * Test failures to be resolved. + * Diego will take a look. 
+ * Maybe related to some intermittent CI failures we've seen recently, those should be mostly + resolved now. +* CueGUI new config file, cuegui.yaml + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1242 + * Moves most constants to be editable via an external YAML file. No longer any need to edit + code, and exposes many useful settings to users. + * YAML file can be specified via env var or standard config directories. Sysadmins can + distribute their own cuegui.yaml to all local users. + * Need docs update that includes config guides for all components, including this update. + * This should be the last client-side piece needing a config update. We can now move on to the + main PyPI work. +* Integration tests + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1245 + * Initial version out for review. Stands up Docker compose environment, tests database, RQD + registration, API, cueadmin. + * Next will need to add launching a job and verifying. +* Batch of GUI bug fixes coming soon. +* RQD systemd changes + * Previously using init.d, now migrating to systemd. + * OOM manager was sometimes killing the parent RQD process rather than the job itself. This + would take the RQD host offline and it would not report on failure cause. Cuebot would then + distribute the culprit job to other hosts, and the problem could proliferate. + * systemd has a feature to help reduce likelihood of this happening. + * Once that's done, it would be good to publish rpm packages as part of packaging/release. + Cuebot does this already, this would standardize among the server-side components. + * RQD pip package may need to include initd/systemd scripts, or docs to help register RQD with + the system, i.e. start on host boot. + * Sysadmins also seem to prefer rpms to pip install. 
diff --git a/tsc/meetings/2023-02-01.md b/tsc/meetings/2023-02-01.md new file mode 100644 index 000000000..88ef82f5b --- /dev/null +++ b/tsc/meetings/2023-02-01.md @@ -0,0 +1,42 @@ +# OpenCue TSC Meeting Notes 1 Feb 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Move outline.cfg into outline module + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1252 + * What to do with bin/ and wrappers/ code? E.g. pycuerun. Packaged with rqd? + * What do these tools do? + * pycuerun is a tool for sending jobs to Cuebot. This should be packaged with pyoutline, + ideally as a console script / entrypoint. + * wrappers are used on the RQD side. These should probably be packaged with RQD somehow. + * Conclusion: we'll need different approaches for packaging these, needs some more research. +* Integration tests + * New PR to run a job and verify it + finishes: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1253 + * Uses a pyoutline script to launch a job and a pycue script to wait for it to finish. + * Discovered outline.cfg issue above! + * Almost ready for review, waiting on pyoutline fix to be merged. +* Config guide + * CueGUI YAML config merged. + * PR with new config guide: https://github.com/AcademySoftwareFoundation/opencue.io/pull/274 + * Preview: + https://deploy-preview-274--elated-haibt-1b47ff.netlify.app/docs/other-guides/configuring-opencue/ +* PyPI + * Brian now doing some tests, cleaning up the pycue setup.py. + * Need to clean up dependency list in setup.py. This duplicates some work from requirements.txt, + but serves a different purpose, and the version restrictions should be a little looser. Have + to go one at a time to make a decision. +* RQD systemd changes + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1251 + * To be reviewed soon. + * Needs to be built into Docker image, CI pipelines. +* PySide6 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1238 + * Review complete, merging soon. 
+* Migrate stats columns + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1228 + * Good way to roll out destructive changes? + * Any input from PG admins? + * Let's check with Diego next time, but current change looks fine. diff --git a/tsc/meetings/2023-03-01.md b/tsc/meetings/2023-03-01.md new file mode 100644 index 000000000..9c065a76c --- /dev/null +++ b/tsc/meetings/2023-03-01.md @@ -0,0 +1,47 @@ +# OpenCue TSC Meeting Notes 1 Mar 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Managing database migrations + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1228 + * Sequential numbering in the repo creates problems for users with forks. Migrations in forks + need to be renumbered after the fact to not conflict, and reapplying these changes to a + production database is tricky. + * Suggestion: leave gaps in between main repo migrations, e.g. v30, v40, etc. Forks can fill + this in. + * This could also create problems if v40 conflicts with v32 for example, and cause a need to + renumber fork migrations still. + * Diego and Brian to do some further research on this. + * Another suggestion: fork migrations could use version numbers starting at a very high number + e.g. 1000. Fork migrations would always be applied on top of the full main repo schema. + * Any conflicts would need to be resolved by the user. + * Any new main repo migrations would need to be applied manually. Flyway won't apply v40 if + it thinks the database is at v1000. + * This might be the least painful option. +* Customizing frame display colors + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1246 + * Author has given more context in the PR. + * It feels wrong to store color information in the database, it's purely a visual change. + * Brian to take another look. + * Maybe we can change PR to use something like "reason code" in the database rather than color + information directly, and update cuegui.yaml to convert reason code -> color. 
+* Preparing for next release + * PySide6 cuegui changes + * Merged, done. + * CueSubmit PySide6 + * Not started yet. Need to include this in the same release. + * Update test script to run example job + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1253 + * Ready for review. + * Includes pycue and pyoutline example scripts which should be generally useful to users + beyond tests. + * Config guide doc + * https://github.com/AcademySoftwareFoundation/opencue.io/pull/274 + * Ready for review, to be merged/published once release is done. + * show_stats table PR + * Is there a better way to test potentially destructive changes? + * There's no easy way in pure SQL to verify changes before dropping cols/tables. + * We should expand our doc on applying database migrations to cover a db backup/restore. + * The current change seems fine, good to merge. diff --git a/tsc/meetings/2023-04-12.md b/tsc/meetings/2023-04-12.md new file mode 100644 index 000000000..43d001d65 --- /dev/null +++ b/tsc/meetings/2023-04-12.md @@ -0,0 +1,38 @@ +# OpenCue TSC Meeting Notes 12 Apr 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Host ping time until marked as DOWN + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1265 + * This does seem long, any reason why? + * Diego: seems unusual. Should be less than a minute for host to be marked DOWN. CueGUI should + update 15-20s + * Should we make this a config setting? + * SPI to check on their code for differences + * Might need to lower default value, this is a good candidate for config flag. +* RQD config file overhaul + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1270 + * Would be good to get SPI input on this. + * Let's review in more detail. No immediate concerns. SPI has some similar configuration already +* Rez setup email thread + * https://lists.aswf.io/g/opencue-dev/topic/97805737#571 + * Diego: might make a better tutorial doc than merging into master branch. 
We don't want to + confuse new users with multiple packaging options. + * Look into spk, an OSS project. + * pip packages will make this setup much simpler. +* Prepending timestamps to RQD child process output + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1286 + * Doesn't modify any existing output other than prepending timestamp to each line. + * Linux-specific, configurable. +* Python 2 support + * Not ready to drop support for 2 entirely, especially python libraries. + * GUI should be fine to go 3-only. + * If we're going to do it, document which tag contains the last 2 support + * A py2 branch might be helpful if anyone wants to backport, but might have issues with our + versioning tooling. +* Blender plugin update + * Basic plugin is loading, currently navigating issues with installing pyoutline into Blender + environment. Will start to send test jobs soon + * Will continue to update email thread. diff --git a/tsc/meetings/2023-06-07.md b/tsc/meetings/2023-06-07.md new file mode 100644 index 000000000..98df98a61 --- /dev/null +++ b/tsc/meetings/2023-06-07.md @@ -0,0 +1,44 @@ +# OpenCue TSC Meeting Notes 7 Jun 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Host ping time until marked as DOWN + * https://github.com/AcademySoftwareFoundation/OpenCue/issues/1265 + * Any update here? + * Needs some further verification and response. +* Appending timestamps to logs + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1286 + * LGTM, needs merge from master, looking into test failures. 
+* CueSubmit batch of PRs + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1278 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1280 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1281 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1282 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1283 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1284 + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1285 + * Reviews needed. + * Be careful we're not making CueSubmit too specialized, keep it generally useful for new + users. + * Let's invite the author to a TSC meeting soon. + * Improvements are good, is there something else we can offer? What would be helpful for + larger-studio users? Or is the Python library good enough? + * Best to expand pyoutline examples / docs to help developers who have already tried + CueSubmit. + * Build on basic example used in integration test script. +* Blender plugin update + * Currently testing job submission, blocked on some submission code. + * Loading python deps (opencue, filesequence) + * Can manually copy into blender plugin directory, but how to automate this? + * Does Blender offer alternatives e.g. configuring plugin path via env var? + * Look into creating additional packages, maybe as empty packages. +* Openshift Cuebot version + * Putting multiple Cuebots behind gRPC load balancer, and pointing RQD at the LB. Currently to + take a Cuebot offline all RQDs need to be restarted to move to a new Cuebot host, this solves + that problem. + * Would make a good tutorial or sample to include in the main repo. + * Prometheus export needs to be reworked. Currently using a separate client to query metrics, + which doesn't work with the LB setup as it will not redirect requests to a consistent Cuebot. + Working on a change to send metrics directly from Cuebot. 
diff --git a/tsc/meetings/2023-07-19.md b/tsc/meetings/2023-07-19.md new file mode 100644 index 000000000..61051f9c9 --- /dev/null +++ b/tsc/meetings/2023-07-19.md @@ -0,0 +1,45 @@ +# OpenCue TSC Meeting Notes 19 July 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* CI pipeline updates + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1305 + * Added new VFX ref platform years, retired old ones. + * Added CY2023. + * Kept CY2022. + * Drop CY2021, CY2020. + * Kept CY2019, but repurposed it as an explicitly-named Python 2 test. + * Disabled GUI tests on older platforms due to flakes, we'll keep running them on CY2023. + * A few other minor dependency upgrades and fixes. +* New release v0.22.14. + * We needed to get the latest database query fixes into an official release, newer versions of + Postgres that trigger those issues are more common now. + * Release includes: + * PySide6 in CueGUI, still needed for CueSubmit. + * Config / env var cleanup. Published a new doc page covering + this: https://www.opencue.io/docs/other-guides/configuring-opencue/ +* Enable GPU booking + * https://lists.aswf.io/g/opencue-user/topic/local_deployment_issues/100008713 + * Any ideas for this user? + * You need to have the nvidia-smi tool on RQD to detect GPU hardware. + * Once we figure this out, we should write up a doc page on how to enable GPU. + * If the user is on Docker, they may need to use the nvidia base image. +* Minimum bookable free mcp + * https://github.com/AcademySoftwareFoundation/OpenCue/pull/1306 + * Enforce minimum mcp (scratch space) for all hosts, take host offline if low on space. + * Brian to review. + * Ideally we should avoid spreading the "mcp" terminology, but this is a much larger project, + let's just avoid it where we can. +* Siggraph + * Nothing official planned, some folks attending virtually. +* SPI updates + * Finally up-to-date with “current” version of github. + * Performance issues on DispatchQuery. 
+ * Using database migrations starting at v1000, this works because migrations are all applied + manually anyway, not via e.g. Flyway. + * When we create migrations, if you rename a field, you need to copy the value as well. +* Blender plugin update + * Added the ability to refresh/update opencue code from the addon. + * Brian to follow up on email thread. diff --git a/tsc/meetings/2023-09-27.md b/tsc/meetings/2023-09-27.md new file mode 100644 index 000000000..562f31f28 --- /dev/null +++ b/tsc/meetings/2023-09-27.md @@ -0,0 +1,24 @@ +# OpenCue TSC Meeting Notes 27 Sep 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* OOM protection logic: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1317 + * Reworks OOM protection using percentages instead of hardcoded values, helps for larger hosts. + * Draft status for now, working on some more testing. + * Cuebot only for now. Solves 99% of cases but 1% still have a race condition because RQD does + not send an exit code indicating OOM. RQD fixes coming next. +* Reserve all cores / negative cores: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1313 + * Diego / SPI to discuss. + * Definitely needs to be wrapped in a flag. +* Nimby override: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1311 + * Let's double check the fallback behavior is working and there's not some other error. + * PR could be a good idea anyway, we know pynput isn't installed in certain environments and the + warning in the logs can be confusing. +* SPI update + * Merging CueGUI updates. Config file change and qtpy change. +* Blender plugin + * Running in a container working now. + * Docker run flag to mount the host network worked. Let's update the linux contributing page. + * Job submitted but didn't show up in the job list. RQD can't find blender command, debugging. 
diff --git a/tsc/meetings/2023-11-08.md b/tsc/meetings/2023-11-08.md new file mode 100644 index 000000000..07f18796c --- /dev/null +++ b/tsc/meetings/2023-11-08.md @@ -0,0 +1,37 @@ +# OpenCue TSC Meeting Notes 8 Nov 2023 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* OOM protection logic: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1317 + * PR is reviewed and merged. + * Has been running in production for a while now, working well in 90% of cases. + * Other 10% are hitting a race condition with RQD. RQD sends fresh host report after OOM + decision has been made but not actuated yet, clearing the OOM state in the database. Frame is + rescheduled with same memory requirements instead of increased. + * Followup change to RQD coming soon, kill frame with correct OOM code so database state isn't + required. +* RQD env var expansion: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1323 + * Expanding any env vars in the command itself in RQD, before command is executed on host. + * This works already on Linux, Windows doesn't expand vars in command as command written to a + temp file batch script. + * Env vars should be expanded as late as possible, host/RQD env might differ slightly from frame + env. + * Let's move the change into the Windows section. +* RQD copy env vars from host: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1324 + * Needed to preserve the host PYTHONPATH in the frame env. + * Reviewed, change is too broad, left comments on the PR. +* DB indexes: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1304 + * Adding new database indexes to resolve some performance issues. + * Has some commented out pieces, why? + * Diego to check and update PR. +* CUDA RQD image: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1327 + * Let's try to copy RQD package from base image to reduce duplication. + * Should revisit the RQD base image at some point to see if we can upgrade past Python 3.6. 
+* Blender plugin: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1309 + * Plugin working e2e on Nuwan's machine, getting renders back. + * Starting review of draft PR. + * Would like to see a doc on how to install/use the plugin, will help to understand the code. +* CueGUI PySide compatibility change is rolling out to production soon, working in initial tests but + will get more feedback as usage expands. diff --git a/tsc/meetings/2024-01-17.md b/tsc/meetings/2024-01-17.md new file mode 100644 index 000000000..55e355faf --- /dev/null +++ b/tsc/meetings/2024-01-17.md @@ -0,0 +1,39 @@ +# OpenCue TSC Meeting Notes 17 Jan 2024 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Upgrade Cuebot to Java 21 + * Code changes in progress, PR coming. Let's discuss this next meeting once Diego is back, + interested to get more detail on any non-backwards-compatible changes. +* PR to fix crash in RQD with non-ascii chars + * Short term, fixing a crash is a priority. + * Longer term, there's a potential future project to improve non-ascii compatibility through the + system. +* Refactoring Cuebot host report handler + * PR coming soon. +* RQD changes for OOM protection + * PR coming soon. +* CueGUI web UI project launching at SPI + * Next.js on top of React. + * REST wrapper for gRPC. Many folks will find this useful outside of the GUI. + * Prototypes provide readonly functionality, for now. + * We should discuss the authorization system more, things are currently wide open if you have + network access. +* Blender plugin update + * Working on plugin bug fixes. + * Docs update coming soon. Packaging/distribution/install/upgrade process still a big open + question, we should look at the updated docs to see what the process currently looks like then + formulate a plan for this. + * RQD GPU image needs another look. +* M2/M3 compatibility + * We have been working on M1 compatibility for OpenCue. Are M2/M3 also supported? We're not + sure, let's check. 
+ * Still need to finish the embedded postgres fix to complete M1 compatibility. +* OpenJobDescription + * https://github.com/OpenJobDescription/openjd-specifications/wiki + * Effort to standardize job descriptions between work management systems. + * OpenCue open to this effort? In theory yes, we will need to look into more detail to see what + would be required. Could add a layer for converting between the new job description and + OpenCue's internal format. diff --git a/tsc/meetings/2024-02-14.md b/tsc/meetings/2024-02-14.md new file mode 100644 index 000000000..2317d1396 --- /dev/null +++ b/tsc/meetings/2024-02-14.md @@ -0,0 +1,39 @@ +# OpenCue TSC Meeting Notes 14 Feb 2024 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* Cuebot upgrade to Java 21 + * Started to refactor HostReportHandler, realized it would be much improved using newer java + features such as thread pools. + * Upgraded java and some dependencies like gradle. + * SPI has a build that passes all unit tests and are currently testing on a dev environment + before moving forward. Will do some a/b performance testing as well. + * We'll need to upgrade java on our CI docker images, this might require a different base image. + * Can discuss more on github issue/PR. +* OpenJobDescription + * Review of their project goals as discussed last time. Code published to their github org now. + * Reviewed the different github repos and their purpose. + * SPI to review further. + * OpenJD appears to be a superset of opencue's job schema, so opencue should fit into this + system fine. + * Could do implementation in different stages, start with a simple openjd -> opencue converter, + later add other components such as the CLI into cuebot/RQD. + * Diego to start a thread on their discussion forum. + * Others to look into new github repos and understand how opencue would implement support. + * Longer term project. +* Web UI update + * Continued progress on prototyping. 
+ * Will this be a full replacement, or desktop GUI app kept for power users? This is an open + question, starting with basic functionality and will see how it goes. +* RQD OOM issue + * Still testing, PR coming soon. +* Blender plugin + * Almost ready to merge PR with cuesubmit frame range adjustments, then will incorporate that + into the plugin. + * Draft of user guide google doc is ready, linked in the PR. Brian to review. +* opencue.io appears dead to casual users + * Not much new on there, new activity mostly limited to the github, no new releases recently. + * We should do regular website updates, monthly? Publish an activity report? + * We should publish a new release soon as well, been a while. diff --git a/tsc/meetings/2024-03-27.md b/tsc/meetings/2024-03-27.md new file mode 100644 index 000000000..e2cb77227 --- /dev/null +++ b/tsc/meetings/2024-03-27.md @@ -0,0 +1,26 @@ +# OpenCue TSC Meeting Notes 27 Mar 2024 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* TSC handoff + * Brian has been TSC chair for 5-6 years. At this point his time is limited due to other work + priorities. TSC and ASWF participation has been suffering as a result. + * It's a good idea to rotate chairs regularly anyway for project health. + * Will work offline to discuss among TSC members and find a new chair. + * Brian will stay around to participate in the project, and will be heavily involved in the + handoff to make sure the new chair is comfortable with new duties. +* New web UI version + * Prototype coming in early May. + * Still planned as readonly. + * Authentication: + * okta within SPI + * By default it will need to use the database, need to create User as first-class object. + * Login with X (google / github / etc.) in the future? + * Brian: rolling our own auth system feels very old school. There must be a better way. +* New Nimby notifications on desktop + * Disabled by default, new constant to enable. + * Tkinter for showing notifications, so it's cross-platform. 
+* Java 21 upgrade + * Ongoing, most tests complete, PR still being prepared. diff --git a/tsc/meetings/2024-04-24.md b/tsc/meetings/2024-04-24.md new file mode 100644 index 000000000..8a0a20091 --- /dev/null +++ b/tsc/meetings/2024-04-24.md @@ -0,0 +1,41 @@ +# OpenCue TSC Meeting Notes 24 Apr 2024 + +Secretary: Brian Cipriano + +Agenda/Notes: + +* TSC chair handoff + * Diego has volunteered to take over TSC chair position + * No objections from the TSC, new chair position accepted! + * Related housekeeping: + * Meetings + * Need to migrate to new ASWF scheduling system + * Also need to move to Zoom, Brian owns current GVC meeting invite + * Brian to reach out to John Mertic about this + * Consider starting new agenda / notes doc, whatever works best with the new meeting system + * Good chance to clear out Github CODEOWNERS / email lists / groups + * Let's add Ramon to CODEOWNERS + * Can we identify new reviewers / committers automatically with a report of top + contributors? +* Web version + * REST gateway: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1355 + * CueWeb: https://github.com/AcademySoftwareFoundation/OpenCue/pull/1356 + * Currently going to separate branch. + * Security issues + * Currently readonly, but write functionality will be added over time + * Very dangerous if deployed to the public internet + * There are no plans to do this currently, but once code is committed someone could do this. + It's a potential liability issue for OpenCue. + * Let's add additional warnings and keep all code on the separate branch for now. + * Web UI uses a separate REST gateway, which is even more useful than the Web UI. Has the same + security concerns. Perimeter security is not reliable — phishing emails, etc. + * Let's review the two PRs for initial thoughts, testing continues within SPI. +* Daniel: OpenJobDescription + * Potential summer project to implement some initial OpenCue integration. 
+ * Early days, still not decided whether we will get a slot allotted to us. + * Diego offered to help onboard to the OpenCue codebase if we get to that. +* Nuwan: Blender update + * Brian to keep working with Nuwan on this, even after TSC chair handoff. + * Google doc with setup instructions needs another review. + * PR with some opencue.io changes also needs a review. + * Looking at Kubernetes integration for a potential next project.