diff --git a/.editorconfig b/.editorconfig index a04cb9054cb0d..258d41ab485ab 100644 --- a/.editorconfig +++ b/.editorconfig @@ -55,7 +55,7 @@ indent_size = 4 emacs_mode = perl # but user kernel "style" for imported scripts -[scripts/{kernel-doc,get_maintainer.pl,checkpatch.pl}] +[scripts/{get_maintainer.pl,checkpatch.pl}] indent_style = tab indent_size = 8 emacs_mode = perl diff --git a/.gitlab-ci.d/buildtest-template.yml b/.gitlab-ci.d/buildtest-template.yml index 038c3c9540ae5..d866cb12bb1ee 100644 --- a/.gitlab-ci.d/buildtest-template.yml +++ b/.gitlab-ci.d/buildtest-template.yml @@ -83,14 +83,18 @@ .native_test_job_template: extends: .common_test_job_template + before_script: + # Prevent logs from the build job that run earlier + # from being duplicated in the test job artifacts + - rm -f build/meson-logs/* artifacts: name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" when: always expire_in: 7 days paths: - - build/meson-logs/testlog.txt + - build/meson-logs reports: - junit: build/meson-logs/testlog.junit.xml + junit: build/meson-logs/*.junit.xml .functional_test_job_template: extends: .common_test_job_template @@ -104,14 +108,16 @@ when: always expire_in: 7 days paths: - - build/tests/results/latest/results.xml - - build/tests/results/latest/test-results + - build/meson-logs - build/tests/functional/*/*/*.log reports: - junit: build/tests/results/latest/results.xml + junit: build/meson-logs/*.junit.xml before_script: - export QEMU_TEST_ALLOW_UNTRUSTED_CODE=1 - export QEMU_TEST_CACHE_DIR=${CI_PROJECT_DIR}/functional-cache + # Prevent logs from the build job that run earlier + # from being duplicated in the test job artifacts + - rm -f build/meson-logs/* after_script: - cd build - du -chs ${CI_PROJECT_DIR}/*-cache diff --git a/.gitlab-ci.d/buildtest.yml b/.gitlab-ci.d/buildtest.yml index d888a60063715..21f6d7e96fedc 100644 --- a/.gitlab-ci.d/buildtest.yml +++ b/.gitlab-ci.d/buildtest.yml @@ -36,12 +36,12 @@ build-system-ubuntu: - .native_build_job_template - 
.native_build_artifact_template needs: - job: amd64-ubuntu2204-container + - job: amd64-ubuntu2204-container variables: IMAGE: ubuntu2204 - CONFIGURE_ARGS: --enable-docs --enable-rust + CONFIGURE_ARGS: --enable-docs TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu - MAKE_CHECK_ARGS: check-build check-doc + MAKE_CHECK_ARGS: check-build check-system-ubuntu: extends: .native_test_job_template @@ -66,7 +66,7 @@ build-system-debian: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-debian-container + - job: amd64-debian-container variables: IMAGE: debian CONFIGURE_ARGS: --with-coroutine=sigaltstack --enable-rust @@ -109,7 +109,7 @@ build-system-fedora: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-fedora-container + - job: amd64-fedora-container variables: IMAGE: fedora CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs --enable-crypto-afalg --enable-rust @@ -122,7 +122,7 @@ build-system-fedora-rust-nightly: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-fedora-rust-nightly-container + - job: amd64-fedora-rust-nightly-container variables: IMAGE: fedora-rust-nightly CONFIGURE_ARGS: --disable-docs --enable-rust --enable-strict-rust-lints @@ -167,7 +167,7 @@ build-system-centos: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-centos9-container + - job: amd64-centos9-container variables: IMAGE: centos9 CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server @@ -189,7 +189,7 @@ build-previous-qemu: - build-previous/tests/qtest/migration-test - build-previous/scripts needs: - job: amd64-opensuse-leap-container + - job: amd64-opensuse-leap-container variables: IMAGE: opensuse-leap TARGETS: x86_64-softmmu aarch64-softmmu @@ -274,7 +274,7 @@ build-system-opensuse: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-opensuse-leap-container + - job: 
amd64-opensuse-leap-container variables: IMAGE: opensuse-leap TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu @@ -308,7 +308,7 @@ build-system-flaky: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-debian-container + - job: amd64-debian-container variables: IMAGE: debian QEMU_JOB_OPTIONAL: 1 @@ -338,7 +338,7 @@ functional-system-flaky: build-tcg-disabled: extends: .native_build_job_template needs: - job: amd64-centos9-container + - job: amd64-centos9-container variables: IMAGE: centos9 script: @@ -364,7 +364,7 @@ build-tcg-disabled: build-user: extends: .native_build_job_template needs: - job: amd64-debian-user-cross-container + - job: amd64-debian-user-cross-container variables: IMAGE: debian-all-test-cross CONFIGURE_ARGS: --disable-tools --disable-system @@ -374,7 +374,7 @@ build-user: build-user-static: extends: .native_build_job_template needs: - job: amd64-debian-user-cross-container + - job: amd64-debian-user-cross-container variables: IMAGE: debian-all-test-cross CONFIGURE_ARGS: --disable-tools --disable-system --static @@ -385,7 +385,7 @@ build-user-static: build-legacy: extends: .native_build_job_template needs: - job: amd64-debian-legacy-cross-container + - job: amd64-debian-legacy-cross-container variables: IMAGE: debian-legacy-test-cross TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user @@ -395,7 +395,7 @@ build-legacy: build-user-hexagon: extends: .native_build_job_template needs: - job: hexagon-cross-container + - job: hexagon-cross-container variables: IMAGE: debian-hexagon-cross TARGETS: hexagon-linux-user @@ -408,7 +408,7 @@ build-user-hexagon: build-some-softmmu: extends: .native_build_job_template needs: - job: amd64-debian-user-cross-container + - job: amd64-debian-user-cross-container variables: IMAGE: debian-all-test-cross CONFIGURE_ARGS: --disable-tools --enable-debug @@ -419,7 +419,7 @@ build-some-softmmu: build-loongarch64: extends: .native_build_job_template needs: - job: 
loongarch-debian-cross-container + - job: loongarch-debian-cross-container variables: IMAGE: debian-loongarch-cross CONFIGURE_ARGS: --disable-tools --enable-debug @@ -430,7 +430,7 @@ build-loongarch64: build-tricore-softmmu: extends: .native_build_job_template needs: - job: tricore-debian-cross-container + - job: tricore-debian-cross-container variables: IMAGE: debian-tricore-cross CONFIGURE_ARGS: --disable-tools --disable-fdt --enable-debug @@ -440,7 +440,7 @@ build-tricore-softmmu: clang-system: extends: .native_build_job_template needs: - job: amd64-fedora-container + - job: amd64-fedora-container variables: IMAGE: fedora CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan @@ -451,7 +451,7 @@ clang-system: clang-user: extends: .native_build_job_template needs: - job: amd64-debian-user-cross-container + - job: amd64-debian-user-cross-container timeout: 70m variables: IMAGE: debian-all-test-cross @@ -479,7 +479,7 @@ build-cfi-aarch64: LD_JOBS: 1 AR: llvm-ar IMAGE: fedora - CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug + CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-safe-stack --disable-slirp TARGETS: aarch64-softmmu MAKE_CHECK_ARGS: check-build @@ -517,7 +517,7 @@ build-cfi-ppc64-s390x: LD_JOBS: 1 AR: llvm-ar IMAGE: fedora - CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug + CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-safe-stack --disable-slirp TARGETS: ppc64-softmmu s390x-softmmu MAKE_CHECK_ARGS: check-build @@ -555,7 +555,7 @@ build-cfi-x86_64: LD_JOBS: 1 AR: llvm-ar IMAGE: fedora - CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug + CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-safe-stack --disable-slirp TARGETS: x86_64-softmmu MAKE_CHECK_ARGS: check-build @@ -582,7 +582,7 @@ functional-cfi-x86_64: tsan-build: extends: .native_build_job_template needs: - job: amd64-ubuntu2204-container + - job: amd64-ubuntu2204-container variables: 
IMAGE: ubuntu2204 CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++ @@ -596,7 +596,7 @@ tsan-build: gcov: extends: .native_build_job_template needs: - job: amd64-ubuntu2204-container + - job: amd64-ubuntu2204-container timeout: 80m variables: IMAGE: ubuntu2204 @@ -613,9 +613,9 @@ gcov: when: always expire_in: 2 days paths: - - build/meson-logs/testlog.txt + - build/meson-logs reports: - junit: build/meson-logs/testlog.junit.xml + junit: build/meson-logs/*.junit.xml coverage_report: coverage_format: cobertura path: build/coverage.xml @@ -623,7 +623,7 @@ gcov: build-oss-fuzz: extends: .native_build_job_template needs: - job: amd64-fedora-container + - job: amd64-fedora-container variables: IMAGE: fedora script: @@ -645,7 +645,7 @@ build-oss-fuzz: build-tci: extends: .native_build_job_template needs: - job: amd64-debian-user-cross-container + - job: amd64-debian-user-cross-container variables: IMAGE: debian-all-test-cross script: @@ -670,7 +670,7 @@ build-tci: build-without-defaults: extends: .native_build_job_template needs: - job: amd64-centos9-container + - job: amd64-centos9-container variables: IMAGE: centos9 CONFIGURE_ARGS: @@ -688,7 +688,7 @@ build-libvhost-user: stage: build image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG needs: - job: amd64-fedora-container + - job: amd64-fedora-container script: - mkdir subprojects/libvhost-user/build - cd subprojects/libvhost-user/build @@ -702,9 +702,9 @@ build-tools-and-docs-debian: - .native_build_job_template - .native_build_artifact_template needs: - job: amd64-debian-container - # when running on 'master' we use pre-existing container - optional: true + - job: amd64-debian-container + # when running on 'master' we use pre-existing container + optional: true variables: IMAGE: debian MAKE_CHECK_ARGS: check-unit ctags TAGS cscope @@ -736,7 +736,7 @@ pages: - make gtags # We unset variables to work around a bug in some htags versions # which causes it to fail when the environment is large - - 
CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= htags + - CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= CI_COMMIT_DESCRIPTION= htags -anT --tree-view=filetree -m qemu_init -t "Welcome to the QEMU sourcecode" - mv HTML public/src @@ -759,7 +759,7 @@ coverity: - job: amd64-fedora-container optional: true before_script: - - dnf install -y curl wget + - dnf install -y curl wget file script: # would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089) # for example: @@ -791,7 +791,7 @@ build-wasm: extends: .wasm_build_job_template timeout: 2h needs: - job: wasm-emsdk-cross-container + - job: wasm-emsdk-cross-container variables: IMAGE: emsdk-wasm32-cross CONFIGURE_ARGS: --static --disable-tools --enable-debug --enable-tcg-interpreter diff --git a/.gitlab-ci.d/cirrus.yml b/.gitlab-ci.d/cirrus.yml index 75b611418e7b8..13a0bf5bb9f95 100644 --- a/.gitlab-ci.d/cirrus.yml +++ b/.gitlab-ci.d/cirrus.yml @@ -42,7 +42,7 @@ x64-freebsd-14-build: CIRRUS_VM_RAM: 8G UPDATE_COMMAND: pkg update; pkg upgrade -y INSTALL_COMMAND: pkg install -y - CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu + CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu --enable-rust TEST_TARGETS: check aarch64-macos-build: diff --git a/.gitlab-ci.d/container-cross.yml b/.gitlab-ci.d/container-cross.yml index 8d3be53b75b23..0fd7341afac01 100644 --- a/.gitlab-ci.d/container-cross.yml +++ b/.gitlab-ci.d/container-cross.yml @@ -52,12 +52,6 @@ mips64el-debian-cross-container: variables: NAME: debian-mips64el-cross -mipsel-debian-cross-container: - extends: .container_job_template - stage: containers - variables: - NAME: debian-mipsel-cross - ppc64el-debian-cross-container: extends: .container_job_template stage: containers diff --git 
a/.gitlab-ci.d/crossbuild-template.yml b/.gitlab-ci.d/crossbuild-template.yml index 303943f818f75..58136d06e4adf 100644 --- a/.gitlab-ci.d/crossbuild-template.yml +++ b/.gitlab-ci.d/crossbuild-template.yml @@ -128,6 +128,6 @@ when: always expire_in: 7 days paths: - - build/meson-logs/testlog.txt + - build/meson-logs reports: - junit: build/meson-logs/testlog.junit.xml + junit: build/meson-logs/*.junit.xml diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index 3f76c901ba8bf..99dfa7eea6fc4 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -4,28 +4,28 @@ include: cross-armhf-user: extends: .cross_user_build_job needs: - job: armhf-debian-cross-container + - job: armhf-debian-cross-container variables: IMAGE: debian-armhf-cross cross-arm64-system: extends: .cross_system_build_job needs: - job: arm64-debian-cross-container + - job: arm64-debian-cross-container variables: IMAGE: debian-arm64-cross cross-arm64-user: extends: .cross_user_build_job needs: - job: arm64-debian-cross-container + - job: arm64-debian-cross-container variables: IMAGE: debian-arm64-cross cross-arm64-kvm-only: extends: .cross_accel_build_job needs: - job: arm64-debian-cross-container + - job: arm64-debian-cross-container variables: IMAGE: debian-arm64-cross EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features @@ -35,7 +35,7 @@ cross-i686-system: - .cross_system_build_job - .cross_test_artifacts needs: - job: i686-debian-cross-container + - job: i686-debian-cross-container variables: IMAGE: debian-i686-cross EXTRA_CONFIGURE_OPTS: --disable-kvm @@ -46,7 +46,7 @@ cross-i686-user: - .cross_user_build_job - .cross_test_artifacts needs: - job: i686-debian-cross-container + - job: i686-debian-cross-container variables: IMAGE: debian-i686-cross MAKE_CHECK_ARGS: check @@ -57,7 +57,7 @@ cross-i686-tci: - .cross_test_artifacts timeout: 60m needs: - job: i686-debian-cross-container + - job: i686-debian-cross-container variables: IMAGE: 
debian-i686-cross ACCEL: tcg-interpreter @@ -68,52 +68,38 @@ cross-i686-tci: # would otherwise be using a parallelism of 9. MAKE_CHECK_ARGS: check check-tcg -j2 -cross-mipsel-system: - extends: .cross_system_build_job - needs: - job: mipsel-debian-cross-container - variables: - IMAGE: debian-mipsel-cross - -cross-mipsel-user: - extends: .cross_user_build_job - needs: - job: mipsel-debian-cross-container - variables: - IMAGE: debian-mipsel-cross - cross-mips64el-system: extends: .cross_system_build_job needs: - job: mips64el-debian-cross-container + - job: mips64el-debian-cross-container variables: IMAGE: debian-mips64el-cross cross-mips64el-user: extends: .cross_user_build_job needs: - job: mips64el-debian-cross-container + - job: mips64el-debian-cross-container variables: IMAGE: debian-mips64el-cross cross-ppc64el-system: extends: .cross_system_build_job needs: - job: ppc64el-debian-cross-container + - job: ppc64el-debian-cross-container variables: IMAGE: debian-ppc64el-cross cross-ppc64el-user: extends: .cross_user_build_job needs: - job: ppc64el-debian-cross-container + - job: ppc64el-debian-cross-container variables: IMAGE: debian-ppc64el-cross cross-ppc64el-kvm-only: extends: .cross_accel_build_job needs: - job: ppc64el-debian-cross-container + - job: ppc64el-debian-cross-container variables: IMAGE: debian-ppc64el-cross EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices @@ -121,35 +107,35 @@ cross-ppc64el-kvm-only: cross-riscv64-system: extends: .cross_system_build_job needs: - job: riscv64-debian-cross-container + - job: riscv64-debian-cross-container variables: IMAGE: debian-riscv64-cross cross-riscv64-user: extends: .cross_user_build_job needs: - job: riscv64-debian-cross-container + - job: riscv64-debian-cross-container variables: IMAGE: debian-riscv64-cross cross-s390x-system: extends: .cross_system_build_job needs: - job: s390x-debian-cross-container + - job: s390x-debian-cross-container variables: IMAGE: debian-s390x-cross cross-s390x-user: 
extends: .cross_user_build_job needs: - job: s390x-debian-cross-container + - job: s390x-debian-cross-container variables: IMAGE: debian-s390x-cross cross-s390x-kvm-only: extends: .cross_accel_build_job needs: - job: s390x-debian-cross-container + - job: s390x-debian-cross-container variables: IMAGE: debian-s390x-cross EXTRA_CONFIGURE_OPTS: --disable-tcg --enable-trace-backends=ftrace @@ -157,7 +143,7 @@ cross-s390x-kvm-only: cross-mips64el-kvm-only: extends: .cross_accel_build_job needs: - job: mips64el-debian-cross-container + - job: mips64el-debian-cross-container variables: IMAGE: debian-mips64el-cross EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu @@ -165,7 +151,7 @@ cross-mips64el-kvm-only: cross-win64-system: extends: .cross_system_build_job needs: - job: win64-fedora-cross-container + - job: win64-fedora-cross-container variables: IMAGE: fedora-win64-cross EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins @@ -181,7 +167,7 @@ cross-win64-system: cross-amd64-xen-only: extends: .cross_accel_build_job needs: - job: amd64-debian-cross-container + - job: amd64-debian-cross-container variables: IMAGE: debian-amd64-cross ACCEL: xen @@ -190,7 +176,7 @@ cross-amd64-xen-only: cross-arm64-xen-only: extends: .cross_accel_build_job needs: - job: arm64-debian-cross-container + - job: arm64-debian-cross-container variables: IMAGE: debian-arm64-cross ACCEL: xen diff --git a/.gitlab-ci.d/custom-runners.yml b/.gitlab-ci.d/custom-runners.yml index 1aa3c60efe991..142fbf4a242f2 100644 --- a/.gitlab-ci.d/custom-runners.yml +++ b/.gitlab-ci.d/custom-runners.yml @@ -26,9 +26,8 @@ - build/build.ninja - build/meson-logs reports: - junit: build/meson-logs/testlog.junit.xml + junit: build/meson-logs/*.junit.xml include: - - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml' - - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml' - - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml' + - local: 
'/.gitlab-ci.d/custom-runners/ubuntu-24.04-s390x.yml' + - local: '/.gitlab-ci.d/custom-runners/ubuntu-24.04-aarch64.yml' diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml deleted file mode 100644 index 8727687e2b469..0000000000000 --- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml +++ /dev/null @@ -1,25 +0,0 @@ -# All ubuntu-22.04 jobs should run successfully in an environment -# setup by the scripts/ci/setup/ubuntu/build-environment.yml task -# "Install basic packages to build QEMU on Ubuntu 22.04" - -ubuntu-22.04-aarch32-all: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch32 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - allow_failure: true - - if: "$AARCH32_RUNNER_AVAILABLE" - when: manual - allow_failure: true - script: - - mkdir build - - cd build - - ../configure --cross-prefix=arm-linux-gnueabihf- - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - - make --output-sync -j`nproc --ignore=40` check diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml b/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml deleted file mode 100644 index ca2f1404710e3..0000000000000 --- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml +++ /dev/null @@ -1,151 +0,0 @@ -# All ubuntu-22.04 jobs should run successfully in an environment -# setup by the scripts/ci/setup/ubuntu/build-environment.yml task -# "Install basic packages to build QEMU on Ubuntu 22.04" - -ubuntu-22.04-aarch64-all-linux-static: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$AARCH64_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - # Disable -static-pie due to build error with system libc: - 
# https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1987438 - - ../configure --enable-debug --static --disable-system --disable-pie - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - - make check-tcg - - make --output-sync -j`nproc --ignore=40` check - -ubuntu-22.04-aarch64-all: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - allow_failure: true - - if: "$AARCH64_RUNNER_AVAILABLE" - when: manual - allow_failure: true - script: - - mkdir build - - cd build - - ../configure - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - - make --output-sync -j`nproc --ignore=40` check - -ubuntu-22.04-aarch64-without-defaults: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - allow_failure: true - - if: "$AARCH64_RUNNER_AVAILABLE" - when: manual - allow_failure: true - script: - - mkdir build - - cd build - - ../configure --disable-user --without-default-devices --without-default-features - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - - make --output-sync -j`nproc --ignore=40` check - -ubuntu-22.04-aarch64-alldbg: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - - if: "$AARCH64_RUNNER_AVAILABLE" - script: - - mkdir build - - cd build - - ../configure --enable-debug - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make clean - - make --output-sync -j`nproc --ignore=40` - - make --output-sync -j`nproc --ignore=40` check - 
-ubuntu-22.04-aarch64-clang: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - allow_failure: true - - if: "$AARCH64_RUNNER_AVAILABLE" - when: manual - allow_failure: true - script: - - mkdir build - - cd build - - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-ubsan - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - - make --output-sync -j`nproc --ignore=40` check - -ubuntu-22.04-aarch64-tci: - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - allow_failure: true - - if: "$AARCH64_RUNNER_AVAILABLE" - when: manual - allow_failure: true - script: - - mkdir build - - cd build - - ../configure --enable-tcg-interpreter - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - -ubuntu-22.04-aarch64-notcg: - extends: .custom_runner_template - needs: [] - stage: build - tags: - - ubuntu_22.04 - - aarch64 - rules: - - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' - when: manual - allow_failure: true - - if: "$AARCH64_RUNNER_AVAILABLE" - when: manual - allow_failure: true - script: - - mkdir build - - cd build - - ../configure --disable-tcg --with-devices-aarch64=minimal - || { cat config.log meson-logs/meson-log.txt; exit 1; } - - make --output-sync -j`nproc --ignore=40` - - make --output-sync -j`nproc --ignore=40` check diff --git a/.gitlab-ci.d/custom-runners/ubuntu-24.04-aarch64.yml b/.gitlab-ci.d/custom-runners/ubuntu-24.04-aarch64.yml new file mode 100644 index 0000000000000..46db9ae0138c6 --- /dev/null +++ b/.gitlab-ci.d/custom-runners/ubuntu-24.04-aarch64.yml @@ -0,0 +1,113 @@ +# All ubuntu-24.04 jobs should run successfully in an 
environment +# setup by the scripts/ci/setup/ubuntu/build-environment.yml task +# "Install basic packages to build QEMU on Ubuntu 24.04" + +.ubuntu_aarch64_template: + extends: .custom_runner_template + needs: [] + stage: build + tags: + - ubuntu_24.04 + - aarch64 + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + - if: "$AARCH64_RUNNER_AVAILABLE" + before_script: + - source scripts/ci/gitlab-ci-section + - section_start setup "Pre-script setup" + - JOBS=$(expr $(nproc) - 4) + - section_end setup + script: + - mkdir build + - cd build + - section_start configure "Running configure" + - ../configure $CONFIGURE_ARGS || + { cat config.log meson-logs/meson-log.txt && exit 1; } + - section_end configure + - section_start build "Building QEMU" + - make --output-sync -j"$JOBS" + - section_end build + - section_start test "Running tests" + - if test -n "$MAKE_CHECK_ARGS"; + then + make -j"$JOBS" $MAKE_CHECK_ARGS ; + fi + - section_end test + +ubuntu-24.04-aarch64-all-linux-static: + extends: .ubuntu_aarch64_template + variables: + # Disable -static-pie due to build error with system libc: + # https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1987438 + CONFIGURE_ARGS: --enable-debug --static --disable-system --disable-pie + MAKE_CHECK_ARGS: check-tcg + +ubuntu-24.04-aarch64-all: + extends: .ubuntu_aarch64_template + variables: + MAKE_CHECK_ARGS: check + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + +ubuntu-24.04-aarch64-without-defaults: + extends: .ubuntu_aarch64_template + variables: + CONFIGURE_ARGS: --disable-user --without-default-devices --without-default-features + MAKE_CHECK_ARGS: check + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: 
manual + allow_failure: true + +ubuntu-24.04-aarch64-alldbg: + extends: .ubuntu_aarch64_template + variables: + CONFIGURE_ARGS: --enable-debug + MAKE_CHECK_ARGS: check-tcg + +ubuntu-24.04-aarch64-clang: + extends: .ubuntu_aarch64_template + variables: + CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan + MAKE_CHECK_ARGS: check + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + +ubuntu-24.04-aarch64-tci: + extends: .ubuntu_aarch64_template + variables: + CONFIGURE_ARGS: --enable-tcg-interpreter + MAKE_CHECK_ARGS: check + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true + +ubuntu-24.04-aarch64-notcg: + extends: .ubuntu_aarch64_template + variables: + CONFIGURE_ARGS: --disable-tcg --with-devices-aarch64=minimal + MAKE_CHECK_ARGS: check + rules: + - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' + when: manual + allow_failure: true + - if: "$AARCH64_RUNNER_AVAILABLE" + when: manual + allow_failure: true diff --git a/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml b/.gitlab-ci.d/custom-runners/ubuntu-24.04-s390x.yml similarity index 88% rename from .gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml rename to .gitlab-ci.d/custom-runners/ubuntu-24.04-s390x.yml index e62ff1763fa0d..45dbee178802b 100644 --- a/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml +++ b/.gitlab-ci.d/custom-runners/ubuntu-24.04-s390x.yml @@ -1,13 +1,13 @@ -# All ubuntu-22.04 jobs should run successfully in an environment +# All ubuntu-24.04 jobs should run successfully in an environment # setup by the scripts/ci/setup/ubuntu/build-environment.yml task -# "Install basic packages to build QEMU on Ubuntu 22.04" +# "Install basic packages to build QEMU on 
Ubuntu 24.04" -ubuntu-22.04-s390x-all-linux: +ubuntu-24.04-s390x-all-linux: extends: .custom_runner_template needs: [] stage: build tags: - - ubuntu_22.04 + - ubuntu_24.04 - s390x rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' @@ -21,12 +21,12 @@ ubuntu-22.04-s390x-all-linux: - make --output-sync check-tcg - make --output-sync -j`nproc` check -ubuntu-22.04-s390x-all-system: +ubuntu-24.04-s390x-all-system: extends: .custom_runner_template needs: [] stage: build tags: - - ubuntu_22.04 + - ubuntu_24.04 - s390x timeout: 75m rules: @@ -42,12 +42,12 @@ ubuntu-22.04-s390x-all-system: - make --output-sync -j`nproc` - make --output-sync -j`nproc` check -ubuntu-22.04-s390x-alldbg: +ubuntu-24.04-s390x-alldbg: extends: .custom_runner_template needs: [] stage: build tags: - - ubuntu_22.04 + - ubuntu_24.04 - s390x rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' @@ -65,12 +65,12 @@ ubuntu-22.04-s390x-alldbg: - make --output-sync -j`nproc` - make --output-sync -j`nproc` check -ubuntu-22.04-s390x-clang: +ubuntu-24.04-s390x-clang: extends: .custom_runner_template needs: [] stage: build tags: - - ubuntu_22.04 + - ubuntu_24.04 - s390x rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' @@ -87,11 +87,11 @@ ubuntu-22.04-s390x-clang: - make --output-sync -j`nproc` - make --output-sync -j`nproc` check -ubuntu-22.04-s390x-tci: +ubuntu-24.04-s390x-tci: needs: [] stage: build tags: - - ubuntu_22.04 + - ubuntu_24.04 - s390x rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/' @@ -107,12 +107,12 @@ ubuntu-22.04-s390x-tci: || { cat config.log meson-logs/meson-log.txt; exit 1; } - make --output-sync -j`nproc` -ubuntu-22.04-s390x-notcg: +ubuntu-24.04-s390x-notcg: extends: .custom_runner_template needs: [] stage: build tags: - - ubuntu_22.04 + - ubuntu_24.04 - s390x rules: - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH 
=~ /^staging/' diff --git a/.gitlab-ci.d/static_checks.yml b/.gitlab-ci.d/static_checks.yml index c3ed6de453d07..61fe2fa39ab28 100644 --- a/.gitlab-ci.d/static_checks.yml +++ b/.gitlab-ci.d/static_checks.yml @@ -32,7 +32,7 @@ check-python-minreqs: variables: GIT_DEPTH: 1 needs: - job: python-container + - job: python-container check-python-tox: extends: .base_job_template @@ -45,7 +45,7 @@ check-python-tox: QEMU_TOX_EXTRA_ARGS: --skip-missing-interpreters=false QEMU_JOB_OPTIONAL: 1 needs: - job: python-container + - job: python-container check-rust-tools-nightly: extends: .base_job_template @@ -76,7 +76,7 @@ check-build-units: stage: build image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG needs: - job: amd64-debian-container + - job: amd64-debian-container before_script: - source scripts/ci/gitlab-ci-section - section_start setup "Install Tools" diff --git a/.gitlab-ci.d/windows.yml b/.gitlab-ci.d/windows.yml index 45ed0c96feaa8..5dbdabfbec03c 100644 --- a/.gitlab-ci.d/windows.yml +++ b/.gitlab-ci.d/windows.yml @@ -24,9 +24,10 @@ msys2-64bit: name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG" expire_in: 7 days paths: - - build/meson-logs/testlog.txt + - build/meson-logs + - build/cache-log.txt reports: - junit: "build/meson-logs/testlog.junit.xml" + junit: build/meson-logs/*.junit.xml before_script: - Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)" - If ( !(Test-Path -Path msys64\var\cache ) ) { @@ -77,7 +78,7 @@ msys2-64bit: git grep make sed mingw-w64-x86_64-binutils mingw-w64-x86_64-ccache - mingw-w64-x86_64-curl + mingw-w64-x86_64-curl-winssl mingw-w64-x86_64-gcc mingw-w64-x86_64-glib2 mingw-w64-x86_64-libnfs @@ -87,13 +88,14 @@ msys2-64bit: mingw-w64-x86_64-pkgconf mingw-w64-x86_64-python mingw-w64-x86_64-zstd" + - .\msys64\usr\bin\bash -lc "pacman -Sc --noconfirm" - Write-Output "Running build at $(Get-Date -Format u)" - $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc) - $env:CHERE_INVOKING = 'yes' # Preserve the current working directory 
- $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink - $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR" - $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache" - - $env:CCACHE_MAXSIZE = "500M" + - $env:CCACHE_MAXSIZE = "180M" - $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode - $env:CC = "ccache gcc" - mkdir build @@ -102,5 +104,7 @@ msys2-64bit: - ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS" - ..\msys64\usr\bin\bash -lc "make -j$env:JOBS" - ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;" + - ..\msys64\usr\bin\bash -lc "ls -lR /var/cache > cache-log.txt" + - ..\msys64\usr\bin\bash -lc "du -sh ." - ..\msys64\usr\bin\bash -lc "ccache --show-stats" - Write-Output "Finished build at $(Get-Date -Format u)" diff --git a/.gitmodules b/.gitmodules index 73cae4cd4da00..c307216d173ca 100644 --- a/.gitmodules +++ b/.gitmodules @@ -15,6 +15,7 @@ url = https://gitlab.com/qemu-project/qemu-palcode.git [submodule "roms/u-boot"] path = roms/u-boot + # upstream is https://github.com/u-boot/u-boot url = https://gitlab.com/qemu-project/u-boot.git [submodule "roms/skiboot"] path = roms/skiboot @@ -27,6 +28,7 @@ url = https://gitlab.com/qemu-project/seabios-hppa.git [submodule "roms/u-boot-sam460ex"] path = roms/u-boot-sam460ex + # upstream is https://github.com/zbalaton/u-boot-sam460ex url = https://gitlab.com/qemu-project/u-boot-sam460ex.git [submodule "roms/edk2"] path = roms/edk2 diff --git a/.gitpublish b/.gitpublish index a13f8c7c0ecd9..a3adb21ffa150 100644 --- a/.gitpublish +++ b/.gitpublish @@ -4,48 +4,48 @@ # See https://github.com/stefanha/git-publish for more information # [gitpublishprofile "default"] -base = master +base = origin/master to = qemu-devel@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "rfc"] -base = master +base = origin/master prefix = RFC PATCH to = qemu-devel@nongnu.org cccmd 
= scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "stable"] -base = master +base = origin/master to = qemu-devel@nongnu.org cc = qemu-stable@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "trivial"] -base = master +base = origin/master to = qemu-devel@nongnu.org cc = qemu-trivial@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "block"] -base = master +base = origin/master to = qemu-devel@nongnu.org cc = qemu-block@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "arm"] -base = master +base = origin/master to = qemu-devel@nongnu.org cc = qemu-arm@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "s390"] -base = master +base = origin/master to = qemu-devel@nongnu.org cc = qemu-s390@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null [gitpublishprofile "ppc"] -base = master +base = origin/master to = qemu-devel@nongnu.org cc = qemu-ppc@nongnu.org cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null diff --git a/.mailmap b/.mailmap index e7271852dc6af..7f817d9f425c0 100644 --- a/.mailmap +++ b/.mailmap @@ -74,6 +74,7 @@ Aleksandar Markovic Aleksandar Rikalo Aleksandar Rikalo +Alex Williamson Alexander Graf Ani Sinha Anthony Liguori Anthony Liguori @@ -136,6 +137,7 @@ Chen Gang Chen Gang Chen Wei-Ren Christophe Lyon +Clément Mathieu--Drif Collin L. Walling Daniel P. 
Berrangé Eduardo Otubo diff --git a/.readthedocs.yml b/.readthedocs.yml index 0b262469ce693..639f628612c88 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -21,5 +21,3 @@ python: install: - requirements: docs/requirements.txt -# We want all the document formats -formats: all diff --git a/MAINTAINERS b/MAINTAINERS index a462345618350..36eef27b4192a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -85,7 +85,7 @@ Responsible Disclosure, Reporting Security Issues ------------------------------------------------- W: https://wiki.qemu.org/SecurityProcess M: Michael S. Tsirkin -L: secalert@redhat.com +L: qemu-security@nongnu.org Trivial patches --------------- @@ -146,6 +146,8 @@ F: target/i386/*.[ch] F: target/i386/Kconfig F: target/i386/meson.build F: tools/i386/ +F: tests/functional/i386/ +F: tests/functional/x86_64/ Guest CPU cores (TCG) --------------------- @@ -189,6 +191,7 @@ M: Richard Henderson S: Maintained F: target/alpha/ F: tests/tcg/alpha/ +F: tests/functional/alpha/ F: disas/alpha.c ARM TCG CPUs @@ -212,7 +215,7 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/smmu* F: include/hw/arm/smmu* -F: tests/functional/test_aarch64_smmu.py +F: tests/functional/aarch64/test_smmu.py AVR TCG CPUs M: Michael Rolnik @@ -220,7 +223,7 @@ S: Maintained F: docs/system/target-avr.rst F: gdb-xml/avr-cpu.xml F: target/avr/ -F: tests/functional/test_avr_*.py +F: tests/functional/avr/ Hexagon TCG CPUs M: Brian Cain @@ -256,7 +259,7 @@ M: Song Gao S: Maintained F: target/loongarch/ F: tests/tcg/loongarch64/ -F: tests/functional/test_loongarch64_virt.py +F: tests/functional/loongarch64/test_virt.py M68K TCG CPUs M: Laurent Vivier @@ -295,7 +298,7 @@ F: tests/tcg/openrisc/ PowerPC TCG CPUs M: Nicholas Piggin -M: Daniel Henrique Barboza +R: Chinmay Rath L: qemu-ppc@nongnu.org S: Odd Fixes F: target/ppc/ @@ -308,7 +311,7 @@ F: configs/devices/ppc* F: docs/system/ppc/embedded.rst F: docs/system/target-ppc.rst F: tests/tcg/ppc*/* -F: tests/functional/test_ppc_74xx.py +F: 
tests/functional/ppc/test_74xx.py RISC-V TCG CPUs M: Palmer Dabbelt @@ -330,7 +333,8 @@ F: include/hw/riscv/ F: linux-user/host/riscv32/ F: linux-user/host/riscv64/ F: common-user/host/riscv* -F: tests/functional/test_riscv* +F: tests/functional/riscv32 +F: tests/functional/riscv64 F: tests/tcg/riscv64/ RISC-V XThead* extensions @@ -443,6 +447,7 @@ M: Peter Maydell L: qemu-arm@nongnu.org S: Maintained F: target/arm/kvm.c +F: tests/functional/aarch64/test_kvm.py MIPS KVM CPUs M: Huacai Chen @@ -452,7 +457,7 @@ F: target/mips/system/ PPC KVM CPUs M: Nicholas Piggin -R: Daniel Henrique Barboza +R: Harsh Prateek Bora S: Odd Fixes F: target/ppc/kvm.c @@ -479,7 +484,7 @@ F: docs/system/i386/sgx.rst F: target/i386/kvm/ F: target/i386/sev* F: scripts/kvm/vmxcap -F: tests/functional/test_x86_64_hotplug_cpu.py +F: tests/functional/x86_64/test_hotplug_cpu.py Xen emulation on X86 KVM CPUs M: David Woodhouse @@ -488,7 +493,7 @@ S: Supported F: include/system/kvm_xen.h F: target/i386/kvm/xen* F: hw/i386/kvm/xen* -F: tests/functional/test_x86_64_kvm_xen.py +F: tests/functional/x86_64/test_kvm_xen.py Guest CPU Cores (other accelerators) ------------------------------------ @@ -546,6 +551,21 @@ F: target/i386/whpx/ F: accel/stubs/whpx-stub.c F: include/system/whpx.h +MSHV +M: Magnus Kulke +R: Wei Liu +S: Supported +F: accel/mshv/ +F: include/system/mshv.h +F: include/hw/hyperv/hvgdk*.h +F: include/hw/hyperv/hvhdk*.h + +X86 MSHV CPUs +M: Magnus Kulke +R: Wei Liu +S: Supported +F: target/i386/mshv/ + X86 Instruction Emulator M: Cameron Esfahani M: Roman Bolshakov @@ -651,12 +671,13 @@ F: tests/docker/dockerfiles/emsdk-wasm32-cross.docker Alpha Machines -------------- +Clipper M: Richard Henderson S: Maintained F: hw/alpha/ F: hw/isa/smc37c669-superio.c F: tests/tcg/alpha/system/ -F: tests/functional/test_alpha_clipper.py +F: tests/functional/alpha/test_clipper.py ARM Machines ------------ @@ -672,7 +693,7 @@ F: include/hw/*/allwinner* F: hw/arm/cubieboard.c F: 
docs/system/arm/cubieboard.rst F: hw/misc/axp209.c -F: tests/functional/test_arm_cubieboard.py +F: tests/functional/arm/test_cubieboard.py Allwinner-h3 M: Niek Linnenbank @@ -682,7 +703,7 @@ F: hw/*/allwinner-h3* F: include/hw/*/allwinner-h3* F: hw/arm/orangepi.c F: docs/system/arm/orangepi.rst -F: tests/functional/test_arm_orangepi.py +F: tests/functional/arm/test_orangepi.py ARM PrimeCell and CMSDK devices M: Peter Maydell @@ -752,7 +773,7 @@ F: docs/system/arm/bananapi_m2u.rst F: hw/*/allwinner-r40*.c F: hw/arm/bananapi_m2u.c F: include/hw/*/allwinner-r40*.h -F: tests/functional/test_arm_bpim2u.py +F: tests/functional/arm/test_bpim2u.py B-L475E-IOT01A IoT Node M: Samuel Tardieu @@ -770,7 +791,7 @@ S: Odd Fixes F: hw/*/exynos* F: include/hw/*/exynos* F: docs/system/arm/exynos.rst -F: tests/functional/test_arm_smdkc210.py +F: tests/functional/arm/test_smdkc210.py Calxeda Highbank M: Rob Herring @@ -789,7 +810,7 @@ S: Odd Fixes F: include/hw/arm/digic.h F: hw/*/digic* F: include/hw/*/digic* -F: tests/functional/test_arm_canona1100.py +F: tests/functional/arm/test_canona1100.py F: docs/system/arm/digic.rst Goldfish RTC @@ -832,7 +853,7 @@ S: Odd Fixes F: hw/arm/integratorcp.c F: hw/misc/arm_integrator_debug.c F: include/hw/misc/arm_integrator_debug.h -F: tests/functional/test_arm_integratorcp.py +F: tests/functional/arm/test_integratorcp.py F: docs/system/arm/integratorcp.rst MCIMX6UL EVK / i.MX6ul @@ -874,7 +895,7 @@ F: include/hw/arm/fsl-imx8mp.h F: include/hw/misc/imx8mp_*.h F: include/hw/pci-host/fsl_imx8m_phy.h F: docs/system/arm/imx8mp-evk.rst -F: tests/functional/test_aarch64_imx8mp_evk.py +F: tests/functional/aarch64/test_imx8mp_evk.py F: tests/qtest/rs5c372-test.c MPS2 / MPS3 @@ -938,7 +959,7 @@ F: pc-bios/npcm7xx_bootrom.bin F: pc-bios/npcm8xx_bootrom.bin F: roms/vbootrom F: docs/system/arm/nuvoton.rst -F: tests/functional/test_arm_quanta_gsj.py +F: tests/functional/arm/test_quanta_gsj.py Raspberry Pi M: Peter Maydell @@ -951,9 +972,8 @@ F: hw/*/bcm283* F: 
include/hw/arm/rasp* F: include/hw/*/bcm283* F: docs/system/arm/raspi.rst -F: tests/functional/test_arm_raspi2.py -F: tests/functional/test_aarch64_raspi3.py -F: tests/functional/test_aarch64_raspi4.py +F: tests/functional/arm/test_raspi2.py +F: tests/functional/aarch64/test_raspi*.py Real View M: Peter Maydell @@ -964,7 +984,7 @@ F: hw/cpu/realview_mpcore.c F: hw/intc/realview_gic.c F: include/hw/intc/realview_gic.h F: docs/system/arm/realview.rst -F: tests/functional/test_arm_realview.py +F: tests/functional/arm/test_realview.py SABRELITE / i.MX6 M: Peter Maydell @@ -993,7 +1013,7 @@ F: hw/misc/sbsa_ec.c F: hw/watchdog/sbsa_gwdt.c F: include/hw/watchdog/sbsa_gwdt.h F: docs/system/arm/sbsa.rst -F: tests/functional/test_aarch64_*sbsaref*.py +F: tests/functional/aarch64/test_*sbsaref*.py Sharp SL-5500 (Collie) PDA M: Peter Maydell @@ -1002,9 +1022,8 @@ S: Odd Fixes F: hw/arm/collie.c F: hw/arm/strongarm* F: hw/gpio/zaurus.c -F: include/hw/arm/sharpsl.h F: docs/system/arm/collie.rst -F: tests/functional/test_arm_collie.py +F: tests/functional/arm/test_collie.py Stellaris M: Peter Maydell @@ -1015,7 +1034,7 @@ F: hw/display/ssd03* F: include/hw/input/stellaris_gamepad.h F: include/hw/timer/stellaris-gptm.h F: docs/system/arm/stellaris.rst -F: tests/functional/test_arm_stellaris.py +F: tests/functional/arm/test_stellaris.py STM32L4x5 SoC Family M: Samuel Tardieu @@ -1044,7 +1063,7 @@ S: Odd Fixes F: hw/arm/vexpress.c F: hw/display/sii9022.c F: docs/system/arm/vexpress.rst -F: tests/functional/test_arm_vexpress.py +F: tests/functional/arm/test_vexpress.py Versatile PB M: Peter Maydell @@ -1063,10 +1082,10 @@ S: Maintained F: hw/arm/virt* F: include/hw/arm/virt.h F: docs/system/arm/virt.rst -F: tests/functional/test_aarch64_*virt*.py -F: tests/functional/test_aarch64_tuxrun.py -F: tests/functional/test_arm_tuxrun.py -F: tests/functional/test_arm_virt.py +F: tests/functional/aarch64/test_*virt*.py +F: tests/functional/aarch64/test_tuxrun.py +F: 
tests/functional/arm/test_tuxrun.py +F: tests/functional/arm/test_virt.py Xilinx Zynq M: Edgar E. Iglesias @@ -1096,7 +1115,7 @@ F: hw/display/dpcd.c F: include/hw/display/dpcd.h F: docs/system/arm/xlnx-versal-virt.rst F: docs/system/arm/xlnx-zcu102.rst -F: tests/functional/test_aarch64_xlnx_versal.py +F: tests/functional/aarch64/test_xlnx_versal.py Xilinx Versal OSPI M: Francisco Iglesias @@ -1187,7 +1206,7 @@ L: qemu-arm@nongnu.org S: Maintained F: hw/arm/msf2-som.c F: docs/system/arm/emcraft-sf2.rst -F: tests/functional/test_arm_emcraft_sf2.py +F: tests/functional/arm/test_emcraft_sf2.py ASPEED BMCs M: Cédric Le Goater @@ -1205,6 +1224,7 @@ F: hw/net/ftgmac100.c F: include/hw/net/ftgmac100.h F: docs/system/arm/aspeed.rst F: docs/system/arm/fby35.rst +F: tests/functional/*/*aspeed* F: tests/*/*aspeed* F: tests/*/*ast2700* F: hw/arm/fby35.c @@ -1220,7 +1240,7 @@ F: hw/*/microbit*.c F: include/hw/*/nrf51*.h F: include/hw/*/microbit*.h F: tests/qtest/microbit-test.c -F: tests/functional/test_arm_microbit.py +F: tests/functional/arm/test_microbit.py F: docs/system/arm/nrf.rst ARM PL011 Rust device @@ -1247,7 +1267,7 @@ Arduino M: Philippe Mathieu-Daudé S: Maintained F: hw/avr/arduino.c -F: tests/functional/test_avr_uno.py +F: tests/functional/avr/test_uno.py HP-PARISC Machines ------------------ @@ -1271,7 +1291,7 @@ F: include/hw/pci-host/astro.h F: include/hw/pci-host/dino.h F: pc-bios/hppa-firmware.img F: roms/seabios-hppa/ -F: tests/functional/test_hppa_seabios.py +F: tests/functional/hppa/ LoongArch Machines ------------------ @@ -1289,7 +1309,6 @@ F: include/hw/intc/loongarch_*.h F: include/hw/intc/loongson_ipi_common.h F: hw/intc/loongarch_*.c F: hw/intc/loongson_ipi_common.c -F: include/hw/pci-host/ls7a.h F: hw/rtc/ls7a_rtc.c F: gdb-xml/loongarch*.xml @@ -1309,7 +1328,7 @@ F: hw/m68k/mcf_intc.c F: hw/char/mcf_uart.c F: hw/net/mcf_fec.c F: include/hw/m68k/mcf*.h -F: tests/functional/test_m68k_mcf5208evb.py +F: tests/functional/m68k/test_mcf5208evb.py NeXTcube 
M: Thomas Huth @@ -1317,7 +1336,7 @@ S: Odd Fixes F: hw/m68k/next-*.c F: hw/display/next-fb.c F: include/hw/m68k/next-cube.h -F: tests/functional/test_m68k_nextcube.py +F: tests/functional/m68k/test_nextcube.py q800 M: Laurent Vivier @@ -1343,7 +1362,7 @@ F: include/hw/m68k/q800-glue.h F: include/hw/misc/djmemc.h F: include/hw/misc/iosb.h F: include/hw/audio/asc.h -F: tests/functional/test_m68k_q800.py +F: tests/functional/m68k/test_q800.py virt M: Laurent Vivier @@ -1358,7 +1377,7 @@ F: include/hw/intc/goldfish_pic.h F: include/hw/intc/m68k_irqc.h F: include/hw/misc/virt_ctrl.h F: docs/specs/virt-ctlr.rst -F: tests/functional/test_m68k_tuxrun.py +F: tests/functional/m68k/test_tuxrun.py MicroBlaze Machines ------------------- @@ -1367,7 +1386,7 @@ M: Edgar E. Iglesias S: Maintained F: hw/microblaze/petalogix_s3adsp1800_mmu.c F: include/hw/char/xilinx_uartlite.h -F: tests/functional/test_microblaze*.py +F: tests/functional/microblaze*/test_s3adsp1800.py petalogix_ml605 M: Edgar E. Iglesias @@ -1403,14 +1422,8 @@ F: hw/acpi/piix4.c F: hw/mips/malta.c F: hw/pci-host/gt64120.c F: include/hw/southbridge/piix.h -F: tests/functional/test_mips*_malta.py -F: tests/functional/test_mips*_tuxrun.py - -Mipssim -R: Aleksandar Rikalo -S: Orphan -F: hw/mips/mipssim.c -F: hw/net/mipsnet.c +F: tests/functional/mips*/test_malta.py +F: tests/functional/mips*/test_tuxrun.py Fuloong 2E M: Huacai Chen @@ -1420,7 +1433,7 @@ S: Odd Fixes F: hw/mips/fuloong2e.c F: hw/pci-host/bonito.c F: include/hw/pci-host/bonito.h -F: tests/functional/test_mips64el_fuloong2e.py +F: tests/functional/mips64el/test_fuloong2e.py Loongson-3 virtual platforms M: Huacai Chen @@ -1435,7 +1448,7 @@ F: hw/mips/loongson3_virt.c F: include/hw/intc/loongson_ipi_common.h F: include/hw/intc/loongson_ipi.h F: include/hw/intc/loongson_liointc.h -F: tests/functional/test_mips64el_loongson3v.py +F: tests/functional/mips64el/test_loongson3v.py Boston M: Paul Burton @@ -1454,7 +1467,7 @@ S: Maintained F: 
docs/system/openrisc/or1k-sim.rst F: hw/intc/ompic.c F: hw/openrisc/openrisc_sim.c -F: tests/functional/test_or1k_sim.py +F: tests/functional/or1k/test_sim.py PowerPC Machines ---------------- @@ -1463,7 +1476,7 @@ L: qemu-ppc@nongnu.org S: Orphan F: hw/ppc/ppc440_bamboo.c F: hw/pci-host/ppc4xx_pci.c -F: tests/functional/test_ppc_bamboo.py +F: tests/functional/ppc/test_bamboo.py e500 M: Bernhard Beschow @@ -1481,8 +1494,8 @@ F: pc-bios/u-boot.e500 F: hw/intc/openpic_kvm.c F: include/hw/ppc/openpic_kvm.h F: docs/system/ppc/ppce500.rst -F: tests/functional/test_ppc64_e500.py -F: tests/functional/test_ppc_tuxrun.py +F: tests/functional/ppc64/test_e500.py +F: tests/functional/ppc/test_tuxrun.py mpc8544ds M: Bernhard Beschow @@ -1490,7 +1503,7 @@ L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/mpc8544ds.c F: hw/ppc/mpc8544_guts.c -F: tests/functional/test_ppc_mpc8544ds.py +F: tests/functional/ppc/test_mpc8544ds.py New World (mac99) M: Mark Cave-Ayland @@ -1512,8 +1525,8 @@ F: include/hw/ppc/mac_dbdma.h F: include/hw/pci-host/uninorth.h F: include/hw/input/adb* F: pc-bios/qemu_vga.ndrv -F: tests/functional/test_ppc_mac.py -F: tests/functional/test_ppc64_mac99.py +F: tests/functional/ppc/test_mac.py +F: tests/functional/ppc64/test_mac99.py Old World (g3beige) M: Mark Cave-Ayland @@ -1529,7 +1542,14 @@ F: include/hw/intc/heathrow_pic.h F: include/hw/input/adb* F: include/hw/pci-host/grackle.h F: pc-bios/qemu_vga.ndrv -F: tests/functional/test_ppc_mac.py +F: tests/functional/ppc/test_mac.py + +PPE42 +M: Glenn Miles +L: qemu-ppc@nongnu.org +S: Odd Fixes +F: hw/ppc/ppe42_machine.c +F: tests/functional/ppc/test_ppe42.py PReP M: Hervé Poussineau @@ -1546,11 +1566,11 @@ F: hw/dma/i82374.c F: hw/rtc/m48t59-isa.c F: include/hw/isa/pc87312.h F: include/hw/rtc/m48t59.h -F: tests/functional/test_ppc_40p.py +F: tests/functional/ppc/test_40p.py sPAPR (pseries) M: Nicholas Piggin -R: Daniel Henrique Barboza +M: Harsh Prateek Bora R: Harsh Prateek Bora L: qemu-ppc@nongnu.org S: Odd Fixes 
@@ -1569,13 +1589,14 @@ F: tests/qtest/spapr* F: tests/qtest/libqos/*spapr* F: tests/qtest/rtas* F: tests/qtest/libqos/rtas* -F: tests/functional/test_ppc64_pseries.py -F: tests/functional/test_ppc64_hv.py -F: tests/functional/test_ppc64_tuxrun.py +F: tests/functional/ppc64/test_pseries.py +F: tests/functional/ppc64/test_hv.py +F: tests/functional/ppc64/test_tuxrun.py PowerNV (Non-Virtualized) M: Nicholas Piggin -R: Frédéric Barrat +R: Aditya Gupta +R: Glenn Miles L: qemu-ppc@nongnu.org S: Odd Fixes F: docs/system/ppc/powernv.rst @@ -1590,7 +1611,7 @@ F: include/hw/ssi/pnv_spi* F: pc-bios/skiboot.lid F: pc-bios/pnv-pnor.bin F: tests/qtest/pnv* -F: tests/functional/test_ppc64_powernv.py +F: tests/functional/ppc64/test_powernv.py pca955x M: Glenn Miles @@ -1605,7 +1626,7 @@ M: Edgar E. Iglesias L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/ppc/virtex_ml507.c -F: tests/functional/test_ppc_virtex_ml507.py +F: tests/functional/ppc/test_virtex_ml507.py sam460ex M: BALATON Zoltan @@ -1621,7 +1642,7 @@ F: pc-bios/dtb/canyonlands.dt[sb] F: pc-bios/u-boot-sam460ex-20100605.bin F: roms/u-boot-sam460ex F: docs/system/ppc/amigang.rst -F: tests/functional/test_ppc_sam460ex.py +F: tests/functional/ppc/test_sam460ex.py pegasos2 M: BALATON Zoltan @@ -1639,7 +1660,7 @@ S: Maintained F: hw/ppc/amigaone.c F: hw/pci-host/articia.c F: include/hw/pci-host/articia.h -F: tests/functional/test_ppc_amiga.py +F: tests/functional/ppc/test_amiga.py Virtual Open Firmware (VOF) M: Alexey Kardashevskiy @@ -1695,6 +1716,7 @@ S: Supported F: docs/system/riscv/sifive_u.rst F: hw/*/*sifive*.c F: include/hw/*/*sifive*.h +F: tests/functional/test_riscv64_sifive_u.py AMD Microblaze-V Generic Board M: Sai Pavan Boddu @@ -1716,7 +1738,7 @@ R: Yoshinori Sato S: Orphan F: docs/system/target-rx.rst F: hw/rx/rx-gdbsim.c -F: tests/functional/test_rx_gdbsim.py +F: tests/functional/rx/test_gdbsim.py SH4 Machines ------------ @@ -1731,8 +1753,8 @@ F: hw/pci-host/sh_pci.c F: hw/timer/sh_timer.c F: 
include/hw/sh4/sh_intc.h F: include/hw/timer/tmu012.h -F: tests/functional/test_sh4*_r2d.py -F: tests/functional/test_sh4_tuxrun.py +F: tests/functional/sh4*/test_r2d.py +F: tests/functional/sh4/test_tuxrun.py SPARC Machines -------------- @@ -1750,7 +1772,7 @@ F: include/hw/nvram/sun_nvram.h F: include/hw/sparc/sparc32_dma.h F: include/hw/sparc/sun4m_iommu.h F: pc-bios/openbios-sparc32 -F: tests/functional/test_sparc_sun4m.py +F: tests/functional/sparc/test_sun4m.py Sun4u M: Mark Cave-Ayland @@ -1763,8 +1785,8 @@ F: include/hw/pci-host/sabre.h F: hw/pci-bridge/simba.c F: include/hw/pci-bridge/simba.h F: pc-bios/openbios-sparc64 -F: tests/functional/test_sparc64_sun4u.py -F: tests/functional/test_sparc64_tuxrun.py +F: tests/functional/sparc64/test_sun4u.py +F: tests/functional/sparc64/test_tuxrun.py Sun4v M: Artyom Tarasenko @@ -1792,7 +1814,7 @@ S: Supported F: hw/s390x/ F: include/hw/s390x/ F: configs/devices/s390x-softmmu/default.mak -F: tests/functional/test_s390x_* +F: tests/functional/s390x T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -1806,7 +1828,7 @@ F: hw/s390x/ipl.* F: pc-bios/s390-ccw/ F: pc-bios/s390-ccw.img F: docs/devel/s390-dasd-ipl.rst -F: tests/functional/test_s390x_pxelinux.py +F: tests/functional/s390x/test_pxelinux.py T: git https://github.com/borntraeger/qemu.git s390-next L: qemu-s390x@nongnu.org @@ -1860,7 +1882,7 @@ F: hw/s390x/cpu-topology.c F: target/s390x/kvm/stsi-topology.c F: docs/devel/s390-cpu-topology.rst F: docs/system/s390x/cpu-topology.rst -F: tests/functional/test_s390x_topology.py +F: tests/functional/s390x/test_topology.py X86 Machines ------------ @@ -1888,12 +1910,12 @@ F: hw/isa/apm.c F: include/hw/isa/apm.h F: tests/unit/test-x86-topo.c F: tests/qtest/test-x86-cpuid-compat.c -F: tests/functional/test_i386_tuxrun.py -F: tests/functional/test_linux_initrd.py -F: tests/functional/test_mem_addr_space.py -F: tests/functional/test_pc_cpu_hotplug_props.py -F: 
tests/functional/test_x86_64_tuxrun.py -F: tests/functional/test_x86_cpu_model_versions.py +F: tests/functional/i386/test_tuxrun.py +F: tests/functional/x86_64/test_linux_initrd.py +F: tests/functional/x86_64/test_mem_addr_space.py +F: tests/functional/x86_64/test_pc_cpu_hotplug_props.py +F: tests/functional/x86_64/test_tuxrun.py +F: tests/functional/x86_64/test_cpu_model_versions.py PC Chipset M: Michael S. Tsirkin @@ -1969,8 +1991,8 @@ F: include/hw/boards.h F: include/hw/core/cpu.h F: include/hw/cpu/cluster.h F: include/system/numa.h -F: tests/functional/test_cpu_queries.py -F: tests/functional/test_empty_cpu_model.py +F: tests/functional/x86_64/test_cpu_queries.py +F: tests/functional/generic/test_empty_cpu_model.py F: tests/unit/test-smp-parse.c T: git https://gitlab.com/ehabkost/qemu.git machine-next @@ -1999,7 +2021,7 @@ S: Maintained F: hw/xtensa/xtfpga.c F: hw/net/opencores_eth.c F: include/hw/xtensa/mx_pic.h -F: tests/functional/test_xtensa_lx60.py +F: tests/functional/xtensa/test_lx60.py Devices ------- @@ -2076,7 +2098,7 @@ S: Odd Fixes F: hw/*/omap* F: include/hw/arm/omap.h F: docs/system/arm/sx1.rst -F: tests/functional/test_arm_sx1.py +F: tests/functional/arm/test_sx1.py IPack M: Alberto Garcia @@ -2108,7 +2130,7 @@ ARM PCI Hotplug M: Gustavo Romero L: qemu-arm@nongnu.org S: Supported -F: tests/functional/test_aarch64_hotplug_pci.py +F: tests/functional/aarch64/test_hotplug_pci.py ACPI/SMBIOS M: Michael S. Tsirkin @@ -2154,7 +2176,7 @@ M: Ani Sinha M: Michael S. 
Tsirkin S: Supported F: tests/functional/acpi-bits/* -F: tests/functional/test_acpi_bits.py +F: tests/functional/x86_64/test_acpi_bits.py F: docs/devel/testing/acpi-bits.rst ACPI/HEST/GHES @@ -2165,6 +2187,16 @@ F: hw/acpi/ghes.c F: include/hw/acpi/ghes.h F: docs/specs/acpi_hest_ghes.rst +ACPI/HEST/GHES/ARM processor CPER +R: Mauro Carvalho Chehab +S: Maintained +F: hw/arm/ghes_cper.c +F: hw/acpi/ghes_cper_stub.c +F: qapi/acpi-hest.json +F: scripts/ghes_inject.py +F: scripts/arm_processor_error.py +F: scripts/qmp_helper.py + ppc4xx L: qemu-ppc@nongnu.org S: Orphan @@ -2191,7 +2223,7 @@ S: Odd Fixes F: hw/net/ F: include/hw/net/ F: tests/qtest/virtio-net-test.c -F: tests/functional/test_info_usernet.py +F: tests/functional/generic/test_info_usernet.py F: docs/system/virtio-net-failover.rst T: git https://github.com/jasowang/qemu.git net @@ -2254,7 +2286,7 @@ S: Maintained F: hw/usb/dev-serial.c VFIO -M: Alex Williamson +M: Alex Williamson M: Cédric Le Goater S: Supported F: hw/vfio/* @@ -2262,9 +2294,11 @@ F: util/vfio-helpers.c F: include/hw/vfio/ F: docs/devel/migration/vfio.rst F: qapi/vfio.json +F: migration/vfio-stub.c +F: tests/functional/aarch64/test_device_passthrough.py vfio-igd -M: Alex Williamson +M: Alex Williamson M: Cédric Le Goater M: Tomita Moeko S: Supported @@ -2322,6 +2356,7 @@ F: include/*/vhost* F: subprojects/libvhost-user/ F: block/export/vhost-user* F: util/vhost-user-server.c +F: net/vhost* vhost-shadow-virtqueue R: Eugenio Pérez @@ -2338,7 +2373,7 @@ F: net/vhost-user.c F: include/hw/virtio/ F: docs/devel/virtio* F: docs/devel/migration/virtio.rst -F: tests/functional/test_virtio_version.py +F: tests/functional/x86_64/test_virtio_version.py virtio-balloon M: Michael S. 
Tsirkin @@ -2350,7 +2385,7 @@ F: include/hw/virtio/virtio-balloon.h F: system/balloon.c F: include/system/balloon.h F: tests/qtest/virtio-balloon-test.c -F: tests/functional/test_virtio_balloon.py +F: tests/functional/x86_64/test_virtio_balloon.py virtio-9p M: Christian Schoenebeck @@ -2373,7 +2408,7 @@ F: hw/block/virtio-blk.c F: hw/block/dataplane/* F: include/hw/virtio/virtio-blk-common.h F: tests/qtest/virtio-blk-test.c -F: tests/functional/test_x86_64_hotplug_blk.py +F: tests/functional/x86_64/test_hotplug_blk.py T: git https://github.com/stefanha/qemu.git block virtio-ccw @@ -2597,7 +2632,7 @@ R: Sriram Yagnaraman S: Odd Fixes F: docs/system/devices/igb.rst F: hw/net/igb* -F: tests/functional/test_netdev_ethtool.py +F: tests/functional/x86_64/test_netdev_ethtool.py F: tests/qtest/igb-test.c F: tests/qtest/libqos/igb.c @@ -2636,7 +2671,7 @@ M: Alex Bennée S: Maintained F: hw/core/guest-loader.c F: docs/system/guest-loader.rst -F: tests/functional/test_aarch64_xen.py +F: tests/functional/aarch64/test_xen.py Intel Hexadecimal Object File Loader M: Su Hang @@ -2705,7 +2740,8 @@ F: hw/display/virtio-gpu* F: hw/display/virtio-vga.* F: include/hw/virtio/virtio-gpu.h F: docs/system/devices/virtio-gpu.rst -F: tests/functional/test_aarch64_virt_gpu.py +F: tests/functional/aarch64/test_virt_gpu.py +F: tests/functional/x86_64/test_virtio_gpu.py vhost-user-blk M: Raphael Norwitz @@ -2776,7 +2812,8 @@ F: tests/qtest/fw_cfg-test.c T: git https://github.com/philmd/qemu.git fw_cfg-next XIVE -R: Frédéric Barrat +R: Gautam Menghani +R: Glenn Miles L: qemu-ppc@nongnu.org S: Odd Fixes F: hw/*/*xive* @@ -3125,7 +3162,7 @@ S: Supported F: include/qemu/option.h F: tests/unit/test-keyval.c F: tests/unit/test-qemu-opts.c -F: tests/functional/test_version.py +F: tests/functional/generic/test_version.py F: util/keyval.c F: util/qemu-option.c @@ -3200,6 +3237,7 @@ S: Supported F: include/system/ioport.h F: include/exec/memop.h F: include/system/memory.h +F: include/system/physmem.h F: 
include/system/ram_addr.h F: include/system/ramblock.h F: include/system/memory_mapping.h @@ -3243,7 +3281,7 @@ F: include/ui/ F: qapi/ui.json F: util/drm.c F: docs/devel/ui.rst -F: tests/functional/test_vnc.py +F: tests/functional/generic/test_vnc.py Cocoa graphics M: Peter Maydell @@ -3445,6 +3483,7 @@ F: qom/ F: tests/unit/check-qom-interface.c F: tests/unit/check-qom-proplist.c F: tests/unit/test-qdev-global-props.c +F: tests/qtest/qom-test.c QOM boilerplate conversion script M: Eduardo Habkost @@ -3508,9 +3547,18 @@ F: include/hw/registerfields.h Rust M: Manos Pitsidianakis S: Maintained -F: rust/qemu-api -F: rust/qemu-api-macros +F: rust/bql/ +F: rust/chardev/ +F: rust/common/ +F: rust/hw/core/ +F: rust/migration/ +F: rust/qemu-macros/ +F: rust/qom/ F: rust/rustfmt.toml +F: rust/system/ +F: rust/tests/ +F: rust/util/ +F: scripts/get-wraps-from-cargo-registry.py Rust-related patches CC here L: qemu-rust@nongnu.org @@ -3554,6 +3602,7 @@ F: scripts/tracetool/ F: scripts/qemu-trace-stap* F: docs/tools/qemu-trace-stap.rst F: docs/devel/tracing.rst +F: tests/tracetool/ T: git https://github.com/stefanha/qemu.git tracing Simpletrace @@ -3594,8 +3643,11 @@ F: include/migration/ F: include/qemu/userfaultfd.h F: migration/ F: scripts/vmstate-static-checker.py -F: tests/functional/test_migration.py -F: tests/vmstate-static-checker-data/ +F: tests/functional/migration.py +F: tests/functional/*/*migration.py +F: tests/functional/generic/test_vmstate.py +F: tests/functional/x86_64/test_bad_vmstate.py +F: tests/data/vmstate-static-checker/ F: tests/qtest/migration/ F: tests/qtest/migration-* F: docs/devel/migration/ @@ -3763,8 +3815,10 @@ F: include/system/replay.h F: docs/devel/replay.rst F: docs/system/replay.rst F: stubs/replay.c -F: tests/functional/*reverse_debug*.py -F: tests/functional/*replay*.py +F: tests/functional/replay_kernel.py +F: tests/functional/reverse_debugging.py +F: tests/functional/*/*replay*.py +F: tests/functional/*/*reverse_debug*.py F: 
qapi/replay.json IOVA Tree @@ -3848,7 +3902,7 @@ S: Supported F: hw/i386/intel_iommu.c F: hw/i386/intel_iommu_internal.h F: include/hw/i386/intel_iommu.h -F: tests/functional/test_intel_iommu.py +F: tests/functional/x86_64/test_intel_iommu.py F: tests/qtest/intel-iommu-test.c AMD-Vi Emulation @@ -3910,7 +3964,7 @@ F: configs/targets/*linux-user.mak F: scripts/qemu-binfmt-conf.sh F: scripts/update-syscalltbl.sh F: scripts/update-mips-syscall-args.sh -F: tests/functional/test_arm_bflt.py +F: tests/functional/arm/test_bflt.py Tiny Code Generator (TCG) ------------------------- @@ -3930,7 +3984,7 @@ S: Maintained F: docs/devel/tcg-plugins.rst F: plugins/ F: tests/tcg/plugins/ -F: tests/functional/test_aarch64_tcg_plugins.py +F: tests/functional/aarch64/test_tcg_plugins.py F: contrib/plugins/ F: scripts/qemu-plugin-symbols.py @@ -4283,7 +4337,8 @@ F: hw/remote/vfio-user-obj.c F: include/hw/remote/vfio-user-obj.h F: hw/remote/iommu.c F: include/hw/remote/iommu.h -F: tests/functional/test_multiprocess.py +F: tests/functional/multiprocess.py +F: tests/functional/*/*multiprocess.py VFIO-USER: M: John Levon @@ -4295,6 +4350,7 @@ F: docs/system/devices/vfio-user.rst F: hw/vfio-user/* F: include/hw/vfio-user/* F: subprojects/libvfio-user +F: tests/functional/x86_64/test_vfio_user_client.py EBPF: M: Jason Wang @@ -4322,7 +4378,7 @@ F: scripts/ci/ F: tests/docker/ F: tests/vm/ F: tests/lcitool/ -F: tests/functional/test_*_tuxrun.py +F: tests/functional/*/test_tuxrun.py F: scripts/archive-source.sh F: docs/devel/testing/ci* F: docs/devel/testing/main.rst @@ -4342,6 +4398,7 @@ M: Thomas Huth R: Philippe Mathieu-Daudé R: Daniel P. 
Berrange F: docs/devel/testing/functional.rst +F: scripts/clean_functional_cache.py F: tests/functional/qemu_test/ Windows Hosted Continuous Integration @@ -4389,7 +4446,6 @@ R: Philippe Mathieu-Daudé S: Maintained F: meson.build F: meson_options.txt -F: scripts/meson-buildoptions.* F: scripts/check_sparse.py F: scripts/symlink-install-tree.py @@ -4400,6 +4456,9 @@ R: Thomas Huth S: Maintained F: Makefile F: configure +F: pythondeps.toml +F: scripts/git-submodule.sh +F: scripts/meson-buildoptions.* F: scripts/mtest2make.py F: tests/Makefile.include @@ -4425,6 +4484,7 @@ F: po/*.po Sphinx documentation configuration and build machinery M: John Snow M: Peter Maydell +M: Mauro Carvalho Chehab S: Maintained F: docs/conf.py F: docs/*/conf.py @@ -4432,6 +4492,9 @@ F: docs/requirements.txt F: docs/sphinx/ F: docs/_templates/ F: docs/devel/docs.rst +F: docs/devel/qapi-domain.rst +F: scripts/kernel-doc +F: scripts/lib/kdoc/ Rust build system integration M: Manos Pitsidianakis diff --git a/VERSION b/VERSION index 54e6ccf8546b6..9856be5dd9873 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -10.0.50 +10.1.50 diff --git a/accel/Kconfig b/accel/Kconfig index 4263cab72272e..a60f1149238bc 100644 --- a/accel/Kconfig +++ b/accel/Kconfig @@ -13,6 +13,9 @@ config TCG config KVM bool +config MSHV + bool + config XEN bool select FSDEV_9P if VIRTFS diff --git a/accel/accel-irq.c b/accel/accel-irq.c new file mode 100644 index 0000000000000..7f864e35c4e71 --- /dev/null +++ b/accel/accel-irq.c @@ -0,0 +1,106 @@ +/* + * Accelerated irqchip abstraction + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: Ziqiao Zhou + * Magnus Kulke + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" + +#include "system/kvm.h" +#include "system/mshv.h" +#include "system/accel-irq.h" + +int accel_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + return mshv_irqchip_add_msi_route(vector, dev); + } +#endif + if (kvm_enabled()) { + return kvm_irqchip_add_msi_route(c, vector, dev); + } + return -ENOSYS; +} + +int accel_irqchip_update_msi_route(int vector, MSIMessage msg, PCIDevice *dev) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + return mshv_irqchip_update_msi_route(vector, msg, dev); + } +#endif + if (kvm_enabled()) { + return kvm_irqchip_update_msi_route(kvm_state, vector, msg, dev); + } + return -ENOSYS; +} + +void accel_irqchip_commit_route_changes(KVMRouteChange *c) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + mshv_irqchip_commit_routes(); + } +#endif + if (kvm_enabled()) { + kvm_irqchip_commit_route_changes(c); + } +} + +void accel_irqchip_commit_routes(void) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + mshv_irqchip_commit_routes(); + } +#endif + if (kvm_enabled()) { + kvm_irqchip_commit_routes(kvm_state); + } +} + +void accel_irqchip_release_virq(int virq) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + mshv_irqchip_release_virq(virq); + } +#endif + if (kvm_enabled()) { + kvm_irqchip_release_virq(kvm_state, virq); + } +} + +int accel_irqchip_add_irqfd_notifier_gsi(EventNotifier *n, EventNotifier *rn, + int virq) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + return mshv_irqchip_add_irqfd_notifier_gsi(n, rn, virq); + } +#endif + if (kvm_enabled()) { + return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, rn, virq); + } + return -ENOSYS; +} + +int 
accel_irqchip_remove_irqfd_notifier_gsi(EventNotifier *n, int virq) +{ +#ifdef CONFIG_MSHV_IS_POSSIBLE + if (mshv_msi_via_irqfd_enabled()) { + return mshv_irqchip_remove_irqfd_notifier_gsi(n, virq); + } +#endif + if (kvm_enabled()) { + return kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, virq); + } + return -ENOSYS; +} diff --git a/accel/dummy-cpus.c b/accel/dummy-cpus.c index 03cfc0fa01e65..5752f6302c870 100644 --- a/accel/dummy-cpus.c +++ b/accel/dummy-cpus.c @@ -43,6 +43,7 @@ static void *dummy_cpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { + qemu_process_cpu_events(cpu); bql_unlock(); #ifndef _WIN32 do { @@ -57,7 +58,6 @@ static void *dummy_cpu_thread_fn(void *arg) qemu_sem_wait(&cpu->sem); #endif bql_lock(); - qemu_wait_io_event(cpu); } while (!cpu->unplug); bql_unlock(); diff --git a/accel/hvf/hvf-accel-ops.c b/accel/hvf/hvf-accel-ops.c index d488d6afbacf6..8b794c2d418a9 100644 --- a/accel/hvf/hvf-accel-ops.c +++ b/accel/hvf/hvf-accel-ops.c @@ -192,13 +192,13 @@ static void *hvf_cpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { + qemu_process_cpu_events(cpu); if (cpu_can_run(cpu)) { r = hvf_vcpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } - qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); hvf_vcpu_destroy(cpu); diff --git a/accel/hvf/hvf-all.c b/accel/hvf/hvf-all.c index e67a8105a66a0..0a4b498e8369c 100644 --- a/accel/hvf/hvf-all.c +++ b/accel/hvf/hvf-all.c @@ -84,7 +84,7 @@ static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags) trace_hvf_vm_map(slot->start, slot->size, slot->mem, flags, flags & HV_MEMORY_READ ? 'R' : '-', flags & HV_MEMORY_WRITE ? 'W' : '-', - flags & HV_MEMORY_EXEC ? 'E' : '-'); + flags & HV_MEMORY_EXEC ? 
'X' : '-'); ret = hv_vm_map(slot->mem, slot->start, slot->size, flags); assert_hvf_ok(ret); return 0; diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c index b709187c7d769..8ed6945c2f78e 100644 --- a/accel/kvm/kvm-accel-ops.c +++ b/accel/kvm/kvm-accel-ops.c @@ -47,13 +47,14 @@ static void *kvm_vcpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { + qemu_process_cpu_events(cpu); + if (cpu_can_run(cpu)) { r = kvm_cpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } - qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); kvm_destroy_vcpu(cpu); diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index 890d5ea9f8652..f9254ae654667 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -32,11 +32,13 @@ #include "system/runstate.h" #include "system/cpus.h" #include "system/accel-blocker.h" +#include "system/physmem.h" +#include "system/ramblock.h" #include "accel/accel-ops.h" #include "qemu/bswap.h" #include "exec/tswap.h" +#include "exec/target_page.h" #include "system/memory.h" -#include "system/ram_addr.h" #include "qemu/event_notifier.h" #include "qemu/main-loop.h" #include "trace.h" @@ -358,7 +360,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram, static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new) { KVMState *s = kvm_state; - struct kvm_userspace_memory_region2 mem; + struct kvm_userspace_memory_region2 mem = {}; int ret; mem.slot = slot->slot | (kml->as_id << 16); @@ -414,7 +416,7 @@ static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, boo return ret; } -void kvm_park_vcpu(CPUState *cpu) +static void kvm_park_vcpu(CPUState *cpu) { struct KVMParkedVcpu *vcpu; @@ -426,7 +428,7 @@ void kvm_park_vcpu(CPUState *cpu) QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node); } -int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id) +static int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id) { struct 
KVMParkedVcpu *cpu; int kvm_fd = -ENOENT; @@ -523,7 +525,8 @@ static int do_kvm_destroy_vcpu(CPUState *cpu) } /* If I am the CPU that created coalesced_mmio_ring, then discard it */ - if (s->coalesced_mmio_ring == (void *)cpu->kvm_run + PAGE_SIZE) { + if (s->coalesced_mmio_ring == + (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE) { s->coalesced_mmio_ring = NULL; } @@ -756,7 +759,7 @@ static void kvm_slot_sync_dirty_pages(KVMSlot *slot) ram_addr_t start = slot->ram_start_offset; ram_addr_t pages = slot->memory_size / qemu_real_host_page_size(); - cpu_physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); + physical_memory_set_dirty_lebitmap(slot->dirty_bmap, start, pages); } static void kvm_slot_reset_dirty_pages(KVMSlot *slot) @@ -1595,7 +1598,8 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, mem->ram = ram; mem->flags = kvm_mem_flags(mr); mem->guest_memfd = mr->ram_block->guest_memfd; - mem->guest_memfd_offset = (uint8_t*)ram - mr->ram_block->host; + mem->guest_memfd_offset = mem->guest_memfd >= 0 ? 
+ (uint8_t*)ram - mr->ram_block->host : 0; kvm_slot_init_dirty_bitmap(mem); err = kvm_set_user_memory_region(kml, mem, true); @@ -2776,8 +2780,8 @@ static int kvm_init(AccelState *as, MachineState *ms) kvm_supported_memory_attributes = kvm_vm_check_extension(s, KVM_CAP_MEMORY_ATTRIBUTES); kvm_guest_memfd_supported = - kvm_check_extension(s, KVM_CAP_GUEST_MEMFD) && - kvm_check_extension(s, KVM_CAP_USER_MEMORY2) && + kvm_vm_check_extension(s, KVM_CAP_GUEST_MEMFD) && + kvm_vm_check_extension(s, KVM_CAP_USER_MEMORY2) && (kvm_supported_memory_attributes & KVM_MEMORY_ATTRIBUTE_PRIVATE); kvm_pre_fault_memory_supported = kvm_vm_check_extension(s, KVM_CAP_PRE_FAULT_MEMORY); @@ -2934,22 +2938,32 @@ void kvm_cpu_synchronize_state(CPUState *cpu) } } -static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) +static bool kvm_cpu_synchronize_put(CPUState *cpu, KvmPutState state, + const char *desc) { Error *err = NULL; - int ret = kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE, &err); + int ret = kvm_arch_put_registers(cpu, state, &err); if (ret) { if (err) { - error_reportf_err(err, "Restoring resisters after reset: "); + error_reportf_err(err, "Restoring resisters %s: ", desc); } else { - error_report("Failed to put registers after reset: %s", + error_report("Failed to put registers %s: %s", desc, strerror(-ret)); } - cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); - vm_stop(RUN_STATE_INTERNAL_ERROR); + return false; } cpu->vcpu_dirty = false; + + return true; +} + +static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg) +{ + if (!kvm_cpu_synchronize_put(cpu, KVM_PUT_RESET_STATE, "after reset")) { + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); + vm_stop(RUN_STATE_INTERNAL_ERROR); + } } void kvm_cpu_synchronize_post_reset(CPUState *cpu) @@ -2963,19 +2977,9 @@ void kvm_cpu_synchronize_post_reset(CPUState *cpu) static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg) { - Error *err = NULL; - int ret = 
kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE, &err); - if (ret) { - if (err) { - error_reportf_err(err, "Putting registers after init: "); - } else { - error_report("Failed to put registers after init: %s", - strerror(-ret)); - } + if (!kvm_cpu_synchronize_put(cpu, KVM_PUT_FULL_STATE, "after init")) { exit(1); } - - cpu->vcpu_dirty = false; } void kvm_cpu_synchronize_post_init(CPUState *cpu) @@ -3029,10 +3033,6 @@ static void kvm_eat_signals(CPUState *cpu) if (kvm_immediate_exit) { qatomic_set(&cpu->kvm_run->immediate_exit, 0); - /* Write kvm_run->immediate_exit before the cpu->exit_request - * write in kvm_cpu_exec. - */ - smp_wmb(); return; } @@ -3159,7 +3159,6 @@ int kvm_cpu_exec(CPUState *cpu) trace_kvm_cpu_exec(); if (kvm_arch_process_async_events(cpu)) { - qatomic_set(&cpu->exit_request, 0); return EXCP_HLT; } @@ -3170,24 +3169,16 @@ int kvm_cpu_exec(CPUState *cpu) MemTxAttrs attrs; if (cpu->vcpu_dirty) { - Error *err = NULL; - ret = kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE, &err); - if (ret) { - if (err) { - error_reportf_err(err, "Putting registers after init: "); - } else { - error_report("Failed to put registers after init: %s", - strerror(-ret)); - } + if (!kvm_cpu_synchronize_put(cpu, KVM_PUT_RUNTIME_STATE, + "at runtime")) { ret = -1; break; } - - cpu->vcpu_dirty = false; } kvm_arch_pre_run(cpu, run); - if (qatomic_read(&cpu->exit_request)) { + /* Corresponding store-release is in cpu_exit. */ + if (qatomic_load_acquire(&cpu->exit_request)) { trace_kvm_interrupt_exit_request(); /* * KVM requires us to reenter the kernel after IO exits to complete @@ -3197,13 +3188,15 @@ int kvm_cpu_exec(CPUState *cpu) kvm_cpu_kick_self(); } - /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit. - * Matching barrier in kvm_eat_signals. - */ - smp_rmb(); - run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0); + /* + * After writing cpu->exit_request, cpu_exit() sends a signal that writes + * kvm->run->immediate_exit. 
The signal is already happening after the + * write to cpu->exit_request so, if KVM read kvm->run->immediate_exit + * as true, cpu->exit_request will always read as true. + */ + attrs = kvm_arch_post_run(cpu, run); #ifdef KVM_HAVE_MCE_INJECTION @@ -3346,7 +3339,6 @@ int kvm_cpu_exec(CPUState *cpu) vm_stop(RUN_STATE_INTERNAL_ERROR); } - qatomic_set(&cpu->exit_request, 0); return ret; } @@ -3731,7 +3723,7 @@ int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) have_sigbus_pending = true; pending_sigbus_addr = addr; pending_sigbus_code = code; - qatomic_set(&cpu->exit_request, 1); + qatomic_set(&cpu->exit_request, true); return 0; #else return 1; diff --git a/accel/meson.build b/accel/meson.build index 25b0f100b5121..983dfd0bd55e0 100644 --- a/accel/meson.build +++ b/accel/meson.build @@ -1,6 +1,6 @@ common_ss.add(files('accel-common.c')) specific_ss.add(files('accel-target.c')) -system_ss.add(files('accel-system.c', 'accel-blocker.c', 'accel-qmp.c')) +system_ss.add(files('accel-system.c', 'accel-blocker.c', 'accel-qmp.c', 'accel-irq.c')) user_ss.add(files('accel-user.c')) subdir('tcg') @@ -10,6 +10,7 @@ if have_system subdir('kvm') subdir('xen') subdir('stubs') + subdir('mshv') endif # qtest diff --git a/accel/mshv/irq.c b/accel/mshv/irq.c new file mode 100644 index 0000000000000..adf8f337d9c93 --- /dev/null +++ b/accel/mshv/irq.c @@ -0,0 +1,399 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: Ziqiao Zhou + * Magnus Kulke + * Stanislav Kinsburskii + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "linux/mshv.h" +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "hw/hyperv/hvhdk_mini.h" +#include "hw/hyperv/hvgdk_mini.h" +#include "hw/intc/ioapic.h" +#include "hw/pci/msi.h" +#include "system/mshv.h" +#include "system/mshv_int.h" +#include "trace.h" +#include +#include + +#define MSHV_IRQFD_RESAMPLE_FLAG (1 << MSHV_IRQFD_BIT_RESAMPLE) +#define MSHV_IRQFD_BIT_DEASSIGN_FLAG (1 << MSHV_IRQFD_BIT_DEASSIGN) + +static MshvMsiControl *msi_control; +static QemuMutex msi_control_mutex; + +void mshv_init_msicontrol(void) +{ + qemu_mutex_init(&msi_control_mutex); + msi_control = g_new0(MshvMsiControl, 1); + msi_control->gsi_routes = g_hash_table_new(g_direct_hash, g_direct_equal); + msi_control->updated = false; +} + +static int set_msi_routing(uint32_t gsi, uint64_t addr, uint32_t data) +{ + struct mshv_user_irq_entry *entry; + uint32_t high_addr = addr >> 32; + uint32_t low_addr = addr & 0xFFFFFFFF; + GHashTable *gsi_routes; + + trace_mshv_set_msi_routing(gsi, addr, data); + + if (gsi >= MSHV_MAX_MSI_ROUTES) { + error_report("gsi >= MSHV_MAX_MSI_ROUTES"); + return -1; + } + + assert(msi_control); + + WITH_QEMU_LOCK_GUARD(&msi_control_mutex) { + gsi_routes = msi_control->gsi_routes; + entry = g_hash_table_lookup(gsi_routes, GINT_TO_POINTER(gsi)); + + if (entry + && entry->address_hi == high_addr + && entry->address_lo == low_addr + && entry->data == data) + { + /* nothing to update */ + return 0; + } + + /* free old entry */ + g_free(entry); + + /* create new entry */ + entry = g_new0(struct mshv_user_irq_entry, 1); + entry->gsi = gsi; + entry->address_hi = high_addr; + entry->address_lo = low_addr; + entry->data = data; + + g_hash_table_insert(gsi_routes, GINT_TO_POINTER(gsi), entry); + msi_control->updated = true; + } + + return 0; +} + +static int add_msi_routing(uint64_t addr, uint32_t data) +{ + struct 
mshv_user_irq_entry *route_entry; + uint32_t high_addr = addr >> 32; + uint32_t low_addr = addr & 0xFFFFFFFF; + int gsi; + GHashTable *gsi_routes; + + trace_mshv_add_msi_routing(addr, data); + + assert(msi_control); + + WITH_QEMU_LOCK_GUARD(&msi_control_mutex) { + /* find an empty slot */ + gsi = 0; + gsi_routes = msi_control->gsi_routes; + while (gsi < MSHV_MAX_MSI_ROUTES) { + route_entry = g_hash_table_lookup(gsi_routes, GINT_TO_POINTER(gsi)); + if (!route_entry) { + break; + } + gsi++; + } + if (gsi >= MSHV_MAX_MSI_ROUTES) { + error_report("No empty gsi slot available"); + return -1; + } + + /* create new entry */ + route_entry = g_new0(struct mshv_user_irq_entry, 1); + route_entry->gsi = gsi; + route_entry->address_hi = high_addr; + route_entry->address_lo = low_addr; + route_entry->data = data; + + g_hash_table_insert(gsi_routes, GINT_TO_POINTER(gsi), route_entry); + msi_control->updated = true; + } + + return gsi; +} + +static int commit_msi_routing_table(int vm_fd) +{ + guint len; + int i, ret; + size_t table_size; + struct mshv_user_irq_table *table; + GHashTableIter iter; + gpointer key, value; + + assert(msi_control); + + WITH_QEMU_LOCK_GUARD(&msi_control_mutex) { + if (!msi_control->updated) { + /* nothing to update */ + return 0; + } + + /* Calculate the size of the table */ + len = g_hash_table_size(msi_control->gsi_routes); + table_size = sizeof(struct mshv_user_irq_table) + + len * sizeof(struct mshv_user_irq_entry); + table = g_malloc0(table_size); + + g_hash_table_iter_init(&iter, msi_control->gsi_routes); + i = 0; + while (g_hash_table_iter_next(&iter, &key, &value)) { + struct mshv_user_irq_entry *entry = value; + table->entries[i] = *entry; + i++; + } + table->nr = i; + + trace_mshv_commit_msi_routing_table(vm_fd, len); + + ret = ioctl(vm_fd, MSHV_SET_MSI_ROUTING, table); + g_free(table); + if (ret < 0) { + error_report("Failed to commit msi routing table"); + return -1; + } + msi_control->updated = false; + } + return 0; +} + +static int 
remove_msi_routing(uint32_t gsi) +{ + struct mshv_user_irq_entry *route_entry; + GHashTable *gsi_routes; + + trace_mshv_remove_msi_routing(gsi); + + if (gsi >= MSHV_MAX_MSI_ROUTES) { + error_report("Invalid GSI: %u", gsi); + return -1; + } + + assert(msi_control); + + WITH_QEMU_LOCK_GUARD(&msi_control_mutex) { + gsi_routes = msi_control->gsi_routes; + route_entry = g_hash_table_lookup(gsi_routes, GINT_TO_POINTER(gsi)); + if (route_entry) { + g_hash_table_remove(gsi_routes, GINT_TO_POINTER(gsi)); + g_free(route_entry); + msi_control->updated = true; + } + } + + return 0; +} + +/* Pass an eventfd which is to be used for injecting interrupts from userland */ +static int irqfd(int vm_fd, int fd, int resample_fd, uint32_t gsi, + uint32_t flags) +{ + int ret; + struct mshv_user_irqfd arg = { + .fd = fd, + .resamplefd = resample_fd, + .gsi = gsi, + .flags = flags, + }; + + ret = ioctl(vm_fd, MSHV_IRQFD, &arg); + if (ret < 0) { + error_report("Failed to set irqfd: gsi=%u, fd=%d", gsi, fd); + return -1; + } + return ret; +} + +static int register_irqfd(int vm_fd, int event_fd, uint32_t gsi) +{ + int ret; + + trace_mshv_register_irqfd(vm_fd, event_fd, gsi); + + ret = irqfd(vm_fd, event_fd, 0, gsi, 0); + if (ret < 0) { + error_report("Failed to register irqfd: gsi=%u", gsi); + return -1; + } + return 0; +} + +static int register_irqfd_with_resample(int vm_fd, int event_fd, + int resample_fd, uint32_t gsi) +{ + int ret; + uint32_t flags = MSHV_IRQFD_RESAMPLE_FLAG; + + ret = irqfd(vm_fd, event_fd, resample_fd, gsi, flags); + if (ret < 0) { + error_report("Failed to register irqfd with resample: gsi=%u", gsi); + return -errno; + } + return 0; +} + +static int unregister_irqfd(int vm_fd, int event_fd, uint32_t gsi) +{ + int ret; + uint32_t flags = MSHV_IRQFD_BIT_DEASSIGN_FLAG; + + ret = irqfd(vm_fd, event_fd, 0, gsi, flags); + if (ret < 0) { + error_report("Failed to unregister irqfd: gsi=%u", gsi); + return -errno; + } + return 0; +} + +static int 
irqchip_update_irqfd_notifier_gsi(const EventNotifier *event, + const EventNotifier *resample, + int virq, bool add) +{ + int fd = event_notifier_get_fd(event); + int rfd = resample ? event_notifier_get_fd(resample) : -1; + int vm_fd = mshv_state->vm; + + trace_mshv_irqchip_update_irqfd_notifier_gsi(fd, rfd, virq, add); + + if (!add) { + return unregister_irqfd(vm_fd, fd, virq); + } + + if (rfd > 0) { + return register_irqfd_with_resample(vm_fd, fd, rfd, virq); + } + + return register_irqfd(vm_fd, fd, virq); +} + + +int mshv_irqchip_add_msi_route(int vector, PCIDevice *dev) +{ + MSIMessage msg = { 0, 0 }; + int virq = 0; + + if (pci_available && dev) { + msg = pci_get_msi_message(dev, vector); + virq = add_msi_routing(msg.address, le32_to_cpu(msg.data)); + } + + return virq; +} + +void mshv_irqchip_release_virq(int virq) +{ + remove_msi_routing(virq); +} + +int mshv_irqchip_update_msi_route(int virq, MSIMessage msg, PCIDevice *dev) +{ + int ret; + + ret = set_msi_routing(virq, msg.address, le32_to_cpu(msg.data)); + if (ret < 0) { + error_report("Failed to set msi routing"); + return -1; + } + + return 0; +} + +int mshv_request_interrupt(MshvState *mshv_state, uint32_t interrupt_type, uint32_t vector, + uint32_t vp_index, bool logical_dest_mode, + bool level_triggered) +{ + int ret; + int vm_fd = mshv_state->vm; + + if (vector == 0) { + warn_report("Ignoring request for interrupt vector 0"); + return 0; + } + + union hv_interrupt_control control = { + .interrupt_type = interrupt_type, + .level_triggered = level_triggered, + .logical_dest_mode = logical_dest_mode, + .rsvd = 0, + }; + + struct hv_input_assert_virtual_interrupt arg = {0}; + arg.control = control; + arg.dest_addr = (uint64_t)vp_index; + arg.vector = vector; + + struct mshv_root_hvcall args = {0}; + args.code = HVCALL_ASSERT_VIRTUAL_INTERRUPT; + args.in_sz = sizeof(arg); + args.in_ptr = (uint64_t)&arg; + + ret = mshv_hvcall(vm_fd, &args); + if (ret < 0) { + error_report("Failed to request interrupt"); + 
return -errno; + } + return 0; +} + +void mshv_irqchip_commit_routes(void) +{ + int ret; + int vm_fd = mshv_state->vm; + + ret = commit_msi_routing_table(vm_fd); + if (ret < 0) { + error_report("Failed to commit msi routing table"); + abort(); + } +} + +int mshv_irqchip_add_irqfd_notifier_gsi(const EventNotifier *event, + const EventNotifier *resample, + int virq) +{ + return irqchip_update_irqfd_notifier_gsi(event, resample, virq, true); +} + +int mshv_irqchip_remove_irqfd_notifier_gsi(const EventNotifier *event, + int virq) +{ + return irqchip_update_irqfd_notifier_gsi(event, NULL, virq, false); +} + +int mshv_reserve_ioapic_msi_routes(int vm_fd) +{ + int ret, gsi; + + /* + * Reserve GSI 0-23 for IOAPIC pins, to avoid conflicts of legacy + * peripherals with MSI-X devices + */ + for (gsi = 0; gsi < IOAPIC_NUM_PINS; gsi++) { + ret = add_msi_routing(0, 0); + if (ret < 0) { + error_report("Failed to reserve GSI %d", gsi); + return -1; + } + } + + ret = commit_msi_routing_table(vm_fd); + if (ret < 0) { + error_report("Failed to commit reserved IOAPIC MSI routes"); + return -1; + } + + return 0; +} diff --git a/accel/mshv/mem.c b/accel/mshv/mem.c new file mode 100644 index 0000000000000..0e2164af3ee3a --- /dev/null +++ b/accel/mshv/mem.c @@ -0,0 +1,563 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: + * Magnus Kulke + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + */ + +#include "qemu/osdep.h" +#include "qemu/lockable.h" +#include "qemu/error-report.h" +#include "qemu/rcu.h" +#include "linux/mshv.h" +#include "system/address-spaces.h" +#include "system/mshv.h" +#include "system/mshv_int.h" +#include "exec/memattrs.h" +#include +#include "trace.h" + +typedef struct SlotsRCUReclaim { + struct rcu_head rcu; + GList *old_head; + MshvMemorySlot *removed_slot; +} SlotsRCUReclaim; + +static void rcu_reclaim_slotlist(struct rcu_head *rcu) +{ + SlotsRCUReclaim *r = container_of(rcu, SlotsRCUReclaim, rcu); + g_list_free(r->old_head); + g_free(r->removed_slot); + g_free(r); +} + +static void publish_slots(GList *new_head, GList *old_head, + MshvMemorySlot *removed_slot) +{ + MshvMemorySlotManager *manager = &mshv_state->msm; + + assert(manager); + qatomic_store_release(&manager->slots, new_head); + + SlotsRCUReclaim *r = g_new(SlotsRCUReclaim, 1); + r->old_head = old_head; + r->removed_slot = removed_slot; + + call_rcu1(&r->rcu, rcu_reclaim_slotlist); +} + +/* Needs to be called with mshv_state->msm.mutex held */ +static int remove_slot(MshvMemorySlot *slot) +{ + GList *old_head, *new_head; + MshvMemorySlotManager *manager = &mshv_state->msm; + + assert(manager); + old_head = qatomic_load_acquire(&manager->slots); + + if (!g_list_find(old_head, slot)) { + error_report("slot requested for removal not found"); + return -1; + } + + new_head = g_list_copy(old_head); + new_head = g_list_remove(new_head, slot); + manager->n_slots--; + + publish_slots(new_head, old_head, slot); + + return 0; +} + +/* Needs to be called with mshv_state->msm.mutex held */ +static MshvMemorySlot *append_slot(uint64_t gpa, uint64_t userspace_addr, + uint64_t size, bool readonly) +{ + GList *old_head, *new_head; + MshvMemorySlot *slot; + MshvMemorySlotManager *manager = &mshv_state->msm; + + assert(manager); + + old_head = qatomic_load_acquire(&manager->slots); + + if 
(manager->n_slots >= MSHV_MAX_MEM_SLOTS) { + error_report("no free memory slots available"); + return NULL; + } + + slot = g_new0(MshvMemorySlot, 1); + slot->guest_phys_addr = gpa; + slot->userspace_addr = userspace_addr; + slot->memory_size = size; + slot->readonly = readonly; + + new_head = g_list_copy(old_head); + new_head = g_list_append(new_head, slot); + manager->n_slots++; + + publish_slots(new_head, old_head, NULL); + + return slot; +} + +static int slot_overlaps(const MshvMemorySlot *slot1, + const MshvMemorySlot *slot2) +{ + uint64_t start_1 = slot1->userspace_addr, + start_2 = slot2->userspace_addr; + size_t len_1 = slot1->memory_size, + len_2 = slot2->memory_size; + + if (slot1 == slot2) { + return -1; + } + + return ranges_overlap(start_1, len_1, start_2, len_2) ? 0 : -1; +} + +static bool is_mapped(MshvMemorySlot *slot) +{ + /* Subsequent reads of mapped field see a fully-initialized slot */ + return qatomic_load_acquire(&slot->mapped); +} + +/* + * Find slot that is: + * - overlapping in userspace + * - currently mapped in the guest + * + * Needs to be called with mshv_state->msm.mutex or RCU read lock held. 
+ */ +static MshvMemorySlot *find_overlap_mem_slot(GList *head, MshvMemorySlot *slot) +{ + GList *found; + MshvMemorySlot *overlap_slot; + + found = g_list_find_custom(head, slot, (GCompareFunc) slot_overlaps); + + if (!found) { + return NULL; + } + + overlap_slot = found->data; + if (!overlap_slot || !is_mapped(overlap_slot)) { + return NULL; + } + + return overlap_slot; +} + +static int set_guest_memory(int vm_fd, + const struct mshv_user_mem_region *region) +{ + int ret; + + ret = ioctl(vm_fd, MSHV_SET_GUEST_MEMORY, region); + if (ret < 0) { + error_report("failed to set guest memory: %s", strerror(errno)); + return -1; + } + + return 0; +} + +static int map_or_unmap(int vm_fd, const MshvMemorySlot *slot, bool map) +{ + struct mshv_user_mem_region region = {0}; + + region.guest_pfn = slot->guest_phys_addr >> MSHV_PAGE_SHIFT; + region.size = slot->memory_size; + region.userspace_addr = slot->userspace_addr; + + if (!map) { + region.flags |= (1 << MSHV_SET_MEM_BIT_UNMAP); + trace_mshv_unmap_memory(slot->userspace_addr, slot->guest_phys_addr, + slot->memory_size); + return set_guest_memory(vm_fd, ®ion); + } + + region.flags = BIT(MSHV_SET_MEM_BIT_EXECUTABLE); + if (!slot->readonly) { + region.flags |= BIT(MSHV_SET_MEM_BIT_WRITABLE); + } + + trace_mshv_map_memory(slot->userspace_addr, slot->guest_phys_addr, + slot->memory_size); + return set_guest_memory(vm_fd, ®ion); +} + +static int slot_matches_region(const MshvMemorySlot *slot1, + const MshvMemorySlot *slot2) +{ + return (slot1->guest_phys_addr == slot2->guest_phys_addr && + slot1->userspace_addr == slot2->userspace_addr && + slot1->memory_size == slot2->memory_size) ? 
0 : -1; +} + +/* Needs to be called with mshv_state->msm.mutex held */ +static MshvMemorySlot *find_mem_slot_by_region(uint64_t gpa, uint64_t size, + uint64_t userspace_addr) +{ + MshvMemorySlot ref_slot = { + .guest_phys_addr = gpa, + .userspace_addr = userspace_addr, + .memory_size = size, + }; + GList *found; + MshvMemorySlotManager *manager = &mshv_state->msm; + + assert(manager); + found = g_list_find_custom(manager->slots, &ref_slot, + (GCompareFunc) slot_matches_region); + + return found ? found->data : NULL; +} + +static int slot_covers_gpa(const MshvMemorySlot *slot, uint64_t *gpa_p) +{ + uint64_t gpa_offset, gpa = *gpa_p; + + gpa_offset = gpa - slot->guest_phys_addr; + return (slot->guest_phys_addr <= gpa && gpa_offset < slot->memory_size) + ? 0 : -1; +} + +/* Needs to be called with mshv_state->msm.mutex or RCU read lock held */ +static MshvMemorySlot *find_mem_slot_by_gpa(GList *head, uint64_t gpa) +{ + GList *found; + MshvMemorySlot *slot; + + trace_mshv_find_slot_by_gpa(gpa); + + found = g_list_find_custom(head, &gpa, (GCompareFunc) slot_covers_gpa); + if (found) { + slot = found->data; + trace_mshv_found_slot(slot->userspace_addr, slot->guest_phys_addr, + slot->memory_size); + return slot; + } + + return NULL; +} + +/* Needs to be called with mshv_state->msm.mutex held */ +static void set_mapped(MshvMemorySlot *slot, bool mapped) +{ + /* prior writes to mapped field becomes visible before readers see slot */ + qatomic_store_release(&slot->mapped, mapped); +} + +MshvRemapResult mshv_remap_overlap_region(int vm_fd, uint64_t gpa) +{ + MshvMemorySlot *gpa_slot, *overlap_slot; + GList *head; + int ret; + MshvMemorySlotManager *manager = &mshv_state->msm; + + /* fast path, called often by unmapped_gpa vm exit */ + WITH_RCU_READ_LOCK_GUARD() { + assert(manager); + head = qatomic_load_acquire(&manager->slots); + /* return early if no slot is found */ + gpa_slot = find_mem_slot_by_gpa(head, gpa); + if (gpa_slot == NULL) { + return MshvRemapNoMapping; + } + + 
/* return early if no overlapping slot is found */ + overlap_slot = find_overlap_mem_slot(head, gpa_slot); + if (overlap_slot == NULL) { + return MshvRemapNoOverlap; + } + } + + /* + * We'll modify the mapping list, so we need to upgrade to mutex and + * recheck. + */ + assert(manager); + QEMU_LOCK_GUARD(&manager->mutex); + + /* return early if no slot is found */ + gpa_slot = find_mem_slot_by_gpa(manager->slots, gpa); + if (gpa_slot == NULL) { + return MshvRemapNoMapping; + } + + /* return early if no overlapping slot is found */ + overlap_slot = find_overlap_mem_slot(manager->slots, gpa_slot); + if (overlap_slot == NULL) { + return MshvRemapNoOverlap; + } + + /* unmap overlapping slot */ + ret = map_or_unmap(vm_fd, overlap_slot, false); + if (ret < 0) { + error_report("failed to unmap overlap region"); + abort(); + } + set_mapped(overlap_slot, false); + warn_report("mapped out userspace_addr=0x%016lx gpa=0x%010lx size=0x%lx", + overlap_slot->userspace_addr, + overlap_slot->guest_phys_addr, + overlap_slot->memory_size); + + /* map region for gpa */ + ret = map_or_unmap(vm_fd, gpa_slot, true); + if (ret < 0) { + error_report("failed to map new region"); + abort(); + } + set_mapped(gpa_slot, true); + warn_report("mapped in userspace_addr=0x%016lx gpa=0x%010lx size=0x%lx", + gpa_slot->userspace_addr, gpa_slot->guest_phys_addr, + gpa_slot->memory_size); + + return MshvRemapOk; +} + +static int handle_unmapped_mmio_region_read(uint64_t gpa, uint64_t size, + uint8_t *data) +{ + warn_report("read from unmapped mmio region gpa=0x%lx size=%lu", gpa, size); + + if (size == 0 || size > 8) { + error_report("invalid size %lu for reading from unmapped mmio region", + size); + return -1; + } + + memset(data, 0xFF, size); + + return 0; +} + +int mshv_guest_mem_read(uint64_t gpa, uint8_t *data, uintptr_t size, + bool is_secure_mode, bool instruction_fetch) +{ + int ret; + MemTxAttrs memattr = { .secure = is_secure_mode }; + + if (instruction_fetch) { + trace_mshv_insn_fetch(gpa, 
size); + } else { + trace_mshv_mem_read(gpa, size); + } + + ret = address_space_rw(&address_space_memory, gpa, memattr, (void *)data, + size, false); + if (ret == MEMTX_OK) { + return 0; + } + + if (ret == MEMTX_DECODE_ERROR) { + return handle_unmapped_mmio_region_read(gpa, size, data); + } + + error_report("failed to read guest memory at 0x%lx", gpa); + return -1; +} + +int mshv_guest_mem_write(uint64_t gpa, const uint8_t *data, uintptr_t size, + bool is_secure_mode) +{ + int ret; + MemTxAttrs memattr = { .secure = is_secure_mode }; + + trace_mshv_mem_write(gpa, size); + ret = address_space_rw(&address_space_memory, gpa, memattr, (void *)data, + size, true); + if (ret == MEMTX_OK) { + return 0; + } + + if (ret == MEMTX_DECODE_ERROR) { + warn_report("write to unmapped mmio region gpa=0x%lx size=%lu", gpa, + size); + return 0; + } + + error_report("Failed to write guest memory"); + return -1; +} + +static int tracked_unmap(int vm_fd, uint64_t gpa, uint64_t size, + uint64_t userspace_addr) +{ + int ret; + MshvMemorySlot *slot; + MshvMemorySlotManager *manager = &mshv_state->msm; + + assert(manager); + + QEMU_LOCK_GUARD(&manager->mutex); + + slot = find_mem_slot_by_region(gpa, size, userspace_addr); + if (!slot) { + trace_mshv_skip_unset_mem(userspace_addr, gpa, size); + /* no work to do */ + return 0; + } + + if (!is_mapped(slot)) { + /* remove slot, no need to unmap */ + return remove_slot(slot); + } + + ret = map_or_unmap(vm_fd, slot, false); + if (ret < 0) { + error_report("failed to unmap memory region"); + return ret; + } + return remove_slot(slot); +} + +static int tracked_map(int vm_fd, uint64_t gpa, uint64_t size, bool readonly, + uint64_t userspace_addr) +{ + MshvMemorySlot *slot, *overlap_slot; + int ret; + MshvMemorySlotManager *manager = &mshv_state->msm; + + assert(manager); + + QEMU_LOCK_GUARD(&manager->mutex); + + slot = find_mem_slot_by_region(gpa, size, userspace_addr); + if (slot) { + error_report("memory region already mapped at gpa=0x%lx, " + 
"userspace_addr=0x%lx, size=0x%lx", + slot->guest_phys_addr, slot->userspace_addr, + slot->memory_size); + return -1; + } + + slot = append_slot(gpa, userspace_addr, size, readonly); + + overlap_slot = find_overlap_mem_slot(manager->slots, slot); + if (overlap_slot) { + trace_mshv_remap_attempt(slot->userspace_addr, + slot->guest_phys_addr, + slot->memory_size); + warn_report("attempt to map region [0x%lx-0x%lx], while " + "[0x%lx-0x%lx] is already mapped in the guest", + userspace_addr, userspace_addr + size - 1, + overlap_slot->userspace_addr, + overlap_slot->userspace_addr + + overlap_slot->memory_size - 1); + + /* do not register mem slot in hv, but record for later swap-in */ + set_mapped(slot, false); + + return 0; + } + + ret = map_or_unmap(vm_fd, slot, true); + if (ret < 0) { + error_report("failed to map memory region"); + return -1; + } + set_mapped(slot, true); + + return 0; +} + +static int set_memory(uint64_t gpa, uint64_t size, bool readonly, + uint64_t userspace_addr, bool add) +{ + int vm_fd = mshv_state->vm; + + if (add) { + return tracked_map(vm_fd, gpa, size, readonly, userspace_addr); + } + + return tracked_unmap(vm_fd, gpa, size, userspace_addr); +} + +/* + * Calculate and align the start address and the size of the section. + * Return the size. If the size is 0, the aligned section is empty. + */ +static hwaddr align_section(MemoryRegionSection *section, hwaddr *start) +{ + hwaddr size = int128_get64(section->size); + hwaddr delta, aligned; + + /* + * works in page size chunks, but the function may be called + * with sub-page size and unaligned start address. Pad the start + * address to next and truncate size to previous page boundary. 
+ */ + aligned = ROUND_UP(section->offset_within_address_space, + qemu_real_host_page_size()); + delta = aligned - section->offset_within_address_space; + *start = aligned; + if (delta > size) { + return 0; + } + + return (size - delta) & qemu_real_host_page_mask(); +} + +void mshv_set_phys_mem(MshvMemoryListener *mml, MemoryRegionSection *section, + bool add) +{ + int ret = 0; + MemoryRegion *area = section->mr; + bool writable = !area->readonly && !area->rom_device; + hwaddr start_addr, mr_offset, size; + void *ram; + + size = align_section(section, &start_addr); + trace_mshv_set_phys_mem(add, section->mr->name, start_addr); + + size = align_section(section, &start_addr); + trace_mshv_set_phys_mem(add, section->mr->name, start_addr); + + /* + * If the memory device is a writable non-ram area, we do not + * want to map it into the guest memory. If it is not a ROM device, + * we want to remove mshv memory mapping, so accesses will trap. + */ + if (!memory_region_is_ram(area)) { + if (writable) { + return; + } else if (!area->romd_mode) { + add = false; + } + } + + if (!size) { + return; + } + + mr_offset = section->offset_within_region + start_addr - + section->offset_within_address_space; + + ram = memory_region_get_ram_ptr(area) + mr_offset; + + ret = set_memory(start_addr, size, !writable, (uint64_t)ram, add); + if (ret < 0) { + error_report("failed to set memory region"); + abort(); + } +} + +void mshv_init_memory_slot_manager(MshvState *mshv_state) +{ + MshvMemorySlotManager *manager; + + assert(mshv_state); + manager = &mshv_state->msm; + + manager->n_slots = 0; + manager->slots = NULL; + qemu_mutex_init(&manager->mutex); +} diff --git a/accel/mshv/meson.build b/accel/mshv/meson.build new file mode 100644 index 0000000000000..d3a2b32581122 --- /dev/null +++ b/accel/mshv/meson.build @@ -0,0 +1,9 @@ +mshv_ss = ss.source_set() +mshv_ss.add(if_true: files( + 'irq.c', + 'mem.c', + 'msr.c', + 'mshv-all.c' +)) + +specific_ss.add_all(when: 'CONFIG_MSHV', if_true: 
mshv_ss) diff --git a/accel/mshv/mshv-all.c b/accel/mshv/mshv-all.c new file mode 100644 index 0000000000000..45174f7c4eba0 --- /dev/null +++ b/accel/mshv/mshv-all.c @@ -0,0 +1,727 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 2025 + * + * Authors: + * Ziqiao Zhou + * Magnus Kulke + * Jinank Jain + * Wei Liu + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/event_notifier.h" +#include "qemu/module.h" +#include "qemu/main-loop.h" +#include "hw/boards.h" + +#include "hw/hyperv/hvhdk.h" +#include "hw/hyperv/hvhdk_mini.h" +#include "hw/hyperv/hvgdk.h" +#include "hw/hyperv/hvgdk_mini.h" +#include "linux/mshv.h" + +#include "qemu/accel.h" +#include "qemu/guest-random.h" +#include "accel/accel-ops.h" +#include "accel/accel-cpu-ops.h" +#include "system/cpus.h" +#include "system/runstate.h" +#include "system/accel-blocker.h" +#include "system/address-spaces.h" +#include "system/mshv.h" +#include "system/mshv_int.h" +#include "system/reset.h" +#include "trace.h" +#include +#include +#include + +#define TYPE_MSHV_ACCEL ACCEL_CLASS_NAME("mshv") + +DECLARE_INSTANCE_CHECKER(MshvState, MSHV_STATE, TYPE_MSHV_ACCEL) + +bool mshv_allowed; + +MshvState *mshv_state; + +static int init_mshv(int *mshv_fd) +{ + int fd = open("/dev/mshv", O_RDWR | O_CLOEXEC); + if (fd < 0) { + error_report("Failed to open /dev/mshv: %s", strerror(errno)); + return -1; + } + *mshv_fd = fd; + return 0; +} + +/* freeze 1 to pause, 0 to resume */ +static int set_time_freeze(int vm_fd, int freeze) +{ + int ret; + struct hv_input_set_partition_property in = {0}; + in.property_code = HV_PARTITION_PROPERTY_TIME_FREEZE; + in.property_value = freeze; + + struct mshv_root_hvcall args = {0}; + args.code = HVCALL_SET_PARTITION_PROPERTY; + args.in_sz = sizeof(in); + args.in_ptr = (uint64_t)∈ + + ret = mshv_hvcall(vm_fd, &args); + if (ret < 0) { + error_report("Failed to set time freeze"); + 
return -1; + } + + return 0; +} + +static int pause_vm(int vm_fd) +{ + int ret; + + ret = set_time_freeze(vm_fd, 1); + if (ret < 0) { + error_report("Failed to pause partition: %s", strerror(errno)); + return -1; + } + + return 0; +} + +static int resume_vm(int vm_fd) +{ + int ret; + + ret = set_time_freeze(vm_fd, 0); + if (ret < 0) { + error_report("Failed to resume partition: %s", strerror(errno)); + return -1; + } + + return 0; +} + +static int create_partition(int mshv_fd, int *vm_fd) +{ + int ret; + struct mshv_create_partition args = {0}; + + /* Initialize pt_flags with the desired features */ + uint64_t pt_flags = (1ULL << MSHV_PT_BIT_LAPIC) | + (1ULL << MSHV_PT_BIT_X2APIC) | + (1ULL << MSHV_PT_BIT_GPA_SUPER_PAGES); + + /* Set default isolation type */ + uint64_t pt_isolation = MSHV_PT_ISOLATION_NONE; + + args.pt_flags = pt_flags; + args.pt_isolation = pt_isolation; + + ret = ioctl(mshv_fd, MSHV_CREATE_PARTITION, &args); + if (ret < 0) { + error_report("Failed to create partition: %s", strerror(errno)); + return -1; + } + + *vm_fd = ret; + return 0; +} + +static int set_synthetic_proc_features(int vm_fd) +{ + int ret; + struct hv_input_set_partition_property in = {0}; + union hv_partition_synthetic_processor_features features = {0}; + + /* Access the bitfield and set the desired features */ + features.hypervisor_present = 1; + features.hv1 = 1; + features.access_partition_reference_counter = 1; + features.access_synic_regs = 1; + features.access_synthetic_timer_regs = 1; + features.access_partition_reference_tsc = 1; + features.access_frequency_regs = 1; + features.access_intr_ctrl_regs = 1; + features.access_vp_index = 1; + features.access_hypercall_regs = 1; + features.tb_flush_hypercalls = 1; + features.synthetic_cluster_ipi = 1; + features.direct_synthetic_timers = 1; + + mshv_arch_amend_proc_features(&features); + + in.property_code = HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES; + in.property_value = features.as_uint64[0]; + + struct mshv_root_hvcall 
args = {0}; + args.code = HVCALL_SET_PARTITION_PROPERTY; + args.in_sz = sizeof(in); + args.in_ptr = (uint64_t)∈ + + trace_mshv_hvcall_args("synthetic_proc_features", args.code, args.in_sz); + + ret = mshv_hvcall(vm_fd, &args); + if (ret < 0) { + error_report("Failed to set synthethic proc features"); + return -errno; + } + return 0; +} + +static int initialize_vm(int vm_fd) +{ + int ret = ioctl(vm_fd, MSHV_INITIALIZE_PARTITION); + if (ret < 0) { + error_report("Failed to initialize partition: %s", strerror(errno)); + return -1; + } + return 0; +} + +static int create_vm(int mshv_fd, int *vm_fd) +{ + int ret = create_partition(mshv_fd, vm_fd); + if (ret < 0) { + return -1; + } + + ret = set_synthetic_proc_features(*vm_fd); + if (ret < 0) { + return -1; + } + + ret = initialize_vm(*vm_fd); + if (ret < 0) { + return -1; + } + + ret = mshv_reserve_ioapic_msi_routes(*vm_fd); + if (ret < 0) { + return -1; + } + + ret = mshv_arch_post_init_vm(*vm_fd); + if (ret < 0) { + return -1; + } + + /* Always create a frozen partition */ + pause_vm(*vm_fd); + + return 0; +} + +static void mem_region_add(MemoryListener *listener, + MemoryRegionSection *section) +{ + MshvMemoryListener *mml; + mml = container_of(listener, MshvMemoryListener, listener); + memory_region_ref(section->mr); + mshv_set_phys_mem(mml, section, true); +} + +static void mem_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + MshvMemoryListener *mml; + mml = container_of(listener, MshvMemoryListener, listener); + mshv_set_phys_mem(mml, section, false); + memory_region_unref(section->mr); +} + +typedef enum { + DATAMATCH_NONE, + DATAMATCH_U32, + DATAMATCH_U64, +} DatamatchTag; + +typedef struct { + DatamatchTag tag; + union { + uint32_t u32; + uint64_t u64; + } value; +} Datamatch; + +/* flags: determine whether to de/assign */ +static int ioeventfd(int vm_fd, int event_fd, uint64_t addr, Datamatch dm, + uint32_t flags) +{ + struct mshv_user_ioeventfd args = {0}; + args.fd = event_fd; + 
args.addr = addr; + args.flags = flags; + + if (dm.tag == DATAMATCH_NONE) { + args.datamatch = 0; + } else { + flags |= BIT(MSHV_IOEVENTFD_BIT_DATAMATCH); + args.flags = flags; + if (dm.tag == DATAMATCH_U64) { + args.len = sizeof(uint64_t); + args.datamatch = dm.value.u64; + } else { + args.len = sizeof(uint32_t); + args.datamatch = dm.value.u32; + } + } + + return ioctl(vm_fd, MSHV_IOEVENTFD, &args); +} + +static int unregister_ioevent(int vm_fd, int event_fd, uint64_t mmio_addr) +{ + uint32_t flags = 0; + Datamatch dm = {0}; + + flags |= BIT(MSHV_IOEVENTFD_BIT_DEASSIGN); + dm.tag = DATAMATCH_NONE; + + return ioeventfd(vm_fd, event_fd, mmio_addr, dm, flags); +} + +static int register_ioevent(int vm_fd, int event_fd, uint64_t mmio_addr, + uint64_t val, bool is_64bit, bool is_datamatch) +{ + uint32_t flags = 0; + Datamatch dm = {0}; + + if (!is_datamatch) { + dm.tag = DATAMATCH_NONE; + } else if (is_64bit) { + dm.tag = DATAMATCH_U64; + dm.value.u64 = val; + } else { + dm.tag = DATAMATCH_U32; + dm.value.u32 = val; + } + + return ioeventfd(vm_fd, event_fd, mmio_addr, dm, flags); +} + +static void mem_ioeventfd_add(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int ret; + bool is_64 = int128_get64(section->size) == 8; + uint64_t addr = section->offset_within_address_space; + + trace_mshv_mem_ioeventfd_add(addr, int128_get64(section->size), data); + + ret = register_ioevent(mshv_state->vm, fd, addr, data, is_64, match_data); + + if (ret < 0) { + error_report("Failed to register ioeventfd: %s (%d)", strerror(-ret), + -ret); + abort(); + } +} + +static void mem_ioeventfd_del(MemoryListener *listener, + MemoryRegionSection *section, + bool match_data, uint64_t data, + EventNotifier *e) +{ + int fd = event_notifier_get_fd(e); + int ret; + uint64_t addr = section->offset_within_address_space; + + trace_mshv_mem_ioeventfd_del(section->offset_within_address_space, + 
int128_get64(section->size), data); + + ret = unregister_ioevent(mshv_state->vm, fd, addr); + if (ret < 0) { + error_report("Failed to unregister ioeventfd: %s (%d)", strerror(-ret), + -ret); + abort(); + } +} + +static MemoryListener mshv_memory_listener = { + .name = "mshv", + .priority = MEMORY_LISTENER_PRIORITY_ACCEL, + .region_add = mem_region_add, + .region_del = mem_region_del, + .eventfd_add = mem_ioeventfd_add, + .eventfd_del = mem_ioeventfd_del, +}; + +static MemoryListener mshv_io_listener = { + .name = "mshv", .priority = MEMORY_LISTENER_PRIORITY_DEV_BACKEND, + /* MSHV does not support PIO eventfd */ +}; + +static void register_mshv_memory_listener(MshvState *s, MshvMemoryListener *mml, + AddressSpace *as, int as_id, + const char *name) +{ + int i; + + mml->listener = mshv_memory_listener; + mml->listener.name = name; + memory_listener_register(&mml->listener, as); + for (i = 0; i < s->nr_as; ++i) { + if (!s->as[i].as) { + s->as[i].as = as; + s->as[i].ml = mml; + break; + } + } +} + +int mshv_hvcall(int fd, const struct mshv_root_hvcall *args) +{ + int ret = 0; + + ret = ioctl(fd, MSHV_ROOT_HVCALL, args); + if (ret < 0) { + error_report("Failed to perform hvcall: %s", strerror(errno)); + return -1; + } + return ret; +} + +static int mshv_init_vcpu(CPUState *cpu) +{ + int vm_fd = mshv_state->vm; + uint8_t vp_index = cpu->cpu_index; + int ret; + + cpu->accel = g_new0(AccelCPUState, 1); + mshv_arch_init_vcpu(cpu); + + ret = mshv_create_vcpu(vm_fd, vp_index, &cpu->accel->cpufd); + if (ret < 0) { + return -1; + } + + cpu->accel->dirty = true; + + return 0; +} + +static int mshv_init(AccelState *as, MachineState *ms) +{ + MshvState *s; + int mshv_fd, vm_fd, ret; + + if (mshv_state) { + warn_report("MSHV accelerator already initialized"); + return 0; + } + + s = MSHV_STATE(as); + + accel_blocker_init(); + + s->vm = 0; + + ret = init_mshv(&mshv_fd); + if (ret < 0) { + return -1; + } + + mshv_init_mmio_emu(); + + mshv_init_msicontrol(); + + 
mshv_init_memory_slot_manager(s); + + ret = create_vm(mshv_fd, &vm_fd); + if (ret < 0) { + close(mshv_fd); + return -1; + } + + ret = resume_vm(vm_fd); + if (ret < 0) { + close(mshv_fd); + close(vm_fd); + return -1; + } + + s->vm = vm_fd; + s->fd = mshv_fd; + s->nr_as = 1; + s->as = g_new0(MshvAddressSpace, s->nr_as); + + mshv_state = s; + + register_mshv_memory_listener(s, &s->memory_listener, &address_space_memory, + 0, "mshv-memory"); + memory_listener_register(&mshv_io_listener, &address_space_io); + + return 0; +} + +static int mshv_destroy_vcpu(CPUState *cpu) +{ + int cpu_fd = mshv_vcpufd(cpu); + int vm_fd = mshv_state->vm; + + mshv_remove_vcpu(vm_fd, cpu_fd); + mshv_vcpufd(cpu) = 0; + + mshv_arch_destroy_vcpu(cpu); + g_clear_pointer(&cpu->accel, g_free); + return 0; +} + +static int mshv_cpu_exec(CPUState *cpu) +{ + hv_message mshv_msg; + enum MshvVmExit exit_reason; + int ret = 0; + + bql_unlock(); + cpu_exec_start(cpu); + + do { + if (cpu->accel->dirty) { + ret = mshv_arch_put_registers(cpu); + if (ret) { + error_report("Failed to put registers after init: %s", + strerror(-ret)); + ret = -1; + break; + } + cpu->accel->dirty = false; + } + + ret = mshv_run_vcpu(mshv_state->vm, cpu, &mshv_msg, &exit_reason); + if (ret < 0) { + error_report("Failed to run on vcpu %d", cpu->cpu_index); + abort(); + } + + switch (exit_reason) { + case MshvVmExitIgnore: + break; + default: + ret = EXCP_INTERRUPT; + break; + } + } while (ret == 0); + + cpu_exec_end(cpu); + bql_lock(); + + if (ret < 0) { + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); + vm_stop(RUN_STATE_INTERNAL_ERROR); + } + + return ret; +} + +/* + * The signal handler is triggered when QEMU's main thread receives a SIG_IPI + * (SIGUSR1). This signal causes the current CPU thread to be kicked, forcing a + * VM exit on the CPU. The VM exit generates an exit reason that breaks the loop + * (see mshv_cpu_exec). If the exit is due to a Ctrl+A+x command, the system + * will shut down. 
For other cases, the system will continue running. + */ +static void sa_ipi_handler(int sig) +{ + /* TODO: call IOCTL to set_immediate_exit, once implemented. */ + + qemu_cpu_kick_self(); +} + +static void init_signal(CPUState *cpu) +{ + /* init cpu signals */ + struct sigaction sigact; + sigset_t set; + + memset(&sigact, 0, sizeof(sigact)); + sigact.sa_handler = sa_ipi_handler; + sigaction(SIG_IPI, &sigact, NULL); + + pthread_sigmask(SIG_BLOCK, NULL, &set); + sigdelset(&set, SIG_IPI); + pthread_sigmask(SIG_SETMASK, &set, NULL); +} + +static void *mshv_vcpu_thread(void *arg) +{ + CPUState *cpu = arg; + int ret; + + rcu_register_thread(); + + bql_lock(); + qemu_thread_get_self(cpu->thread); + cpu->thread_id = qemu_get_thread_id(); + current_cpu = cpu; + ret = mshv_init_vcpu(cpu); + if (ret < 0) { + error_report("Failed to init vcpu %d", cpu->cpu_index); + goto cleanup; + } + init_signal(cpu); + + /* signal CPU creation */ + cpu_thread_signal_created(cpu); + qemu_guest_random_seed_thread_part2(cpu->random_seed); + + do { + qemu_process_cpu_events(cpu); + if (cpu_can_run(cpu)) { + mshv_cpu_exec(cpu); + } + } while (!cpu->unplug || cpu_can_run(cpu)); + + mshv_destroy_vcpu(cpu); +cleanup: + cpu_thread_signal_destroyed(cpu); + bql_unlock(); + rcu_unregister_thread(); + return NULL; +} + +static void mshv_start_vcpu_thread(CPUState *cpu) +{ + char thread_name[VCPU_THREAD_NAME_SIZE]; + + cpu->thread = g_malloc0(sizeof(QemuThread)); + cpu->halt_cond = g_malloc0(sizeof(QemuCond)); + + qemu_cond_init(cpu->halt_cond); + + trace_mshv_start_vcpu_thread(thread_name, cpu->cpu_index); + qemu_thread_create(cpu->thread, thread_name, mshv_vcpu_thread, cpu, + QEMU_THREAD_JOINABLE); +} + +static void do_mshv_cpu_synchronize_post_init(CPUState *cpu, + run_on_cpu_data arg) +{ + int ret = mshv_arch_put_registers(cpu); + if (ret < 0) { + error_report("Failed to put registers after init: %s", strerror(-ret)); + abort(); + } + + cpu->accel->dirty = false; +} + +static void 
mshv_cpu_synchronize_post_init(CPUState *cpu) +{ + run_on_cpu(cpu, do_mshv_cpu_synchronize_post_init, RUN_ON_CPU_NULL); +} + +static void mshv_cpu_synchronize_post_reset(CPUState *cpu) +{ + int ret = mshv_arch_put_registers(cpu); + if (ret) { + error_report("Failed to put registers after reset: %s", + strerror(-ret)); + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); + vm_stop(RUN_STATE_INTERNAL_ERROR); + } + cpu->accel->dirty = false; +} + +static void do_mshv_cpu_synchronize_pre_loadvm(CPUState *cpu, + run_on_cpu_data arg) +{ + cpu->accel->dirty = true; +} + +static void mshv_cpu_synchronize_pre_loadvm(CPUState *cpu) +{ + run_on_cpu(cpu, do_mshv_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL); +} + +static void do_mshv_cpu_synchronize(CPUState *cpu, run_on_cpu_data arg) +{ + if (!cpu->accel->dirty) { + int ret = mshv_load_regs(cpu); + if (ret < 0) { + error_report("Failed to load registers for vcpu %d", + cpu->cpu_index); + + cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); + vm_stop(RUN_STATE_INTERNAL_ERROR); + } + + cpu->accel->dirty = true; + } +} + +static void mshv_cpu_synchronize(CPUState *cpu) +{ + if (!cpu->accel->dirty) { + run_on_cpu(cpu, do_mshv_cpu_synchronize, RUN_ON_CPU_NULL); + } +} + +static bool mshv_cpus_are_resettable(void) +{ + return false; +} + +static void mshv_accel_class_init(ObjectClass *oc, const void *data) +{ + AccelClass *ac = ACCEL_CLASS(oc); + + ac->name = "MSHV"; + ac->init_machine = mshv_init; + ac->allowed = &mshv_allowed; +} + +static void mshv_accel_instance_init(Object *obj) +{ + MshvState *s = MSHV_STATE(obj); + + s->vm = 0; +} + +static const TypeInfo mshv_accel_type = { + .name = TYPE_MSHV_ACCEL, + .parent = TYPE_ACCEL, + .instance_init = mshv_accel_instance_init, + .class_init = mshv_accel_class_init, + .instance_size = sizeof(MshvState), +}; + +static void mshv_accel_ops_class_init(ObjectClass *oc, const void *data) +{ + AccelOpsClass *ops = ACCEL_OPS_CLASS(oc); + + ops->create_vcpu_thread = mshv_start_vcpu_thread; + 
ops->synchronize_post_init = mshv_cpu_synchronize_post_init; + ops->synchronize_post_reset = mshv_cpu_synchronize_post_reset; + ops->synchronize_state = mshv_cpu_synchronize; + ops->synchronize_pre_loadvm = mshv_cpu_synchronize_pre_loadvm; + ops->cpus_are_resettable = mshv_cpus_are_resettable; + ops->handle_interrupt = generic_handle_interrupt; +} + +static const TypeInfo mshv_accel_ops_type = { + .name = ACCEL_OPS_NAME("mshv"), + .parent = TYPE_ACCEL_OPS, + .class_init = mshv_accel_ops_class_init, + .abstract = true, +}; + +static void mshv_type_init(void) +{ + type_register_static(&mshv_accel_type); + type_register_static(&mshv_accel_ops_type); +} + +type_init(mshv_type_init); diff --git a/accel/mshv/msr.c b/accel/mshv/msr.c new file mode 100644 index 0000000000000..e6e5baef507bf --- /dev/null +++ b/accel/mshv/msr.c @@ -0,0 +1,375 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 2025 + * + * Authors: Magnus Kulke + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "system/mshv.h" +#include "system/mshv_int.h" +#include "hw/hyperv/hvgdk_mini.h" +#include "linux/mshv.h" +#include "qemu/error-report.h" + +static uint32_t supported_msrs[64] = { + IA32_MSR_TSC, + IA32_MSR_EFER, + IA32_MSR_KERNEL_GS_BASE, + IA32_MSR_APIC_BASE, + IA32_MSR_PAT, + IA32_MSR_SYSENTER_CS, + IA32_MSR_SYSENTER_ESP, + IA32_MSR_SYSENTER_EIP, + IA32_MSR_STAR, + IA32_MSR_LSTAR, + IA32_MSR_CSTAR, + IA32_MSR_SFMASK, + IA32_MSR_MTRR_DEF_TYPE, + IA32_MSR_MTRR_PHYSBASE0, + IA32_MSR_MTRR_PHYSMASK0, + IA32_MSR_MTRR_PHYSBASE1, + IA32_MSR_MTRR_PHYSMASK1, + IA32_MSR_MTRR_PHYSBASE2, + IA32_MSR_MTRR_PHYSMASK2, + IA32_MSR_MTRR_PHYSBASE3, + IA32_MSR_MTRR_PHYSMASK3, + IA32_MSR_MTRR_PHYSBASE4, + IA32_MSR_MTRR_PHYSMASK4, + IA32_MSR_MTRR_PHYSBASE5, + IA32_MSR_MTRR_PHYSMASK5, + IA32_MSR_MTRR_PHYSBASE6, + IA32_MSR_MTRR_PHYSMASK6, + IA32_MSR_MTRR_PHYSBASE7, + IA32_MSR_MTRR_PHYSMASK7, + IA32_MSR_MTRR_FIX64K_00000, + IA32_MSR_MTRR_FIX16K_80000, + 
IA32_MSR_MTRR_FIX16K_A0000, + IA32_MSR_MTRR_FIX4K_C0000, + IA32_MSR_MTRR_FIX4K_C8000, + IA32_MSR_MTRR_FIX4K_D0000, + IA32_MSR_MTRR_FIX4K_D8000, + IA32_MSR_MTRR_FIX4K_E0000, + IA32_MSR_MTRR_FIX4K_E8000, + IA32_MSR_MTRR_FIX4K_F0000, + IA32_MSR_MTRR_FIX4K_F8000, + IA32_MSR_TSC_AUX, + IA32_MSR_DEBUG_CTL, + HV_X64_MSR_GUEST_OS_ID, + HV_X64_MSR_SINT0, + HV_X64_MSR_SINT1, + HV_X64_MSR_SINT2, + HV_X64_MSR_SINT3, + HV_X64_MSR_SINT4, + HV_X64_MSR_SINT5, + HV_X64_MSR_SINT6, + HV_X64_MSR_SINT7, + HV_X64_MSR_SINT8, + HV_X64_MSR_SINT9, + HV_X64_MSR_SINT10, + HV_X64_MSR_SINT11, + HV_X64_MSR_SINT12, + HV_X64_MSR_SINT13, + HV_X64_MSR_SINT14, + HV_X64_MSR_SINT15, + HV_X64_MSR_SCONTROL, + HV_X64_MSR_SIEFP, + HV_X64_MSR_SIMP, + HV_X64_MSR_REFERENCE_TSC, + HV_X64_MSR_EOM, +}; +static const size_t msr_count = ARRAY_SIZE(supported_msrs); + +static int compare_msr_index(const void *a, const void *b) +{ + return *(uint32_t *)a - *(uint32_t *)b; +} + +__attribute__((constructor)) +static void init_sorted_msr_map(void) +{ + qsort(supported_msrs, msr_count, sizeof(uint32_t), compare_msr_index); +} + +static int mshv_is_supported_msr(uint32_t msr) +{ + return bsearch(&msr, supported_msrs, msr_count, sizeof(uint32_t), + compare_msr_index) != NULL; +} + +static int mshv_msr_to_hv_reg_name(uint32_t msr, uint32_t *hv_reg) +{ + switch (msr) { + case IA32_MSR_TSC: + *hv_reg = HV_X64_REGISTER_TSC; + return 0; + case IA32_MSR_EFER: + *hv_reg = HV_X64_REGISTER_EFER; + return 0; + case IA32_MSR_KERNEL_GS_BASE: + *hv_reg = HV_X64_REGISTER_KERNEL_GS_BASE; + return 0; + case IA32_MSR_APIC_BASE: + *hv_reg = HV_X64_REGISTER_APIC_BASE; + return 0; + case IA32_MSR_PAT: + *hv_reg = HV_X64_REGISTER_PAT; + return 0; + case IA32_MSR_SYSENTER_CS: + *hv_reg = HV_X64_REGISTER_SYSENTER_CS; + return 0; + case IA32_MSR_SYSENTER_ESP: + *hv_reg = HV_X64_REGISTER_SYSENTER_ESP; + return 0; + case IA32_MSR_SYSENTER_EIP: + *hv_reg = HV_X64_REGISTER_SYSENTER_EIP; + return 0; + case IA32_MSR_STAR: + *hv_reg = 
HV_X64_REGISTER_STAR; + return 0; + case IA32_MSR_LSTAR: + *hv_reg = HV_X64_REGISTER_LSTAR; + return 0; + case IA32_MSR_CSTAR: + *hv_reg = HV_X64_REGISTER_CSTAR; + return 0; + case IA32_MSR_SFMASK: + *hv_reg = HV_X64_REGISTER_SFMASK; + return 0; + case IA32_MSR_MTRR_CAP: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_CAP; + return 0; + case IA32_MSR_MTRR_DEF_TYPE: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_DEF_TYPE; + return 0; + case IA32_MSR_MTRR_PHYSBASE0: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0; + return 0; + case IA32_MSR_MTRR_PHYSMASK0: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0; + return 0; + case IA32_MSR_MTRR_PHYSBASE1: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1; + return 0; + case IA32_MSR_MTRR_PHYSMASK1: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1; + return 0; + case IA32_MSR_MTRR_PHYSBASE2: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2; + return 0; + case IA32_MSR_MTRR_PHYSMASK2: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2; + return 0; + case IA32_MSR_MTRR_PHYSBASE3: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3; + return 0; + case IA32_MSR_MTRR_PHYSMASK3: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3; + return 0; + case IA32_MSR_MTRR_PHYSBASE4: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4; + return 0; + case IA32_MSR_MTRR_PHYSMASK4: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4; + return 0; + case IA32_MSR_MTRR_PHYSBASE5: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5; + return 0; + case IA32_MSR_MTRR_PHYSMASK5: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5; + return 0; + case IA32_MSR_MTRR_PHYSBASE6: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6; + return 0; + case IA32_MSR_MTRR_PHYSMASK6: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6; + return 0; + case IA32_MSR_MTRR_PHYSBASE7: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7; + return 0; + case IA32_MSR_MTRR_PHYSMASK7: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7; + return 0; + case IA32_MSR_MTRR_FIX64K_00000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX64K00000; + return 
0; + case IA32_MSR_MTRR_FIX16K_80000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX16K80000; + return 0; + case IA32_MSR_MTRR_FIX16K_A0000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX16KA0000; + return 0; + case IA32_MSR_MTRR_FIX4K_C0000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KC0000; + return 0; + case IA32_MSR_MTRR_FIX4K_C8000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KC8000; + return 0; + case IA32_MSR_MTRR_FIX4K_D0000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KD0000; + return 0; + case IA32_MSR_MTRR_FIX4K_D8000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KD8000; + return 0; + case IA32_MSR_MTRR_FIX4K_E0000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KE0000; + return 0; + case IA32_MSR_MTRR_FIX4K_E8000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KE8000; + return 0; + case IA32_MSR_MTRR_FIX4K_F0000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KF0000; + return 0; + case IA32_MSR_MTRR_FIX4K_F8000: + *hv_reg = HV_X64_REGISTER_MSR_MTRR_FIX4KF8000; + return 0; + case IA32_MSR_TSC_AUX: + *hv_reg = HV_X64_REGISTER_TSC_AUX; + return 0; + case IA32_MSR_BNDCFGS: + *hv_reg = HV_X64_REGISTER_BNDCFGS; + return 0; + case IA32_MSR_DEBUG_CTL: + *hv_reg = HV_X64_REGISTER_DEBUG_CTL; + return 0; + case IA32_MSR_TSC_ADJUST: + *hv_reg = HV_X64_REGISTER_TSC_ADJUST; + return 0; + case IA32_MSR_SPEC_CTRL: + *hv_reg = HV_X64_REGISTER_SPEC_CTRL; + return 0; + case HV_X64_MSR_GUEST_OS_ID: + *hv_reg = HV_REGISTER_GUEST_OS_ID; + return 0; + case HV_X64_MSR_SINT0: + *hv_reg = HV_REGISTER_SINT0; + return 0; + case HV_X64_MSR_SINT1: + *hv_reg = HV_REGISTER_SINT1; + return 0; + case HV_X64_MSR_SINT2: + *hv_reg = HV_REGISTER_SINT2; + return 0; + case HV_X64_MSR_SINT3: + *hv_reg = HV_REGISTER_SINT3; + return 0; + case HV_X64_MSR_SINT4: + *hv_reg = HV_REGISTER_SINT4; + return 0; + case HV_X64_MSR_SINT5: + *hv_reg = HV_REGISTER_SINT5; + return 0; + case HV_X64_MSR_SINT6: + *hv_reg = HV_REGISTER_SINT6; + return 0; + case HV_X64_MSR_SINT7: + *hv_reg = HV_REGISTER_SINT7; + return 0; + case HV_X64_MSR_SINT8: + *hv_reg 
= HV_REGISTER_SINT8; + return 0; + case HV_X64_MSR_SINT9: + *hv_reg = HV_REGISTER_SINT9; + return 0; + case HV_X64_MSR_SINT10: + *hv_reg = HV_REGISTER_SINT10; + return 0; + case HV_X64_MSR_SINT11: + *hv_reg = HV_REGISTER_SINT11; + return 0; + case HV_X64_MSR_SINT12: + *hv_reg = HV_REGISTER_SINT12; + return 0; + case HV_X64_MSR_SINT13: + *hv_reg = HV_REGISTER_SINT13; + return 0; + case HV_X64_MSR_SINT14: + *hv_reg = HV_REGISTER_SINT14; + return 0; + case HV_X64_MSR_SINT15: + *hv_reg = HV_REGISTER_SINT15; + return 0; + case IA32_MSR_MISC_ENABLE: + *hv_reg = HV_X64_REGISTER_MSR_IA32_MISC_ENABLE; + return 0; + case HV_X64_MSR_SCONTROL: + *hv_reg = HV_REGISTER_SCONTROL; + return 0; + case HV_X64_MSR_SIEFP: + *hv_reg = HV_REGISTER_SIEFP; + return 0; + case HV_X64_MSR_SIMP: + *hv_reg = HV_REGISTER_SIMP; + return 0; + case HV_X64_MSR_REFERENCE_TSC: + *hv_reg = HV_REGISTER_REFERENCE_TSC; + return 0; + case HV_X64_MSR_EOM: + *hv_reg = HV_REGISTER_EOM; + return 0; + default: + error_report("failed to map MSR %u to HV register name", msr); + return -1; + } +} + +static int set_msrs(const CPUState *cpu, GList *msrs) +{ + size_t n_msrs; + GList *entries; + MshvMsrEntry *entry; + enum hv_register_name name; + struct hv_register_assoc *assoc; + int ret; + size_t i = 0; + + n_msrs = g_list_length(msrs); + hv_register_assoc *assocs = g_new0(hv_register_assoc, n_msrs); + + entries = msrs; + for (const GList *elem = entries; elem != NULL; elem = elem->next) { + entry = elem->data; + ret = mshv_msr_to_hv_reg_name(entry->index, &name); + if (ret < 0) { + g_free(assocs); + return ret; + } + assoc = &assocs[i]; + assoc->name = name; + /* the union has been initialized to 0 */ + assoc->value.reg64 = entry->data; + i++; + } + ret = mshv_set_generic_regs(cpu, assocs, n_msrs); + g_free(assocs); + if (ret < 0) { + error_report("failed to set msrs"); + return -1; + } + return 0; +} + + +int mshv_configure_msr(const CPUState *cpu, const MshvMsrEntry *msrs, + size_t n_msrs) +{ + GList *valid_msrs 
= NULL; + uint32_t msr_index; + int ret; + + for (size_t i = 0; i < n_msrs; i++) { + msr_index = msrs[i].index; + /* check whether index of msrs is in SUPPORTED_MSRS */ + if (mshv_is_supported_msr(msr_index)) { + valid_msrs = g_list_append(valid_msrs, (void *) &msrs[i]); + } + } + + ret = set_msrs(cpu, valid_msrs); + g_list_free(valid_msrs); + + return ret; +} diff --git a/accel/mshv/trace-events b/accel/mshv/trace-events new file mode 100644 index 0000000000000..36f0d59b38556 --- /dev/null +++ b/accel/mshv/trace-events @@ -0,0 +1,33 @@ +# Authors: Ziqiao Zhou +# Magnus Kulke +# +# SPDX-License-Identifier: GPL-2.0-or-later + +mshv_start_vcpu_thread(const char* thread, uint32_t cpu) "thread=%s cpu_index=%d" + +mshv_set_memory(bool add, uint64_t gpa, uint64_t size, uint64_t user_addr, bool readonly, int ret) "add=%d gpa=0x%" PRIx64 " size=0x%" PRIx64 " user=0x%" PRIx64 " readonly=%d result=%d" +mshv_mem_ioeventfd_add(uint64_t addr, uint32_t size, uint32_t data) "addr=0x%" PRIx64 " size=%d data=0x%x" +mshv_mem_ioeventfd_del(uint64_t addr, uint32_t size, uint32_t data) "addr=0x%" PRIx64 " size=%d data=0x%x" + +mshv_hvcall_args(const char* hvcall, uint16_t code, uint16_t in_sz) "built args for '%s' code: %d in_sz: %d" + +mshv_handle_interrupt(uint32_t cpu, int mask) "cpu_index=%d mask=0x%x" +mshv_set_msi_routing(uint32_t gsi, uint64_t addr, uint32_t data) "gsi=%d addr=0x%" PRIx64 " data=0x%x" +mshv_remove_msi_routing(uint32_t gsi) "gsi=%d" +mshv_add_msi_routing(uint64_t addr, uint32_t data) "addr=0x%" PRIx64 " data=0x%x" +mshv_commit_msi_routing_table(int vm_fd, int len) "vm_fd=%d table_size=%d" +mshv_register_irqfd(int vm_fd, int event_fd, uint32_t gsi) "vm_fd=%d event_fd=%d gsi=%d" +mshv_irqchip_update_irqfd_notifier_gsi(int event_fd, int resample_fd, int virq, bool add) "event_fd=%d resample_fd=%d virq=%d add=%d" + +mshv_insn_fetch(uint64_t addr, size_t size) "gpa=0x%" PRIx64 " size=%zu" +mshv_mem_write(uint64_t addr, size_t size) "\tgpa=0x%" PRIx64 " size=%zu" 
+mshv_mem_read(uint64_t addr, size_t size) "\tgpa=0x%" PRIx64 " size=%zu" +mshv_map_memory(uint64_t userspace_addr, uint64_t gpa, uint64_t size) "\tu_a=0x%" PRIx64 " gpa=0x%010" PRIx64 " size=0x%08" PRIx64 +mshv_unmap_memory(uint64_t userspace_addr, uint64_t gpa, uint64_t size) "\tu_a=0x%" PRIx64 " gpa=0x%010" PRIx64 " size=0x%08" PRIx64 +mshv_set_phys_mem(bool add, const char *name, uint64_t gpa) "\tadd=%d name=%s gpa=0x%010" PRIx64 +mshv_handle_mmio(uint64_t gva, uint64_t gpa, uint64_t size, uint8_t access_type) "\tgva=0x%" PRIx64 " gpa=0x%010" PRIx64 " size=0x%" PRIx64 " access_type=%d" + +mshv_found_slot(uint64_t userspace_addr, uint64_t gpa, uint64_t size) "\tu_a=0x%" PRIx64 " gpa=0x%010" PRIx64 " size=0x%08" PRIx64 +mshv_skip_unset_mem(uint64_t userspace_addr, uint64_t gpa, uint64_t size) "\tu_a=0x%" PRIx64 " gpa=0x%010" PRIx64 " size=0x%08" PRIx64 +mshv_remap_attempt(uint64_t userspace_addr, uint64_t gpa, uint64_t size) "\tu_a=0x%" PRIx64 " gpa=0x%010" PRIx64 " size=0x%08" PRIx64 +mshv_find_slot_by_gpa(uint64_t gpa) "\tgpa=0x%010" PRIx64 diff --git a/accel/mshv/trace.h b/accel/mshv/trace.h new file mode 100644 index 0000000000000..0dca48f9179f1 --- /dev/null +++ b/accel/mshv/trace.h @@ -0,0 +1,14 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: + * Ziqiao Zhou + * Magnus Kulke + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + */ + +#include "trace/trace-accel_mshv.h" diff --git a/accel/stubs/meson.build b/accel/stubs/meson.build index 9dfc4f9ddaf5b..48eccd1b86170 100644 --- a/accel/stubs/meson.build +++ b/accel/stubs/meson.build @@ -5,5 +5,6 @@ system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c')) system_stubs_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c')) system_stubs_ss.add(when: 'CONFIG_NVMM', if_false: files('nvmm-stub.c')) system_stubs_ss.add(when: 'CONFIG_WHPX', if_false: files('whpx-stub.c')) +system_stubs_ss.add(when: 'CONFIG_MSHV', if_false: files('mshv-stub.c')) specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss) diff --git a/accel/stubs/mshv-stub.c b/accel/stubs/mshv-stub.c new file mode 100644 index 0000000000000..e499b199d9dd5 --- /dev/null +++ b/accel/stubs/mshv-stub.c @@ -0,0 +1,44 @@ +/* + * QEMU MSHV stub + * + * Copyright Red Hat, Inc. 
2025 + * + * Author: Paolo Bonzini + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/pci/msi.h" +#include "system/mshv.h" + +bool mshv_allowed; + +int mshv_irqchip_add_msi_route(int vector, PCIDevice *dev) +{ + return -ENOSYS; +} + +void mshv_irqchip_release_virq(int virq) +{ +} + +int mshv_irqchip_update_msi_route(int virq, MSIMessage msg, PCIDevice *dev) +{ + return -ENOSYS; +} + +void mshv_irqchip_commit_routes(void) +{ +} + +int mshv_irqchip_add_irqfd_notifier_gsi(const EventNotifier *n, + const EventNotifier *rn, int virq) +{ + return -ENOSYS; +} + +int mshv_irqchip_remove_irqfd_notifier_gsi(const EventNotifier *n, int virq) +{ + return -ENOSYS; +} diff --git a/accel/stubs/tcg-stub.c b/accel/stubs/tcg-stub.c index 3b76b8b17c17d..77055e396443b 100644 --- a/accel/stubs/tcg-stub.c +++ b/accel/stubs/tcg-stub.c @@ -17,8 +17,3 @@ G_NORETURN void cpu_loop_exit(CPUState *cpu) { g_assert_not_reached(); } - -G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc) -{ - g_assert_not_reached(); -} diff --git a/accel/tcg/atomic_common.c.inc b/accel/tcg/atomic_common.c.inc index 6056598c23d42..bca93a0ac49bd 100644 --- a/accel/tcg/atomic_common.c.inc +++ b/accel/tcg/atomic_common.c.inc @@ -122,5 +122,14 @@ GEN_ATOMIC_HELPERS(umax_fetch) GEN_ATOMIC_HELPERS(xchg) +#if HAVE_CMPXCHG128 +ATOMIC_HELPER(xchgo_be, Int128) +ATOMIC_HELPER(xchgo_le, Int128) +ATOMIC_HELPER(fetch_ando_be, Int128) +ATOMIC_HELPER(fetch_ando_le, Int128) +ATOMIC_HELPER(fetch_oro_be, Int128) +ATOMIC_HELPER(fetch_oro_le, Int128) +#endif + #undef ATOMIC_HELPER #undef GEN_ATOMIC_HELPERS diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h index 08a475c10ca04..ae5203b43904a 100644 --- a/accel/tcg/atomic_template.h +++ b/accel/tcg/atomic_template.h @@ -100,7 +100,6 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr, return ret; } -#if DATA_SIZE < 16 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, MemOpIdx 
oi, uintptr_t retaddr) { @@ -108,7 +107,28 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, DATA_SIZE, retaddr); DATA_TYPE ret; +#if DATA_SIZE == 16 + ret = atomic16_xchg(haddr, val); +#else ret = qatomic_xchg__nocheck(haddr, val); +#endif + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(val), + VALUE_HIGH(val), + oi); + return ret; +} + +#if DATA_SIZE == 16 +ABI_TYPE ATOMIC_NAME(fetch_and)(CPUArchState *env, vaddr addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, + DATA_SIZE, retaddr); + DATA_TYPE ret = atomic16_fetch_and(haddr, val); ATOMIC_MMU_CLEANUP; atomic_trace_rmw_post(env, addr, VALUE_LOW(ret), @@ -119,6 +139,22 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, return ret; } +ABI_TYPE ATOMIC_NAME(fetch_or)(CPUArchState *env, vaddr addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, + DATA_SIZE, retaddr); + DATA_TYPE ret = atomic16_fetch_or(haddr, val); + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(val), + VALUE_HIGH(val), + oi); + return ret; +} +#else #define GEN_ATOMIC_HELPER(X) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ @@ -188,7 +224,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new) GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new) #undef GEN_ATOMIC_HELPER_FN -#endif /* DATA SIZE < 16 */ +#endif /* DATA SIZE == 16 */ #undef END @@ -225,7 +261,6 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, vaddr addr, return BSWAP(ret); } -#if DATA_SIZE < 16 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) { @@ -233,7 +268,28 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, DATA_SIZE, retaddr); ABI_TYPE 
ret; +#if DATA_SIZE == 16 + ret = atomic16_xchg(haddr, BSWAP(val)); +#else ret = qatomic_xchg__nocheck(haddr, BSWAP(val)); +#endif + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(val), + VALUE_HIGH(val), + oi); + return BSWAP(ret); +} + +#if DATA_SIZE == 16 +ABI_TYPE ATOMIC_NAME(fetch_and)(CPUArchState *env, vaddr addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, + DATA_SIZE, retaddr); + DATA_TYPE ret = atomic16_fetch_and(haddr, BSWAP(val)); ATOMIC_MMU_CLEANUP; atomic_trace_rmw_post(env, addr, VALUE_LOW(ret), @@ -244,6 +300,22 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, vaddr addr, ABI_TYPE val, return BSWAP(ret); } +ABI_TYPE ATOMIC_NAME(fetch_or)(CPUArchState *env, vaddr addr, ABI_TYPE val, + MemOpIdx oi, uintptr_t retaddr) +{ + DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, + DATA_SIZE, retaddr); + DATA_TYPE ret = atomic16_fetch_or(haddr, BSWAP(val)); + ATOMIC_MMU_CLEANUP; + atomic_trace_rmw_post(env, addr, + VALUE_LOW(ret), + VALUE_HIGH(ret), + VALUE_LOW(val), + VALUE_HIGH(val), + oi); + return BSWAP(ret); +} +#else #define GEN_ATOMIC_HELPER(X) \ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, vaddr addr, \ ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \ @@ -317,7 +389,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new) #undef ADD #undef GEN_ATOMIC_HELPER_FN -#endif /* DATA_SIZE < 16 */ +#endif /* DATA_SIZE == 16 */ #undef END #endif /* DATA_SIZE > 1 */ diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 713bdb2056480..7c20d9db122e5 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -40,6 +40,7 @@ #include "exec/replay-core.h" #include "system/tcg.h" #include "exec/helper-proto-common.h" +#include "tcg-accel-ops.h" #include "tb-jmp-cache.h" #include "tb-hash.h" #include "tb-context.h" @@ -748,6 +749,22 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret) return false; } +void 
tcg_kick_vcpu_thread(CPUState *cpu) +{ +#ifndef CONFIG_USER_ONLY + /* + * Ensure cpu_exec will see the reason why the exit request was set. + * FIXME: this is not always needed. Other accelerators instead + * read interrupt_request and set exit_request on demand from the + * CPU thread; see kvm_arch_pre_run() for example. + */ + qatomic_store_release(&cpu->exit_request, true); +#endif + + /* Ensure cpu_exec will see the exit request after TCG has exited. */ + qatomic_store_release(&cpu->neg.icount_decr.u16.high, -1); +} + static inline bool icount_exit_request(CPUState *cpu) { if (!icount_enabled()) { @@ -774,44 +791,47 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, /* Clear the interrupt flag now since we're processing * cpu->interrupt_request and cpu->exit_request. * Ensure zeroing happens before reading cpu->exit_request or - * cpu->interrupt_request (see also smp_wmb in cpu_exit()) + * cpu->interrupt_request (see also store-release in + * tcg_kick_vcpu_thread()) */ qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0); - if (unlikely(qatomic_read(&cpu->interrupt_request))) { - int interrupt_request; +#ifdef CONFIG_USER_ONLY + assert(!cpu_test_interrupt(cpu, ~0)); +#else + if (unlikely(cpu_test_interrupt(cpu, ~0))) { bql_lock(); - interrupt_request = cpu->interrupt_request; - if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { - /* Mask out external interrupts for this step. 
*/ - interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; - } - if (interrupt_request & CPU_INTERRUPT_DEBUG) { - cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_DEBUG)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_DEBUG); cpu->exception_index = EXCP_DEBUG; bql_unlock(); return true; } -#if !defined(CONFIG_USER_ONLY) if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) { /* Do nothing */ - } else if (interrupt_request & CPU_INTERRUPT_HALT) { + } else if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HALT)) { replay_interrupt(); - cpu->interrupt_request &= ~CPU_INTERRUPT_HALT; + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HALT); cpu->halted = 1; cpu->exception_index = EXCP_HLT; bql_unlock(); return true; } else { const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops; + int interrupt_request = cpu->interrupt_request; - if (interrupt_request & CPU_INTERRUPT_RESET) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_RESET)) { replay_interrupt(); tcg_ops->cpu_exec_reset(cpu); bql_unlock(); return true; } + if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) { + /* Mask out external interrupts for this step. 
*/ + interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK; + } + /* * The target hook has 3 exit conditions: * False when the interrupt isn't processed, @@ -836,13 +856,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, cpu->exception_index = -1; *last_tb = NULL; } - /* The target hook may have updated the 'cpu->interrupt_request'; - * reload the 'interrupt_request' value */ - interrupt_request = cpu->interrupt_request; } -#endif /* !CONFIG_USER_ONLY */ - if (interrupt_request & CPU_INTERRUPT_EXITTB) { - cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_EXITTB)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_EXITTB); /* ensure that no TB jump will be modified as the program flow was changed */ *last_tb = NULL; @@ -851,10 +867,13 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */ bql_unlock(); } +#endif /* !CONFIG_USER_ONLY */ - /* Finally, check if we need to exit to the main loop. */ - if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) { - qatomic_set(&cpu->exit_request, 0); + /* + * Finally, check if we need to exit to the main loop. + * The corresponding store-release is in cpu_exit. + */ + if (unlikely(qatomic_load_acquire(&cpu->exit_request)) || icount_exit_request(cpu)) { if (cpu->exception_index == -1) { cpu->exception_index = EXCP_INTERRUPT; } diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 87e14bde4f225..631f1fe135936 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -25,6 +25,7 @@ #include "accel/tcg/probe.h" #include "exec/page-protection.h" #include "system/memory.h" +#include "system/physmem.h" #include "accel/tcg/cpu-ldst-common.h" #include "accel/tcg/cpu-mmu-index.h" #include "exec/cputlb.h" @@ -89,9 +90,6 @@ */ QEMU_BUILD_BUG_ON(sizeof(vaddr) > sizeof(run_on_cpu_data)); -/* We currently can't handle more than 16 bits in the MMUIDX bitmask. 
- */ -QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) static inline size_t tlb_n_entries(CPUTLBDescFast *fast) @@ -129,7 +127,7 @@ static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry) static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx, vaddr addr) { - uintptr_t size_mask = cpu->neg.tlb.f[mmu_idx].mask >> CPU_TLB_ENTRY_BITS; + uintptr_t size_mask = cpu_tlb_fast(cpu, mmu_idx)->mask >> CPU_TLB_ENTRY_BITS; return (addr >> TARGET_PAGE_BITS) & size_mask; } @@ -138,7 +136,7 @@ static inline uintptr_t tlb_index(CPUState *cpu, uintptr_t mmu_idx, static inline CPUTLBEntry *tlb_entry(CPUState *cpu, uintptr_t mmu_idx, vaddr addr) { - return &cpu->neg.tlb.f[mmu_idx].table[tlb_index(cpu, mmu_idx, addr)]; + return &cpu_tlb_fast(cpu, mmu_idx)->table[tlb_index(cpu, mmu_idx, addr)]; } static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns, @@ -292,7 +290,7 @@ static void tlb_flush_one_mmuidx_locked(CPUState *cpu, int mmu_idx, int64_t now) { CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; - CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; + CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx); tlb_mmu_resize_locked(desc, fast, now); tlb_mmu_flush_locked(desc, fast); @@ -331,7 +329,7 @@ void tlb_init(CPUState *cpu) cpu->neg.tlb.c.dirty = 0; for (i = 0; i < NB_MMU_MODES; i++) { - tlb_mmu_init(&cpu->neg.tlb.d[i], &cpu->neg.tlb.f[i], now); + tlb_mmu_init(&cpu->neg.tlb.d[i], cpu_tlb_fast(cpu, i), now); } } @@ -342,7 +340,7 @@ void tlb_destroy(CPUState *cpu) qemu_spin_destroy(&cpu->neg.tlb.c.lock); for (i = 0; i < NB_MMU_MODES; i++) { CPUTLBDesc *desc = &cpu->neg.tlb.d[i]; - CPUTLBDescFast *fast = &cpu->neg.tlb.f[i]; + CPUTLBDescFast *fast = cpu_tlb_fast(cpu, i); g_free(fast->table); g_free(desc->fulltlb); @@ -370,8 +368,8 @@ static void flush_all_helper(CPUState *src, run_on_cpu_func fn, static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) { - uint16_t asked = data.host_int; - uint16_t all_dirty, 
work, to_clean; + MMUIdxMap asked = data.host_int; + MMUIdxMap all_dirty, work, to_clean; int64_t now = get_clock_realtime(); assert_cpu_is_self(cpu); @@ -408,7 +406,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) } } -void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap) { tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); @@ -422,7 +420,7 @@ void tlb_flush(CPUState *cpu) tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); } -void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, MMUIdxMap idxmap) { const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; @@ -531,7 +529,7 @@ static void tlb_flush_page_locked(CPUState *cpu, int midx, vaddr page) */ static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu, vaddr addr, - uint16_t idxmap) + MMUIdxMap idxmap) { int mmu_idx; @@ -570,14 +568,14 @@ static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu, { vaddr addr_and_idxmap = data.target_ptr; vaddr addr = addr_and_idxmap & TARGET_PAGE_MASK; - uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; + MMUIdxMap idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK; tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); } typedef struct { vaddr addr; - uint16_t idxmap; + MMUIdxMap idxmap; } TLBFlushPageByMMUIdxData; /** @@ -599,7 +597,7 @@ static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu, g_free(d); } -void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap) +void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, MMUIdxMap idxmap) { tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap); @@ -618,7 +616,7 @@ void tlb_flush_page(CPUState *cpu, vaddr addr) void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, vaddr addr, - uint16_t idxmap) + MMUIdxMap idxmap) { tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap); @@ -667,7 +665,7 @@ 
static void tlb_flush_range_locked(CPUState *cpu, int midx, unsigned bits) { CPUTLBDesc *d = &cpu->neg.tlb.d[midx]; - CPUTLBDescFast *f = &cpu->neg.tlb.f[midx]; + CPUTLBDescFast *f = cpu_tlb_fast(cpu, midx); vaddr mask = MAKE_64BIT_MASK(0, bits); /* @@ -715,8 +713,8 @@ static void tlb_flush_range_locked(CPUState *cpu, int midx, typedef struct { vaddr addr; vaddr len; - uint16_t idxmap; - uint16_t bits; + MMUIdxMap idxmap; + unsigned bits; } TLBFlushRangeData; static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, @@ -766,7 +764,7 @@ static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, } void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, - vaddr len, uint16_t idxmap, + vaddr len, MMUIdxMap idxmap, unsigned bits) { TLBFlushRangeData d; @@ -797,7 +795,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, } void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, - uint16_t idxmap, unsigned bits) + MMUIdxMap idxmap, unsigned bits) { tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); } @@ -805,7 +803,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, vaddr addr, vaddr len, - uint16_t idxmap, + MMUIdxMap idxmap, unsigned bits) { TLBFlushRangeData d, *p; @@ -847,7 +845,7 @@ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, vaddr addr, - uint16_t idxmap, + MMUIdxMap idxmap, unsigned bits) { tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, @@ -858,7 +856,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, can be detected */ void tlb_protect_code(ram_addr_t ram_addr) { - cpu_physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, + physical_memory_test_and_clear_dirty(ram_addr & TARGET_PAGE_MASK, TARGET_PAGE_SIZE, DIRTY_MEMORY_CODE); } @@ -867,7 +865,7 @@ void tlb_protect_code(ram_addr_t ram_addr) 
tested for self modifying code */ void tlb_unprotect_code(ram_addr_t ram_addr) { - cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); + physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); } @@ -923,7 +921,7 @@ void tlb_reset_dirty(CPUState *cpu, uintptr_t start, uintptr_t length) qemu_spin_lock(&cpu->neg.tlb.c.lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { CPUTLBDesc *desc = &cpu->neg.tlb.d[mmu_idx]; - CPUTLBDescFast *fast = &cpu->neg.tlb.f[mmu_idx]; + CPUTLBDescFast *fast = cpu_tlb_fast(cpu, mmu_idx); unsigned int n = tlb_n_entries(fast); unsigned int i; @@ -1085,7 +1083,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx, if (prot & PAGE_WRITE) { if (section->readonly) { write_flags |= TLB_DISCARD_WRITE; - } else if (cpu_physical_memory_is_clean(iotlb)) { + } else if (physical_memory_is_clean(iotlb)) { write_flags |= TLB_NOTDIRTY; } } @@ -1316,7 +1314,7 @@ static bool victim_tlb_hit(CPUState *cpu, size_t mmu_idx, size_t index, if (cmp == page) { /* Found entry in victim tlb, swap tlb and iotlb. */ - CPUTLBEntry tmptlb, *tlb = &cpu->neg.tlb.f[mmu_idx].table[index]; + CPUTLBEntry tmptlb, *tlb = &cpu_tlb_fast(cpu, mmu_idx)->table[index]; qemu_spin_lock(&cpu->neg.tlb.c.lock); copy_tlb_helper_locked(&tmptlb, tlb); @@ -1341,7 +1339,7 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); - if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { + if (!physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { tb_invalidate_phys_range_fast(cpu, ram_addr, size, retaddr); } @@ -1349,10 +1347,10 @@ static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, * Set both VGA and migration bits for simplicity and to remove * the notdirty callback faster. 
*/ - cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); + physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); /* We remove the notdirty callback only if the code has been flushed. */ - if (!cpu_physical_memory_is_clean(ram_addr)) { + if (!physical_memory_is_clean(ram_addr)) { trace_memory_notdirty_set_dirty(mem_vaddr); tlb_set_dirty(cpu, mem_vaddr); } @@ -1744,6 +1742,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, uintptr_t ra, MMUAccessType type, MMULookupLocals *l) { bool crosspage; + vaddr last; int flags; l->memop = get_memop(oi); @@ -1753,13 +1752,15 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, l->page[0].addr = addr; l->page[0].size = memop_size(l->memop); - l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK; + l->page[1].addr = 0; l->page[1].size = 0; - crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK; - if (likely(!crosspage)) { - mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); + /* Lookup and recognize exceptions from the first page. */ + mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); + last = addr + l->page[0].size - 1; + crosspage = (addr ^ last) & TARGET_PAGE_MASK; + if (likely(!crosspage)) { flags = l->page[0].flags; if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) { mmu_watch_or_dirty(cpu, &l->page[0], type, ra); @@ -1769,18 +1770,18 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi, } } else { /* Finish compute of page crossing. */ - int size0 = l->page[1].addr - addr; + vaddr addr1 = last & TARGET_PAGE_MASK; + int size0 = addr1 - addr; l->page[1].size = l->page[0].size - size0; l->page[0].size = size0; - l->page[1].addr = cpu->cc->tcg_ops->pointer_wrap(cpu, l->mmu_idx, - l->page[1].addr, addr); + addr1, addr); /* - * Lookup both pages, recognizing exceptions from either. If the - * second lookup potentially resized, refresh first CPUTLBEntryFull. 
+ * Lookup and recognize exceptions from the second page. + * If the lookup potentially resized the table, refresh the + * first CPUTLBEntryFull pointer. */ - mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra); if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) { uintptr_t index = tlb_index(cpu, l->mmu_idx, addr); l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index]; diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c index 9920381a84eec..1ffcb4b2d2d0f 100644 --- a/accel/tcg/plugin-gen.c +++ b/accel/tcg/plugin-gen.c @@ -102,8 +102,8 @@ static TCGv_i32 gen_cpu_index(void) /* * Optimize when we run with a single vcpu. All values using cpu_index, * including scoreboard index, will be optimized out. - * User-mode calls tb_flush when setting this flag. In system-mode, all - * vcpus are created before generating code. + * User-mode flushes all TBs when setting this flag. + * In system-mode, all vcpus are created before generating code. */ if (!tcg_cflags_has(current_cpu, CF_PARALLEL)) { return tcg_constant_i32(current_cpu->cpu_index); diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c index 0048316f99a77..5a8d0784e7ad3 100644 --- a/accel/tcg/tb-maint.c +++ b/accel/tcg/tb-maint.c @@ -36,6 +36,9 @@ #include "internal-common.h" #ifdef CONFIG_USER_ONLY #include "user/page-protection.h" +#define runstate_is_running() true +#else +#include "system/runstate.h" #endif @@ -88,7 +91,10 @@ static IntervalTreeRoot tb_root; static void tb_remove_all(void) { - assert_memory_lock(); + /* + * Only called from tb_flush__exclusive_or_serial, where we have already + * asserted that we're in an exclusive state. + */ memset(&tb_root, 0, sizeof(tb_root)); } @@ -756,17 +762,19 @@ static void tb_remove(TranslationBlock *tb) } #endif /* CONFIG_USER_ONLY */ -/* flush all the translation blocks */ -static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +/* + * Flush all the translation blocks. 
+ * Must be called from a context in which no cpus are running, + * e.g. start_exclusive() or vm_stop(). + */ +void tb_flush__exclusive_or_serial(void) { - bool did_flush = false; + CPUState *cpu; - mmap_lock(); - /* If it is already been done on request of another CPU, just retry. */ - if (tb_ctx.tb_flush_count != tb_flush_count.host_int) { - goto done; - } - did_flush = true; + assert(tcg_enabled()); + /* Note that cpu_in_serial_context checks cpu_in_exclusive_context. */ + assert(!runstate_is_running() || + (current_cpu && cpu_in_serial_context(current_cpu))); CPU_FOREACH(cpu) { tcg_flush_jmp_cache(cpu); @@ -778,25 +786,23 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) tcg_region_reset_all(); /* XXX: flush processor icache at this point if cache flush is expensive */ qatomic_inc(&tb_ctx.tb_flush_count); + qemu_plugin_flush_cb(); +} -done: - mmap_unlock(); - if (did_flush) { - qemu_plugin_flush_cb(); +static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count) +{ + /* If it is already been done on request of another CPU, just retry. */ + if (tb_ctx.tb_flush_count == tb_flush_count.host_int) { + tb_flush__exclusive_or_serial(); } } -void tb_flush(CPUState *cpu) +void queue_tb_flush(CPUState *cs) { if (tcg_enabled()) { unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count); - - if (cpu_in_serial_context(cpu)) { - do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count)); - } else { - async_safe_run_on_cpu(cpu, do_tb_flush, - RUN_ON_CPU_HOST_INT(tb_flush_count)); - } + async_safe_run_on_cpu(cs, do_tb_flush, + RUN_ON_CPU_HOST_INT(tb_flush_count)); } } @@ -836,6 +842,14 @@ static inline void tb_remove_from_jmp_list(TranslationBlock *orig, int n_orig) * We first acquired the lock, and since the destination pointer matches, * we know for sure that @orig is in the jmp list. 
*/ + if (dest == orig) { + /* + * In the case of a TB that links to itself, removing the entry + * from the list means that it won't be present later during + * tb_jmp_unlink -- unlink now. + */ + tb_reset_jump(orig, n_orig); + } pprev = &dest->jmp_list_head; TB_FOR_EACH_JMP(dest, tb, n) { if (tb == orig && n == n_orig) { @@ -1154,7 +1168,6 @@ tb_invalidate_phys_page_range__locked(CPUState *cpu, page_collection_unlock(pages); /* Force execution of one insn next time. */ cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu); - mmap_unlock(); cpu_loop_exit_noexc(cpu); } } diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c index 337b993d3da91..cf1ee7ac2587c 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.c +++ b/accel/tcg/tcg-accel-ops-mttcg.c @@ -84,10 +84,9 @@ static void *mttcg_cpu_thread_fn(void *arg) cpu_thread_signal_created(cpu); qemu_guest_random_seed_thread_part2(cpu->random_seed); - /* process any pending work */ - cpu->exit_request = 1; - do { + qemu_process_cpu_events(cpu); + if (cpu_can_run(cpu)) { int r; bql_unlock(); @@ -112,8 +111,6 @@ static void *mttcg_cpu_thread_fn(void *arg) break; } } - - qemu_wait_io_event(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); tcg_cpu_destroy(cpu); @@ -123,11 +120,6 @@ static void *mttcg_cpu_thread_fn(void *arg) return NULL; } -void mttcg_kick_vcpu_thread(CPUState *cpu) -{ - cpu_exit(cpu); -} - void mttcg_start_vcpu_thread(CPUState *cpu) { char thread_name[VCPU_THREAD_NAME_SIZE]; diff --git a/accel/tcg/tcg-accel-ops-mttcg.h b/accel/tcg/tcg-accel-ops-mttcg.h index 8ffa7a9a9fe0d..5c145cc85955a 100644 --- a/accel/tcg/tcg-accel-ops-mttcg.h +++ b/accel/tcg/tcg-accel-ops-mttcg.h @@ -10,9 +10,6 @@ #ifndef TCG_ACCEL_OPS_MTTCG_H #define TCG_ACCEL_OPS_MTTCG_H -/* kick MTTCG vCPU thread */ -void mttcg_kick_vcpu_thread(CPUState *cpu); - /* start an mttcg vCPU thread */ void mttcg_start_vcpu_thread(CPUState *cpu); diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c index 
6eec5c9eee913..2fb464399710b 100644 --- a/accel/tcg/tcg-accel-ops-rr.c +++ b/accel/tcg/tcg-accel-ops-rr.c @@ -43,7 +43,7 @@ void rr_kick_vcpu_thread(CPUState *unused) CPUState *cpu; CPU_FOREACH(cpu) { - cpu_exit(cpu); + tcg_kick_vcpu_thread(cpu); }; } @@ -117,7 +117,7 @@ static void rr_wait_io_event(void) rr_start_kick_timer(); CPU_FOREACH(cpu) { - qemu_wait_io_event_common(cpu); + qemu_process_cpu_events_common(cpu); } } @@ -203,7 +203,7 @@ static void *rr_cpu_thread_fn(void *arg) /* process any pending work */ CPU_FOREACH(cpu) { current_cpu = cpu; - qemu_wait_io_event_common(cpu); + qemu_process_cpu_events_common(cpu); } } @@ -211,13 +211,30 @@ static void *rr_cpu_thread_fn(void *arg) cpu = first_cpu; - /* process any pending work */ - cpu->exit_request = 1; - while (1) { /* Only used for icount_enabled() */ int64_t cpu_budget = 0; + if (cpu) { + /* + * This could even reset exit_request for all CPUs, but in practice + * races between CPU exits and changes to "cpu" are so rare that + * there's no advantage in doing so. + */ + qatomic_set(&cpu->exit_request, false); + } + + if (icount_enabled() && all_cpu_threads_idle()) { + /* + * When all cpus are sleeping (e.g in WFI), to avoid a deadlock + * in the main_loop, wake it up in order to start the warp timer. + */ + qemu_notify_event(); + } + + rr_wait_io_event(); + rr_deal_with_unplugged_cpus(); + bql_unlock(); replay_mutex_lock(); bql_lock(); @@ -242,10 +259,17 @@ static void *rr_cpu_thread_fn(void *arg) cpu = first_cpu; } - while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) { - /* Store rr_current_cpu before evaluating cpu_can_run(). */ + while (cpu && cpu_work_list_empty(cpu)) { + /* + * Store rr_current_cpu before evaluating cpu->exit_request. + * Pairs with rr_kick_next_cpu(). + */ qatomic_set_mb(&rr_current_cpu, cpu); + /* Pairs with store-release in cpu_exit. 
*/ + if (qatomic_load_acquire(&cpu->exit_request)) { + break; + } current_cpu = cpu; qemu_clock_enable(QEMU_CLOCK_VIRTUAL, @@ -285,21 +309,6 @@ static void *rr_cpu_thread_fn(void *arg) /* Does not need a memory barrier because a spurious wakeup is okay. */ qatomic_set(&rr_current_cpu, NULL); - - if (cpu && cpu->exit_request) { - qatomic_set_mb(&cpu->exit_request, 0); - } - - if (icount_enabled() && all_cpu_threads_idle()) { - /* - * When all cpus are sleeping (e.g in WFI), to avoid a deadlock - * in the main_loop, wake it up in order to start the warp timer. - */ - qemu_notify_event(); - } - - rr_wait_io_event(); - rr_deal_with_unplugged_cpus(); } g_assert_not_reached(); diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c index 3b0d7d298e61f..3bd980050423c 100644 --- a/accel/tcg/tcg-accel-ops.c +++ b/accel/tcg/tcg-accel-ops.c @@ -82,8 +82,6 @@ int tcg_cpu_exec(CPUState *cpu) ret = cpu_exec(cpu); cpu_exec_end(cpu); - qatomic_set_mb(&cpu->exit_request, 0); - return ret; } @@ -97,7 +95,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu) /* mask must never be zero, except for A20 change call */ void tcg_handle_interrupt(CPUState *cpu, int mask) { - cpu->interrupt_request |= mask; + cpu_set_interrupt(cpu, mask); /* * If called from iothread context, wake the target cpu in @@ -206,7 +204,7 @@ static void tcg_accel_ops_init(AccelClass *ac) if (qemu_tcg_mttcg_enabled()) { ops->create_vcpu_thread = mttcg_start_vcpu_thread; - ops->kick_vcpu_thread = mttcg_kick_vcpu_thread; + ops->kick_vcpu_thread = tcg_kick_vcpu_thread; ops->handle_interrupt = tcg_handle_interrupt; } else { ops->create_vcpu_thread = rr_start_vcpu_thread; diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h index 6feeb3f3e9b20..aecce605d7b7a 100644 --- a/accel/tcg/tcg-accel-ops.h +++ b/accel/tcg/tcg-accel-ops.h @@ -18,5 +18,6 @@ void tcg_cpu_destroy(CPUState *cpu); int tcg_cpu_exec(CPUState *cpu); void tcg_handle_interrupt(CPUState *cpu, int mask); void tcg_cpu_init_cflags(CPUState 
*cpu, bool parallel); +void tcg_kick_vcpu_thread(CPUState *cpu); #endif /* TCG_ACCEL_OPS_H */ diff --git a/accel/tcg/tcg-all.c b/accel/tcg/tcg-all.c index 5125e1a4e27d9..18ea0c58b0b74 100644 --- a/accel/tcg/tcg-all.c +++ b/accel/tcg/tcg-all.c @@ -38,6 +38,8 @@ #include "qemu/target-info.h" #ifndef CONFIG_USER_ONLY #include "hw/boards.h" +#include "exec/tb-flush.h" +#include "system/runstate.h" #endif #include "accel/accel-ops.h" #include "accel/accel-cpu-ops.h" @@ -82,6 +84,23 @@ static void tcg_accel_instance_init(Object *obj) bool one_insn_per_tb; +#ifndef CONFIG_USER_ONLY +static void tcg_vm_change_state(void *opaque, bool running, RunState state) +{ + if (state == RUN_STATE_RESTORE_VM) { + /* + * loadvm will update the content of RAM, bypassing the usual + * mechanisms that ensure we flush TBs for writes to memory + * we've translated code from, so we must flush all TBs. + * + * vm_stop() has just stopped all cpus, so we are exclusive. + */ + assert(!running); + tb_flush__exclusive_or_serial(); + } +} +#endif + static int tcg_init_machine(AccelState *as, MachineState *ms) { TCGState *s = TCG_STATE(as); @@ -124,6 +143,8 @@ static int tcg_init_machine(AccelState *as, MachineState *ms) default: g_assert_not_reached(); } + + qemu_add_vm_change_state_handler(tcg_vm_change_state, NULL); #endif tcg_allowed = true; diff --git a/accel/tcg/tcg-runtime.h b/accel/tcg/tcg-runtime.h index c23b5e66c4631..8436599b9f1d0 100644 --- a/accel/tcg/tcg-runtime.h +++ b/accel/tcg/tcg-runtime.h @@ -63,6 +63,18 @@ DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG, i128, env, i64, i128, i128, i32) DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG, i128, env, i64, i128, i128, i32) +DEF_HELPER_FLAGS_4(atomic_xchgo_be, TCG_CALL_NO_WG, + i128, env, i64, i128, i32) +DEF_HELPER_FLAGS_4(atomic_xchgo_le, TCG_CALL_NO_WG, + i128, env, i64, i128, i32) +DEF_HELPER_FLAGS_4(atomic_fetch_ando_be, TCG_CALL_NO_WG, + i128, env, i64, i128, i32) +DEF_HELPER_FLAGS_4(atomic_fetch_ando_le, 
TCG_CALL_NO_WG, + i128, env, i64, i128, i32) +DEF_HELPER_FLAGS_4(atomic_fetch_oro_be, TCG_CALL_NO_WG, + i128, env, i64, i128, i32) +DEF_HELPER_FLAGS_4(atomic_fetch_oro_le, TCG_CALL_NO_WG, + i128, env, i64, i128, i32) #endif DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG, diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c index d468667b0dd4c..da9d7f1675276 100644 --- a/accel/tcg/translate-all.c +++ b/accel/tcg/translate-all.c @@ -289,7 +289,11 @@ TranslationBlock *tb_gen_code(CPUState *cpu, TCGTBCPUState s) tb = tcg_tb_alloc(tcg_ctx); if (unlikely(!tb)) { /* flush must be done */ - tb_flush(cpu); + if (cpu_in_serial_context(cpu)) { + tb_flush__exclusive_or_serial(); + goto buffer_overflow; + } + queue_tb_flush(cpu); mmap_unlock(); /* Make the execution loop process the flush as soon as possible. */ cpu->exception_index = EXCP_INTERRUPT; diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c index f25d80e2dc2c1..1800dffa63f14 100644 --- a/accel/tcg/user-exec.c +++ b/accel/tcg/user-exec.c @@ -38,6 +38,7 @@ #include "qemu/int128.h" #include "trace.h" #include "tcg/tcg-ldst.h" +#include "tcg-accel-ops.h" #include "backend-ldst.h" #include "internal-common.h" #include "tb-internal.h" @@ -46,11 +47,15 @@ __thread uintptr_t helper_retaddr; //#define DEBUG_SIGNAL -void cpu_interrupt(CPUState *cpu, int mask) +void qemu_cpu_kick(CPUState *cpu) { - g_assert(bql_locked()); - cpu->interrupt_request |= mask; - qatomic_set(&cpu->neg.icount_decr.u16.high, -1); + tcg_kick_vcpu_thread(cpu); +} + +void qemu_process_cpu_events(CPUState *cpu) +{ + qatomic_set(&cpu->exit_request, false); + process_queued_cpu_work(cpu); } /* @@ -264,48 +269,6 @@ static void pageflags_create(vaddr start, vaddr last, int flags) interval_tree_insert(&p->itree, &pageflags_root); } -/* A subroutine of page_set_flags: remove everything in [start,last]. 
*/ -static bool pageflags_unset(vaddr start, vaddr last) -{ - bool inval_tb = false; - - while (true) { - PageFlagsNode *p = pageflags_find(start, last); - vaddr p_last; - - if (!p) { - break; - } - - if (p->flags & PAGE_EXEC) { - inval_tb = true; - } - - interval_tree_remove(&p->itree, &pageflags_root); - p_last = p->itree.last; - - if (p->itree.start < start) { - /* Truncate the node from the end, or split out the middle. */ - p->itree.last = start - 1; - interval_tree_insert(&p->itree, &pageflags_root); - if (last < p_last) { - pageflags_create(last + 1, p_last, p->flags); - break; - } - } else if (p_last <= last) { - /* Range completely covers node -- remove it. */ - g_free_rcu(p, rcu); - } else { - /* Truncate the node from the start. */ - p->itree.start = last + 1; - interval_tree_insert(&p->itree, &pageflags_root); - break; - } - } - - return inval_tb; -} - /* * A subroutine of page_set_flags: nothing overlaps [start,last], * but check adjacent mappings and maybe merge into a single range. @@ -351,15 +314,6 @@ static void pageflags_create_merge(vaddr start, vaddr last, int flags) } } -/* - * Allow the target to decide if PAGE_TARGET_[12] may be reset. - * By default, they are not kept. - */ -#ifndef PAGE_TARGET_STICKY -#define PAGE_TARGET_STICKY 0 -#endif -#define PAGE_STICKY (PAGE_ANON | PAGE_PASSTHROUGH | PAGE_TARGET_STICKY) - /* A subroutine of page_set_flags: add flags to [start,last]. */ static bool pageflags_set_clear(vaddr start, vaddr last, int set_flags, int clear_flags) @@ -372,7 +326,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last, restart: p = pageflags_find(start, last); if (!p) { - if (set_flags) { + if (set_flags & PAGE_VALID) { pageflags_create_merge(start, last, set_flags); } goto done; @@ -386,11 +340,12 @@ static bool pageflags_set_clear(vaddr start, vaddr last, /* * Need to flush if an overlapping executable region - * removes exec, or adds write. + * removes exec, adds write, or is a new mapping. 
*/ if ((p_flags & PAGE_EXEC) && (!(merge_flags & PAGE_EXEC) - || (merge_flags & ~p_flags & PAGE_WRITE))) { + || (merge_flags & ~p_flags & PAGE_WRITE) + || (clear_flags & PAGE_VALID))) { inval_tb = true; } @@ -399,7 +354,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last, * attempting to merge with adjacent regions. */ if (start == p_start && last == p_last) { - if (merge_flags) { + if (merge_flags & PAGE_VALID) { p->flags = merge_flags; } else { interval_tree_remove(&p->itree, &pageflags_root); @@ -419,12 +374,12 @@ static bool pageflags_set_clear(vaddr start, vaddr last, interval_tree_insert(&p->itree, &pageflags_root); if (last < p_last) { - if (merge_flags) { + if (merge_flags & PAGE_VALID) { pageflags_create(start, last, merge_flags); } pageflags_create(last + 1, p_last, p_flags); } else { - if (merge_flags) { + if (merge_flags & PAGE_VALID) { pageflags_create(start, p_last, merge_flags); } if (p_last < last) { @@ -433,18 +388,18 @@ static bool pageflags_set_clear(vaddr start, vaddr last, } } } else { - if (start < p_start && set_flags) { + if (start < p_start && (set_flags & PAGE_VALID)) { pageflags_create(start, p_start - 1, set_flags); } if (last < p_last) { interval_tree_remove(&p->itree, &pageflags_root); p->itree.start = last + 1; interval_tree_insert(&p->itree, &pageflags_root); - if (merge_flags) { + if (merge_flags & PAGE_VALID) { pageflags_create(start, last, merge_flags); } } else { - if (merge_flags) { + if (merge_flags & PAGE_VALID) { p->flags = merge_flags; } else { interval_tree_remove(&p->itree, &pageflags_root); @@ -492,7 +447,7 @@ static bool pageflags_set_clear(vaddr start, vaddr last, g_free_rcu(p, rcu); goto restart; } - if (set_flags) { + if (set_flags & PAGE_VALID) { pageflags_create(start, last, set_flags); } @@ -500,42 +455,36 @@ static bool pageflags_set_clear(vaddr start, vaddr last, return inval_tb; } -void page_set_flags(vaddr start, vaddr last, int flags) +void page_set_flags(vaddr start, vaddr last, int set_flags, int 
clear_flags) { - bool reset = false; - bool inval_tb = false; - - /* This function should never be called with addresses outside the - guest address space. If this assert fires, it probably indicates - a missing call to h2g_valid. */ + /* + * This function should never be called with addresses outside the + * guest address space. If this assert fires, it probably indicates + * a missing call to h2g_valid. + */ assert(start <= last); assert(last <= guest_addr_max); - /* Only set PAGE_ANON with new mappings. */ - assert(!(flags & PAGE_ANON) || (flags & PAGE_RESET)); assert_memory_lock(); start &= TARGET_PAGE_MASK; last |= ~TARGET_PAGE_MASK; - if (!(flags & PAGE_VALID)) { - flags = 0; - } else { - reset = flags & PAGE_RESET; - flags &= ~PAGE_RESET; - if (flags & PAGE_WRITE) { - flags |= PAGE_WRITE_ORG; - } + if (set_flags & PAGE_WRITE) { + set_flags |= PAGE_WRITE_ORG; + } + if (clear_flags & PAGE_WRITE) { + clear_flags |= PAGE_WRITE_ORG; } - if (!flags || reset) { + if (clear_flags & PAGE_VALID) { page_reset_target_data(start, last); - inval_tb |= pageflags_unset(start, last); - } - if (flags) { - inval_tb |= pageflags_set_clear(start, last, flags, - ~(reset ? 0 : PAGE_STICKY)); + clear_flags = -1; + } else { + /* Only set PAGE_ANON with new mappings. 
*/ + assert(!(set_flags & PAGE_ANON)); } - if (inval_tb) { + + if (pageflags_set_clear(start, last, set_flags, clear_flags)) { tb_invalidate_phys_range(NULL, start, last); } } diff --git a/backends/iommufd.c b/backends/iommufd.c index 2a33c7ab0bcdc..fdfb7c9d67197 100644 --- a/backends/iommufd.c +++ b/backends/iommufd.c @@ -197,7 +197,7 @@ void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id) } int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly) + uint64_t size, void *vaddr, bool readonly) { int ret, fd = be->fd; struct iommu_ioas_map map = { @@ -230,7 +230,7 @@ int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, } int iommufd_backend_map_file_dma(IOMMUFDBackend *be, uint32_t ioas_id, - hwaddr iova, ram_addr_t size, + hwaddr iova, uint64_t size, int mfd, unsigned long start, bool readonly) { int ret, fd = be->fd; @@ -268,7 +268,7 @@ int iommufd_backend_map_file_dma(IOMMUFDBackend *be, uint32_t ioas_id, } int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, - hwaddr iova, ram_addr_t size) + hwaddr iova, uint64_t size) { int ret, fd = be->fd; struct iommu_ioas_unmap unmap = { diff --git a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index 4a234ab2c0b19..dacfca5ab7eb0 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -819,7 +819,8 @@ static int tpm_emulator_get_state_blobs(TPMEmulator *tpm_emu) static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu, uint32_t type, TPMSizedBuffer *tsb, - uint32_t flags) + uint32_t flags, + Error **errp) { ssize_t n; ptm_setstate pss; @@ -838,17 +839,18 @@ static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu, /* write the header only */ if (tpm_emulator_ctrlcmd(tpm_emu, CMD_SET_STATEBLOB, &pss, offsetof(ptm_setstate, u.req.data), 0, 0) < 0) { - error_report("tpm-emulator: could not set state blob type %d : %s", - type, strerror(errno)); + error_setg_errno(errp, 
errno, + "tpm-emulator: could not set state blob type %d", + type); return -1; } /* now the body */ n = qemu_chr_fe_write_all(&tpm_emu->ctrl_chr, tsb->buffer, tsb->size); if (n != tsb->size) { - error_report("tpm-emulator: Writing the stateblob (type %d) " - "failed; could not write %u bytes, but only %zd", - type, tsb->size, n); + error_setg(errp, "tpm-emulator: Writing the stateblob (type %d) " + "failed; could not write %u bytes, but only %zd", + type, tsb->size, n); return -1; } @@ -856,17 +858,17 @@ static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu, n = qemu_chr_fe_read_all(&tpm_emu->ctrl_chr, (uint8_t *)&pss, sizeof(pss.u.resp)); if (n != sizeof(pss.u.resp)) { - error_report("tpm-emulator: Reading response from writing stateblob " - "(type %d) failed; expected %zu bytes, got %zd", type, - sizeof(pss.u.resp), n); + error_setg(errp, "tpm-emulator: Reading response from writing " + "stateblob (type %d) failed; expected %zu bytes, " + "got %zd", type, sizeof(pss.u.resp), n); return -1; } tpm_result = be32_to_cpu(pss.u.resp.tpm_result); if (tpm_result != 0) { - error_report("tpm-emulator: Setting the stateblob (type %d) failed " - "with a TPM error 0x%x %s", type, tpm_result, - tpm_emulator_strerror(tpm_result)); + error_setg(errp, "tpm-emulator: Setting the stateblob (type %d) " + "failed with a TPM error 0x%x %s", type, tpm_result, + tpm_emulator_strerror(tpm_result)); return -1; } @@ -880,7 +882,7 @@ static int tpm_emulator_set_state_blob(TPMEmulator *tpm_emu, * * Returns a negative errno code in case of error. 
*/ -static int tpm_emulator_set_state_blobs(TPMBackend *tb) +static int tpm_emulator_set_state_blobs(TPMBackend *tb, Error **errp) { TPMEmulator *tpm_emu = TPM_EMULATOR(tb); TPMBlobBuffers *state_blobs = &tpm_emu->state_blobs; @@ -894,13 +896,13 @@ static int tpm_emulator_set_state_blobs(TPMBackend *tb) if (tpm_emulator_set_state_blob(tpm_emu, PTM_BLOB_TYPE_PERMANENT, &state_blobs->permanent, - state_blobs->permanent_flags) < 0 || + state_blobs->permanent_flags, errp) < 0 || tpm_emulator_set_state_blob(tpm_emu, PTM_BLOB_TYPE_VOLATILE, &state_blobs->volatil, - state_blobs->volatil_flags) < 0 || + state_blobs->volatil_flags, errp) < 0 || tpm_emulator_set_state_blob(tpm_emu, PTM_BLOB_TYPE_SAVESTATE, &state_blobs->savestate, - state_blobs->savestate_flags) < 0) { + state_blobs->savestate_flags, errp) < 0) { return -EIO; } @@ -948,12 +950,12 @@ static void tpm_emulator_vm_state_change(void *opaque, bool running, * * Returns negative errno codes in case of error. */ -static int tpm_emulator_post_load(void *opaque, int version_id) +static int tpm_emulator_post_load(void *opaque, int version_id, Error **errp) { TPMBackend *tb = opaque; int ret; - ret = tpm_emulator_set_state_blobs(tb); + ret = tpm_emulator_set_state_blobs(tb, errp); if (ret < 0) { return ret; } @@ -969,7 +971,7 @@ static const VMStateDescription vmstate_tpm_emulator = { .name = "tpm-emulator", .version_id = 0, .pre_save = tpm_emulator_pre_save, - .post_load = tpm_emulator_post_load, + .post_load_errp = tpm_emulator_post_load, .fields = (const VMStateField[]) { VMSTATE_UINT32(state_blobs.permanent_flags, TPMEmulator), VMSTATE_UINT32(state_blobs.permanent.size, TPMEmulator), diff --git a/block/curl.c b/block/curl.c index 5467678024f71..68cf83ce55fc5 100644 --- a/block/curl.c +++ b/block/curl.c @@ -162,13 +162,9 @@ static int curl_timer_cb(CURLM *multi, long timeout_ms, void *opaque) static int curl_sock_cb(CURL *curl, curl_socket_t fd, int action, void *userp, void *sp) { - BDRVCURLState *s; - CURLState 
*state = NULL; + BDRVCURLState *s = userp; CURLSocket *socket; - curl_easy_getinfo(curl, CURLINFO_PRIVATE, (char **)&state); - s = state->s; - socket = g_hash_table_lookup(s->sockets, GINT_TO_POINTER(fd)); if (!socket) { socket = g_new0(CURLSocket, 1); @@ -475,11 +471,11 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state) (void *)curl_read_cb) || curl_easy_setopt(state->curl, CURLOPT_WRITEDATA, (void *)state) || curl_easy_setopt(state->curl, CURLOPT_PRIVATE, (void *)state) || - curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1) || - curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1) || - curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1) || + curl_easy_setopt(state->curl, CURLOPT_AUTOREFERER, 1L) || + curl_easy_setopt(state->curl, CURLOPT_FOLLOWLOCATION, 1L) || + curl_easy_setopt(state->curl, CURLOPT_NOSIGNAL, 1L) || curl_easy_setopt(state->curl, CURLOPT_ERRORBUFFER, state->errmsg) || - curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1)) { + curl_easy_setopt(state->curl, CURLOPT_FAILONERROR, 1L)) { goto err; } if (s->username) { @@ -520,7 +516,7 @@ static int curl_init_state(BDRVCURLState *s, CURLState *state) CURLOPT_REDIR_PROTOCOLS_STR, PROTOCOLS)) { goto err; } -#elif LIBCURL_VERSION_NUM >= 0x071304 +#else if (curl_easy_setopt(state->curl, CURLOPT_PROTOCOLS, PROTOCOLS) || curl_easy_setopt(state->curl, CURLOPT_REDIR_PROTOCOLS, PROTOCOLS)) { goto err; @@ -605,6 +601,7 @@ static void curl_attach_aio_context(BlockDriverState *bs, assert(!s->multi); s->multi = curl_multi_init(); s->aio_context = new_context; + curl_multi_setopt(s->multi, CURLMOPT_SOCKETDATA, s); curl_multi_setopt(s->multi, CURLMOPT_SOCKETFUNCTION, curl_sock_cb); curl_multi_setopt(s->multi, CURLMOPT_TIMERDATA, s); curl_multi_setopt(s->multi, CURLMOPT_TIMERFUNCTION, curl_timer_cb); @@ -803,7 +800,7 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, } s->accept_range = false; - if (curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1) || + if 
(curl_easy_setopt(state->curl, CURLOPT_NOBODY, 1L) || curl_easy_setopt(state->curl, CURLOPT_HEADERFUNCTION, curl_header_cb) || curl_easy_setopt(state->curl, CURLOPT_HEADERDATA, s)) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, @@ -824,22 +821,11 @@ static int curl_open(BlockDriverState *bs, QDict *options, int flags, goto out; } #endif - /* Prior CURL 7.19.4 return value of 0 could mean that the file size is not - * know or the size is zero. From 7.19.4 CURL returns -1 if size is not - * known and zero if it is really zero-length file. */ -#if LIBCURL_VERSION_NUM >= 0x071304 if (cl < 0) { pstrcpy(state->errmsg, CURL_ERROR_SIZE, "Server didn't report file size."); goto out; } -#else - if (cl <= 0) { - pstrcpy(state->errmsg, CURL_ERROR_SIZE, - "Unknown file size or zero-length file."); - goto out; - } -#endif s->len = cl; diff --git a/block/nbd.c b/block/nbd.c index d5a2b21c6d14d..5d231d5c4e45a 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -351,7 +351,9 @@ int coroutine_fn nbd_co_do_establish_connection(BlockDriverState *bs, return ret; } - qio_channel_set_blocking(s->ioc, false, NULL); + if (!qio_channel_set_blocking(s->ioc, false, errp)) { + return -EINVAL; + } qio_channel_set_follow_coroutine_ctx(s->ioc, true); /* successfully connected */ diff --git a/block/rbd.c b/block/rbd.c index 951cd63f9aedf..3611dc81cf178 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -99,6 +99,14 @@ typedef struct BDRVRBDState { char *namespace; uint64_t image_size; uint64_t object_size; + + /* + * If @bs->encrypted is true, this is the encryption format actually loaded + * at the librbd level. If it is false, it is the result of probing. + * RBD_IMAGE_ENCRYPTION_FORMAT__MAX means that encryption is not enabled and + * probing didn't find any known encryption header either. 
+ */ + RbdImageEncryptionFormat encryption_format; } BDRVRBDState; typedef struct RBDTask { @@ -470,10 +478,12 @@ static int qemu_rbd_encryption_format(rbd_image_t image, return 0; } -static int qemu_rbd_encryption_load(rbd_image_t image, +static int qemu_rbd_encryption_load(BlockDriverState *bs, + rbd_image_t image, RbdEncryptionOptions *encrypt, Error **errp) { + BDRVRBDState *s = bs->opaque; int r = 0; g_autofree char *passphrase = NULL; rbd_encryption_luks1_format_options_t luks_opts; @@ -544,15 +554,19 @@ static int qemu_rbd_encryption_load(rbd_image_t image, error_setg_errno(errp, -r, "encryption load fail"); return r; } + bs->encrypted = true; + s->encryption_format = encrypt->format; return 0; } #ifdef LIBRBD_SUPPORTS_ENCRYPTION_LOAD2 -static int qemu_rbd_encryption_load2(rbd_image_t image, +static int qemu_rbd_encryption_load2(BlockDriverState *bs, + rbd_image_t image, RbdEncryptionOptions *encrypt, Error **errp) { + BDRVRBDState *s = bs->opaque; int r = 0; int encrypt_count = 1; int i; @@ -638,6 +652,8 @@ static int qemu_rbd_encryption_load2(rbd_image_t image, error_setg_errno(errp, -r, "layered encryption load fail"); goto exit; } + bs->encrypted = true; + s->encryption_format = encrypt->format; exit: for (i = 0; i < encrypt_count; ++i) { @@ -671,6 +687,45 @@ static int qemu_rbd_encryption_load2(rbd_image_t image, #endif #endif +/* + * For an image without encryption enabled on the rbd layer, probe the start of + * the image if it could be opened as an encrypted image so that we can display + * it when the user queries the node (most importantly in qemu-img). + * + * If the guest writes an encryption header to its disk after this probing, this + * won't be reflected when queried, but that's okay. There is no reason why the + * user should want to apply encryption at the rbd level while the image is + * still in use. This is just guest data. 
+ */ +static void qemu_rbd_encryption_probe(BlockDriverState *bs) +{ + BDRVRBDState *s = bs->opaque; + char buf[RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN] = {0}; + int r; + + assert(s->encryption_format == RBD_IMAGE_ENCRYPTION_FORMAT__MAX); + + r = rbd_read(s->image, 0, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN, buf); + if (r < RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) { + return; + } + + if (memcmp(buf, rbd_luks_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; + } else if (memcmp(buf, rbd_luks2_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; + } else if (memcmp(buf, rbd_layered_luks_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; + } else if (memcmp(buf, rbd_layered_luks2_header_verification, + RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; + } +} + /* FIXME Deprecate and remove keypairs or make it available in QMP. 
*/ static int qemu_rbd_do_create(BlockdevCreateOptions *options, const char *keypairs, const char *password_secret, @@ -1133,17 +1188,18 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, goto failed_open; } + s->encryption_format = RBD_IMAGE_ENCRYPTION_FORMAT__MAX; if (opts->encrypt) { #ifdef LIBRBD_SUPPORTS_ENCRYPTION if (opts->encrypt->parent) { #ifdef LIBRBD_SUPPORTS_ENCRYPTION_LOAD2 - r = qemu_rbd_encryption_load2(s->image, opts->encrypt, errp); + r = qemu_rbd_encryption_load2(bs, s->image, opts->encrypt, errp); #else r = -ENOTSUP; error_setg(errp, "RBD library does not support layered encryption"); #endif } else { - r = qemu_rbd_encryption_load(s->image, opts->encrypt, errp); + r = qemu_rbd_encryption_load(bs, s->image, opts->encrypt, errp); } if (r < 0) { goto failed_post_open; @@ -1153,6 +1209,8 @@ static int qemu_rbd_open(BlockDriverState *bs, QDict *options, int flags, error_setg(errp, "RBD library does not support image encryption"); goto failed_post_open; #endif + } else { + qemu_rbd_encryption_probe(bs); } r = rbd_stat(s->image, &info, sizeof(info)); @@ -1412,17 +1470,6 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs, { BDRVRBDState *s = bs->opaque; ImageInfoSpecific *spec_info; - char buf[RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN] = {0}; - int r; - - if (s->image_size >= RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) { - r = rbd_read(s->image, 0, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN, buf); - if (r < 0) { - error_setg_errno(errp, -r, "cannot read image start for probe"); - return NULL; - } - } spec_info = g_new(ImageInfoSpecific, 1); *spec_info = (ImageInfoSpecific){ @@ -1430,28 +1477,13 @@ static ImageInfoSpecific *qemu_rbd_get_specific_info(BlockDriverState *bs, .u.rbd.data = g_new0(ImageInfoSpecificRbd, 1), }; - if (memcmp(buf, rbd_luks_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - 
RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; - spec_info->u.rbd.data->has_encryption_format = true; - } else if (memcmp(buf, rbd_luks2_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; - spec_info->u.rbd.data->has_encryption_format = true; - } else if (memcmp(buf, rbd_layered_luks_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS; - spec_info->u.rbd.data->has_encryption_format = true; - } else if (memcmp(buf, rbd_layered_luks2_header_verification, - RBD_ENCRYPTION_LUKS_HEADER_VERIFICATION_LEN) == 0) { - spec_info->u.rbd.data->encryption_format = - RBD_IMAGE_ENCRYPTION_FORMAT_LUKS2; - spec_info->u.rbd.data->has_encryption_format = true; + if (s->encryption_format == RBD_IMAGE_ENCRYPTION_FORMAT__MAX) { + assert(!bs->encrypted); } else { - spec_info->u.rbd.data->has_encryption_format = false; + ImageInfoSpecificRbd *rbd_info = spec_info->u.rbd.data; + + rbd_info->has_encryption_format = true; + rbd_info->encryption_format = s->encryption_format; } return spec_info; diff --git a/bsd-user/aarch64/target_arch_cpu.h b/bsd-user/aarch64/target_arch_cpu.h index 87fbf6d67755d..15df84fda2194 100644 --- a/bsd-user/aarch64/target_arch_cpu.h +++ b/bsd-user/aarch64/target_arch_cpu.h @@ -54,7 +54,7 @@ static inline G_NORETURN void target_cpu_loop(CPUARMState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_SWI: diff --git a/bsd-user/aarch64/target_arch_elf.h b/bsd-user/aarch64/target_arch_elf.h index cc87f475b3f9f..cec254f88b9ce 100644 --- a/bsd-user/aarch64/target_arch_elf.h +++ b/bsd-user/aarch64/target_arch_elf.h @@ -114,7 +114,7 @@ static uint32_t get_elf_hwcap(void) GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); 
GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); - GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); + GET_FEATURE_ID(aa64_lse, ARM_HWCAP_A64_ATOMICS); GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); diff --git a/bsd-user/arm/target_arch_cpu.h b/bsd-user/arm/target_arch_cpu.h index bc2eaa0bf4e7f..9a952ef0ff7d2 100644 --- a/bsd-user/arm/target_arch_cpu.h +++ b/bsd-user/arm/target_arch_cpu.h @@ -46,7 +46,7 @@ static inline G_NORETURN void target_cpu_loop(CPUARMState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_UDEF: case EXCP_NOCP: diff --git a/bsd-user/arm/target_arch_elf.h b/bsd-user/arm/target_arch_elf.h index b1c0fd2b3209e..b54bf5fbc69be 100644 --- a/bsd-user/arm/target_arch_elf.h +++ b/bsd-user/arm/target_arch_elf.h @@ -86,7 +86,6 @@ static uint32_t get_elf_hwcap(void) /* probe for the extra features */ /* EDSP is in v5TE and above */ GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); - GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); diff --git a/bsd-user/bsd-mem.h b/bsd-user/bsd-mem.h index 1be906c5914be..416d0f8c23da1 100644 --- a/bsd-user/bsd-mem.h +++ b/bsd-user/bsd-mem.h @@ -390,8 +390,9 @@ static inline abi_long do_bsd_shmat(int shmid, abi_ulong shmaddr, int shmflg) raddr = h2g(host_raddr); page_set_flags(raddr, raddr + shm_info.shm_segsz - 1, - PAGE_VALID | PAGE_RESET | PAGE_READ | - (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE)); + PAGE_VALID | PAGE_READ | + (shmflg & SHM_RDONLY ? 
0 : PAGE_WRITE), + PAGE_VALID); for (int i = 0; i < N_BSD_SHM_REGIONS; i++) { if (bsd_shm_regions[i].start == 0) { @@ -428,7 +429,7 @@ static inline abi_long do_bsd_shmdt(abi_ulong shmaddr) abi_ulong size = bsd_shm_regions[i].size; bsd_shm_regions[i].start = 0; - page_set_flags(shmaddr, shmaddr + size - 1, 0); + page_set_flags(shmaddr, shmaddr + size - 1, 0, PAGE_VALID); mmap_reserve(shmaddr, size); } } diff --git a/bsd-user/i386/target_arch_cpu.h b/bsd-user/i386/target_arch_cpu.h index 5d4c931decdbd..f147d5b6f85f5 100644 --- a/bsd-user/i386/target_arch_cpu.h +++ b/bsd-user/i386/target_arch_cpu.h @@ -113,7 +113,7 @@ static inline G_NORETURN void target_cpu_loop(CPUX86State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case 0x80: { diff --git a/bsd-user/main.c b/bsd-user/main.c index 7e5d4bbce0942..73aae8c3274d9 100644 --- a/bsd-user/main.c +++ b/bsd-user/main.c @@ -214,11 +214,6 @@ bool qemu_cpu_is_self(CPUState *cpu) return thread_cpu == cpu; } -void qemu_cpu_kick(CPUState *cpu) -{ - cpu_exit(cpu); -} - /* Assumes contents are already zeroed. 
*/ static void init_task_state(TaskState *ts) { @@ -367,14 +362,6 @@ int main(int argc, char **argv) } } else if (!strcmp(r, "L")) { interp_prefix = argv[optind++]; - } else if (!strcmp(r, "p")) { - unsigned size, want = qemu_real_host_page_size(); - - r = argv[optind++]; - if (qemu_strtoui(r, NULL, 10, &size) || size != want) { - warn_report("Deprecated page size option cannot " - "change host page size (%u)", want); - } } else if (!strcmp(r, "g")) { gdbstub = g_strdup(argv[optind++]); } else if (!strcmp(r, "r")) { diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c index 47e317517cb54..24ba1728eb558 100644 --- a/bsd-user/mmap.c +++ b/bsd-user/mmap.c @@ -122,7 +122,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int prot) if (ret != 0) goto error; } - page_set_flags(start, start + len - 1, prot | PAGE_VALID); + page_set_flags(start, start + len - 1, prot, PAGE_RWX); mmap_unlock(); return 0; error: @@ -652,7 +652,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot, } } the_end1: - page_set_flags(start, start + len - 1, prot | PAGE_VALID); + page_set_flags(start, start + len - 1, prot | PAGE_VALID, PAGE_VALID); the_end: #ifdef DEBUG_MMAP printf("ret=0x" TARGET_ABI_FMT_lx "\n", start); @@ -763,7 +763,7 @@ int target_munmap(abi_ulong start, abi_ulong len) } if (ret == 0) { - page_set_flags(start, start + len - 1, 0); + page_set_flags(start, start + len - 1, 0, PAGE_VALID); } mmap_unlock(); return ret; diff --git a/bsd-user/riscv/target_arch_cpu.h b/bsd-user/riscv/target_arch_cpu.h index ef92f004803ba..ad428d0263d9b 100644 --- a/bsd-user/riscv/target_arch_cpu.h +++ b/bsd-user/riscv/target_arch_cpu.h @@ -49,7 +49,7 @@ static inline G_NORETURN void target_cpu_loop(CPURISCVState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); signo = 0; diff --git a/bsd-user/x86_64/target_arch_cpu.h b/bsd-user/x86_64/target_arch_cpu.h index f82042e30afae..1fa71d87f1249 100644 --- 
a/bsd-user/x86_64/target_arch_cpu.h +++ b/bsd-user/x86_64/target_arch_cpu.h @@ -121,7 +121,7 @@ static inline G_NORETURN void target_cpu_loop(CPUX86State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_SYSCALL: diff --git a/chardev/baum.c b/chardev/baum.c index f3e8cd27f0673..ad6832150416f 100644 --- a/chardev/baum.c +++ b/chardev/baum.c @@ -94,7 +94,7 @@ struct BaumChardev { Chardev parent; brlapi_handle_t *brlapi; - int brlapi_fd; + brlapi_fileDescriptor brlapi_fd; unsigned int x, y; bool deferred_init; @@ -654,7 +654,7 @@ static void baum_chr_open(Chardev *chr, baum->brlapi = handle; baum->brlapi_fd = brlapi__openConnection(handle, NULL, NULL); - if (baum->brlapi_fd == -1) { + if (baum->brlapi_fd == BRLAPI_INVALID_FILE_DESCRIPTOR) { error_setg(errp, "brlapi__openConnection: %s", brlapi_strerror(brlapi_error_location())); g_free(handle); @@ -665,6 +665,10 @@ static void baum_chr_open(Chardev *chr, baum->cellCount_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, baum_cellCount_timer_cb, baum); + /* + * On Windows, brlapi_fd is a pointer, which is being used here + * as an integer, but in practice it seems to work + */ qemu_set_fd_handler(baum->brlapi_fd, baum_chr_read, NULL, baum); } diff --git a/chardev/char-fd.c b/chardev/char-fd.c index 6f03adf872531..4ee286f323328 100644 --- a/chardev/char-fd.c +++ b/chardev/char-fd.c @@ -206,14 +206,16 @@ int qmp_chardev_open_file_source(char *src, int flags, Error **errp) } /* open a character device to a unix fd */ -void qemu_chr_open_fd(Chardev *chr, - int fd_in, int fd_out) +bool qemu_chr_open_fd(Chardev *chr, + int fd_in, int fd_out, Error **errp) { FDChardev *s = FD_CHARDEV(chr); g_autofree char *name = NULL; - if (fd_out >= 0 && !g_unix_set_fd_nonblocking(fd_out, true, NULL)) { - assert(!"Failed to set FD nonblocking"); + if (fd_out >= 0) { + if (!qemu_set_blocking(fd_out, false, errp)) { + return false; + } } if 
(fd_out == fd_in && fd_in >= 0) { @@ -221,7 +223,7 @@ void qemu_chr_open_fd(Chardev *chr, name = g_strdup_printf("chardev-file-%s", chr->label); qio_channel_set_name(QIO_CHANNEL(s->ioc_in), name); s->ioc_out = QIO_CHANNEL(object_ref(s->ioc_in)); - return; + return true; } if (fd_in >= 0) { @@ -236,6 +238,8 @@ void qemu_chr_open_fd(Chardev *chr, name = g_strdup_printf("chardev-file-out-%s", chr->label); qio_channel_set_name(QIO_CHANNEL(s->ioc_out), name); } + + return true; } static void char_fd_class_init(ObjectClass *oc, const void *data) diff --git a/chardev/char-file.c b/chardev/char-file.c index a9e8c5e0d7f8d..89e9cb849c840 100644 --- a/chardev/char-file.c +++ b/chardev/char-file.c @@ -92,7 +92,11 @@ static void qmp_chardev_open_file(Chardev *chr, } } - qemu_chr_open_fd(chr, in, out); + if (!qemu_chr_open_fd(chr, in, out, errp)) { + qemu_close(out); + qemu_close(in); + return; + } #endif } diff --git a/chardev/char-pipe.c b/chardev/char-pipe.c index 3d1b0ce2d2ed3..e9f3bb82904c0 100644 --- a/chardev/char-pipe.c +++ b/chardev/char-pipe.c @@ -150,7 +150,14 @@ static void qemu_chr_open_pipe(Chardev *chr, return; } } - qemu_chr_open_fd(chr, fd_in, fd_out); + + if (!qemu_chr_open_fd(chr, fd_in, fd_out, errp)) { + close(fd_in); + if (fd_out != fd_in) { + close(fd_out); + } + return; + } } #endif /* !_WIN32 */ diff --git a/chardev/char-pty.c b/chardev/char-pty.c index 674e9b3f1448f..b066f01412697 100644 --- a/chardev/char-pty.c +++ b/chardev/char-pty.c @@ -349,8 +349,8 @@ static void char_pty_open(Chardev *chr, } close(slave_fd); - if (!g_unix_set_fd_nonblocking(master_fd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(master_fd, false, errp)) { + close(master_fd); return; } diff --git a/chardev/char-serial.c b/chardev/char-serial.c index 0a68b4b4e0b14..4c6ca713eb1ab 100644 --- a/chardev/char-serial.c +++ b/chardev/char-serial.c @@ -271,13 +271,16 @@ static void qmp_chardev_open_serial(Chardev *chr, if (fd < 0) 
{ return; } - if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); return; } tty_serial_init(fd, 115200, 'N', 8, 1); - qemu_chr_open_fd(chr, fd, fd); + if (!qemu_chr_open_fd(chr, fd, fd, errp)) { + close(fd); + return; + } } #endif /* __linux__ || __sun__ */ diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 1e8313915b540..cb4ec78ebe304 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -294,7 +294,12 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len) } if (msgfds_num) { - /* close and clean read_msgfds */ + /* + * Close and clean previous read_msgfds, they are obsolete at + * this point, regardless result of new call to + * qio_channel_readv_full(). + */ + for (i = 0; i < s->read_msgfds_num; i++) { close(s->read_msgfds[i]); } @@ -307,20 +312,6 @@ static ssize_t tcp_chr_recv(Chardev *chr, char *buf, size_t len) s->read_msgfds_num = msgfds_num; } - for (i = 0; i < s->read_msgfds_num; i++) { - int fd = s->read_msgfds[i]; - if (fd < 0) { - continue; - } - - /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */ - qemu_socket_set_block(fd); - -#ifndef MSG_CMSG_CLOEXEC - qemu_set_cloexec(fd); -#endif - } - if (ret == QIO_CHANNEL_ERR_BLOCK) { errno = EAGAIN; ret = -1; @@ -539,16 +530,24 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len) SocketChardev *s = SOCKET_CHARDEV(chr); int size; int saved_errno; + Error *local_err = NULL; if (s->state != TCP_CHARDEV_STATE_CONNECTED) { return 0; } - qio_channel_set_blocking(s->ioc, true, NULL); + if (!qio_channel_set_blocking(s->ioc, true, &local_err)) { + error_report_err(local_err); + return -1; + } size = tcp_chr_recv(chr, (void *) buf, len); saved_errno = errno; if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) { - qio_channel_set_blocking(s->ioc, false, NULL); + if (!qio_channel_set_blocking(s->ioc, false, &local_err)) { + 
error_report_err(local_err); + /* failed to recover non-blocking state */ + tcp_chr_disconnect(chr); + } } if (size == 0) { /* connection closed */ @@ -893,18 +892,22 @@ static void tcp_chr_set_client_ioc_name(Chardev *chr, static int tcp_chr_new_client(Chardev *chr, QIOChannelSocket *sioc) { SocketChardev *s = SOCKET_CHARDEV(chr); + Error *local_err = NULL; if (s->state != TCP_CHARDEV_STATE_CONNECTING) { return -1; } + if (!qio_channel_set_blocking(QIO_CHANNEL(sioc), false, &local_err)) { + error_report_err(local_err); + return -1; + } + s->ioc = QIO_CHANNEL(sioc); object_ref(OBJECT(sioc)); s->sioc = sioc; object_ref(OBJECT(sioc)); - qio_channel_set_blocking(s->ioc, false, NULL); - if (s->do_nodelay) { qio_channel_set_delay(s->ioc, false); } diff --git a/chardev/char-stdio.c b/chardev/char-stdio.c index 48db8d2f30fcf..2568164a108e7 100644 --- a/chardev/char-stdio.c +++ b/chardev/char-stdio.c @@ -107,18 +107,20 @@ static void qemu_chr_open_stdio(Chardev *chr, old_fd0_flags = fcntl(0, F_GETFL); old_fd1_flags = fcntl(1, F_GETFL); tcgetattr(0, &oldtty); - if (!g_unix_set_fd_nonblocking(0, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(0, false, errp)) { return; } + + if (!qemu_chr_open_fd(chr, 0, 1, errp)) { + return; + } + atexit(term_exit); memset(&act, 0, sizeof(act)); act.sa_handler = term_stdio_handler; sigaction(SIGCONT, &act, NULL); - qemu_chr_open_fd(chr, 0, 1); - stdio_allow_signal = !opts->has_signal || opts->signal; qemu_chr_set_echo_stdio(chr, false); } diff --git a/clippy.toml b/clippy.toml index 9016172983839..204f5713c0007 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1,3 +1,3 @@ doc-valid-idents = ["IrDA", "PrimeCell", ".."] allow-mixed-uninlined-format-args = false -msrv = "1.77.0" +msrv = "1.83.0" diff --git a/common-user/host/mips/safe-syscall.inc.S b/common-user/host/mips/safe-syscall.inc.S index 6a446149704e4..3b196cc634c4e 100644 --- a/common-user/host/mips/safe-syscall.inc.S +++ 
b/common-user/host/mips/safe-syscall.inc.S @@ -30,15 +30,9 @@ * arguments being syscall arguments (also 'long'). */ -#if _MIPS_SIM == _ABIO32 -/* 8 * 4 = 32 for outgoing parameters; 1 * 4 for s0 save; 1 * 4 for align. */ -#define FRAME 40 -#define OFS_S0 32 -#else /* 1 * 8 for s0 save; 1 * 8 for align. */ #define FRAME 16 #define OFS_S0 0 -#endif NESTED(safe_syscall_base, FRAME, ra) @@ -47,34 +41,6 @@ NESTED(safe_syscall_base, FRAME, ra) .cfi_adjust_cfa_offset FRAME REG_S s0, OFS_S0(sp) .cfi_rel_offset s0, OFS_S0 -#if _MIPS_SIM == _ABIO32 - /* - * The syscall calling convention is nearly the same as C: - * we enter with a0 == &signal_pending - * a1 == syscall number - * a2, a3, stack == syscall arguments - * and return the result in a0 - * and the syscall instruction needs - * v0 == syscall number - * a0 ... a3, stack == syscall arguments - * and returns the result in v0 - * Shuffle everything around appropriately. - */ - move s0, a0 /* signal_pending pointer */ - move v0, a1 /* syscall number */ - move a0, a2 /* syscall arguments */ - move a1, a3 - lw a2, FRAME+16(sp) - lw a3, FRAME+20(sp) - lw t4, FRAME+24(sp) - lw t5, FRAME+28(sp) - lw t6, FRAME+32(sp) - lw t7, FRAME+40(sp) - sw t4, 16(sp) - sw t5, 20(sp) - sw t6, 24(sp) - sw t7, 28(sp) -#else /* * The syscall calling convention is nearly the same as C: * we enter with a0 == &signal_pending @@ -95,7 +61,6 @@ NESTED(safe_syscall_base, FRAME, ra) move a3, a5 move a4, a6 move a5, a7 -#endif /* * This next sequence of code works in conjunction with the diff --git a/configs/devices/mips-softmmu/common.mak b/configs/devices/mips-softmmu/common.mak index b50107feafe3e..cdeae7ce450bc 100644 --- a/configs/devices/mips-softmmu/common.mak +++ b/configs/devices/mips-softmmu/common.mak @@ -6,4 +6,3 @@ # Boards are selected by default, uncomment to keep out of the build. 
# CONFIG_MALTA=n -# CONFIG_MIPSSIM=n diff --git a/configure b/configure index 2b2b3d6597946..a2f66f7ff9c70 100755 --- a/configure +++ b/configure @@ -391,21 +391,15 @@ elif check_define __sparc__ ; then else cpu="sparc" fi -elif check_define _ARCH_PPC ; then - if check_define _ARCH_PPC64 ; then - if check_define _LITTLE_ENDIAN ; then - cpu="ppc64le" - else - cpu="ppc64" - fi +elif check_define _ARCH_PPC64 ; then + if check_define _LITTLE_ENDIAN ; then + cpu="ppc64le" else - cpu="ppc" + cpu="ppc64" fi elif check_define __mips__ ; then if check_define __mips64 ; then cpu="mips64" - else - cpu="mips" fi elif check_define __s390__ ; then if check_define __s390x__ ; then @@ -453,7 +447,6 @@ case "$cpu" in armv*b|armv*l|arm) cpu=arm host_arch=arm - linux_arch=arm ;; i386|i486|i586|i686) @@ -474,17 +467,7 @@ case "$cpu" in host_arch=mips linux_arch=mips ;; - mips*) - cpu=mips - host_arch=mips - linux_arch=mips - ;; - ppc) - host_arch=ppc - linux_arch=powerpc - CPU_CFLAGS="-m32" - ;; ppc64) host_arch=ppc64 linux_arch=powerpc @@ -1001,7 +984,19 @@ $mkvenv ensuregroup --dir "${source_path}/python/wheels" \ # We ignore PATH completely here: we want to use the venv's Meson # *exclusively*. -meson="$(cd pyvenv/bin; pwd)/meson" +# for msys2 +get_pwd() { + if pwd -W >/dev/null 2>&1; then + pwd -W + else + pwd + fi +} + +meson="$(cd pyvenv/bin; get_pwd)/meson" +if [ -f "$meson$EXESUF" ]; then + meson="$meson$EXESUF" +fi # Conditionally ensure Sphinx is installed. @@ -1185,12 +1180,14 @@ fi # detect rust triple meson_version=$($meson --version) -if test "$rust" != disabled && ! version_ge "$meson_version" 1.8.1; then +if test "$rust" != disabled && ! 
version_ge "$meson_version" 1.9.0; then if test "$rust" = enabled; then - error_exit "Rust support needs Meson 1.8.1 or newer" + $mkvenv ensuregroup --dir "${source_path}/python/wheels" \ + ${source_path}/pythondeps.toml meson-rust || exit 1 + else + echo "Rust needs Meson 1.9.0, disabling" 2>&1 + rust=disabled fi - echo "Rust needs Meson 1.8.1, disabling" 2>&1 - rust=disabled fi if test "$rust" != disabled && has "$rustc" && $rustc -vV > "${TMPDIR1}/${TMPB}.out"; then rust_host_triple=$(sed -n 's/^host: //p' "${TMPDIR1}/${TMPB}.out") @@ -1203,8 +1200,9 @@ fi if test "$rust" != disabled && test -z "$rust_target_triple"; then # arch and os generally matches between meson and rust rust_arch=$host_arch + # default to host vendor + rust_vendor=$(echo "$rust_host_triple" | cut -d'-' -f2) rust_os=$host_os - rust_machine=unknown rust_osvariant= # tweak rust_os if needed; also, machine and variant depend on the OS @@ -1212,7 +1210,7 @@ if test "$rust" != disabled && test -z "$rust_target_triple"; then case "$host_os" in darwin) # e.g. aarch64-apple-darwin - rust_machine=apple + rust_vendor=apple ;; linux) @@ -1260,13 +1258,13 @@ EOF ;; sunos) - rust_machine=pc + rust_vendor=pc rust_os=solaris ;; windows) # e.g. aarch64-pc-windows-gnullvm, x86_64-pc-windows-gnu (MSVC not supported) - rust_machine=pc + rust_vendor=pc if test "$host_arch" = aarch64; then rust_osvariant=gnullvm else @@ -1297,7 +1295,7 @@ EOF sparc64) if test "$rust_os" = solaris; then rust_arch=sparcv9 - rust_machine=sun + rust_vendor=sun fi ;; @@ -1311,7 +1309,7 @@ EOF # e.g. 
aarch64-linux-android rust_target_triple=$rust_arch-$rust_os-$rust_osvariant else - rust_target_triple=$rust_arch-$rust_machine-$rust_os${rust_osvariant:+-$rust_osvariant} + rust_target_triple=$rust_arch-$rust_vendor-$rust_os${rust_osvariant:+-$rust_osvariant} fi fi @@ -1464,7 +1462,7 @@ probe_target_compiler() { container_image=debian-all-test-cross container_cross_prefix=mips64-linux-gnuabi64- ;; - ppc|ppc64|ppc64le) + ppc64|ppc64le) container_image=debian-all-test-cross container_cross_prefix=powerpc${target_arch#ppc}-linux-gnu- ;; @@ -1801,6 +1799,7 @@ echo "SRC_PATH=$source_path" >> tests/tcg/$config_host_mak if test "$plugins" = "yes" ; then echo "CONFIG_PLUGIN=y" >> tests/tcg/$config_host_mak fi +echo "PYTHON=$python" >> tests/tcg/$config_host_mak tcg_tests_targets= for target in $target_list; do @@ -1837,6 +1836,12 @@ for target in $target_list; do echo "GDB=$gdb_bin" >> $config_target_mak fi + if test "${gdb_arches#*$arch}" != "$gdb_arches" && version_ge $gdb_version 14.1; then + echo "GDB_HAS_SME_TILES=y" >> $config_target_mak + else + echo "GDB_HAS_SME_TILES=n" >> $config_target_mak + fi + if test "${gdb_arches#*aarch64}" != "$gdb_arches" && version_ge $gdb_version 15.1; then echo "GDB_HAS_MTE=y" >> $config_target_mak fi @@ -1870,6 +1875,13 @@ if test "$skip_meson" = no; then eval "c=\$devices_${a}" echo "${a}-softmmu = '$c'" >> $cross done + if test "$rust" != disabled; then + if test "$cross_compile" = "yes"; then + . 
"$source_path/scripts/rust-to-clang-target.sh" + clang_target=$(rust_to_clang_target "$rust_target_triple") + echo "bindgen_clang_arguments = [$(meson_quote --target="$clang_target")]" >> $cross + fi + fi echo "[built-in options]" >> $cross echo "c_args = [$(meson_quote $CFLAGS $EXTRA_CFLAGS)]" >> $cross @@ -1950,7 +1962,7 @@ if test "$skip_meson" = no; then echo "[binaries]" >> $native echo "c = [$(meson_quote $host_cc)]" >> $native if test "$rust" != disabled; then - echo "rust = [$(meson_quote $rustc)]" >> $cross + echo "rust = [$(meson_quote $rustc)]" >> $native fi mv $native config-meson.native meson_option_add --native-file @@ -1976,6 +1988,8 @@ if test "$skip_meson" = no; then test -n "${LIB_FUZZING_ENGINE+xxx}" && meson_option_add "-Dfuzzing_engine=$LIB_FUZZING_ENGINE" test "$plugins" = yes && meson_option_add "-Dplugins=true" test "$tcg" != enabled && meson_option_add "-Dtcg=$tcg" + test -n "$gdb_bin" && meson_option_add "-Dgdb=$gdb_bin" + run_meson() { NINJA=$ninja $meson setup "$@" "$PWD" "$source_path" } diff --git a/contrib/elf2dmp/download.c b/contrib/elf2dmp/download.c index 21306b3fd4c44..fa8da0f9a2d03 100644 --- a/contrib/elf2dmp/download.c +++ b/contrib/elf2dmp/download.c @@ -27,8 +27,8 @@ bool download_url(const char *name, const char *url) if (curl_easy_setopt(curl, CURLOPT_URL, url) != CURLE_OK || curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, NULL) != CURLE_OK || curl_easy_setopt(curl, CURLOPT_WRITEDATA, file) != CURLE_OK - || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1) != CURLE_OK - || curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0) != CURLE_OK + || curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L) != CURLE_OK + || curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L) != CURLE_OK || curl_easy_perform(curl) != CURLE_OK) { unlink(name); fclose(file); diff --git a/contrib/ivshmem-server/ivshmem-server.c b/contrib/ivshmem-server/ivshmem-server.c index 2f3c7320a6781..13cb828174e80 100644 --- a/contrib/ivshmem-server/ivshmem-server.c +++ 
b/contrib/ivshmem-server/ivshmem-server.c @@ -6,6 +6,7 @@ * top-level directory. */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "qemu/host-utils.h" #include "qemu/sockets.h" @@ -135,6 +136,7 @@ ivshmem_server_handle_new_conn(IvshmemServer *server) socklen_t unaddr_len; int newfd; unsigned i; + Error *local_err = NULL; /* accept the incoming connection */ unaddr_len = sizeof(unaddr); @@ -146,9 +148,14 @@ ivshmem_server_handle_new_conn(IvshmemServer *server) return -1; } - qemu_socket_set_nonblock(newfd); IVSHMEM_SERVER_DEBUG(server, "accept()=%d\n", newfd); + if (!qemu_set_blocking(newfd, false, &local_err)) { + error_report_err(local_err); + close(newfd); + return -1; + } + /* allocate new structure for this peer */ peer = g_malloc0(sizeof(*peer)); peer->sock_fd = newfd; diff --git a/contrib/plugins/execlog.c b/contrib/plugins/execlog.c index 06ec76d6e9a35..811f32031994c 100644 --- a/contrib/plugins/execlog.c +++ b/contrib/plugins/execlog.c @@ -95,6 +95,7 @@ static void insn_check_regs(CPU *cpu) g_byte_array_set_size(reg->new, 0); sz = qemu_plugin_read_register(reg->handle, reg->new); + g_assert(sz > 0); g_assert(sz == reg->last->len); if (memcmp(reg->last->data, reg->new->data, sz)) { diff --git a/contrib/plugins/meson.build b/contrib/plugins/meson.build index 1876bc78438ab..7eb3629c95dd9 100644 --- a/contrib/plugins/meson.build +++ b/contrib/plugins/meson.build @@ -1,5 +1,6 @@ contrib_plugins = ['bbv', 'cache', 'cflow', 'drcov', 'execlog', 'hotblocks', - 'hotpages', 'howvec', 'hwprofile', 'ips', 'stoptrigger'] + 'hotpages', 'howvec', 'hwprofile', 'ips', 'stoptrigger', + 'uftrace'] if host_os != 'windows' # lockstep uses socket.h contrib_plugins += 'lockstep' diff --git a/contrib/plugins/uftrace.c b/contrib/plugins/uftrace.c new file mode 100644 index 0000000000000..b7d6124d2f517 --- /dev/null +++ b/contrib/plugins/uftrace.c @@ -0,0 +1,878 @@ +/* + * Copyright (C) 2025, Pierrick Bouvier + * + * Generates a trace compatible with uftrace (similar to 
uftrace record). + * https://github.com/namhyung/uftrace + * + * See docs/about/emulation.rst|Uftrace for details and examples. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include +#include +#include +#include +#include +#include + +#define MiB (INT64_C(1) << 20) +#define NANOSECONDS_PER_SECOND 1000000000LL +#define TRACE_FLUSH_SIZE (32 * MiB) +#define TRACE_ID_SCALE 100 + +QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION; + +typedef struct { + GArray *s; +} Callstack; + +typedef struct { + uint64_t pc; + uint64_t frame_pointer; +} CallstackEntry; + +typedef struct { + GArray *t; + GString *path; + GString *name; + uint32_t id; +} Trace; + +typedef struct Cpu Cpu; + +typedef struct { + void (*init)(Cpu *cpu); + void (*end)(Cpu *cpu); + uint64_t (*get_frame_pointer)(Cpu *cpu); + uint8_t (*get_privilege_level)(Cpu *cpu); + uint8_t (*num_privilege_levels)(void); + const char *(*get_privilege_level_name)(uint8_t pl); + bool (*does_insn_modify_frame_pointer)(const char *disas); +} CpuOps; + +typedef struct Cpu { + Trace *trace; + Callstack *cs; + uint8_t privilege_level; + GArray *traces; /* Trace *traces [] */ + GByteArray *buf; + CpuOps ops; + void *arch; +} Cpu; + +typedef enum { + AARCH64_EL0_SECURE, + AARCH64_EL0_NONSECURE, + AARCH64_EL0_REALM, + AARCH64_EL1_SECURE, + AARCH64_EL1_NONSECURE, + AARCH64_EL1_REALM, + AARCH64_EL2_SECURE, + AARCH64_EL2_NONSECURE, + AARCH64_EL2_REALM, + AARCH64_EL3, + AARCH64_PRIVILEGE_LEVEL_MAX, +} Aarch64PrivilegeLevel; + +typedef struct { + struct qemu_plugin_register *reg_fp; + struct qemu_plugin_register *reg_cpsr; + struct qemu_plugin_register *reg_scr_el3; +} Aarch64Cpu; + +typedef enum { + X64_RING0, + X64_RING1, + X64_RING2, + X64_RING3, + X64_REAL_MODE, + X64_PRIVILEGE_LEVEL_MAX, +} X64PrivilegeLevel; + +typedef struct { + struct qemu_plugin_register *reg_rbp; + struct qemu_plugin_register *reg_cs; + struct qemu_plugin_register *reg_cr0; +} X64Cpu; + +typedef struct { + uint64_t 
timestamp; + uint64_t data; +} UftraceEntry; + +typedef enum { + UFTRACE_ENTRY, + UFTRACE_EXIT, + UFTRACE_LOST, + UFTRACE_EVENT, +} UftraceRecordType; + +static struct qemu_plugin_scoreboard *score; +static bool trace_privilege_level; +static CpuOps arch_ops; + +static uint64_t gettime_ns(void) +{ +#ifdef _WIN32 + /* + * On Windows, timespec_get is available only with UCRT, but not with + * MinGW64 environment. Simplify by using only gettimeofday on this + * platform. This may result in a precision loss. + */ + struct timeval tv; + gettimeofday(&tv, NULL); + uint64_t now_ns = tv.tv_sec * NANOSECONDS_PER_SECOND + tv.tv_usec * 1000; +#else + /* We need nanosecond precision for short lived functions. */ + struct timespec ts; + timespec_get(&ts, TIME_UTC); + uint64_t now_ns = ts.tv_sec * NANOSECONDS_PER_SECOND + ts.tv_nsec; +#endif + return now_ns; +} + +static void uftrace_write_map(bool system_emulation) +{ + const char *path = "./uftrace.data/sid-0.map"; + + if (system_emulation && access(path, F_OK) == 0) { + /* do not erase existing map in system emulation, as a custom one might + * already have been generated by uftrace_symbols.py */ + return; + } + + FILE *sid_map = fopen(path, "w"); + g_assert(sid_map); + + if (system_emulation) { + fprintf(sid_map, + "# map stack on highest address possible, to prevent uftrace\n" + "# from considering any kernel address\n"); + fprintf(sid_map, + "ffffffffffff-ffffffffffff rw-p 00000000 00:00 0 [stack]\n"); + } else { + /* in user mode, copy /proc/self/maps instead */ + FILE *self_map = fopen("/proc/self/maps", "r"); + g_assert(self_map); + for (;;) { + int c = fgetc(self_map); + if (c == EOF) { + break; + } + fputc(c, sid_map); + } + fclose(self_map); + } + fclose(sid_map); +} + +static void uftrace_write_task(const GArray *traces) +{ + FILE *task = fopen("./uftrace.data/task.txt", "w"); + g_assert(task); + for (int i = 0; i < traces->len; ++i) { + Trace *t = g_array_index(traces, Trace*, i); + fprintf(task, "SESS 
timestamp=0.0 pid=%"PRIu32" sid=0 exename=\"%s\"\n", + t->id, t->name->str); + fprintf(task, "TASK timestamp=0.0 tid=%"PRIu32" pid=%"PRIu32"\n", + t->id, t->id); + } + fclose(task); +} + +static void uftrace_write_info(const GArray *traces) +{ + g_autoptr(GString) taskinfo_tids = g_string_new("taskinfo:tids="); + for (int i = 0; i < traces->len; ++i) { + Trace *t = g_array_index(traces, Trace*, i); + const char *delim = i > 0 ? "," : ""; + g_string_append_printf(taskinfo_tids, "%s%"PRIu32, delim, t->id); + } + + g_autoptr(GString) taskinfo_nr_tid = g_string_new("taskinfo:nr_tid="); + g_string_append_printf(taskinfo_nr_tid, "%d", traces->len); + + FILE *info = fopen("./uftrace.data/info", "w"); + g_assert(info); + /* + * $ uftrace dump --debug + * uftrace file header: magic = 4674726163652100 + * uftrace file header: version = 4 + * uftrace file header: header size = 40 + * uftrace file header: endian = 1 (little) + * uftrace file header: class = 2 (64 bit) + * uftrace file header: features = 0x1263 (PLTHOOK | ... + * uftrace file header: info = 0x7bff (EXE_NAME | ... 
+ * <0000000000000000>: 46 74 72 61 63 65 21 00 04 00 00 00 28 00 01 02 + * <0000000000000010>: 63 12 00 00 00 00 00 00 ff 7b 00 00 00 00 00 00 + * <0000000000000020>: 00 04 00 00 00 00 00 00 + */ + const uint8_t header[] = {0x46, 0x74, 0x72, 0x61, 0x63, 0x65, 0x21, 0x00, + 0x04, 0x00, 0x00, 0x00, 0x28, 0x00, 0x01, 0x02, + 0x63, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0xff, 0x7b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; + size_t wrote = fwrite(header, sizeof(header), 1, info); + g_assert(wrote == 1); + const char *info_data[] = { + "exename:", + "build_id:0000000000000000000000000000000000000000", + "exit_status:", + "cmdline:", + "cpuinfo:lines=2", + "cpuinfo:nr_cpus=", + "cpuinfo:desc=", + "meminfo:", + "osinfo:lines=3", + "osinfo:kernel=", + "osinfo:hostname=", + "osinfo:distro=", + "taskinfo:lines=2", + taskinfo_nr_tid->str, + taskinfo_tids->str, + "usageinfo:lines=6", + "usageinfo:systime=", + "usageinfo:usrtime=", + "usageinfo:ctxsw=", + "usageinfo:maxrss=", + "usageinfo:pagefault=", + "usageinfo:iops=", + "loadinfo:", + "record_date:", + "elapsed_time:", + "pattern_type:regex", + "uftrace_version:", + "utc_offset:", + 0}; + const char **info_data_it = info_data; + while (*(info_data_it)) { + fprintf(info, "%s\n", *info_data_it); + ++info_data_it; + } + fclose(info); +} + +static Callstack *callstack_new(void) +{ + Callstack *cs = g_new0(Callstack, 1); + cs->s = g_array_new(false, false, sizeof(CallstackEntry)); + return cs; +} + +static void callstack_free(Callstack *cs) +{ + g_array_free(cs->s, true); + cs->s = NULL; + g_free(cs); +} + +static size_t callstack_depth(const Callstack *cs) +{ + return cs->s->len; +} + +static size_t callstack_empty(const Callstack *cs) +{ + return callstack_depth(cs) == 0; +} + +static void callstack_clear(Callstack *cs) +{ + g_array_set_size(cs->s, 0); +} + +static const CallstackEntry *callstack_at(const Callstack *cs, size_t depth) +{ + g_assert(depth > 0); + g_assert(depth 
<= callstack_depth(cs)); + return &g_array_index(cs->s, CallstackEntry, depth - 1); +} + +static CallstackEntry callstack_top(const Callstack *cs) +{ + if (callstack_depth(cs) >= 1) { + return *callstack_at(cs, callstack_depth(cs)); + } + return (CallstackEntry){}; +} + +static CallstackEntry callstack_caller(const Callstack *cs) +{ + if (callstack_depth(cs) >= 2) { + return *callstack_at(cs, callstack_depth(cs) - 1); + } + return (CallstackEntry){}; +} + +static void callstack_push(Callstack *cs, CallstackEntry e) +{ + g_array_append_val(cs->s, e); +} + +static CallstackEntry callstack_pop(Callstack *cs) +{ + g_assert(!callstack_empty(cs)); + CallstackEntry e = callstack_top(cs); + g_array_set_size(cs->s, callstack_depth(cs) - 1); + return e; +} + +static Trace *trace_new(uint32_t id, GString *name) +{ + Trace *t = g_new0(Trace, 1); + t->t = g_array_new(false, false, sizeof(UftraceEntry)); + t->path = g_string_new(NULL); + g_string_append_printf(t->path, "./uftrace.data/%"PRIu32".dat", id); + t->name = g_string_new(name->str); + t->id = id; + return t; +} + +static void trace_free(Trace *t) +{ + g_assert(t->t->len == 0); + g_array_free(t->t, true); + t->t = NULL; + g_string_free(t->path, true); + t->path = NULL; + g_string_free(t->name, true); + t->name = NULL; + g_free(t); +} + +static void trace_flush(Trace *t, bool append) +{ + int create_dir = g_mkdir_with_parents("./uftrace.data", + S_IRWXU | S_IRWXG | S_IRWXO); + g_assert(create_dir == 0); + FILE *dat = fopen(t->path->str, append ? 
"a" : "w"); + g_assert(dat); + GArray *data = t->t; + if (data->len) { + size_t wrote = fwrite(data->data, sizeof(UftraceEntry), data->len, dat); + g_assert(wrote == data->len); + } + fclose(dat); + g_array_set_size(data, 0); +} + +static void trace_add_entry(Trace *t, uint64_t timestamp, uint64_t pc, + size_t depth, UftraceRecordType type) +{ + /* https://github.com/namhyung/uftrace/blob/v0.18/libmcount/record.c#L909 */ + const uint64_t record_magic = 0x5; + uint64_t data = type | (record_magic << 3); + data += depth << 6; + data += pc << 16; + UftraceEntry e = {.timestamp = timestamp, .data = data}; + g_array_append_val(t->t, e); + if (t->t->len * sizeof(UftraceEntry) > TRACE_FLUSH_SIZE) { + trace_flush(t, true); + } +} + +static void trace_enter_function(Trace *t, uint64_t timestamp, + uint64_t pc, size_t depth) +{ + trace_add_entry(t, timestamp, pc, depth, UFTRACE_ENTRY); +} + +static void trace_exit_function(Trace *t, uint64_t timestamp, + uint64_t pc, size_t depth) +{ + trace_add_entry(t, timestamp, pc, depth, UFTRACE_EXIT); +} + +static void trace_enter_stack(Trace *t, Callstack *cs, uint64_t timestamp) +{ + for (size_t depth = 1; depth <= callstack_depth(cs); ++depth) { + trace_enter_function(t, timestamp, callstack_at(cs, depth)->pc, depth); + } +} + +static void trace_exit_stack(Trace *t, Callstack *cs, uint64_t timestamp) +{ + for (size_t depth = callstack_depth(cs); depth > 0; --depth) { + trace_exit_function(t, timestamp, callstack_at(cs, depth)->pc, depth); + } +} + +static uint64_t cpu_read_register64(Cpu *cpu, struct qemu_plugin_register *reg) +{ + GByteArray *buf = cpu->buf; + g_byte_array_set_size(buf, 0); + size_t sz = qemu_plugin_read_register(reg, buf); + g_assert(sz == 8); + g_assert(buf->len == 8); + return *((uint64_t *) buf->data); +} + +static uint32_t cpu_read_register32(Cpu *cpu, struct qemu_plugin_register *reg) +{ + GByteArray *buf = cpu->buf; + g_byte_array_set_size(buf, 0); + size_t sz = qemu_plugin_read_register(reg, buf); + 
g_assert(sz == 4); + g_assert(buf->len == 4); + return *((uint32_t *) buf->data); +} + +static uint64_t cpu_read_memory64(Cpu *cpu, uint64_t addr) +{ + g_assert(addr); + GByteArray *buf = cpu->buf; + g_byte_array_set_size(buf, 0); + bool read = qemu_plugin_read_memory_vaddr(addr, buf, 8); + if (!read) { + return 0; + } + g_assert(buf->len == 8); + return *((uint64_t *) buf->data); +} + +static void cpu_unwind_stack(Cpu *cpu, uint64_t frame_pointer, uint64_t pc) +{ + g_assert(callstack_empty(cpu->cs)); + + #define UNWIND_STACK_MAX_DEPTH 1024 + CallstackEntry unwind[UNWIND_STACK_MAX_DEPTH]; + size_t depth = 0; + do { + /* check we don't have an infinite stack */ + for (size_t i = 0; i < depth; ++i) { + if (frame_pointer == unwind[i].frame_pointer) { + break; + } + } + CallstackEntry e = {.frame_pointer = frame_pointer, .pc = pc}; + unwind[depth] = e; + depth++; + if (frame_pointer) { + frame_pointer = cpu_read_memory64(cpu, frame_pointer); + } + pc = cpu_read_memory64(cpu, frame_pointer + 8); /* read previous lr */ + } while (frame_pointer && pc && depth < UNWIND_STACK_MAX_DEPTH); + #undef UNWIND_STACK_MAX_DEPTH + + /* push it from bottom to top */ + while (depth) { + callstack_push(cpu->cs, unwind[depth - 1]); + --depth; + } +} + +static struct qemu_plugin_register *plugin_find_register(const char *name) +{ + g_autoptr(GArray) regs = qemu_plugin_get_registers(); + for (int i = 0; i < regs->len; ++i) { + qemu_plugin_reg_descriptor *reg; + reg = &g_array_index(regs, qemu_plugin_reg_descriptor, i); + if (!strcmp(reg->name, name)) { + return reg->handle; + } + } + return NULL; +} + +static uint8_t aarch64_num_privilege_levels(void) +{ + return AARCH64_PRIVILEGE_LEVEL_MAX; +} + +static const char *aarch64_get_privilege_level_name(uint8_t pl) +{ + switch (pl) { + case AARCH64_EL0_SECURE: return "S-EL0"; + case AARCH64_EL0_NONSECURE: return "NS-EL0"; + case AARCH64_EL0_REALM: return "R-EL0"; + case AARCH64_EL1_SECURE: return "S-EL1"; + case AARCH64_EL1_NONSECURE: return 
"NS-EL1"; + case AARCH64_EL1_REALM: return "R-EL1"; + case AARCH64_EL2_SECURE: return "S-EL2"; + case AARCH64_EL2_NONSECURE: return "NS-EL2"; + case AARCH64_EL2_REALM: return "R-EL2"; + case AARCH64_EL3: return "EL3"; + default: + g_assert_not_reached(); + } +} + +static uint8_t aarch64_get_privilege_level(Cpu *cpu_) +{ + Aarch64Cpu *cpu = cpu_->arch; + /* + * QEMU gdbstub does not provide access to CurrentEL, + * so we use CPSR instead. + */ + uint8_t el = cpu_read_register32(cpu_, cpu->reg_cpsr) >> 2 & 0b11; + + if (el == 3) { + return AARCH64_EL3; + } + + uint8_t ss = AARCH64_EL0_SECURE; + if (!cpu->reg_scr_el3) { + ss = AARCH64_EL0_NONSECURE; + } + uint64_t scr_el3 = cpu_read_register64(cpu_, cpu->reg_scr_el3); + uint64_t ns = (scr_el3 >> 0) & 0b1; + uint64_t nse = (scr_el3 >> 62) & 0b1; + switch (nse << 1 | ns) { + case 0b00: + ss = AARCH64_EL0_SECURE; + break; + case 0b01: + ss = AARCH64_EL0_NONSECURE; + break; + case 0b11: + ss = AARCH64_EL0_REALM; + break; + default: + g_assert_not_reached(); + } + + const uint8_t num_ss = 3; + Aarch64PrivilegeLevel pl = el * num_ss + ss; + return pl; +} + +static uint64_t aarch64_get_frame_pointer(Cpu *cpu_) +{ + Aarch64Cpu *cpu = cpu_->arch; + return cpu_read_register64(cpu_, cpu->reg_fp); +} + +static void aarch64_init(Cpu *cpu_) +{ + Aarch64Cpu *cpu = g_new0(Aarch64Cpu, 1); + cpu_->arch = cpu; + cpu->reg_fp = plugin_find_register("x29"); + if (!cpu->reg_fp) { + fprintf(stderr, "uftrace plugin: frame pointer register (x29) is not " + "available. Please use an AArch64 cpu (or -cpu max).\n"); + g_abort(); + } + cpu->reg_cpsr = plugin_find_register("cpsr"); + g_assert(cpu->reg_cpsr); + cpu->reg_scr_el3 = plugin_find_register("SCR_EL3"); + /* scr_el3 is optional */ +} + +static void aarch64_end(Cpu *cpu) +{ + g_free(cpu->arch); +} + +static bool aarch64_does_insn_modify_frame_pointer(const char *disas) +{ + /* + * Check if current instruction concerns fp register "x29". 
+ * We add a prefix space to make sure we don't match addresses dump + * in disassembly. + */ + return strstr(disas, " x29"); +} + +static CpuOps aarch64_ops = { + .init = aarch64_init, + .end = aarch64_end, + .get_frame_pointer = aarch64_get_frame_pointer, + .get_privilege_level = aarch64_get_privilege_level, + .num_privilege_levels = aarch64_num_privilege_levels, + .get_privilege_level_name = aarch64_get_privilege_level_name, + .does_insn_modify_frame_pointer = aarch64_does_insn_modify_frame_pointer, +}; + +static uint8_t x64_num_privilege_levels(void) +{ + return X64_PRIVILEGE_LEVEL_MAX; +} + +static const char *x64_get_privilege_level_name(uint8_t pl) +{ + switch (pl) { + case X64_RING0: return "Ring0"; + case X64_RING1: return "Ring1"; + case X64_RING2: return "Ring2"; + case X64_RING3: return "Ring3"; + case X64_REAL_MODE: return "RealMode"; + default: + g_assert_not_reached(); + } +} + +static uint8_t x64_get_privilege_level(Cpu *cpu_) +{ + X64Cpu *cpu = cpu_->arch; + uint64_t cr0 = cpu_read_register64(cpu_, cpu->reg_cr0); + uint64_t protected_mode = (cr0 >> 0) & 0b1; + if (!protected_mode) { + return X64_REAL_MODE; + } + uint32_t cs = cpu_read_register32(cpu_, cpu->reg_cs); + uint32_t ring_level = (cs >> 0) & 0b11; + return ring_level; +} + +static uint64_t x64_get_frame_pointer(Cpu *cpu_) +{ + X64Cpu *cpu = cpu_->arch; + return cpu_read_register64(cpu_, cpu->reg_rbp); +} + +static void x64_init(Cpu *cpu_) +{ + X64Cpu *cpu = g_new0(X64Cpu, 1); + cpu_->arch = cpu; + cpu->reg_rbp = plugin_find_register("rbp"); + g_assert(cpu->reg_rbp); + cpu->reg_cs = plugin_find_register("cs"); + g_assert(cpu->reg_cs); + cpu->reg_cr0 = plugin_find_register("cr0"); + g_assert(cpu->reg_cr0); +} + +static void x64_end(Cpu *cpu) +{ + g_free(cpu->arch); +} + +static bool x64_does_insn_modify_frame_pointer(const char *disas) +{ + return strstr(disas, "rbp"); +} + +static CpuOps x64_ops = { + .init = x64_init, + .end = x64_end, + .get_frame_pointer = x64_get_frame_pointer, + 
.get_privilege_level = x64_get_privilege_level, + .num_privilege_levels = x64_num_privilege_levels, + .get_privilege_level_name = x64_get_privilege_level_name, + .does_insn_modify_frame_pointer = x64_does_insn_modify_frame_pointer, +}; + +static void track_privilege_change(unsigned int cpu_index, void *udata) +{ + Cpu *cpu = qemu_plugin_scoreboard_find(score, cpu_index); + uint8_t new_pl = cpu->ops.get_privilege_level(cpu); + + if (new_pl == cpu->privilege_level) { + return; + } + + uint64_t pc = (uintptr_t) udata; + uint64_t timestamp = gettime_ns(); + + trace_exit_stack(cpu->trace, cpu->cs, timestamp); + callstack_clear(cpu->cs); + + cpu->privilege_level = new_pl; + cpu->trace = g_array_index(cpu->traces, Trace*, new_pl); + + cpu_unwind_stack(cpu, cpu->ops.get_frame_pointer(cpu), pc); + trace_enter_stack(cpu->trace, cpu->cs, timestamp); +} + +static void track_callstack(unsigned int cpu_index, void *udata) +{ + uint64_t pc = (uintptr_t) udata; + Cpu *cpu = qemu_plugin_scoreboard_find(score, cpu_index); + uint64_t timestamp = gettime_ns(); + Callstack *cs = cpu->cs; + Trace *t = cpu->trace; + + uint64_t fp = cpu->ops.get_frame_pointer(cpu); + if (!fp && callstack_empty(cs)) { + /* + * We simply push current pc. Note that we won't detect symbol change as + * long as a proper call does not happen. + */ + callstack_push(cs, (CallstackEntry){.frame_pointer = fp, .pc = pc}); + trace_enter_function(t, timestamp, pc, callstack_depth(cs)); + return; + } + + CallstackEntry top = callstack_top(cs); + if (fp == top.frame_pointer) { + /* same function */ + return; + } + + CallstackEntry caller = callstack_caller(cs); + if (fp == caller.frame_pointer) { + /* return */ + CallstackEntry e = callstack_pop(cs); + trace_exit_function(t, timestamp, e.pc, callstack_depth(cs)); + return; + } + + uint64_t caller_fp = fp ? 
cpu_read_memory64(cpu, fp) : 0; + if (caller_fp == top.frame_pointer) { + /* call */ + callstack_push(cs, (CallstackEntry){.frame_pointer = fp, .pc = pc}); + trace_enter_function(t, timestamp, pc, callstack_depth(cs)); + return; + } + + /* discontinuity, exit current stack and unwind new one */ + trace_exit_stack(t, cs, timestamp); + callstack_clear(cs); + + cpu_unwind_stack(cpu, fp, pc); + trace_enter_stack(t, cs, timestamp); +} + +static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb) +{ + size_t n_insns = qemu_plugin_tb_n_insns(tb); + uintptr_t tb_pc = qemu_plugin_tb_vaddr(tb); + + if (trace_privilege_level) { + qemu_plugin_register_vcpu_tb_exec_cb(tb, track_privilege_change, + QEMU_PLUGIN_CB_R_REGS, + (void *) tb_pc); + } + + /* + * Callbacks and inline instrumentation are inserted before an instruction. + * Thus, to see instruction effect, we need to wait for next one. + * Potentially, the last instruction of a block could modify the frame + * pointer. Thus, we need to always instrument first instruction in a tb. 
+ */ + bool instrument_insn = true; + for (size_t i = 0; i < n_insns; i++) { + struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i); + + if (instrument_insn) { + uintptr_t pc = qemu_plugin_insn_vaddr(insn); + qemu_plugin_register_vcpu_insn_exec_cb(insn, track_callstack, + QEMU_PLUGIN_CB_R_REGS, + (void *) pc); + instrument_insn = false; + } + + char *disas = qemu_plugin_insn_disas(insn); + if (arch_ops.does_insn_modify_frame_pointer(disas)) { + instrument_insn = true; + } + } +} + +static void vcpu_init(qemu_plugin_id_t id, unsigned int vcpu_index) +{ + Cpu *cpu = qemu_plugin_scoreboard_find(score, vcpu_index); + cpu->ops = arch_ops; + + cpu->ops.init(cpu); + cpu->buf = g_byte_array_new(); + cpu->traces = g_array_new(0, 0, sizeof(Trace *)); + + g_assert(vcpu_index < UINT32_MAX / TRACE_ID_SCALE); + g_assert(cpu->ops.num_privilege_levels() < TRACE_ID_SCALE); + /* trace_id is: cpu_number * TRACE_ID_SCALE + privilege_level */ + uint32_t trace_id = (vcpu_index + 1) * TRACE_ID_SCALE; + + if (trace_privilege_level) { + for (uint8_t pl = 0; pl < cpu->ops.num_privilege_levels(); ++pl) { + g_autoptr(GString) trace_name = g_string_new(NULL); + g_string_append_printf(trace_name, "cpu%u %s", vcpu_index, + cpu->ops.get_privilege_level_name(pl)); + Trace *t = trace_new(trace_id + pl, trace_name); + g_array_append_val(cpu->traces, t); + } + } else { + g_autoptr(GString) trace_name = g_string_new(NULL); + g_string_append_printf(trace_name, "cpu%u", vcpu_index); + Trace *t = trace_new(trace_id, trace_name); + g_array_append_val(cpu->traces, t); + } + + for (size_t i = 0; i < cpu->traces->len; ++i) { + /* create/truncate trace files */ + Trace *t = g_array_index(cpu->traces, Trace*, i); + trace_flush(t, false); + } + + cpu->cs = callstack_new(); + cpu->trace = g_array_index(cpu->traces, Trace*, cpu->privilege_level); +} + +static void vcpu_end(unsigned int vcpu_index) +{ + Cpu *cpu = qemu_plugin_scoreboard_find(score, vcpu_index); + g_byte_array_free(cpu->buf, true); + + 
for (size_t i = 0; i < cpu->traces->len; ++i) { + Trace *t = g_array_index(cpu->traces, Trace*, i); + trace_free(t); + } + + g_array_free(cpu->traces, true); + callstack_free(cpu->cs); + memset(cpu, 0, sizeof(Cpu)); +} + +static void at_exit(qemu_plugin_id_t id, void *data) +{ + bool system_emulation = (bool) data; + g_autoptr(GArray) traces = g_array_new(0, 0, sizeof(Trace *)); + + for (size_t i = 0; i < qemu_plugin_num_vcpus(); ++i) { + Cpu *cpu = qemu_plugin_scoreboard_find(score, i); + for (size_t j = 0; j < cpu->traces->len; ++j) { + Trace *t = g_array_index(cpu->traces, Trace*, j); + trace_flush(t, true); + g_array_append_val(traces, t); + } + } + + uftrace_write_map(system_emulation); + uftrace_write_info(traces); + uftrace_write_task(traces); + + for (size_t i = 0; i < qemu_plugin_num_vcpus(); ++i) { + vcpu_end(i); + } + + qemu_plugin_scoreboard_free(score); +} + +QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, + const qemu_info_t *info, + int argc, char **argv) +{ + for (int i = 0; i < argc; i++) { + char *opt = argv[i]; + g_auto(GStrv) tokens = g_strsplit(opt, "=", 2); + if (g_strcmp0(tokens[0], "trace-privilege-level") == 0) { + if (!qemu_plugin_bool_parse(tokens[0], tokens[1], + &trace_privilege_level)) { + fprintf(stderr, "boolean argument parsing failed: %s\n", opt); + return -1; + } + } else { + fprintf(stderr, "option parsing failed: %s\n", opt); + return -1; + } + } + + if (!strcmp(info->target_name, "aarch64")) { + arch_ops = aarch64_ops; + } else if (!strcmp(info->target_name, "x86_64")) { + arch_ops = x64_ops; + } else { + fprintf(stderr, "plugin uftrace: %s target is not supported\n", + info->target_name); + return 1; + } + + score = qemu_plugin_scoreboard_new(sizeof(Cpu)); + qemu_plugin_register_vcpu_init_cb(id, vcpu_init); + qemu_plugin_register_atexit_cb(id, at_exit, (void *) info->system_emulation); + qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans); + + return 0; +} diff --git a/contrib/plugins/uftrace_symbols.py 
b/contrib/plugins/uftrace_symbols.py new file mode 100755 index 0000000000000..45fb79c7a58c9 --- /dev/null +++ b/contrib/plugins/uftrace_symbols.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# Create symbols, debug and mapping files for uftrace. +# +# Copyright 2025 Linaro Ltd +# Author: Pierrick Bouvier +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import argparse +import os +import subprocess + +class Symbol: + def __init__(self, name, addr, size): + self.name = name + # clamp addr to 48 bits, like uftrace entries + self.addr = addr & 0xffffffffffff + self.full_addr = addr + self.size = size + + def set_loc(self, file, line): + self.file = file + self.line = line + +def get_symbols(elf_file): + symbols=[] + try: + out = subprocess.check_output(['nm', '--print-size', elf_file], + stderr=subprocess.STDOUT, + text=True) + except subprocess.CalledProcessError as e: + print(e.output) + raise + out = out.strip().split('\n') + for line in out: + info = line.split(' ') + if len(info) == 3: + # missing size information + continue + addr, size, type, name = info + # add only symbols from .text section + if type.lower() != 't': + continue + addr = int(addr, 16) + size = int(size, 16) + symbols.append(Symbol(name, addr, size)) + symbols.sort(key = lambda x: x.addr) + return symbols + +def find_symbols_locations(elf_file, symbols): + addresses = '\n'.join([hex(x.full_addr) for x in symbols]) + try: + out = subprocess.check_output(['addr2line', '--exe', elf_file], + stderr=subprocess.STDOUT, + input=addresses, text=True) + except subprocess.CalledProcessError as e: + print(e.output) + raise + out = out.strip().split('\n') + assert len(out) == len(symbols) + for i in range(len(symbols)): + s = symbols[i] + file, line = out[i].split(':') + # addr2line may return 'line (discriminator [0-9]+)' sometimes, + # remove this to keep only line number. 
+ line = line.split(' ')[0] + s.set_loc(file, line) + +class BinaryFile: + def __init__(self, path, map_offset): + self.fullpath = os.path.realpath(path) + self.map_offset = map_offset + self.symbols = get_symbols(self.fullpath) + find_symbols_locations(self.fullpath, self.symbols) + + def path(self): + return self.fullpath + + def addr_start(self): + return self.map_offset + + def addr_end(self): + last_sym = self.symbols[-1] + return last_sym.addr + last_sym.size + self.map_offset + + def generate_symbol_file(self, prefix_symbols): + binary_name = os.path.basename(self.fullpath) + sym_file_path = os.path.join('uftrace.data', f'{binary_name}.sym') + print(f'{sym_file_path} ({len(self.symbols)} symbols)') + with open(sym_file_path, 'w') as sym_file: + # print hexadecimal addresses on 48 bits + addrx = "0>12x" + for s in self.symbols: + addr = s.addr + addr = f'{addr:{addrx}}' + size = f'{s.size:{addrx}}' + if prefix_symbols: + name = f'{binary_name}:{s.name}' + print(addr, size, 'T', name, file=sym_file) + + def generate_debug_file(self): + binary_name = os.path.basename(self.fullpath) + dbg_file_path = os.path.join('uftrace.data', f'{binary_name}.dbg') + with open(dbg_file_path, 'w') as dbg_file: + for s in self.symbols: + print(f'F: {hex(s.addr)} {s.name}', file=dbg_file) + print(f'L: {s.line} {s.file}', file=dbg_file) + +def parse_parameter(p): + s = p.split(":") + path = s[0] + if len(s) == 1: + return path, 0 + if len(s) > 2: + raise ValueError('only one offset can be set') + offset = s[1] + if not offset.startswith('0x'): + err = f'offset "{offset}" is not an hexadecimal constant. ' + err += 'It should start with "0x".' 
+ raise ValueError(err) + offset = int(offset, 16) + return path, offset + +def is_from_user_mode(map_file_path): + if os.path.exists(map_file_path): + with open(map_file_path, 'r') as map_file: + if not map_file.readline().startswith('# map stack on'): + return True + return False + +def generate_map(binaries): + map_file_path = os.path.join('uftrace.data', 'sid-0.map') + + if is_from_user_mode(map_file_path): + print(f'do not overwrite {map_file_path} generated from qemu-user') + return + + mappings = [] + + # print hexadecimal addresses on 48 bits + addrx = "0>12x" + + mappings += ['# map stack on highest address possible, to prevent uftrace'] + mappings += ['# from considering any kernel address'] + mappings += ['ffffffffffff-ffffffffffff rw-p 00000000 00:00 0 [stack]'] + + for b in binaries: + m = f'{b.addr_start():{addrx}}-{b.addr_end():{addrx}}' + m += f' r--p 00000000 00:00 0 {b.path()}' + mappings.append(m) + + with open(map_file_path, 'w') as map_file: + print('\n'.join(mappings), file=map_file) + print(f'{map_file_path}') + print('\n'.join(mappings)) + +def main(): + parser = argparse.ArgumentParser(description= + 'generate symbol files for uftrace. ' + 'Require binutils (nm and addr2line).') + parser.add_argument('elf_file', nargs='+', + help='path to an ELF file. 
' + 'Use /path/to/file:0xdeadbeef to add a mapping offset.') + parser.add_argument('--prefix-symbols', + help='prepend binary name to symbols', + action=argparse.BooleanOptionalAction) + args = parser.parse_args() + + if not os.path.exists('uftrace.data'): + os.mkdir('uftrace.data') + + binaries = [] + for file in args.elf_file: + path, offset = parse_parameter(file) + b = BinaryFile(path, offset) + binaries.append(b) + binaries.sort(key = lambda b: b.addr_end()); + + for b in binaries: + b.generate_symbol_file(args.prefix_symbols) + b.generate_debug_file() + + generate_map(binaries) + +if __name__ == '__main__': + main() diff --git a/cpu-common.c b/cpu-common.c index ef5757d23bf68..0eb5c7b8f247d 100644 --- a/cpu-common.c +++ b/cpu-common.c @@ -137,7 +137,8 @@ static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi) wi->done = false; qemu_mutex_unlock(&cpu->work_mutex); - qemu_cpu_kick(cpu); + /* exit the inner loop and reach qemu_process_cpu_events_common(). */ + cpu_exit(cpu); } void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data, diff --git a/cpu-target.c b/cpu-target.c index 772e35495b857..f030e2c642ed0 100644 --- a/cpu-target.c +++ b/cpu-target.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "cpu.h" #include "accel/accel-cpu-ops.h" #include "system/cpus.h" #include "exec/cpu-common.h" @@ -27,10 +26,6 @@ #include "hw/core/cpu.h" #include "trace/trace-root.h" -/* Validate correct placement of CPUArchState. */ -QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0); -QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState)); - /* enable or disable single step mode. 
EXCP_DEBUG is returned by the CPU loop after each instruction */ void cpu_single_step(CPUState *cpu, int enabled) diff --git a/crypto/hmac-gcrypt.c b/crypto/hmac-gcrypt.c index 5273086eb9ac9..e428d17479881 100644 --- a/crypto/hmac-gcrypt.c +++ b/crypto/hmac-gcrypt.c @@ -121,7 +121,9 @@ qcrypto_gcrypt_hmac_bytesv(QCryptoHmac *hmac, return -1; } - if (*resultlen == 0) { + if (resultlen == NULL) { + return 0; + } else if (*resultlen == 0) { *resultlen = ret; *result = g_new0(uint8_t, *resultlen); } else if (*resultlen != ret) { diff --git a/crypto/hmac-glib.c b/crypto/hmac-glib.c index ea80c8d1b23a2..b845133a058e8 100644 --- a/crypto/hmac-glib.c +++ b/crypto/hmac-glib.c @@ -104,7 +104,9 @@ qcrypto_glib_hmac_bytesv(QCryptoHmac *hmac, return -1; } - if (*resultlen == 0) { + if (resultlen == NULL) { + return 0; + } else if (*resultlen == 0) { *resultlen = ret; *result = g_new0(uint8_t, *resultlen); } else if (*resultlen != ret) { diff --git a/crypto/hmac-gnutls.c b/crypto/hmac-gnutls.c index 822995505cd95..3c5bcbe80beae 100644 --- a/crypto/hmac-gnutls.c +++ b/crypto/hmac-gnutls.c @@ -119,7 +119,9 @@ qcrypto_gnutls_hmac_bytesv(QCryptoHmac *hmac, return -1; } - if (*resultlen == 0) { + if (resultlen == NULL) { + return 0; + } else if (*resultlen == 0) { *resultlen = ret; *result = g_new0(uint8_t, *resultlen); } else if (*resultlen != ret) { diff --git a/crypto/hmac-nettle.c b/crypto/hmac-nettle.c index dd5b2ab7a1947..2cff7931e120e 100644 --- a/crypto/hmac-nettle.c +++ b/crypto/hmac-nettle.c @@ -164,7 +164,9 @@ qcrypto_nettle_hmac_bytesv(QCryptoHmac *hmac, } } - if (*resultlen == 0) { + if (resultlen == NULL) { + return 0; + } else if (*resultlen == 0) { *resultlen = qcrypto_hmac_alg_map[hmac->alg].len; *result = g_new0(uint8_t, *resultlen); } else if (*resultlen != qcrypto_hmac_alg_map[hmac->alg].len) { diff --git a/crypto/tlscredsx509.c b/crypto/tlscredsx509.c index 63a72fe47c8ef..cd1f504471574 100644 --- a/crypto/tlscredsx509.c +++ b/crypto/tlscredsx509.c @@ -426,9 
+426,8 @@ qcrypto_tls_creds_load_cert(QCryptoTLSCredsX509 *creds, static int qcrypto_tls_creds_load_ca_cert_list(QCryptoTLSCredsX509 *creds, const char *certFile, - gnutls_x509_crt_t *certs, - unsigned int certMax, - size_t *ncerts, + gnutls_x509_crt_t **certs, + unsigned int *ncerts, Error **errp) { gnutls_datum_t data; @@ -449,20 +448,18 @@ qcrypto_tls_creds_load_ca_cert_list(QCryptoTLSCredsX509 *creds, data.data = (unsigned char *)buf; data.size = strlen(buf); - if (gnutls_x509_crt_list_import(certs, &certMax, &data, - GNUTLS_X509_FMT_PEM, 0) < 0) { + if (gnutls_x509_crt_list_import2(certs, ncerts, &data, + GNUTLS_X509_FMT_PEM, 0) < 0) { error_setg(errp, "Unable to import CA certificate list %s", certFile); return -1; } - *ncerts = certMax; return 0; } -#define MAX_CERTS 16 static int qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, bool isServer, @@ -471,12 +468,11 @@ qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, Error **errp) { gnutls_x509_crt_t cert = NULL; - gnutls_x509_crt_t cacerts[MAX_CERTS]; - size_t ncacerts = 0; + gnutls_x509_crt_t *cacerts = NULL; + unsigned int ncacerts = 0; size_t i; int ret = -1; - memset(cacerts, 0, sizeof(cacerts)); if (certFile && access(certFile, R_OK) == 0) { cert = qcrypto_tls_creds_load_cert(creds, @@ -488,8 +484,9 @@ qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, } if (access(cacertFile, R_OK) == 0) { if (qcrypto_tls_creds_load_ca_cert_list(creds, - cacertFile, cacerts, - MAX_CERTS, &ncacerts, + cacertFile, + &cacerts, + &ncacerts, errp) < 0) { goto cleanup; } @@ -526,6 +523,8 @@ qcrypto_tls_creds_x509_sanity_check(QCryptoTLSCredsX509 *creds, for (i = 0; i < ncacerts; i++) { gnutls_x509_crt_deinit(cacerts[i]); } + g_free(cacerts); + return ret; } diff --git a/crypto/tlssession.c b/crypto/tlssession.c index 6d8f8df62323b..ac38c2121dbd3 100644 --- a/crypto/tlssession.c +++ b/crypto/tlssession.c @@ -19,6 +19,8 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" 
+#include "qemu/thread.h" #include "crypto/tlssession.h" #include "crypto/tlscredsanon.h" #include "crypto/tlscredspsk.h" @@ -51,6 +53,14 @@ struct QCryptoTLSSession { */ Error *rerr; Error *werr; + + /* + * Used to protect against broken GNUTLS thread safety + * https://gitlab.com/gnutls/gnutls/-/issues/1717 + */ + bool requireThreadSafety; + bool lockEnabled; + QemuMutex lock; }; @@ -69,6 +79,7 @@ qcrypto_tls_session_free(QCryptoTLSSession *session) g_free(session->peername); g_free(session->authzid); object_unref(OBJECT(session->creds)); + qemu_mutex_destroy(&session->lock); g_free(session); } @@ -84,10 +95,19 @@ qcrypto_tls_session_push(void *opaque, const void *buf, size_t len) return -1; }; + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } + error_free(session->werr); session->werr = NULL; ret = session->writeFunc(buf, len, session->opaque, &session->werr); + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) { errno = EAGAIN; return -1; @@ -114,7 +134,16 @@ qcrypto_tls_session_pull(void *opaque, void *buf, size_t len) error_free(session->rerr); session->rerr = NULL; + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } + ret = session->readFunc(buf, len, session->opaque, &session->rerr); + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + if (ret == QCRYPTO_TLS_SESSION_ERR_BLOCK) { errno = EAGAIN; return -1; @@ -153,6 +182,8 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds, session->creds = creds; object_ref(OBJECT(creds)); + qemu_mutex_init(&session->lock); + if (creds->endpoint != endpoint) { error_setg(errp, "Credentials endpoint doesn't match session"); goto error; @@ -289,6 +320,11 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds, return NULL; } +void qcrypto_tls_session_require_thread_safety(QCryptoTLSSession *sess) +{ + sess->requireThreadSafety = true; +} + static int qcrypto_tls_session_check_certificate(QCryptoTLSSession 
*session, Error **errp) @@ -480,7 +516,17 @@ qcrypto_tls_session_write(QCryptoTLSSession *session, size_t len, Error **errp) { - ssize_t ret = gnutls_record_send(session->handle, buf, len); + ssize_t ret; + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + + ret = gnutls_record_send(session->handle, buf, len); + + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } if (ret < 0) { if (ret == GNUTLS_E_AGAIN) { @@ -506,17 +552,25 @@ ssize_t qcrypto_tls_session_read(QCryptoTLSSession *session, char *buf, size_t len, - bool gracefulTermination, Error **errp) { - ssize_t ret = gnutls_record_recv(session->handle, buf, len); + ssize_t ret; + + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } + + ret = gnutls_record_recv(session->handle, buf, len); + + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } if (ret < 0) { if (ret == GNUTLS_E_AGAIN) { return QCRYPTO_TLS_SESSION_ERR_BLOCK; - } else if ((ret == GNUTLS_E_PREMATURE_TERMINATION) && - gracefulTermination){ - return 0; + } else if (ret == GNUTLS_E_PREMATURE_TERMINATION) { + return QCRYPTO_TLS_SESSION_PREMATURE_TERMINATION; } else { if (session->rerr) { error_propagate(errp, session->rerr); @@ -545,8 +599,39 @@ int qcrypto_tls_session_handshake(QCryptoTLSSession *session, Error **errp) { - int ret = gnutls_handshake(session->handle); + int ret; + ret = gnutls_handshake(session->handle); + if (!ret) { +#ifdef CONFIG_GNUTLS_BUG1717_WORKAROUND + gnutls_cipher_algorithm_t cipher = + gnutls_cipher_get(session->handle); + + /* + * Any use of rekeying in TLS 1.3 is unsafe for + * a gnutls with bug 1717, however, we know that + * QEMU won't initiate manual rekeying. 
Thus we + * only have to protect against automatic rekeying + * which doesn't trigger with CHACHA20 + */ + trace_qcrypto_tls_session_parameters( + session, + session->requireThreadSafety, + gnutls_protocol_get_version(session->handle), + cipher); + + if (session->requireThreadSafety && + gnutls_protocol_get_version(session->handle) == + GNUTLS_TLS1_3 && + cipher != GNUTLS_CIPHER_CHACHA20_POLY1305) { + warn_report("WARNING: activating thread safety countermeasures " + "for potentially broken GNUTLS with TLS1.3 cipher=%d", + cipher); + trace_qcrypto_tls_session_bug1717_workaround(session); + session->lockEnabled = true; + } +#endif + session->handshakeComplete = true; return QCRYPTO_TLS_HANDSHAKE_COMPLETE; } @@ -584,8 +669,15 @@ qcrypto_tls_session_bye(QCryptoTLSSession *session, Error **errp) return 0; } + if (session->lockEnabled) { + qemu_mutex_lock(&session->lock); + } ret = gnutls_bye(session->handle, GNUTLS_SHUT_WR); + if (session->lockEnabled) { + qemu_mutex_unlock(&session->lock); + } + if (!ret) { return QCRYPTO_TLS_BYE_COMPLETE; } @@ -651,6 +743,9 @@ qcrypto_tls_session_new(QCryptoTLSCreds *creds G_GNUC_UNUSED, return NULL; } +void qcrypto_tls_session_require_thread_safety(QCryptoTLSSession *sess) +{ +} void qcrypto_tls_session_free(QCryptoTLSSession *sess G_GNUC_UNUSED) @@ -692,7 +787,6 @@ ssize_t qcrypto_tls_session_read(QCryptoTLSSession *sess, char *buf, size_t len, - bool gracefulTermination, Error **errp) { error_setg(errp, "TLS requires GNUTLS support"); diff --git a/crypto/trace-events b/crypto/trace-events index bccd0bbf291a2..d0e33427faa89 100644 --- a/crypto/trace-events +++ b/crypto/trace-events @@ -21,6 +21,8 @@ qcrypto_tls_creds_x509_load_cert_list(void *creds, const char *file) "TLS creds # tlssession.c qcrypto_tls_session_new(void *session, void *creds, const char *hostname, const char *authzid, int endpoint) "TLS session new session=%p creds=%p hostname=%s authzid=%s endpoint=%d" qcrypto_tls_session_check_creds(void *session, const char 
*status) "TLS session check creds session=%p status=%s" +qcrypto_tls_session_parameters(void *session, int threadSafety, int protocol, int cipher) "TLS session parameters session=%p threadSafety=%d protocol=%d cipher=%d" +qcrypto_tls_session_bug1717_workaround(void *session) "TLS session bug1717 workaround session=%p" # tls-cipher-suites.c qcrypto_tls_cipher_suite_priority(const char *name) "priority: %s" diff --git a/crypto/x509-utils.c b/crypto/x509-utils.c index 8bad00a51b867..39bb6d4d8c3f7 100644 --- a/crypto/x509-utils.c +++ b/crypto/x509-utils.c @@ -46,7 +46,11 @@ int qcrypto_get_x509_cert_fingerprint(uint8_t *cert, size_t size, return -1; } - gnutls_x509_crt_init(&crt); + if (gnutls_x509_crt_init(&crt) < 0) { + error_setg(errp, "Unable to initialize certificate: %s", + gnutls_strerror(ret)); + return -1; + } if (gnutls_x509_crt_import(crt, &datum, GNUTLS_X509_FMT_PEM) != 0) { error_setg(errp, "Failed to import certificate"); diff --git a/disas/disas-host.c b/disas/disas-host.c index 8146fafe804cd..4b06f41fa6c79 100644 --- a/disas/disas-host.c +++ b/disas/disas-host.c @@ -56,11 +56,9 @@ static void initialize_debug_host(CPUDebug *s) s->info.cap_mode = CS_MODE_64; s->info.cap_insn_unit = 1; s->info.cap_insn_split = 8; -#elif defined(_ARCH_PPC) +#elif defined(_ARCH_PPC64) s->info.cap_arch = CS_ARCH_PPC; -# ifdef _ARCH_PPC64 s->info.cap_mode = CS_MODE_64; -# endif #elif defined(__riscv) #if defined(_ILP32) || (__riscv_xlen == 32) s->info.print_insn = print_insn_riscv32; diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index 8ecbd6b26f7d0..fc2743658d4a8 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -44,8 +44,6 @@ Those hosts are officially supported, with various accelerators: - Accelerators * - Arm - hvf (64 bit only), kvm (64 bit only), tcg, xen - * - MIPS (64 bit little endian only) - - kvm, tcg * - PPC - kvm, tcg * - RISC-V @@ -55,7 +53,7 @@ Those hosts are officially supported, with various 
accelerators: * - SPARC - tcg * - x86 - - hvf (64 bit only), kvm, nvmm, tcg, whpx (64 bit only), xen + - hvf (64 bit only), mshv (64 bit only), kvm, nvmm, tcg, whpx (64 bit only), xen Other host architectures are not supported. It is possible to build QEMU system emulation on an unsupported host architecture using the configure @@ -118,14 +116,23 @@ Rust build dependencies include bindgen or have an older version, it is recommended to install a newer version using ``cargo install bindgen-cli``. - QEMU requires Rust 1.77.0. This is available on all supported platforms - with one exception, namely the ``mips64el`` architecture on Debian bookworm. - For all other architectures, Debian bookworm provides a new-enough Rust - compiler in the ``rustc-web`` package. - - Also, on Ubuntu 22.04 or 24.04 this requires the ``rustc-1.77`` - (or newer) package. The path to ``rustc`` and ``rustdoc`` must be - provided manually to the configure script. + QEMU requires Rust 1.83.0. This is available on all supported platforms + with two exception: Ubuntu LTS releases 22.04 and 24.04, and the + ``mips64el`` architecture on Debian bookworm. For all other + architectures, Debian bookworm provides a new-enough Rust compiler + in the ``rustc-web`` package. + + It is expected that in the future Ubuntu will provide updated packages + like the existing ``rustc-1.82`` package. The path to ``rustc`` and + ``rustdoc`` will have to be provided manually to the configure script. + + Some distros prefer to avoid vendored crate sources, and instead use + local sources from e.g. ``/usr/share/cargo/registry``. QEMU includes a + script, ``scripts/get-wraps-from-cargo-registry.py``, that automatically + performs this task. The script is meant to be invoked after unpacking + the QEMU tarball. QEMU also includes ``rust/Cargo.toml`` and + ``rust/Cargo.lock`` files that can be used to compute QEMU's build + dependencies, e.g. using ``cargo2rpm -p rust/Cargo.toml buildrequires``. 
Optional build dependencies Build components whose absence does not affect the ability to build QEMU diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index d50645a07117b..dacf2882e445e 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -68,29 +68,6 @@ configurations (e.g. -smp drawers=1,books=1,clusters=1 for x86 PC machine) is marked deprecated since 9.0, users have to ensure that all the topology members described with -smp are supported by the target machine. -``-old-param`` option for booting Arm kernels via param_struct (since 10.0) -''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' - -The ``-old-param`` command line option is specific to Arm targets: -it is used when directly booting a guest kernel to pass it the -command line and other information via the old ``param_struct`` ABI, -rather than the newer ATAGS or DTB mechanisms. This option was only -ever needed to support ancient kernels on some old board types -like the ``akita`` or ``terrier``; it has been deprecated in the -kernel since 2001. None of the board types QEMU supports need -``param_struct`` support, so this option has been deprecated and will -be removed in a future QEMU version. - -User-mode emulator command line arguments ------------------------------------------ - -``-p`` (since 9.0) -'''''''''''''''''' - -The ``-p`` option pretends to control the host page size. However, -it is not possible to change the host page size, and using the -option only causes failures. - QEMU Machine Protocol (QMP) commands ------------------------------------ @@ -195,20 +172,14 @@ This argument has always been ignored. 
Host Architectures ------------------ -Big endian MIPS since 7.2; 32-bit little endian MIPS since 9.2 -'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' +MIPS (since 11.0) +''''''''''''''''' -As Debian 10 ("Buster") moved into LTS the big endian 32 bit version of -MIPS moved out of support making it hard to maintain our -cross-compilation CI tests of the architecture. As we no longer have -CI coverage support may bitrot away before the deprecation process +MIPS is not supported by Debian 13 ("Trixie") and newer, making it hard to +maintain our cross-compilation CI tests of the architecture. As we no longer +have CI coverage support may bitrot away before the deprecation process completes. -Likewise, the little endian variant of 32 bit MIPS is not supported by -Debian 13 ("Trixie") and newer. - -64 bit little endian MIPS is still a supported host architecture. - System emulation on 32-bit x86 hosts (since 8.0) '''''''''''''''''''''''''''''''''''''''''''''''' @@ -246,27 +217,6 @@ Keeping 32-bit host support alive is a substantial burden for the QEMU project. Thus QEMU will in future drop the support for all 32-bit host systems. -linux-user mode CPUs --------------------- - -iwMMXt emulation and the ``pxa`` CPUs (since 10.0) -'''''''''''''''''''''''''''''''''''''''''''''''''' - -The ``pxa`` CPU family (``pxa250``, ``pxa255``, ``pxa260``, -``pxa261``, ``pxa262``, ``pxa270-a0``, ``pxa270-a1``, ``pxa270``, -``pxa270-b0``, ``pxa270-b1``, ``pxa270-c0``, ``pxa270-c5``) are no -longer used in system emulation, because all the machine types which -used these CPUs were removed in the QEMU 9.2 release. These CPUs can -now only be used in linux-user mode, and to do that you would have to -explicitly select one of these CPUs with the ``-cpu`` command line -option or the ``QEMU_CPU`` environment variable. 
- -We don't believe that anybody is using the iwMMXt emulation, and we do -not have any tests to validate it or any real hardware or similar -known-good implementation to test against. GCC is in the process of -dropping their support for iwMMXt codegen. These CPU types are -therefore deprecated in QEMU, and will be removed in a future release. - System emulator CPUs -------------------- @@ -297,6 +247,15 @@ embedded 405 for power management (OCC) and other internal tasks, it is theoretically possible to use QEMU to model them. Let's keep the CPU implementation for a while before removing all support. +Power8E and Power8NVL CPUs and corresponding Pnv chips (since 10.1) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The Power8E and Power8NVL variants of Power8 are not really useful anymore +in qemu, and are old and unmaintained now. + +The CPUs as well as corresponding Power8NVL and Power8E PnvChips will also +be considered deprecated. + System emulator machines ------------------------ @@ -323,17 +282,32 @@ and serves as the initial engineering sample rather than a production version. A newer revision, A1, is now supported, and the ``ast2700a1-evb`` should replace the older A0 version. -Mips ``mipssim`` machine (since 10.0) -''''''''''''''''''''''''''''''''''''' +Arm ``sonorapass-bmc`` machine (since 10.2) +''''''''''''''''''''''''''''''''''''''''''' + +The ``sonorapass-bmc`` machine represents a lab server that never +entered production. Since it does not rely on any specific device +models, it can be replaced by the ``ast2500-evb`` machine using the +``fmc-model`` option to specify the flash type. The I2C devices +connected to the board can be defined via the QEMU command line. 
+ +Arm ``qcom-dc-scm-v1-bmc`` and ``qcom-firework-bmc`` machine (since 10.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' -Linux dropped support for this virtual machine type in kernel v3.7, and -there does not seem to be anybody around who is still using this board -in QEMU: Most former MIPS-related people are working on other architectures -in their everyday job nowadays, and we are also not aware of anybody still -using old binaries with this board (i.e. there is also no binary available -online to check that this board did not completely bitrot yet). It is -recommended to use another MIPS machine for future MIPS code development -instead. +The ``qcom-dc-scm-v1-bmc`` and ``qcom-firework-bmc`` represent lab +servers that never entered production. Since they do not rely on any +specific device models, they can be replaced by the ``ast2600-evb`` +machine using the ``fmc-model`` option to specify the flash type. The +I2C devices connected to the board can be defined via the QEMU command +line. + +Arm ``fp5280g2-bmc`` machine (since 10.2) +''''''''''''''''''''''''''''''''''''''''''' + +The ``fp5280g2-bmc`` machine does not rely on any specific device +models, it can be replaced by the ``ast2500-evb`` machine using the +``fmc-model`` option to specify the flash type. The I2C devices +connected to the board can be defined via the QEMU command line. RISC-V default machine option (since 10.0) '''''''''''''''''''''''''''''''''''''''''' @@ -444,12 +418,6 @@ recommending to switch to their stable counterparts: - "Zve64f" should be replaced with "zve64f" - "Zve64d" should be replaced with "zve64d" -``-device sd-card,spec_version=1`` (since 9.1) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -SD physical layer specification v2.00 supersedes the v1.10 one. -v2.00 is the default since QEMU 3.0.0. 
- Block device options '''''''''''''''''''' @@ -516,31 +484,6 @@ Stream ``reconnect`` (since 9.2) The ``reconnect`` option only allows specifying second granularity timeouts, which is not enough for all types of use cases, use ``reconnect-ms`` instead. -VFIO device options -''''''''''''''''''' - -``-device vfio-calxeda-xgmac`` (since 10.0) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The vfio-calxeda-xgmac device allows to assign a host Calxeda Highbank -10Gb XGMAC Ethernet controller device ("calxeda,hb-xgmac" compatibility -string) to a guest. Calxeda HW has been ewasted now and there is no point -keeping that device. - -``-device vfio-amd-xgbe`` (since 10.0) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The vfio-amd-xgbe device allows to assign a host AMD 10GbE controller -to a guest ("amd,xgbe-seattle-v1a" compatibility string). AMD "Seattle" -is not supported anymore and there is no point keeping that device. - -``-device vfio-platform`` (since 10.0) -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The vfio-platform device allows to assign a host platform device -to a guest in a generic manner. Integrating a new device into -the vfio-platform infrastructure requires some adaptation at -both kernel and qemu level. No such attempt has been done for years -and the conclusion is that vfio-platform has not got any traction. -PCIe passthrough shall be the mainline solution. - CPU device properties ''''''''''''''''''''' diff --git a/docs/about/emulation.rst b/docs/about/emulation.rst index 456d01d5b08f6..8a5e128f677db 100644 --- a/docs/about/emulation.rst +++ b/docs/about/emulation.rst @@ -816,6 +816,205 @@ This plugin can limit the number of Instructions Per Second that are executed:: The lower the number the more accurate time will be, but the less efficient the plugin. Defaults to ips/10 +Uftrace +....... + +``contrib/plugins/uftrace.c`` + +This plugin generates a binary trace compatible with +`uftrace `_. 
+ +Plugin supports aarch64 and x64, and works in user and system mode, allowing to +trace a system boot, which is not something possible usually. + +In user mode, the memory mapping is directly copied from ``/proc/self/maps`` at +the end of execution. Uftrace should be able to retrieve symbols by itself, +without any additional step. +In system mode, the default memory mapping is empty, and you can generate +one (and associated symbols) using ``contrib/plugins/uftrace_symbols.py``. +Symbols must be present in ELF binaries. + +It tracks the call stack (based on frame pointer analysis). Thus, your program +and its dependencies must be compiled using ``-fno-omit-frame-pointer +-mno-omit-leaf-frame-pointer``. In 2024, `Ubuntu and Fedora enabled it by +default again on x64 +`_. +On aarch64, this is less of a problem, as they are usually part of the ABI, +except for leaf functions. That's true for user space applications, but not +necessarily for bare metal code. You can read this `section +` to easily build a system with frame pointers. + +When tracing long scenarios (> 1 min), the generated trace can become very long, +making it hard to extract data from it. In this case, a simple solution is to +trace execution while generating a timestamped output log using +``qemu-system-aarch64 ... | ts "%s"``. Then, ``uftrace --time-range=start~end`` +can be used to reduce trace for only this part of execution. + +Performance wise, overhead compared to normal tcg execution is around x5-x15. + +.. list-table:: Uftrace plugin arguments + :widths: 20 80 + :header-rows: 1 + + * - Option + - Description + * - trace-privilege-level=[on|off] + - Generate separate traces for each privilege level (Exception Level + + Security State on aarch64, Rings on x64). + +.. list-table:: uftrace_symbols.py arguments + :widths: 20 80 + :header-rows: 1 + + * - Option + - Description + * - elf_file [elf_file ...] + - path to an ELF file. Use /path/to/file:0xdeadbeef to add a mapping offset. 
+ * - --prefix-symbols + - prepend binary name to symbols + +Example user trace +++++++++++++++++++ + +As an example, we can trace qemu itself running git:: + + $ ./build/qemu-aarch64 -plugin \ + build/contrib/plugins/libuftrace.so \ + ./build/qemu-aarch64 /usr/bin/git --help + + # and generate a chrome trace directly + $ uftrace dump --chrome | gzip > ~/qemu_aarch64_git_help.json.gz + +For convenience, you can download this trace `qemu_aarch64_git_help.json.gz +`_. +Download it and open this trace on https://ui.perfetto.dev/. You can zoom in/out +using :kbd:`W`, :kbd:`A`, :kbd:`S`, :kbd:`D` keys. +Some sequences taken from this trace: + +- Loading program and its interpreter + +.. image:: https://fileserver.linaro.org/s/fie8JgX76yyL5cq/preview + :height: 200px + +- open syscall + +.. image:: https://fileserver.linaro.org/s/rsXPTeZZPza4PcE/preview + :height: 200px + +- TB creation + +.. image:: https://fileserver.linaro.org/s/GXY6NKMw5EeRCew/preview + :height: 200px + +It's usually better to use ``uftrace record`` directly. However, tracing +binaries through qemu-user can be convenient when you don't want to recompile +them (``uftrace record`` requires instrumentation), as long as symbols are +present. + +Example system trace +++++++++++++++++++++ + +A full trace example (chrome trace, from instructions below) generated from a +system boot can be found `here +`_. +Download it and open this trace on https://ui.perfetto.dev/. You can see code +executed for all privilege levels, and zoom in/out using +:kbd:`W`, :kbd:`A`, :kbd:`S`, :kbd:`D` keys. You can find below some sequences +taken from this trace: + +- Two first stages of boot sequence in Arm Trusted Firmware (EL3 and S-EL1) + +.. image:: https://fileserver.linaro.org/s/kkxBS552W7nYESX/preview + :height: 200px + +- U-boot initialization (until code relocation, after which we can't track it) + +.. 
image:: https://fileserver.linaro.org/s/LKTgsXNZFi5GFNC/preview + :height: 200px + +- Stat and open syscalls in kernel + +.. image:: https://fileserver.linaro.org/s/dXe4MfraKg2F476/preview + :height: 200px + +- Timer interrupt + +.. image:: https://fileserver.linaro.org/s/TM5yobYzJtP7P3C/preview + :height: 200px + +- Poweroff sequence (from kernel back to firmware, NS-EL2 to EL3) + +.. image:: https://fileserver.linaro.org/s/oR2PtyGKJrqnfRf/preview + :height: 200px + +Build and run system example +++++++++++++++++++++++++++++ + +.. _uftrace_build_system_example: + +Building a full system image with frame pointers is not trivial. + +We provide a `simple way `_ to +build an aarch64 system, combining Arm Trusted firmware, U-boot, Linux kernel +and debian userland. It's based on containers (``podman`` only) and +``qemu-user-static (binfmt)`` to make sure it's easily reproducible and does not depend +on the machine where you build it. + +You can follow the exact same instructions for an x64 system, combining edk2, +Linux, and Ubuntu, simply by switching to +`x86_64 `_ branch. 
+ +To build the system:: + + # Install dependencies + $ sudo apt install -y podman qemu-user-static + + $ git clone https://github.com/pbo-linaro/qemu-linux-stack + $ cd qemu-linux-stack + $ ./build.sh + + # system can be started using: + $ ./run.sh /path/to/qemu-system-aarch64 + +To generate a uftrace for a system boot from that:: + + # run true and poweroff the system + $ env INIT=true ./run.sh path/to/qemu-system-aarch64 \ + -plugin path/to/contrib/plugins/libuftrace.so,trace-privilege-level=on + + # generate symbols and memory mapping + $ path/to/contrib/plugins/uftrace_symbols.py \ + --prefix-symbols \ + arm-trusted-firmware/build/qemu/debug/bl1/bl1.elf \ + arm-trusted-firmware/build/qemu/debug/bl2/bl2.elf \ + arm-trusted-firmware/build/qemu/debug/bl31/bl31.elf \ + u-boot/u-boot:0x60000000 \ + linux/vmlinux + + # inspect trace with + $ uftrace replay + +Uftrace allows to filter the trace, and dump flamegraphs, or a chrome trace. +This last one is very interesting to see visually the boot process:: + + $ uftrace dump --chrome > boot.json + # Open your browser, and load boot.json on https://ui.perfetto.dev/. 
+ +Long visual chrome traces can't be easily opened, thus, it might be +interesting to generate them around a particular point of execution:: + + # execute qemu and timestamp output log + $ env INIT=true ./run.sh path/to/qemu-system-aarch64 \ + -plugin path/to/contrib/plugins/libuftrace.so,trace-privilege-level=on |& + ts "%s" | tee exec.log + + $ cat exec.log | grep 'Run /init' + 1753122320 [ 11.834391] Run /init as init process + # init was launched at 1753122320 + + # generate trace around init execution (2 seconds): + $ uftrace dump --chrome --time-range=1753122320~1753122322 > init.json + Other emulation features ------------------------ diff --git a/docs/about/removed-features.rst b/docs/about/removed-features.rst index d7c2113fc3eb2..53829f59e65fb 100644 --- a/docs/about/removed-features.rst +++ b/docs/about/removed-features.rst @@ -560,6 +560,18 @@ the options along with the machine models they were intended for. Use ``-run-with user=..`` instead. +``-old-param`` option for booting Arm kernels via param_struct (removed in 10.2) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The ``-old-param`` command line option was specific to Arm targets: +it was used when directly booting a guest kernel to pass it the +command line and other information via the old ``param_struct`` ABI, +rather than the newer ATAGS or DTB mechanisms. This option was only +ever needed to support ancient kernels on some old board types +like the ``akita`` or ``terrier``; it has been deprecated in the +kernel since 2001. None of the board types QEMU supports need +``param_struct`` support, so this option has been removed. + User-mode emulator command line arguments ----------------------------------------- @@ -571,6 +583,14 @@ The ``-singlestep`` option has been given a name that better reflects what it actually does. For both linux-user and bsd-user, use the ``-one-insn-per-tb`` option instead. 
+``-p`` (removed in 10.2) +'''''''''''''''''''''''' + +The ``-p`` option pretends to control the host page size. However, +it is not possible to change the host page size; we stopped trying +to do anything with the option except print a warning from 9.0, +and now the option is removed entirely. + QEMU Machine Protocol (QMP) commands ------------------------------------ @@ -722,8 +742,8 @@ Use ``multifd-channels`` instead. Use ``multifd-compression`` instead. -Incorrectly typed ``device_add`` arguments (since 9.2) -'''''''''''''''''''''''''''''''''''''''''''''''''''''' +Incorrectly typed ``device_add`` arguments (removed in 9.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' Due to shortcomings in the internal implementation of ``device_add``, QEMU used to incorrectly accept certain invalid arguments. Any object @@ -876,6 +896,12 @@ work around the atomicity issues in system mode by running all vCPUs in a single thread context; in user mode atomicity was simply broken. From 10.0, QEMU has disabled configuration of 64-bit guests on 32-bit hosts. +32-bit MIPS (since 11.0) +'''''''''''''''''''''''' + +Debian 12 "Bookworm" removed support for 32-bit MIPS, making it hard to +maintain our cross-compilation CI tests of the architecture. + Guest Emulator ISAs ------------------- @@ -1099,6 +1125,11 @@ were added for little endian CPUs. Big endian support was never tested and likely never worked. Starting with QEMU v10.1, the machines are now only available as little-endian machines. +Mips ``mipssim`` machine (removed in 10.2) +'''''''''''''''''''''''''''''''''''''''''' + +Linux dropped support for this virtual machine type in kernel v3.7, and +there was also no binary available online to use with that board. linux-user mode CPUs -------------------- @@ -1125,6 +1156,20 @@ reason the maintainers strongly suspected no one actually used it. QEMU Nios II architecture was orphan; Intel has EOL'ed the Nios II processor IP (see `Intel discontinuance notification`_). 
+iwMMXt emulation and the ``pxa`` CPUs (removed in 10.2) +''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +The ``pxa`` CPU family (``pxa250``, ``pxa255``, ``pxa260``, +``pxa261``, ``pxa262``, ``pxa270-a0``, ``pxa270-a1``, ``pxa270``, +``pxa270-b0``, ``pxa270-b1``, ``pxa270-c0``, ``pxa270-c5``) were +not available in system emulation, because all the machine types which +used these CPUs were removed in the QEMU 9.2 release. We don't +believe that anybody was using the iwMMXt emulation (which you +would have to explicitly enable on the command line), and we did +not have any tests to validate it or any real hardware or similar +known-good implementation to test against. These CPUs have +therefore been removed in linux-user mode as well. + TCG introspection features -------------------------- @@ -1178,6 +1223,11 @@ by using ``-machine graphics=off``. The 'pvrdma' device and the whole RDMA subsystem have been removed. +``-device sd-card,spec_version=1`` (since 10.2) +''''''''''''''''''''''''''''''''''''''''''''''' + +SD physical layer specification v2.00 supersedes the v1.10 one. + Related binaries ---------------- @@ -1262,6 +1312,31 @@ The corresponding upstream server project is no longer maintained. Users are recommended to switch to an alternative distributed block device driver such as RBD. +VFIO devices +------------ + +``-device vfio-calxeda-xgmac`` (since 10.2) +''''''''''''''''''''''''''''''''''''''''''' +The vfio-calxeda-xgmac device allows to assign a host Calxeda Highbank +10Gb XGMAC Ethernet controller device ("calxeda,hb-xgmac" compatibility +string) to a guest. Calxeda HW has been ewasted now and there is no point +keeping that device. + +``-device vfio-amd-xgbe`` (since 10.2) +'''''''''''''''''''''''''''''''''''''' +The vfio-amd-xgbe device allows to assign a host AMD 10GbE controller +to a guest ("amd,xgbe-seattle-v1a" compatibility string). AMD "Seattle" +is not supported anymore and there is no point keeping that device. 
+ +``-device vfio-platform`` (since 10.2) +'''''''''''''''''''''''''''''''''''''' +The vfio-platform device allows to assign a host platform device +to a guest in a generic manner. Integrating a new device into +the vfio-platform infrastructure requires some adaptation at +both kernel and qemu level. No such attempt has been done for years +and the conclusion is that vfio-platform has not got any traction. +PCIe passthrough shall be the mainline solution. + Tools ----- diff --git a/docs/conf.py b/docs/conf.py index f892a6e1da376..0c9ec74097fab 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # QEMU documentation build configuration file, created by # sphinx-quickstart on Thu Jan 31 16:40:14 2019. # @@ -341,7 +339,9 @@ # We use paths starting from qemu_docdir here so that you can run # sphinx-build from anywhere and the kerneldoc extension can still # find everything. -kerneldoc_bin = ['perl', os.path.join(qemu_docdir, '../scripts/kernel-doc')] +# Since kernel-doc is now a Python script, we should run it with whatever +# Python this sphinx is using (rather than letting it find one via env) +kerneldoc_bin = [sys.executable, os.path.join(qemu_docdir, '../scripts/kernel-doc.py')] kerneldoc_srctree = os.path.join(qemu_docdir, '..') hxtool_srctree = os.path.join(qemu_docdir, '..') qapidoc_srctree = os.path.join(qemu_docdir, '..') diff --git a/docs/devel/build-system.rst b/docs/devel/build-system.rst index 2c884197a20aa..6204aa6a72edf 100644 --- a/docs/devel/build-system.rst +++ b/docs/devel/build-system.rst @@ -450,7 +450,7 @@ are run with ``make bench``. Meson test suites such as ``unit`` can be ran with ``make check-unit``, and ``make check-tcg`` builds and runs "non-Meson" tests for all targets. -If desired, it is also possible to use ``ninja`` and ``meson test``, +If desired, it is also possible to use ``ninja`` and ``pyvenv/bin/meson test``, respectively to build emulators and run tests defined in meson.build. 
The main difference is that ``make`` needs the ``-jN`` flag in order to enable parallel builds or tests. diff --git a/docs/devel/code-provenance.rst b/docs/devel/code-provenance.rst index b5aae2e2532ee..8cdc56f6649a7 100644 --- a/docs/devel/code-provenance.rst +++ b/docs/devel/code-provenance.rst @@ -285,8 +285,8 @@ Such tools are acceptable to use, provided there is clearly defined copyright and licensing for their output. Note in particular the caveats applying to AI content generators below. -Use of AI content generators -~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Use of AI-generated content +~~~~~~~~~~~~~~~~~~~~~~~~~~~ TL;DR: @@ -294,6 +294,10 @@ TL;DR: believed to include or derive from AI generated content. This includes ChatGPT, Claude, Copilot, Llama and similar tools.** + **This policy does not apply to other uses of AI, such as researching APIs + or algorithms, static analysis, or debugging, provided their output is not + included in contributions.** + The increasing prevalence of AI-assisted software development results in a number of difficult legal questions and risks for software projects, including QEMU. Of particular concern is content generated by `Large Language Models @@ -322,17 +326,24 @@ The QEMU project thus requires that contributors refrain from using AI content generators on patches intended to be submitted to the project, and will decline any contribution if use of AI is either known or suspected. -This policy does not apply to other uses of AI, such as researching APIs or -algorithms, static analysis, or debugging, provided their output is not to be -included in contributions. - Examples of tools impacted by this policy includes GitHub's CoPilot, OpenAI's ChatGPT, Anthropic's Claude, and Meta's Code Llama, and code/content generation agents which are built on top of such tools. This policy may evolve as AI tools mature and the legal situation is -clarifed. 
In the meanwhile, requests for exceptions to this policy will be -evaluated by the QEMU project on a case by case basis. To be granted an -exception, a contributor will need to demonstrate clarity of the license and -copyright status for the tool's output in relation to its training model and -code, to the satisfaction of the project maintainers. +clarified. + +Exceptions +^^^^^^^^^^ + +The QEMU project welcomes discussion on any exceptions to this policy, +or more general revisions. This can be done by contacting the qemu-devel +mailing list with details of a proposed tool, model, usage scenario, etc. +that is beneficial to QEMU, while still mitigating issues around compliance +with the DCO. After discussion, any exception will be listed below. + +Exceptions do not remove the need for authors to comply with all other +requirements for contribution. In particular, the "Signed-off-by" +label in a patch submission is a statement that the author takes +responsibility for the entire contents of the patch, including any parts +that were generated or assisted by AI tools or other tools. diff --git a/docs/devel/codebase.rst b/docs/devel/codebase.rst index 2a3143787a6ce..69d88271178bf 100644 --- a/docs/devel/codebase.rst +++ b/docs/devel/codebase.rst @@ -48,7 +48,7 @@ yet, so sometimes the source code is all you have. * `accel `_: Infrastructure and architecture agnostic code related to the various `accelerators ` supported by QEMU - (TCG, KVM, hvf, whpx, xen, nvmm). + (TCG, KVM, hvf, whpx, xen, nvmm, mshv). Contains interfaces for operations that will be implemented per `target `_. 
* `audio `_: diff --git a/docs/devel/kconfig.rst b/docs/devel/kconfig.rst index 493b76c4fbf70..1d4a114a022af 100644 --- a/docs/devel/kconfig.rst +++ b/docs/devel/kconfig.rst @@ -59,8 +59,6 @@ stanza like the following:: config ARM_VIRT bool imply PCI_DEVICES - imply VFIO_AMD_XGBE - imply VFIO_XGMAC select A15MPCORE select ACPI select ARM_SMMUV3 diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst index 9471bac8599df..c906c6509eedd 100644 --- a/docs/devel/loads-stores.rst +++ b/docs/devel/loads-stores.rst @@ -460,10 +460,8 @@ For new code they are better avoided: ``cpu_physical_memory_write`` -``cpu_physical_memory_rw`` - Regexes for git grep: - - ``\`` + - ``\`` ``cpu_memory_rw_debug`` ~~~~~~~~~~~~~~~~~~~~~~~ @@ -474,7 +472,7 @@ This function is intended for use by the GDB stub and similar code. It takes a virtual address, converts it to a physical address via an MMU lookup using the current settings of the specified CPU, and then performs the access (using ``address_space_rw`` for -reads or ``cpu_physical_memory_write_rom`` for writes). +reads or ``address_space_write_rom`` for writes). This means that if the access is a write to a ROM then this function will modify the contents (whereas a normal guest CPU access would ignore the write attempt). diff --git a/docs/devel/memory.rst b/docs/devel/memory.rst index 57fb2aec76e06..f22146e56ce55 100644 --- a/docs/devel/memory.rst +++ b/docs/devel/memory.rst @@ -158,21 +158,21 @@ ioeventfd) can be changed during the region lifecycle. They take effect as soon as the region is made visible. This can be immediately, later, or never. -Destruction of a memory region happens automatically when the owner -object dies. +Destruction of a memory region happens automatically when the owner object +dies. When there are multiple memory regions under the same owner object, +the memory API will guarantee all memory regions will be properly detached +and finalized one by one. 
The order in which memory regions will be +finalized is not guaranteed. If however the memory region is part of a dynamically allocated data -structure, you should call object_unparent() to destroy the memory region -before the data structure is freed. For an example see VFIOMSIXInfo -and VFIOQuirk in hw/vfio/pci.c. +structure, you should free the memory region in the instance_finalize +callback. For an example see VFIOMSIXInfo and VFIOQuirk in +hw/vfio/pci.c. You must not destroy a memory region as long as it may be in use by a device or CPU. In order to do this, as a general rule do not create or -destroy memory regions dynamically during a device's lifetime, and only -call object_unparent() in the memory region owner's instance_finalize -callback. The dynamically allocated data structure that contains the -memory region then should obviously be freed in the instance_finalize -callback as well. +destroy memory regions dynamically during a device's lifetime, and never +call object_unparent(). If you break this rule, the following situation can happen: @@ -198,9 +198,7 @@ this exception is rarely necessary, and therefore it is discouraged, but nevertheless it is used in a few places. For regions that "have no owner" (NULL is passed at creation time), the -machine object is actually used as the owner. Since instance_finalize is -never called for the machine object, you must never call object_unparent -on regions that have no owner, unless they are aliases or containers. +machine object is actually used as the owner. Overlapping regions and priority diff --git a/docs/devel/migration/CPR.rst b/docs/devel/migration/CPR.rst index 0a0fd4f6dc319..b6178568a854b 100644 --- a/docs/devel/migration/CPR.rst +++ b/docs/devel/migration/CPR.rst @@ -5,7 +5,7 @@ CPR is the umbrella name for a set of migration modes in which the VM is migrated to a new QEMU instance on the same host. 
It is intended for use when the goal is to update host software components that run the VM, such as QEMU or even the host kernel. At this time, -the cpr-reboot and cpr-transfer modes are available. +the cpr-reboot, cpr-transfer, and cpr-exec modes are available. Because QEMU is restarted on the same host, with access to the same local devices, CPR is allowed in certain cases where normal migration @@ -324,3 +324,113 @@ descriptors from old to new QEMU. In the future, descriptors for vhost, and char devices could be transferred, preserving those devices and their kernel state without interruption, even if they do not explicitly support live migration. + +cpr-exec mode +------------- + +In this mode, QEMU stops the VM, writes VM state to the migration +URI, and directly exec's a new version of QEMU on the same host, +replacing the original process while retaining its PID. Guest RAM is +preserved in place, albeit with new virtual addresses. The user +completes the migration by specifying the ``-incoming`` option, and +by issuing the ``migrate-incoming`` command if necessary; see details +below. + +This mode supports VFIO/IOMMUFD devices by preserving device +descriptors and hence kernel state across the exec, even for devices +that do not support live migration. + +Because the old and new QEMU instances are not active concurrently, +the URI cannot be a type that streams data from one instance to the +other. + +This mode does not require a channel of type ``cpr``. The information +that is passed over that channel for cpr-transfer mode is instead +serialized to a memfd, the number of the fd is saved in the +QEMU_CPR_EXEC_STATE environment variable during the exec of new QEMU, +and new QEMU mmaps the memfd. + +Usage +^^^^^ + +Arguments for the new QEMU process are taken from the +@cpr-exec-command parameter. 
The first argument should be the +path of a new QEMU binary, or a prefix command that exec's the +new QEMU binary, and the arguments should include the ``-incoming`` +option. + +Memory backend objects must have the ``share=on`` attribute. +The VM must be started with the ``-machine aux-ram-share=on`` option. + +Outgoing: + * Set the migration mode parameter to ``cpr-exec``. + * Set the ``cpr-exec-command`` parameter. + * Issue the ``migrate`` command. It is recommended that the URI be + a ``file`` type, but one can use other types such as ``exec``, + provided the command captures all the data from the outgoing side, + and provides all the data to the incoming side. + +Incoming: + * You do not need to explicitly start new QEMU. It is started as + a side effect of the migrate command above. + * If the VM was running when the outgoing ``migrate`` command was + issued, then QEMU automatically resumes VM execution. + +Example 1: incoming URI +^^^^^^^^^^^^^^^^^^^^^^^ + +In these examples, we simply restart the same version of QEMU, but in +a real scenario one would set a new QEMU binary path in +cpr-exec-command. + +:: + + # qemu-kvm -monitor stdio + -object memory-backend-memfd,id=ram0,size=4G + -machine memory-backend=ram0 + -machine aux-ram-share=on + ... + + QEMU 10.2.50 monitor - type 'help' for more information + (qemu) info status + VM status: running + (qemu) migrate_set_parameter mode cpr-exec + (qemu) migrate_set_parameter cpr-exec-command qemu-kvm ... -incoming file:vm.state + (qemu) migrate -d file:vm.state + (qemu) QEMU 10.2.50 monitor - type 'help' for more information + (qemu) info status + VM status: running + +Example 2: incoming defer +^^^^^^^^^^^^^^^^^^^^^^^^^ +:: + + # qemu-kvm -monitor stdio + -object memory-backend-memfd,id=ram0,size=4G + -machine memory-backend=ram0 + -machine aux-ram-share=on + ... 
+ + QEMU 10.2.50 monitor - type 'help' for more information + (qemu) info status + VM status: running + (qemu) migrate_set_parameter mode cpr-exec + (qemu) migrate_set_parameter cpr-exec-command qemu-kvm ... -incoming defer + (qemu) migrate -d file:vm.state + (qemu) QEMU 10.2.50 monitor - type 'help' for more information + (qemu) info status + status: paused (inmigrate) + (qemu) migrate_incoming file:vm.state + (qemu) info status + VM status: running + +Caveats +^^^^^^^ + +cpr-exec mode may not be used with postcopy, background-snapshot, +or COLO. + +cpr-exec mode requires permission to use the exec system call, which +is denied by certain sandbox options, such as spawn. + +The guest pause time increases for large guest RAM backed by small pages. diff --git a/docs/devel/migration/main.rst b/docs/devel/migration/main.rst index 6493c1d2bca48..1afe7b9689bdf 100644 --- a/docs/devel/migration/main.rst +++ b/docs/devel/migration/main.rst @@ -444,6 +444,25 @@ The functions to do that are inside a vmstate definition, and are called: This function is called after we save the state of one device (even upon failure, unless the call to pre_save returned an error). +Following are the errp variants of these functions. + +- ``int (*pre_load_errp)(void *opaque, Error **errp);`` + + This function is called before we load the state of one device. + +- ``int (*post_load_errp)(void *opaque, int version_id, Error **errp);`` + + This function is called after we load the state of one device. + +- ``int (*pre_save_errp)(void *opaque, Error **errp);`` + + This function is called before we save the state of one device. + +New impls should preferentially use 'errp' variants of these +methods and existing impls should be incrementally converted. +The variants without 'errp' are intended to be removed +once all usage is converted. + +Example: You can look at hpet.c, that uses the first three functions to massage the state that is transferred. 
diff --git a/docs/devel/qapi-code-gen.rst b/docs/devel/qapi-code-gen.rst index dfdbeac5a5a7f..d97602f464c7e 100644 --- a/docs/devel/qapi-code-gen.rst +++ b/docs/devel/qapi-code-gen.rst @@ -646,9 +646,9 @@ Member 'event' names the event. This is the event name used in the Client JSON Protocol. Member 'data' defines the event-specific data. It defaults to an -empty MEMBERS object. +empty MEMBERS_ object. -If 'data' is a MEMBERS object, then MEMBERS defines event-specific +If 'data' is a MEMBERS_ object, then MEMBERS defines event-specific data just like a struct type's 'data' defines struct type members. If 'data' is a STRING, then STRING names a complex type whose members @@ -786,8 +786,8 @@ Configuring the schema Syntax:: COND = STRING - | { 'all: [ COND, ... ] } - | { 'any: [ COND, ... ] } + | { 'all': [ COND, ... ] } + | { 'any': [ COND, ... ] } | { 'not': COND } All definitions take an optional 'if' member. Its value must be a @@ -943,9 +943,14 @@ The usual ****strong****, *\*emphasized\** and ````literal```` markup should be used. If you need a single literal ``*``, you will need to backslash-escape it. -Use ``@foo`` to reference a name in the schema. This is an rST -extension. It is rendered the same way as ````foo````, but carries -additional meaning. +Use ```foo``` to reference a definition in the schema. This generates +a link to the definition. In the event that such a cross-reference is +ambiguous, you can use `QAPI cross-reference roles +` to disambiguate. + +Use @foo to reference a member description within the current +definition. This is an rST extension. It is currently rendered the +same way as ````foo````, but carries additional meaning. 
Example:: diff --git a/docs/devel/qapi-domain.rst b/docs/devel/qapi-domain.rst index b71890f6609da..1924f12d42c05 100644 --- a/docs/devel/qapi-domain.rst +++ b/docs/devel/qapi-domain.rst @@ -9,7 +9,7 @@ in Sphinx is provided by the QAPI Domain, located in `Python Domain `_ included with Sphinx, but provides special directives and roles -speciically for annotating and documenting QAPI definitions +for annotating and documenting QAPI definitions specifically. A `Domain @@ -101,7 +101,7 @@ without types. The QAPI domain uses this class for features, returns, and enum values. TypedField: - * Creates a grouped, typed field. Multiple adjacent entres will be + * Creates a grouped, typed field. Multiple adjacent entries will be merged into one section, and the content will form a bulleted list. * *Must* take at least one argument, but supports up to two - nominally, a name and a type. @@ -375,6 +375,7 @@ Will allow you to add arbitrary field lists in QAPI directives:: :see also: Lorem ipsum, dolor sit amet ... +.. _QAPI-domain-cross-references: Cross-references ================ diff --git a/docs/devel/rust.rst b/docs/devel/rust.rst index b6737536c694f..79c26d9d165cd 100644 --- a/docs/devel/rust.rst +++ b/docs/devel/rust.rst @@ -1,4 +1,4 @@ -.. |msrv| replace:: 1.63.0 +.. |msrv| replace:: 1.83.0 Rust in QEMU ============ @@ -66,7 +66,7 @@ __ https://mesonbuild.com/Commands.html#devenv As shown above, you can use the ``--tests`` option as usual to operate on test code. Note however that you cannot *build* or run tests via ``cargo``, because they need support C code from QEMU that Cargo does not know about. Tests can -be run via ``meson test`` or ``make``:: +be run via Meson (``pyvenv/bin/meson test``) or ``make``:: make check-rust @@ -75,35 +75,23 @@ Note that doctests require all ``.o`` files from the build to be available. Supported tools ''''''''''''''' -QEMU supports rustc version 1.77.0 and newer. 
Notably, the following features -are missing: +QEMU supports rustc version 1.83.0 and newer. The following features +from relatively new versions of Rust are not used for historical reasons; +patches are welcome: -* inline const expression (stable in 1.79.0), currently worked around with - associated constants in the ``FnCall`` trait. - -* associated constants have to be explicitly marked ``'static`` (`changed in +* associated constants are still explicitly marked ``'static`` (`changed in 1.81.0`__) -* ``&raw`` (stable in 1.82.0). Use ``addr_of!`` and ``addr_of_mut!`` instead, - though hopefully the need for raw pointers will go down over time. - -* ``new_uninit`` (stable in 1.82.0). This is used internally by the ``pinned_init`` - crate, which is planned for inclusion in QEMU, but it can be easily patched - out. - -* referencing statics in constants (stable in 1.83.0). For now use a const - function; this is an important limitation for QEMU's migration stream - architecture (VMState). Right now, VMState lacks type safety because - it is hard to place the ``VMStateField`` definitions in traits. +* ``&raw`` (stable in 1.82.0). * NUL-terminated file names with ``#[track_caller]`` are scheduled for inclusion as ``#![feature(location_file_nul)]``, but it will be a while before QEMU can use them. For now, there is special code in ``util/error.c`` to support non-NUL-terminated file names. -* associated const equality would be nice to have for some users of - ``callbacks::FnCall``, but is still experimental. ``ASSERT_IS_SOME`` - replaces it. +Associated const equality would be nice to have for some users of +``callbacks::FnCall``, but is still experimental. Const assertions +are used instead. __ https://github.com/rust-lang/rust/pull/125258 @@ -115,15 +103,18 @@ anymore. 
Writing Rust code in QEMU ------------------------- -QEMU includes four crates: +QEMU includes several crates: + +* ``common`` provides Rust-only utilities -* ``qemu_api`` for bindings to C code and useful functionality +* ``bql``, ``chardev``, ``hw/core``, ``migration``, ``qom``, ``system``, + ``util`` for bindings to respective QEMU C library APIs -* ``qemu_api_macros`` defines several procedural macros that are useful when +* ``qemu_macros`` defines several procedural macros that are useful when writing C code * ``pl011`` (under ``rust/hw/char/pl011``) and ``hpet`` (under ``rust/hw/timer/hpet``) - are sample devices that demonstrate ``qemu_api`` and ``qemu_api_macros``, and are + are sample devices that demonstrate Rust binding usage and ``qemu_macros``, and are used to further develop them. These two crates are functional\ [#issues]_ replacements for the ``hw/char/pl011.c`` and ``hw/timer/hpet.c`` files. @@ -136,7 +127,7 @@ This section explains how to work with them. Status '''''' -Modules of ``qemu_api`` can be defined as: +The stability of the modules can be defined as: - *complete*: ready for use in new devices; if applicable, the API supports the full functionality available in C @@ -152,26 +143,27 @@ Modules of ``qemu_api`` can be defined as: The status of the modules is as follows: -================ ====================== -module status -================ ====================== -``assertions`` stable -``bitops`` complete -``callbacks`` complete -``cell`` stable -``errno`` complete -``error`` stable -``irq`` complete -``log`` proof of concept -``memory`` stable -``module`` complete -``qdev`` stable -``qom`` stable -``sysbus`` stable -``timer`` stable -``vmstate`` proof of concept -``zeroable`` stable -================ ====================== +========================== ====================== +module status +========================== ====================== +``bql::cell`` stable +``common::assertions`` stable +``common::bitops`` complete 
+``common::callbacks`` complete +``common::errno`` complete +``common::zeroable`` stable +``hwcore::irq`` complete +``hwcore::qdev`` stable +``hwcore::sysbus`` stable +``migration::migratable`` proof of concept +``migration::vmstate`` stable +``qom`` stable +``system::memory`` stable +``util::error`` stable +``util::log`` proof of concept +``util::module`` complete +``util::timer`` stable +========================== ====================== .. note:: API stability is not a promise, if anything because the C APIs are not a stable @@ -272,7 +264,7 @@ to go from a shared reference to a ``&mut``. Whenever C code provides you with an opaque ``void *``, avoid converting it to a Rust mutable reference, and use a shared reference instead. The -``qemu_api::cell`` module provides wrappers that can be used to tell the +``bql::cell`` module provides wrappers that can be used to tell the Rust compiler about interior mutability, and optionally to enforce locking rules for the "Big QEMU Lock". In the future, similar cell types might also be provided for ``AioContext``-based locking as well. @@ -290,7 +282,7 @@ a raw pointer, for use in calls to C functions. It can be used for example as follows:: #[repr(transparent)] - #[derive(Debug, qemu_api_macros::Wrapper)] + #[derive(Debug, common::Wrapper)] pub struct Object(Opaque); where the special ``derive`` macro provides useful methods such as @@ -304,7 +296,7 @@ the wrapper to be declared thread-safe:: Writing bindings to C code '''''''''''''''''''''''''' -Here are some things to keep in mind when working on the ``qemu_api`` crate. +Here are some things to keep in mind when working on the QEMU Rust crate. 
**Look at existing code**
   Very often, similar idioms in C code correspond to similar tricks in
@@ -367,7 +359,7 @@ from the type after ``as`` in the invocation of ``parse_macro_input!``::
 
         .into()
     }
 
-The ``qemu_api_macros`` crate has utility functions to examine a
+The ``qemu_macros`` crate has utility functions to examine a
 ``DeriveInput`` and perform common checks (e.g. looking for a struct
 with named fields). These functions return ``Result<..., syn::Error>``
 and can be used easily in the procedural macro function::
@@ -408,7 +400,7 @@ Right now, only the nightly version of ``rustfmt`` is supported. This
 might change in the future. While CI checks for correct formatting via
 ``cargo fmt --check``, maintainers can fix this for you when applying patches.
 
-It is expected that ``qemu_api`` provides full ``rustdoc`` documentation for
+It is expected that QEMU Rust crates provide full ``rustdoc`` documentation for
 bindings that are in their final shape or close.
 
 Adding dependencies
diff --git a/docs/devel/style.rst b/docs/devel/style.rst
index d025933808e11..941fe14bfd49b 100644
--- a/docs/devel/style.rst
+++ b/docs/devel/style.rst
@@ -446,8 +446,8 @@ Low level memory management
 ===========================
 
 Use of the ``malloc/free/realloc/calloc/valloc/memalign/posix_memalign``
-APIs is not allowed in the QEMU codebase. Instead of these routines,
-use the GLib memory allocation routines
+or ``alloca/g_alloca/g_newa/g_newa0`` APIs is not allowed in the QEMU codebase.
+Instead of these routines, use the GLib memory allocation routines
 ``g_malloc/g_malloc0/g_new/g_new0/g_realloc/g_free`` or QEMU's
 ``qemu_memalign/qemu_blockalign/qemu_vfree`` APIs.
 
diff --git a/docs/devel/submitting-a-patch.rst b/docs/devel/submitting-a-patch.rst
index f7917b899f689..dd1cf32ad35a9 100644
--- a/docs/devel/submitting-a-patch.rst
+++ b/docs/devel/submitting-a-patch.rst
@@ -235,6 +235,38 @@ to another list.)
``git send-email`` (`step-by-step setup guide works best for delivering the patch without mangling it, but attachments can be used as a last resort on a first-time submission. +.. _use_b4: + +Use B4 +~~~~~~ + +The `b4`_ tool, used for Linux kernel development, can also be used for QEMU +development. It is packaged in most distros and PyPi. The QEMU source tree +includes a ``b4`` project configuration file at the root: ``.b4-config``. + +Example workflow to prepare a patch series: + +1. Start with a clean checkout of the ``master`` branch. +2. Create a new series with a topical branch name using ``b4 prep -n descriptive-name``. + ``b4`` will create a ``b4/descriptive-name`` branch and switch to it. +3. Commit your changes, following this page's guidelines about proper commit messages etc. +4. Write a descriptive cover letter with ``b4 prep --edit-cover``. +5. Add maintainer and reviewer CCs with ``b4 prep --auto-to-cc``. You can make + changes to Cc: and To: recipients by editing the cover letter. +6. Run patch checks with ``b4 prep --check``. +7. Optionally review the patches with ``b4 send --dry-run`` which will print the + raw patches in standard output. + +To send the patches, you can: + +- Setup ``git-send-email`` and use ``b4 send``, or +- Export the patches to files using ``b4 send -o OUTPUT_DIR`` and send them manually. + +For more details, consult the `b4 documentation`_. + +.. _b4 documentation: https://b4.docs.kernel.org/ +.. _b4: https://github.com/mricon/b4/ + .. _use_git_publish: Use git-publish @@ -418,7 +450,7 @@ Retrieve an existing series --------------------------- If you want to apply an existing series on top of your tree, you can simply use -`b4 `__. +`b4`_. :: @@ -533,7 +565,11 @@ summary belongs. The `git-publish `__ script can help with tracking a good summary across versions. Also, the `git-backport-diff `__ script can help focus -reviewers on what changed between revisions. +reviewers on what changed between revisions. 
The ``b4`` tool automatically +generates a version history section in the cover letter, including links to the +previous versions on `Lore`_. + +.. _Lore: https://lore.kernel.org/ .. _tips_and_tricks: diff --git a/docs/devel/tcg-icount.rst b/docs/devel/tcg-icount.rst index 7df883446a74f..a1dcd79e0fdd5 100644 --- a/docs/devel/tcg-icount.rst +++ b/docs/devel/tcg-icount.rst @@ -37,7 +37,7 @@ translator starts by allocating a budget of instructions to be executed. The budget of instructions is limited by how long it will be until the next timer will expire. We store this budget as part of a vCPU icount_decr field which shared with the machinery for handling -cpu_exit(). The whole field is checked at the start of every +qemu_cpu_kick(). The whole field is checked at the start of every translated block and will cause a return to the outer loop to deal with whatever caused the exit. diff --git a/docs/devel/testing/functional.rst b/docs/devel/testing/functional.rst index 9e56dd1b11892..fdeaebaadcb99 100644 --- a/docs/devel/testing/functional.rst +++ b/docs/devel/testing/functional.rst @@ -65,7 +65,7 @@ directory should be your build folder. For example:: The test framework will automatically purge any scratch files created during the tests. If needing to debug a failed test, it is possible to keep these -files around on disk by setting ```QEMU_TEST_KEEP_SCRATCH=1``` as an env +files around on disk by setting ``QEMU_TEST_KEEP_SCRATCH=1`` as an env variable. Any preserved files will be deleted the next time the test is run without this variable set. @@ -312,6 +312,9 @@ The cache is populated in the ``~/.cache/qemu/download`` directory by default, but the location can be changed by setting the ``QEMU_TEST_CACHE_DIR`` environment variable. +To force the test suite to re-download the cache, even if still valid, +set the ``QEMU_TEST_REFRESH_CACHE`` environment variable. 
+ Skipping tests -------------- diff --git a/docs/devel/testing/main.rst b/docs/devel/testing/main.rst index 2b5cb0c148048..0662766b5c915 100644 --- a/docs/devel/testing/main.rst +++ b/docs/devel/testing/main.rst @@ -178,6 +178,46 @@ parser (either fixing a bug or extending/modifying the syntax). To do this: ``qapi-schema += foo.json`` +The reference output can be automatically updated to match the latest QAPI +code generator by running the tests with the QEMU_TEST_REGENERATE environment +variable set. + +.. code:: + + QEMU_TEST_REGENERATE=1 make check-qapi-schema + +The resulting changes must be reviewed by the author to ensure they match +the intended results before adding the updated reference output to the +same commit that alters the generator code. + +.. _tracetool-tests: + +Tracetool tests +~~~~~~~~~~~~~~~ + +The tracetool tests validate the generated source files used for defining +probes for various tracing backends and source formats. The test operates +by running the tracetool program against a sample trace-events file, and +comparing the generated output against known good reference output. The +tests can be run with: + +.. code:: + + make check-tracetool + +The reference output is stored in files under tests/tracetool, and when +the tracetool backend/format output is intentionally changed, the reference +files need to be updated. This can be automated by setting the +QEMU_TEST_REGENERATE=1 environment variable: + +.. code:: + + QEMU_TEST_REGENERATE=1 make check-tracetool + +The resulting changes must be reviewed by the author to ensure they match +the intended results, before adding the updated reference output to the +same commit that alters the generator code. 
+ check-block ~~~~~~~~~~~ diff --git a/docs/devel/uefi-vars.rst b/docs/devel/uefi-vars.rst index 0151a26a0a6f8..b4013b5d12e54 100644 --- a/docs/devel/uefi-vars.rst +++ b/docs/devel/uefi-vars.rst @@ -34,7 +34,7 @@ configures the shared buffer location and size, and traps to the host to process the requests. The ``uefi-vars`` device implements the UEFI virtual device. It comes -in ``uefi-vars-x86`` and ``uefi-vars-sysbus`` flavours. The device +in ``uefi-vars-x64`` and ``uefi-vars-sysbus`` flavours. The device reimplements the handlers needed, specifically ``EfiSmmVariableProtocol`` and ``VarCheckPolicyLibMmiHandler``. It also consumes events (``EfiEndOfDxeEventGroup``, @@ -57,7 +57,7 @@ usage on x86_64 .. code:: qemu-system-x86_64 \ - -device uefi-vars-x86,jsonfile=/path/to/vars.json + -device uefi-vars-x64,jsonfile=/path/to/vars.json usage on aarch64 ---------------- diff --git a/docs/glossary.rst b/docs/glossary.rst index 4fa044bfb6ee7..2857731bc4466 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -12,7 +12,7 @@ Accelerator A specific API used to accelerate execution of guest instructions. It can be hardware-based, through a virtualization API provided by the host OS (kvm, hvf, -whpx, ...), or software-based (tcg). See this description of `supported +whpx, mshv, ...), or software-based (tcg). See this description of `supported accelerators`. Board @@ -101,9 +101,8 @@ manage a virtual machine. QEMU is a virtualizer, that interacts with various hypervisors. In the context of QEMU, an hypervisor is an API, provided by the Host OS, -allowing to execute virtual machines. Linux implementation is KVM (and supports -Xen as well). For MacOS, it's HVF. Windows defines WHPX. And NetBSD provides -NVMM. +allowing to execute virtual machines. Linux provides a choice of KVM, Xen +or MSHV; MacOS provides HVF; Windows provides WHPX; NetBSD provides NVMM. .. 
_machine: diff --git a/docs/igd-assign.txt b/docs/igd-assign.txt index af4e8391fc258..e54040335ba72 100644 --- a/docs/igd-assign.txt +++ b/docs/igd-assign.txt @@ -48,6 +48,7 @@ Intel document [1] shows how to dump VBIOS to file. For UEFI Option ROM, see QEMU also provides a "Legacy" mode that implicitly enables full functionality on IGD, it is automatically enabled when * IGD generation is 6 to 9 (Sandy Bridge to Comet Lake) +* IGD claims VGA cycles on host (IGD is VGA controller on host) * Machine type is i440fx * IGD is assigned to guest BDF 00:02.0 * ROM BAR or romfile is present diff --git a/docs/interop/firmware.json b/docs/interop/firmware.json index 6bbe2cce0afef..ccbfaf828d62d 100644 --- a/docs/interop/firmware.json +++ b/docs/interop/firmware.json @@ -85,12 +85,14 @@ # # @loongarch64: 64-bit LoongArch. (since: 7.1) # +# @riscv64: 64-bit RISC-V. +# # @x86_64: 64-bit x86. # # Since: 3.0 ## { 'enum' : 'FirmwareArchitecture', - 'data' : [ 'aarch64', 'arm', 'i386', 'loongarch64', 'x86_64' ] } + 'data' : [ 'aarch64', 'arm', 'i386', 'loongarch64', 'riscv64', 'x86_64' ] } ## # @FirmwareTarget: diff --git a/docs/pcie_sriov.txt b/docs/pcie_sriov.txt index ab2142807f796..00d7bd93fdff3 100644 --- a/docs/pcie_sriov.txt +++ b/docs/pcie_sriov.txt @@ -72,8 +72,7 @@ setting up a BAR for a VF. 2) Similarly in the implementation of the virtual function, you need to make it a PCI Express device and add a similar set of capabilities except for the SR/IOV capability. Then you need to set up the VF BARs as - subregions of the PFs SR/IOV VF BARs by calling - pcie_sriov_vf_register_bar() instead of the normal pci_register_bar() call: + subregions of the PFs SR/IOV VF BARs by calling pci_register_bar(): pci_your_vf_dev_realize( ... ) { @@ -83,7 +82,7 @@ setting up a BAR for a VF. pcie_ari_init(d, 0x100); ... memory_region_init(mr, ... ) - pcie_sriov_vf_register_bar(d, bar_nr, mr); + pci_register_bar(d, bar_nr, bar_type, mr); ... 
} diff --git a/docs/qdev-device-use.txt b/docs/qdev-device-use.txt index c98c86d828021..fb420da2a9e1a 100644 --- a/docs/qdev-device-use.txt +++ b/docs/qdev-device-use.txt @@ -311,9 +311,9 @@ constraints. Host and guest part of audio devices have always been separate. -The old way to define guest audio devices is -soundhw C1,... +The old way to define guest audio devices was -soundhw C1,... -The new way is to define each guest audio device separately with +The current way is to define each guest audio device separately with -device. Map from -soundhw sound card name to -device: @@ -324,8 +324,10 @@ Map from -soundhw sound card name to -device: gus -device gus,iobase=IOADDR,irq=IRQ,dma=DMA,freq=F hda -device intel-hda,msi=MSI -device hda-duplex sb16 -device sb16,iobase=IOADDR,irq=IRQ,dma=DMA,dma16=DMA16,version=V - adlib not yet available with -device - pcspk not yet available with -device + adlib -device adlib,iobase=IOADDR,freq=F + + pcspk Not available with -device, + but audiodev can be set with -machine pcspk-audiodev= For PCI devices, you can add bus=PCI-BUS,addr=DEVFN to control the PCI device address, as usual. diff --git a/docs/specs/acpi_hest_ghes.rst b/docs/specs/acpi_hest_ghes.rst index c3e9f8d9a7023..aaf7b1ad11a5f 100644 --- a/docs/specs/acpi_hest_ghes.rst +++ b/docs/specs/acpi_hest_ghes.rst @@ -89,12 +89,21 @@ Design Details addresses in the "error_block_address" fields with a pointer to the respective "Error Status Data Block" in the "etc/hardware_errors" blob. -(8) QEMU defines a third and write-only fw_cfg blob which is called - "etc/hardware_errors_addr". Through that blob, the firmware can send back - the guest-side allocation addresses to QEMU. The "etc/hardware_errors_addr" - blob contains a 8-byte entry. QEMU generates a single WRITE_POINTER command - for the firmware. The firmware will write back the start address of - "etc/hardware_errors" blob to the fw_cfg file "etc/hardware_errors_addr". 
+(8) QEMU defines a third and write-only fw_cfg blob to store the location
+    where the error block offsets, read ack registers and CPER records are
+    stored.
+
+    Up to QEMU 9.2, the location was at "etc/hardware_errors_addr", and
+    contains a GPA for the beginning of "etc/hardware_errors".
+
+    Newer versions place the location at "etc/acpi_table_hest_addr",
+    pointing to the GPA of the HEST table.
+
+    Using the above mentioned 'fw_cfg' files, the firmware can send back the
+    guest-side allocation addresses to QEMU. They contain an 8-byte entry.
+    QEMU generates a single WRITE_POINTER command for the firmware. The
+    firmware will write back the start address of either "etc/hardware_errors"
+    or HEST table at the corresponding fw_cfg file.

(9) When QEMU gets a SIGBUS from the kernel, QEMU writes CPER into corresponding
    "Error Status Data Block", guest memory, and then injects platform specific
@@ -105,8 +114,5 @@ Design Details
     kernel, on receiving notification, guest APEI driver could read the CPER
     error and take appropriate action.
 
-(11) kvm_arch_on_sigbus_vcpu() uses source_id as index in "etc/hardware_errors" to
-    find out "Error Status Data Block" entry corresponding to error source. So supported
-    source_id values should be assigned here and not be changed afterwards to make sure
-    that guest will write error into expected "Error Status Data Block" even if guest was
-    migrated to a newer QEMU.
+(11) kvm_arch_on_sigbus_vcpu() reports RAS errors via SEA notifications
+    when a SIGBUS event is triggered.
diff --git a/docs/specs/ppc-xive.rst b/docs/specs/ppc-xive.rst index 83d43f658b902..968cc760d4669 100644 --- a/docs/specs/ppc-xive.rst +++ b/docs/specs/ppc-xive.rst @@ -157,7 +157,7 @@ Interrupt flow from an O/S perspective After an event data has been enqueued in the O/S Event Queue, the IVPE raises the bit corresponding to the priority of the pending interrupt -in the register IBP (Interrupt Pending Buffer) to indicate that an +in the register IPB (Interrupt Pending Buffer) to indicate that an event is pending in one of the 8 priority queues. The Pending Interrupt Priority Register (PIPR) is also updated using the IPB. This register represent the priority of the most favored pending diff --git a/docs/specs/riscv-iommu.rst b/docs/specs/riscv-iommu.rst index 991d376fdc24a..571a6a6cc9634 100644 --- a/docs/specs/riscv-iommu.rst +++ b/docs/specs/riscv-iommu.rst @@ -30,15 +30,15 @@ This will add a RISC-V IOMMU PCI device in the board following any additional PCI parameters (like PCI bus address). The behavior of the RISC-V IOMMU is defined by the spec but its operation is OS dependent. -As of this writing the existing Linux kernel support `linux-v8`_, not yet merged, -does not have support for features like VFIO passthrough. The IOMMU emulation -was tested using a public Ventana Micro Systems kernel repository in -`ventana-linux`_. This kernel is based on `linux-v8`_ with additional patches that -enable features like KVM VFIO passthrough with irqbypass. Until the kernel support -is feature complete feel free to use the kernel available in the Ventana Micro Systems -mirror. - -The current Linux kernel support will use the IOMMU device to create IOMMU groups +Linux kernel iommu support was merged in v6.13. QEMU IOMMU emulation can be +used with mainline kernels for simple IOMMU PCIe support. + +As of v6.17, it does not have support for features like VFIO passthrough. +There is a `VFIO`_ RFC series that is not yet merged. 
The public Ventana Micro +Systems kernel repository in `ventana-linux`_ can be used for testing the VFIO +functions. + +The v6.13+ Linux kernel support uses the IOMMU device to create IOMMU groups with any eligible cards available in the system, regardless of factors such as the order in which the devices are added in the command line. @@ -49,7 +49,7 @@ IOMMU kernel driver behaves: $ qemu-system-riscv64 \ -M virt,aia=aplic-imsic,aia-guests=5 \ - -device riscv-iommu-pci,addr=1.0,vendor-id=0x1efd,device-id=0xedf1 \ + -device riscv-iommu-pci,addr=1.0 \ -device e1000e,netdev=net1 -netdev user,id=net1,net=192.168.0.0/24 \ -device e1000e,netdev=net2 -netdev user,id=net2,net=192.168.200.0/24 \ (...) @@ -58,21 +58,11 @@ IOMMU kernel driver behaves: -M virt,aia=aplic-imsic,aia-guests=5 \ -device e1000e,netdev=net1 -netdev user,id=net1,net=192.168.0.0/24 \ -device e1000e,netdev=net2 -netdev user,id=net2,net=192.168.200.0/24 \ - -device riscv-iommu-pci,addr=1.0,vendor-id=0x1efd,device-id=0xedf1 \ + -device riscv-iommu-pci,addr=3.0 \ (...) Both will create iommu groups for the two e1000e cards. -Another thing to notice on `linux-v8`_ and `ventana-linux`_ is that the kernel driver -considers an IOMMU identified as a Rivos device, i.e. it uses Rivos vendor ID. To -use the riscv-iommu-pci device with the existing kernel support we need to emulate -a Rivos PCI IOMMU by setting 'vendor-id' and 'device-id': - -.. code-block:: bash - - $ qemu-system-riscv64 -M virt \ - -device riscv-iommu-pci,vendor-id=0x1efd,device-id=0xedf1 (...) - Several options are available to control the capabilities of the device, namely: - "bus": the bus that the IOMMU device uses @@ -84,6 +74,7 @@ Several options are available to control the capabilities of the device, namely: - "g-stage": enable g-stage support - "hpm-counters": number of hardware performance counters available. Maximum value is 31. Default value is 31. Use 0 (zero) to disable HPM support +- "vendor-id"/"device-id": pci device ID. 
Defaults to 1b36:0014 (Redhat) riscv-iommu-sys device ---------------------- @@ -111,6 +102,6 @@ riscv-iommu options: .. _iommu1.0.0: https://github.com/riscv-non-isa/riscv-iommu/releases/download/v1.0.0/riscv-iommu.pdf -.. _linux-v8: https://lore.kernel.org/linux-riscv/cover.1718388908.git.tjeznach@rivosinc.com/ +.. _VFIO: https://lore.kernel.org/linux-riscv/20241114161845.502027-17-ajones@ventanamicro.com/ .. _ventana-linux: https://github.com/ventanamicro/linux/tree/dev-upstream diff --git a/docs/specs/spdm.rst b/docs/specs/spdm.rst index f7de080ff0b45..0e3ad25bc698f 100644 --- a/docs/specs/spdm.rst +++ b/docs/specs/spdm.rst @@ -102,7 +102,7 @@ Then you can add this to your QEMU command line: At which point QEMU will try to connect to the SPDM server. -Note that if using x64-64 you will want to use the q35 machine instead +Note that if using x86_64 you will want to use the q35 machine instead of the default. So the entire QEMU command might look like this .. code-block:: shell diff --git a/docs/sphinx-static/theme_overrides.css b/docs/sphinx-static/theme_overrides.css index b225bf706f505..f312e9b57e4a0 100644 --- a/docs/sphinx-static/theme_overrides.css +++ b/docs/sphinx-static/theme_overrides.css @@ -1,5 +1,4 @@ -/* -*- coding: utf-8; mode: css -*- - * +/* * Sphinx HTML theme customization: read the doc * Based on Linux Documentation/sphinx-static/theme_overrides.css */ diff --git a/docs/sphinx/kerneldoc.py b/docs/sphinx/kerneldoc.py index 3aa972f2e89c1..9721072e47685 100644 --- a/docs/sphinx/kerneldoc.py +++ b/docs/sphinx/kerneldoc.py @@ -63,11 +63,6 @@ def run(self): env = self.state.document.settings.env cmd = env.config.kerneldoc_bin + ['-rst', '-enable-lineno'] - # Pass the version string to kernel-doc, as it needs to use a different - # dialect, depending what the C domain supports for each specific - # Sphinx versions - cmd += ['-sphinx-version', sphinx.__version__] - # Pass through the warnings-as-errors flag if env.config.kerneldoc_werror: cmd += 
['-Werror'] @@ -127,7 +122,7 @@ def run(self): result = ViewList() lineoffset = 0; - line_regex = re.compile("^#define LINENO ([0-9]+)$") + line_regex = re.compile(r"^(?:\.\.|#define) LINENO ([0-9]+)$") for line in lines: match = line_regex.search(line) if match: diff --git a/docs/system/arm/aspeed.rst b/docs/system/arm/aspeed.rst index bec0a1dfa8b8a..6317c0e910a23 100644 --- a/docs/system/arm/aspeed.rst +++ b/docs/system/arm/aspeed.rst @@ -1,4 +1,5 @@ Aspeed family boards (``ast2500-evb``, ``ast2600-evb``, ``ast2700-evb``, ``bletchley-bmc``, ``fuji-bmc``, ``gb200nvl-bmc``, ``fby35-bmc``, ``fp5280g2-bmc``, ``g220a-bmc``, ``palmetto-bmc``, ``qcom-dc-scm-v1-bmc``, ``qcom-firework-bmc``, ``quanta-q71l-bmc``, ``rainier-bmc``, ``romulus-bmc``, ``sonorapass-bmc``, ``supermicrox11-bmc``, ``supermicrox11spi-bmc``, ``tiogapass-bmc``, ``witherspoon-bmc``, ``yosemitev2-bmc``) +==================================================================================================================================================================================================================================================================================================================================================================================================================================== The QEMU Aspeed machines model BMCs of various OpenPOWER systems and Aspeed evaluation boards. They are based on different releases of the @@ -242,6 +243,37 @@ under Linux), use : -M ast2500-evb,bmc-console=uart3 +OTP Option +^^^^^^^^^^ + +Both the AST2600 and AST1030 chips use the same One Time Programmable +(OTP) memory module, which is utilized for configuration, key storage, +and storing user-programmable data. This OTP memory module is managed +by the Secure Boot Controller (SBC). The following options can be +specified or omitted based on your needs. + + * When the options are specified, the pre-generated configuration + file will be used as the OTP memory storage. 
+ + * When the options are omitted, an internal memory buffer will be + used to store the OTP memory data. + +.. code-block:: bash + + -blockdev driver=file,filename=otpmem.img,node-name=otp \ + -global aspeed-otp.drive=otp \ + +The following bash command can be used to generate a default +configuration file for OTP memory: + +.. code-block:: bash + + if [ ! -f otpmem.img ]; then + for i in $(seq 1 2048); do + printf '\x00\x00\x00\x00\xff\xff\xff\xff' + done > otpmem.img + fi + Aspeed 2700 family boards (``ast2700-evb``) ================================================================== diff --git a/docs/system/arm/emulation.rst b/docs/system/arm/emulation.rst index 890dc6fee214c..bf81da124a088 100644 --- a/docs/system/arm/emulation.rst +++ b/docs/system/arm/emulation.rst @@ -23,13 +23,16 @@ the following architecture extensions: - FEAT_AFP (Alternate floating-point behavior) - FEAT_Armv9_Crypto (Armv9 Cryptographic Extension) - FEAT_ASID16 (16 bit ASID) +- FEAT_ATS1A (Address Translation operations that ignore stage 1 permissions) - FEAT_BBM at level 2 (Translation table break-before-make levels) - FEAT_BF16 (AArch64 BFloat16 instructions) - FEAT_BTI (Branch Target Identification) - FEAT_CCIDX (Extended cache index) +- FEAT_CHK (Check Feature Status) - FEAT_CMOW (Control for cache maintenance permission) - FEAT_CRC32 (CRC32 instructions) - FEAT_Crypto (Cryptographic Extension) +- FEAT_CSSC (Common Short Sequence Compression instructions) - FEAT_CSV2 (Cache speculation variant 2) - FEAT_CSV2_1p1 (Cache speculation variant 2, version 1.1) - FEAT_CSV2_1p2 (Cache speculation variant 2, version 1.2) @@ -70,6 +73,7 @@ the following architecture extensions: - FEAT_FRINTTS (Floating-point to integer instructions) - FEAT_FlagM (Flag manipulation instructions v2) - FEAT_FlagM2 (Enhancements to flag manipulation instructions) +- FEAT_GCS (Guarded Control Stack Extension) - FEAT_GTG (Guest translation granule size) - FEAT_HAFDBS (Hardware management of the access flag and 
dirty bit state) - FEAT_HBC (Hinted conditional branches) @@ -88,7 +92,11 @@ the following architecture extensions: - FEAT_LRCPC2 (Load-acquire RCpc instructions v2) - FEAT_LSE (Large System Extensions) - FEAT_LSE2 (Large System Extensions v2) +- FEAT_LSE128 (128-bit Atomics) - FEAT_LVA (Large Virtual Address space) +- FEAT_MEC (Memory Encryption Contexts) + + * This is a register-only implementation without encryption. - FEAT_MixedEnd (Mixed-endian support) - FEAT_MixedEndEL0 (Mixed-endian support at EL0) - FEAT_MOPS (Standardization of memory operations) @@ -117,10 +125,14 @@ the following architecture extensions: - FEAT_RASv1p1 (RAS Extension v1.1) - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions) - FEAT_RME (Realm Management Extension) (NB: support status in QEMU is experimental) +- FEAT_RME_GPC2 (RME Granule Protection Check 2 Extension) - FEAT_RNG (Random number generator) - FEAT_RPRES (Increased precision of FRECPE and FRSQRTE) +- FEAT_S1PIE (Stage 1 permission indirections) +- FEAT_S2PIE (Stage 2 permission indirections) - FEAT_S2FWB (Stage 2 forced Write-Back) - FEAT_SB (Speculation Barrier) +- FEAT_SCTLR2 (Extension to SCTLR_ELx) - FEAT_SEL2 (Secure EL2) - FEAT_SHA1 (SHA1 instructions) - FEAT_SHA256 (SHA256 instructions) @@ -148,6 +160,7 @@ the following architecture extensions: - FEAT_SPECRES (Speculation restriction instructions) - FEAT_SSBS (Speculative Store Bypass Safe) - FEAT_SSBS2 (MRS and MSR instructions for SSBS version 2) +- FEAT_TCR2 (Support for TCR2_ELx) - FEAT_TGran16K (Support for 16KB memory translation granule size at stage 1) - FEAT_TGran4K (Support for 4KB memory translation granule size at stage 1) - FEAT_TGran64K (Support for 64KB memory translation granule size at stage 1) diff --git a/docs/system/arm/xlnx-versal-virt.rst b/docs/system/arm/xlnx-versal-virt.rst index c5f35f28e4f54..640cc07f808bc 100644 --- a/docs/system/arm/xlnx-versal-virt.rst +++ b/docs/system/arm/xlnx-versal-virt.rst @@ -1,29 +1,37 @@ 
-Xilinx Versal Virt (``xlnx-versal-virt``) -========================================= +AMD Versal Virt (``amd-versal-virt``, ``amd-versal2-virt``) +=========================================================== -Xilinx Versal is a family of heterogeneous multi-core SoCs +AMD Versal is a family of heterogeneous multi-core SoCs (System on Chip) that combine traditional hardened CPUs and I/O peripherals in a Processing System (PS) with runtime programmable FPGA logic (PL) and an Artificial Intelligence Engine (AIE). +QEMU implements the following Versal SoCs variants: + +- Versal (the ``amd-versal-virt`` machine, the alias ``xlnx-versal-virt`` is + kept for backward compatibility) +- Versal Gen 2 (the ``amd-versal2-virt`` machine) + More details here: -https://www.xilinx.com/products/silicon-devices/acap/versal.html +https://www.amd.com/en/products/adaptive-socs-and-fpgas/versal.html The family of Versal SoCs share a single architecture but come in different parts with different speed grades, amounts of PL and other differences. -The Xilinx Versal Virt board in QEMU is a model of a virtual board +The AMD Versal Virt board in QEMU is a model of a virtual board (does not exist in reality) with a virtual Versal SoC without I/O limitations. 
Currently, we support the following cores and devices: +Versal +"""""" Implemented CPU cores: -- 2 ACPUs (ARM Cortex-A72) +- 2 ACPUs (ARM Cortex-A72) with their GICv3 and ITS +- 2 RCPUs (ARM Cortex-R5F) with their GICv2 Implemented devices: -- Interrupt controller (ARM GICv3) - 2 UARTs (ARM PL011) - An RTC (Versal built-in) - 2 GEMs (Cadence MACB Ethernet MACs) @@ -35,6 +43,31 @@ Implemented devices: - BBRAM (36 bytes of Battery-backed RAM) - eFUSE (3072 bytes of one-time field-programmable bit array) - 2 CANFDs +- USB controller +- OSPI controller +- TRNG controller + +Versal Gen 2 +"""""""""""" +Implemented CPU cores: + +- 8 ACPUs (ARM Cortex-A78AE) with their GICv3 and ITS +- 10 RCPUs (ARM Cortex-R52) with their GICv3 (one per cluster) + +Implemented devices: + +- 2 UARTs (ARM PL011) +- An RTC (Versal built-in) +- 3 GEMs (Cadence MACB Ethernet MACs) +- 8 ADMA (Xilinx zDMA) channels +- 2 SD Controllers +- OCM (256KB of On Chip Memory) +- DDR memory +- BBRAM (36 bytes of Battery-backed RAM) +- 2 CANFDs +- 2 USB controllers +- OSPI controller +- TRNG controller QEMU does not yet model any other devices, including the PL and the AI Engine. @@ -44,8 +77,8 @@ Other differences between the hardware and the QEMU model: ``-m`` argument. If a DTB is provided on the command line then QEMU will edit it to include suitable entries describing the Versal DDR memory ranges. -- QEMU provides 8 virtio-mmio virtio transports; these start at - address ``0xa0000000`` and have IRQs from 111 and upwards. +- QEMU provides 8 virtio-mmio virtio transports. They use reserved memory + regions and IRQ pins to avoid conflicts with real SoC peripherals. Running """"""" @@ -58,7 +91,13 @@ When loading an OS, QEMU generates a DTB and selects an appropriate address where it gets loaded. This DTB will be passed to the kernel in register x0. If there's no ``-kernel`` option, we generate a DTB and place it at 0x1000 -for boot-loaders or firmware to pick it up. 
+for boot-loaders or firmware to pick it up. To dump and observe the generated +DTB, one can use the ``dumpdtb`` machine option: + +.. code-block:: bash + + $ qemu-system-aarch64 -M amd-versal-virt,dumpdtb=example.dtb -m 2G + If users want to provide their own DTB, they can use the ``-dtb`` option. These DTBs will have their memory nodes modified to match QEMU's @@ -74,7 +113,7 @@ Direct Linux boot of a generic ARM64 upstream Linux kernel: .. code-block:: bash - $ qemu-system-aarch64 -M xlnx-versal-virt -m 2G \ + $ qemu-system-aarch64 -M amd-versal-virt -m 2G \ -serial mon:stdio -display none \ -kernel arch/arm64/boot/Image \ -nic user -nic user \ @@ -87,7 +126,7 @@ Direct Linux boot of PetaLinux 2019.2: .. code-block:: bash - $ qemu-system-aarch64 -M xlnx-versal-virt -m 2G \ + $ qemu-system-aarch64 -M amd-versal-virt -m 2G \ -serial mon:stdio -display none \ -kernel petalinux-v2019.2/Image \ -append "rdinit=/sbin/init console=ttyAMA0,115200n8 earlycon=pl011,mmio,0xFF000000,115200n8" \ @@ -100,7 +139,7 @@ version of ATF tries to configure the CCI which we don't model) and U-boot: .. code-block:: bash - $ qemu-system-aarch64 -M xlnx-versal-virt -m 2G \ + $ qemu-system-aarch64 -M amd-versal-virt -m 2G \ -serial stdio -display none \ -device loader,file=petalinux-v2018.3/bl31.elf,cpu-num=0 \ -device loader,file=petalinux-v2019.2/u-boot.elf \ @@ -125,7 +164,7 @@ Boot Linux as DOM0 on Xen via U-Boot: .. code-block:: bash - $ qemu-system-aarch64 -M xlnx-versal-virt -m 4G \ + $ qemu-system-aarch64 -M amd-versal-virt -m 4G \ -serial stdio -display none \ -device loader,file=petalinux-v2019.2/u-boot.elf,cpu-num=0 \ -device loader,addr=0x30000000,file=linux/2018-04-24/xen \ @@ -153,7 +192,7 @@ Boot Linux as Dom0 on Xen via ARM Trusted Firmware and U-Boot: .. 
code-block:: bash - $ qemu-system-aarch64 -M xlnx-versal-virt -m 4G \ + $ qemu-system-aarch64 -M amd-versal-virt -m 4G \ -serial stdio -display none \ -device loader,file=petalinux-v2018.3/bl31.elf,cpu-num=0 \ -device loader,file=petalinux-v2019.2/u-boot.elf \ @@ -201,6 +240,11 @@ To use a different index value, N, from default of 0, add: eFUSE File Backend """""""""""""""""" + +.. note:: + The eFUSE device is not implemented in the Versal Gen 2 QEMU model + yet. + eFUSE can have an optional file backend, which must be a seekable binary file with a size of 3072 bytes or larger. A file with all binary 0s is a 'blank'. @@ -227,7 +271,7 @@ To use a different index value, N, from default of 1, add: is highly recommended (albeit with usage complexity). Better yet, do not use actual product data when running guest image - on this Xilinx Versal Virt board. + on this AMD Versal Virt board. Using CANFDs for Versal Virt """""""""""""""""""""""""""" @@ -258,3 +302,7 @@ To connect CANFD0 and CANFD1 to host machine's CAN interface can0: -object can-bus,id=canbus -machine canbus0=canbus -machine canbus1=canbus -object can-host-socketcan,id=canhost0,if=can0,canbus=canbus + +.. note:: + Versal Gen 2 has 4 CAN controllers. ``canbus0`` to ``canbus3`` can + be specified on the command line. diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index 911381643f1c6..db714ad47aa5b 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -82,22 +82,18 @@ Emulated Devices .. 
toctree:: :maxdepth: 1 + devices/virtio/index.rst + devices/can.rst + devices/canokey.rst devices/ccid.rst devices/cxl.rst - devices/vfio-user.rst - devices/ivshmem.rst + devices/igb.rst devices/ivshmem-flat.rst + devices/ivshmem.rst devices/keyboard.rst devices/net.rst devices/nvme.rst - devices/usb.rst - devices/vhost-user.rst - devices/virtio-gpu.rst - devices/virtio-pmem.rst - devices/virtio-snd.rst - devices/vhost-user-input.rst - devices/vhost-user-rng.rst - devices/canokey.rst devices/usb-u2f.rst - devices/igb.rst + devices/usb.rst + devices/vfio-user.rst diff --git a/docs/system/devices/igb.rst b/docs/system/devices/igb.rst index 71f31cb116069..50f625fd77e42 100644 --- a/docs/system/devices/igb.rst +++ b/docs/system/devices/igb.rst @@ -54,7 +54,7 @@ directory: .. code-block:: shell - meson test qtest-x86_64/qos-test + pyvenv/bin/meson test qtest-x86_64/qos-test ethtool can test register accesses, interrupts, etc. It is automated as an functional test and can be run from the build directory with the following diff --git a/docs/system/devices/net.rst b/docs/system/devices/net.rst index 4d787c3aeb0a2..13199a44fda4d 100644 --- a/docs/system/devices/net.rst +++ b/docs/system/devices/net.rst @@ -21,11 +21,17 @@ configure it as if it was a real ethernet card. Linux host ^^^^^^^^^^ -As an example, you can download the ``linux-test-xxx.tar.gz`` archive -and copy the script ``qemu-ifup`` in ``/etc`` and configure properly -``sudo`` so that the command ``ifconfig`` contained in ``qemu-ifup`` can -be executed as root. You must verify that your host kernel supports the -TAP network interfaces: the device ``/dev/net/tun`` must be present. +A distribution will generally provide specific helper scripts when it +packages QEMU. By default these are found at ``/etc/qemu-ifup`` and +``/etc/qemu-ifdown`` and are called appropriately when QEMU wants to +change the network state. 
+ +If QEMU is being run as a non-privileged user you may need to properly +configure ``sudo`` so that network commands in the scripts can be +executed as root. + +You must verify that your host kernel supports the TAP network +interfaces: the device ``/dev/net/tun`` must be present. See :ref:`sec_005finvocation` to have examples of command lines using the TAP network interfaces. @@ -73,7 +79,7 @@ those sockets. To allow ping for GID 100 (usually users group):: When using the built-in TFTP server, the router is also the TFTP server. -When using the ``'-netdev user,hostfwd=...'`` option, TCP or UDP +When using the ``'-netdev user,hostfwd=...'`` option, TCP, UDP or UNIX connections can be redirected from the host to the guest. It allows for example to redirect X11, telnet or SSH connections. diff --git a/docs/system/devices/vfio-user.rst b/docs/system/devices/vfio-user.rst index b6dcaa5615e53..e10a6d0822993 100644 --- a/docs/system/devices/vfio-user.rst +++ b/docs/system/devices/vfio-user.rst @@ -6,7 +6,7 @@ vfio-user QEMU includes a ``vfio-user`` client. The ``vfio-user`` specification allows for implementing (PCI) devices in userspace outside of QEMU; it is similar to -``vhost-user`` in this respect (see :doc:`vhost-user`), but can emulate arbitrary +``vhost-user`` in this respect (see :doc:`virtio/vhost-user`), but can emulate arbitrary PCI devices, not just ``virtio``. Whereas ``vfio`` is handled by the host kernel, ``vfio-user``, while similar in implementation, is handled entirely in userspace. @@ -20,7 +20,7 @@ Presuming a suitable ``vfio-user`` server has opened a socket at .. code-block:: console --device '{"driver": "vfio-user-pci","socket": {"path": "/tmp/vfio-user.sock", "type": "unix"}}' + --device '{"driver": "vfio-user-pci","socket": {"path": "/tmp/vfio-user.sock", "type": "unix"}}' See `libvfio-user `_ for further information.
diff --git a/docs/system/devices/vhost-user-input.rst b/docs/system/devices/vhost-user-input.rst deleted file mode 100644 index 118eb78101c6a..0000000000000 --- a/docs/system/devices/vhost-user-input.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. _vhost_user_input: - -QEMU vhost-user-input - Input emulation -======================================= - -This document describes the setup and usage of the Virtio input device. -The Virtio input device is a paravirtualized device for input events. - -Description ------------ - -The vhost-user-input device implementation was designed to work with a daemon -polling on input devices and passes input events to the guest. - -QEMU provides a backend implementation in contrib/vhost-user-input. - -Linux kernel support --------------------- - -Virtio input requires a guest Linux kernel built with the -``CONFIG_VIRTIO_INPUT`` option. - -Examples --------- - -The backend daemon should be started first: - -:: - - host# vhost-user-input --socket-path=input.sock \ - --evdev-path=/dev/input/event17 - -The QEMU invocation needs to create a chardev socket to communicate with the -backend daemon and access the VirtIO queues with the guest over the -:ref:`shared memory `. - -:: - - host# qemu-system \ - -chardev socket,path=/tmp/input.sock,id=mouse0 \ - -device vhost-user-input-pci,chardev=mouse0 \ - -m 4096 \ - -object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \ - -numa node,memdev=mem \ - ... diff --git a/docs/system/devices/vhost-user-rng.rst b/docs/system/devices/vhost-user-rng.rst deleted file mode 100644 index ead14053264d0..0000000000000 --- a/docs/system/devices/vhost-user-rng.rst +++ /dev/null @@ -1,41 +0,0 @@ -.. _vhost_user_rng: - -QEMU vhost-user-rng - RNG emulation -=================================== - -Background ----------- - -What follows builds on the material presented in vhost-user.rst - it should -be reviewed before moving forward with the content in this file. 
- -Description ------------ - -The vhost-user-rng device implementation was designed to work with a random -number generator daemon such as the one found in the vhost-device crate of -the rust-vmm project available on github [1]. - -[1]. https://github.com/rust-vmm/vhost-device - -Examples --------- - -The daemon should be started first: - -:: - - host# vhost-device-rng --socket-path=rng.sock -c 1 -m 512 -p 1000 - -The QEMU invocation needs to create a chardev socket the device can -use to communicate as well as share the guests memory over a memfd. - -:: - - host# qemu-system \ - -chardev socket,path=$(PATH)/rng.sock,id=rng0 \ - -device vhost-user-rng-pci,chardev=rng0 \ - -m 4096 \ - -object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \ - -numa node,memdev=mem \ - ... diff --git a/docs/system/devices/virtio/index.rst b/docs/system/devices/virtio/index.rst new file mode 100644 index 0000000000000..c292101ade638 --- /dev/null +++ b/docs/system/devices/virtio/index.rst @@ -0,0 +1,29 @@ +VirtIO Devices +============== + +VirtIO devices are paravirtualized devices designed to be efficient to +emulate and virtualize. Unless you are specifically trying to exercise +a driver for some particular hardware they are the recommended device +models to use for virtual machines. + +The `VirtIO specification`_ is an open standard managed by OASIS. It +describes how a *driver* in a guest operating system interacts with +the *device* model provided by QEMU. Multiple Operating Systems +support drivers for VirtIO with Linux perhaps having the widest range +of device types supported. + +The device implementation can either be provided wholly by QEMU, or in +concert with the kernel (known as *vhost*). The device implementation +can also be off-loaded to an external process via :ref:`vhost user +`. + +.. toctree:: + :maxdepth: 1 + + virtio-gpu.rst + virtio-pmem.rst + virtio-snd.rst + vhost-user.rst + vhost-user-contrib.rst + +.. 
_VirtIO specification: https://docs.oasis-open.org/virtio/virtio/v1.3/virtio-v1.3.html diff --git a/docs/system/devices/virtio/vhost-user-contrib.rst b/docs/system/devices/virtio/vhost-user-contrib.rst new file mode 100644 index 0000000000000..48d04d2ade3fb --- /dev/null +++ b/docs/system/devices/virtio/vhost-user-contrib.rst @@ -0,0 +1,87 @@ +vhost-user daemons in contrib +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +QEMU provides a number of :ref:`vhost_user` daemons in the contrib +directory. They were often written when vhost-user was initially added +to the code base. You should also consider if other vhost-user daemons +such as those from the rust-vmm `vhost-device repository`_ are better +suited for production use. + +.. _vhost-device repository: https://github.com/rust-vmm/vhost-device + +.. _vhost_user_block: + +vhost-user-block - block device +=============================== + +vhost-user-block is a backend for exposing block devices. It can +present a flat file or block device as a simple block device to the +guest. You almost certainly want to use the :ref:`storage-daemon` +instead which supports a wide variety of storage modes and exports a +number of interfaces including vhost-user. + +.. _vhost_user_gpu: + +vhost-user-gpu - gpu device +=========================== + +vhost-user-gpu presents a paravirtualized GPU and display controller. +You probably want to use the internal :ref:`virtio_gpu` implementation +if you want the latest features. There is also a `vhost_device_gpu`_ +daemon as part of the rust-vmm project. + +.. _vhost_device_gpu: https://github.com/rust-vmm/vhost-device/tree/main/vhost-device-gpu + +.. _vhost_user_input: + +vhost-user-input - Input emulation +================================== + +The Virtio input device is a paravirtualized device for input events. + +Description +----------- + +The vhost-user-input device implementation was designed to work with a daemon +polling on input devices and passes input events to the guest. 
+ +QEMU provides a backend implementation in contrib/vhost-user-input. + +Linux kernel support +-------------------- + +Virtio input requires a guest Linux kernel built with the +``CONFIG_VIRTIO_INPUT`` option. + +Examples +-------- + +The backend daemon should be started first: + +:: + + host# vhost-user-input --socket-path=input.sock \ + --evdev-path=/dev/input/event17 + +The QEMU invocation needs to create a chardev socket to communicate with the +backend daemon and access the VirtIO queues with the guest over the +:ref:`shared memory `. + +:: + + host# qemu-system \ + -chardev socket,path=/tmp/input.sock,id=mouse0 \ + -device vhost-user-input-pci,chardev=mouse0 \ + -m 4096 \ + -object memory-backend-file,id=mem,size=4G,mem-path=/dev/shm,share=on \ + -numa node,memdev=mem \ + ... + + +.. _vhost_user_scsi: + +vhost-user-scsi - SCSI controller +================================= + +The vhost-user-scsi daemon can proxy iSCSI devices onto a virtualized +SCSI controller. diff --git a/docs/system/devices/vhost-user.rst b/docs/system/devices/virtio/vhost-user.rst similarity index 69% rename from docs/system/devices/vhost-user.rst rename to docs/system/devices/virtio/vhost-user.rst index 35259d8ec7c66..f556a840e996b 100644 --- a/docs/system/devices/vhost-user.rst +++ b/docs/system/devices/virtio/vhost-user.rst @@ -27,61 +27,55 @@ platform details for what sort of virtio bus to use. 
- Notes * - vhost-user-blk - Block storage - - See contrib/vhost-user-blk + - :ref:`storage-daemon` * - vhost-user-fs - File based storage driver - - See https://gitlab.com/virtio-fs/virtiofsd + - `virtiofsd `_ * - vhost-user-gpio - Proxy gpio pins to host - - See https://github.com/rust-vmm/vhost-device + - `vhost-device-gpio `_ * - vhost-user-gpu - GPU driver - - See contrib/vhost-user-gpu + - `vhost-device-gpu `_ or :ref:`vhost_user_gpu` * - vhost-user-i2c - Proxy i2c devices to host - - See https://github.com/rust-vmm/vhost-device + - `vhost-device-i2c `_ * - vhost-user-input - Generic input driver - - :ref:`vhost_user_input` + - `vhost-device-input `_ or :ref:`vhost_user_input` * - vhost-user-rng - Entropy driver - - :ref:`vhost_user_rng` + - `vhost-device-rng `_ * - vhost-user-scmi - System Control and Management Interface - - See https://github.com/rust-vmm/vhost-device + - `vhost-device-scmi `_ * - vhost-user-snd - Audio device - - See https://github.com/rust-vmm/vhost-device/staging + - `vhost-device-sound `_ * - vhost-user-scsi - SCSI based storage - - See contrib/vhost-user-scsi + - :ref:`vhost_user_scsi` * - vhost-user-vsock - Socket based communication - - See https://github.com/rust-vmm/vhost-device + - `vhost-device-vsock `_ The referenced *daemons* are not exhaustive, any conforming backend implementing the device and using the vhost-user protocol should work. -vhost-user-device -^^^^^^^^^^^^^^^^^ +vhost-user-test-device +^^^^^^^^^^^^^^^^^^^^^^ -The vhost-user-device is a generic development device intended for -expert use while developing new backends. The user needs to specify -all the required parameters including: +The vhost-user-test-device is a generic development device intended +for expert use while developing new backends. The user needs to +specify all the required parameters including: - Device ``virtio-id`` - The ``num_vqs`` it needs and their ``vq_size`` - The ``config_size`` if needed .. 
note:: - To prevent user confusion you cannot currently instantiate - vhost-user-device without first patching out:: - - /* Reason: stop inexperienced users confusing themselves */ - dc->user_creatable = false; - - in ``vhost-user-device.c`` and ``vhost-user-device-pci.c`` file and - rebuilding. + While this is a useful device for development it is not recommended + for production use. vhost-user daemon ================= diff --git a/docs/system/devices/virtio-gpu.rst b/docs/system/devices/virtio/virtio-gpu.rst similarity index 99% rename from docs/system/devices/virtio-gpu.rst rename to docs/system/devices/virtio/virtio-gpu.rst index b7eb0fc0e727a..0f4bb304a9b84 100644 --- a/docs/system/devices/virtio-gpu.rst +++ b/docs/system/devices/virtio/virtio-gpu.rst @@ -1,7 +1,9 @@ .. SPDX-License-Identifier: GPL-2.0-or-later -virtio-gpu +.. _virtio_gpu: + +VirtIO GPU ========== This document explains the setup and usage of the virtio-gpu device. diff --git a/docs/system/devices/virtio-pmem.rst b/docs/system/devices/virtio/virtio-pmem.rst similarity index 98% rename from docs/system/devices/virtio-pmem.rst rename to docs/system/devices/virtio/virtio-pmem.rst index c82ac0673153b..0c24de83ec704 100644 --- a/docs/system/devices/virtio-pmem.rst +++ b/docs/system/devices/virtio/virtio-pmem.rst @@ -1,7 +1,5 @@ - -=========== -virtio pmem -=========== +VirtIO Persistent Memory +======================== This document explains the setup and usage of the virtio pmem device. 
The virtio pmem device is a paravirtualized persistent memory device diff --git a/docs/system/devices/virtio-snd.rst b/docs/system/devices/virtio/virtio-snd.rst similarity index 99% rename from docs/system/devices/virtio-snd.rst rename to docs/system/devices/virtio/virtio-snd.rst index 2a9187fd701a3..3c797f66e039f 100644 --- a/docs/system/devices/virtio-snd.rst +++ b/docs/system/devices/virtio/virtio-snd.rst @@ -1,4 +1,4 @@ -virtio sound +VirtIO Sound ============ This document explains the setup and usage of the Virtio sound device. diff --git a/docs/system/introduction.rst b/docs/system/introduction.rst index 4cd46b5b8f958..9c57523b6c288 100644 --- a/docs/system/introduction.rst +++ b/docs/system/introduction.rst @@ -23,6 +23,9 @@ Tiny Code Generator (TCG) capable of emulating many CPUs. * - Xen - Linux (as dom0) - Arm, x86 + * - MSHV + - Linux (as dom0) + - x86 * - Hypervisor Framework (hvf) - MacOS - x86 (64 bit only), Arm (64 bit only) diff --git a/docs/system/ppc/powernv.rst b/docs/system/ppc/powernv.rst index f3ec2cc69c0da..5154794cc8cd4 100644 --- a/docs/system/ppc/powernv.rst +++ b/docs/system/ppc/powernv.rst @@ -1,5 +1,5 @@ -PowerNV family boards (``powernv8``, ``powernv9``, ``powernv10``) -================================================================== +PowerNV family boards (``powernv8``, ``powernv9``, ``powernv10``, ``powernv11``) +================================================================================ PowerNV (as Non-Virtualized) is the "bare metal" platform using the OPAL firmware. It runs Linux on IBM and OpenPOWER systems and it can @@ -15,11 +15,12 @@ beyond the scope of what QEMU addresses today. Supported devices ----------------- - * Multi processor support for POWER8, POWER8NVL and POWER9. + * Multi processor support for POWER8, POWER8NVL, POWER9, Power10 and Power11. * XSCOM, serial communication sideband bus to configure chiplets. * Simple LPC Controller. * Processor Service Interface (PSI) Controller. 
- * Interrupt Controller, XICS (POWER8) and XIVE (POWER9) and XIVE2 (Power10). + * Interrupt Controller, XICS (POWER8) and XIVE (POWER9) and XIVE2 (Power10 & + Power11). * POWER8 PHB3 PCIe Host bridge and POWER9 PHB4 PCIe Host bridge. * Simple OCC is an on-chip micro-controller used for power management tasks. * iBT device to handle BMC communication, with the internal BMC simulator diff --git a/docs/system/target-mips.rst b/docs/system/target-mips.rst index 9028c3b304d49..2a152e1338011 100644 --- a/docs/system/target-mips.rst +++ b/docs/system/target-mips.rst @@ -12,8 +12,6 @@ machine types are emulated: - An ACER Pica \"pica61\". This machine needs the 64-bit emulator. -- MIPS emulator pseudo board \"mipssim\" - - A MIPS Magnum R4000 machine \"magnum\". This machine needs the 64-bit emulator. @@ -80,15 +78,6 @@ The Loongson-3 virtual platform emulation supports: - Both KVM and TCG supported -The mipssim pseudo board emulation provides an environment similar to -what the proprietary MIPS emulator uses for running Linux. It supports: - -- A range of MIPS CPUs, default is the 24Kf - -- PC style serial port - -- MIPSnet network emulation - .. include:: cpu-models-mips.rst.inc .. _nanoMIPS-System-emulator: diff --git a/docs/user/index.rst b/docs/user/index.rst index 782d27cda2762..2307580cb97c2 100644 --- a/docs/user/index.rst +++ b/docs/user/index.rst @@ -5,8 +5,9 @@ User Mode Emulation ------------------- This section of the manual is the overall guide for users using QEMU -for user-mode emulation. In this mode, QEMU can launch -processes compiled for one CPU on another CPU. +for user-mode emulation. In this mode, QEMU can launch programs +compiled for one CPU architecture on the same Operating System (OS) +but running on a different CPU architecture. .. 
toctree:: :maxdepth: 2 diff --git a/docs/user/main.rst b/docs/user/main.rst index 9a1c60448c58e..a8ddf9142495e 100644 --- a/docs/user/main.rst +++ b/docs/user/main.rst @@ -17,28 +17,44 @@ Features QEMU user space emulation has the following notable features: -**System call translation:** - QEMU includes a generic system call translator. This means that the - parameters of the system calls can be converted to fix endianness and - 32/64-bit mismatches between hosts and targets. IOCTLs can be - converted too. - -**POSIX signal handling:** - QEMU can redirect to the running program all signals coming from the - host (such as ``SIGALRM``), as well as synthesize signals from - virtual CPU exceptions (for example ``SIGFPE`` when the program - executes a division by zero). - - QEMU relies on the host kernel to emulate most signal system calls, - for example to emulate the signal mask. On Linux, QEMU supports both - normal and real-time signals. - -**Threading:** - On Linux, QEMU can emulate the ``clone`` syscall and create a real - host thread (with a separate virtual CPU) for each emulated thread. - Note that not all targets currently emulate atomic operations - correctly. x86 and Arm use a global lock in order to preserve their - semantics. +System call translation +~~~~~~~~~~~~~~~~~~~~~~~ + +System calls are the principal interface between user-space and the +kernel. Generally the same system calls exist on all versions of the +kernel so QEMU includes a generic system call translator. The +translator takes care of adjusting endianness, 32/64 bit parameter size +and then calling the equivalent host system call. + +QEMU can also adjust device specific ``ioctl()`` calls in a similar +fashion. + +POSIX signal handling +~~~~~~~~~~~~~~~~~~~~~ + +QEMU can redirect to the running program all signals coming from the +host (such as ``SIGALRM``), as well as synthesize signals from +virtual CPU exceptions (for example ``SIGFPE`` when the program +executes a division by zero).
+ +QEMU relies on the host kernel to emulate most signal system calls, +for example to emulate the signal mask. On Linux, QEMU supports both +normal and real-time signals. + +Threading +~~~~~~~~~ + +On Linux, QEMU can emulate the ``clone`` syscall and create a real +host thread (with a separate virtual CPU) for each emulated thread. +However as QEMU relies on the system libc to call ``clone`` on its +behalf we limit the flags accepted to those it uses. Specifically this +means flags affecting namespaces (e.g. container runtimes) are not +supported. QEMU user-mode processes can still be run inside containers +though. + +While QEMU does its best to emulate atomic operations properly, +differences between the host and guest memory models can cause issues +for software that makes assumptions about the memory model. QEMU was conceived so that ultimately it can emulate itself. Although it is not very useful, it is an important test to show the power of the @@ -246,9 +262,6 @@ Debug options: Activate logging of the specified items (use '-d help' for a list of log items) -``-p pagesize`` - Act as if the host page size was 'pagesize' bytes - ``-one-insn-per-tb`` Run the emulation with one guest instruction per translation block.
This slows down emulation a lot, but can be useful in some situations, diff --git a/fsdev/file-op-9p.h b/fsdev/file-op-9p.h index b9dae8c84c230..b85c9934deff1 100644 --- a/fsdev/file-op-9p.h +++ b/fsdev/file-op-9p.h @@ -21,9 +21,11 @@ #ifdef CONFIG_LINUX # include -#endif -#ifdef CONFIG_DARWIN +#elif defined(CONFIG_DARWIN) || defined(CONFIG_FREEBSD) # include +# ifdef CONFIG_FREEBSD +# undef MACHINE /* work around some unfortunate namespace pollution */ +# endif # include #endif diff --git a/fsdev/meson.build b/fsdev/meson.build index c751d8cb6222b..95fe816604ff7 100644 --- a/fsdev/meson.build +++ b/fsdev/meson.build @@ -5,6 +5,6 @@ fsdev_ss.add(when: ['CONFIG_FSDEV_9P'], if_true: files( '9p-marshal.c', 'qemu-fsdev.c', ), if_false: files('qemu-fsdev-dummy.c')) -if host_os in ['linux', 'darwin'] +if host_os in ['linux', 'darwin', 'freebsd'] system_ss.add_all(fsdev_ss) endif diff --git a/gdbstub/internals.h b/gdbstub/internals.h index bf5a5c63029c9..92466b28c187b 100644 --- a/gdbstub/internals.h +++ b/gdbstub/internals.h @@ -11,7 +11,27 @@ #include "exec/cpu-common.h" -#define MAX_PACKET_LENGTH 4096 +/* + * Most "large" transfers (e.g. memory reads, feature XML + * transfer) have mechanisms in the gdb protocol for splitting + * them. However, register values in particular cannot currently + * be split. This packet size must therefore be at least big enough + * for the worst-case register size. Currently that is Arm SME + * ZA storage with a 256x256 byte value. We also must account + * for the conversion from raw data to hex in gdb_memtohex(), + * which writes 2 * size bytes, and for other protocol overhead + * including command, register number and checksum which add + * another 4 bytes of overhead. 
However, to be consistent with + * the changes made in gdbserver to address this same requirement, + * we add a total of 32 bytes to account for protocol overhead + * (unclear why specifically 32 bytes), bringing the value of + * MAX_PACKET_LENGTH to 2 * 256 * 256 + 32 = 131104. + * + * The commit making this change for gdbserver can be found here: + * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h= + * b816042e88583f280ad186ff124ab84d31fb592b + */ +#define MAX_PACKET_LENGTH 131104 /* * Shared structures and definitions diff --git a/gdbstub/system.c b/gdbstub/system.c index 5be0d3c58cec5..5221c579d90b2 100644 --- a/gdbstub/system.c +++ b/gdbstub/system.c @@ -18,13 +18,11 @@ #include "gdbstub/syscalls.h" #include "gdbstub/commands.h" #include "exec/hwaddr.h" -#include "exec/tb-flush.h" #include "accel/accel-ops.h" #include "accel/accel-cpu-ops.h" #include "system/cpus.h" #include "system/runstate.h" #include "system/replay.h" -#include "system/tcg.h" #include "hw/core/cpu.h" #include "hw/cpu/cluster.h" #include "hw/boards.h" @@ -174,9 +172,6 @@ static void gdb_vm_state_change(void *opaque, bool running, RunState state) } else { trace_gdbstub_hit_break(); } - if (tcg_enabled()) { - tb_flush(cpu); - } ret = GDB_SIGNAL_TRAP; break; case RUN_STATE_PAUSED: diff --git a/gdbstub/user.c b/gdbstub/user.c index 67403e5a252a6..2e14ded3f0105 100644 --- a/gdbstub/user.c +++ b/gdbstub/user.c @@ -15,7 +15,6 @@ #include "qemu/sockets.h" #include "qapi/error.h" #include "exec/hwaddr.h" -#include "exec/tb-flush.h" #include "exec/gdbstub.h" #include "gdbstub/commands.h" #include "gdbstub/syscalls.h" @@ -220,7 +219,6 @@ int gdb_handlesig(CPUState *cpu, int sig, const char *reason, void *siginfo, /* disable single step if it was enabled */ cpu_single_step(cpu, 0); - tb_flush(cpu); if (sig != 0) { gdb_set_stop_cpu(cpu); @@ -539,7 +537,6 @@ static void disable_gdbstub(CPUState *thread_cpu) /* no cpu_watchpoint_remove_all for user-mode */ cpu_single_step(cpu, 0); } - 
tb_flush(thread_cpu); } void gdbserver_fork_end(CPUState *cpu, pid_t pid) diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 6142f60e7b164..2a7f5810d7060 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -271,12 +271,12 @@ ERST .name = "accel", .args_type = "", .params = "", - .help = "show accelerator info", + .help = "show accelerator statistics", }, SRST ``info accel`` - Show accelerator info. + Show accelerator statistics. ERST SRST @@ -307,6 +307,24 @@ SRST Show KVM information. ERST + { + .name = "accelerators", + .args_type = "", + .params = "", + .help = "show present and enabled information", + .cmd = hmp_info_accelerators, + }, + +SRST + ``info accelerators`` + Show which accelerators are compiled into a QEMU binary, and what accelerator + is in use. For example:: + + kvm qtest [tcg] + + indicates that TCG in use, and that KVM and qtest are also available. +ERST + { .name = "numa", .args_type = "", @@ -977,3 +995,16 @@ SRST ``info cryptodev`` Show the crypto devices. ERST + + { + .name = "firmware-log", + .args_type = "max-size:o?", + .params = "[max-size]", + .help = "show the firmware (ovmf) debug log", + .cmd = hmp_info_firmware_log, + }, + +SRST + ``info firmware-log`` + Show the firmware (ovmf) debug log. 
+ERST diff --git a/hmp-commands.hx b/hmp-commands.hx index d0e4f35a30afb..15f6082596837 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -1009,7 +1009,7 @@ ERST { .name = "migrate_set_parameter", - .args_type = "parameter:s,value:s", + .args_type = "parameter:s,value:S", .params = "parameter value", .help = "Set the parameter for migration", .cmd = hmp_migrate_set_parameter, @@ -1357,8 +1357,8 @@ ERST { .name = "hostfwd_add", .args_type = "arg1:s,arg2:s?", - .params = "[netdev_id] [tcp|udp]:[hostaddr]:hostport-[guestaddr]:guestport", - .help = "redirect TCP or UDP connections from host to guest (requires -net user)", + .params = "[netdev_id] [tcp|udp|unix]:[[hostaddr]:hostport|hostpath]-[guestaddr]:guestport", + .help = "redirect TCP, UDP or UNIX connections from host to guest (requires -net user)", .cmd = hmp_hostfwd_add, }, #endif diff --git a/host/include/aarch64/host/atomic128-cas.h b/host/include/aarch64/host/atomic128-cas.h deleted file mode 100644 index 991da4ef54333..0000000000000 --- a/host/include/aarch64/host/atomic128-cas.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * SPDX-License-Identifier: GPL-2.0-or-later - * Compare-and-swap for 128-bit atomic operations, AArch64 version. - * - * Copyright (C) 2018, 2023 Linaro, Ltd. - * - * See docs/devel/atomics.rst for discussion about the guarantees each - * atomic primitive is meant to provide. - */ - -#ifndef AARCH64_ATOMIC128_CAS_H -#define AARCH64_ATOMIC128_CAS_H - -/* Through gcc 10, aarch64 has no support for 128-bit atomics. 
*/ -#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) -#include "host/include/generic/host/atomic128-cas.h.inc" -#else -static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) -{ - uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); - uint64_t newl = int128_getlo(new), newh = int128_gethi(new); - uint64_t oldl, oldh; - uint32_t tmp; - - asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" - "cmp %[oldl], %[cmpl]\n\t" - "ccmp %[oldh], %[cmph], #0, eq\n\t" - "b.ne 1f\n\t" - "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" - "cbnz %w[tmp], 0b\n" - "1:" - : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), - [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) - : [cmpl] "r"(cmpl), [cmph] "r"(cmph), - [newl] "r"(newl), [newh] "r"(newh) - : "memory", "cc"); - - return int128_make128(oldl, oldh); -} - -# define CONFIG_CMPXCHG128 1 -# define HAVE_CMPXCHG128 1 -#endif - -#endif /* AARCH64_ATOMIC128_CAS_H */ diff --git a/host/include/aarch64/host/atomic128-cas.h.inc b/host/include/aarch64/host/atomic128-cas.h.inc new file mode 100644 index 0000000000000..aec27df182089 --- /dev/null +++ b/host/include/aarch64/host/atomic128-cas.h.inc @@ -0,0 +1,102 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * Compare-and-swap for 128-bit atomic operations, AArch64 version. + * + * Copyright (C) 2018, 2023 Linaro, Ltd. + * + * See docs/devel/atomics.rst for discussion about the guarantees each + * atomic primitive is meant to provide. + */ + +#ifndef AARCH64_ATOMIC128_CAS_H +#define AARCH64_ATOMIC128_CAS_H + +/* Through gcc 10, aarch64 has no support for 128-bit atomics. 
*/ +#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128) +#include "host/include/generic/host/atomic128-cas.h.inc" +#else +static inline Int128 atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) +{ + uint64_t cmpl = int128_getlo(cmp), cmph = int128_gethi(cmp); + uint64_t newl = int128_getlo(new), newh = int128_gethi(new); + uint64_t oldl, oldh; + uint32_t tmp; + + asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" + "cmp %[oldl], %[cmpl]\n\t" + "ccmp %[oldh], %[cmph], #0, eq\n\t" + "b.ne 1f\n\t" + "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" + "cbnz %w[tmp], 0b\n" + "1:" + : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), + [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) + : [cmpl] "r"(cmpl), [cmph] "r"(cmph), + [newl] "r"(newl), [newh] "r"(newh) + : "memory", "cc"); + + return int128_make128(oldl, oldh); +} + +static inline Int128 atomic16_xchg(Int128 *ptr, Int128 new) +{ + uint64_t newl = int128_getlo(new), newh = int128_gethi(new); + uint64_t oldl, oldh; + uint32_t tmp; + + asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" + "stlxp %w[tmp], %[newl], %[newh], %[mem]\n\t" + "cbnz %w[tmp], 0b" + : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), + [oldl] "=&r"(oldl), [oldh] "=&r"(oldh) + : [newl] "r"(newl), [newh] "r"(newh) + : "memory"); + + return int128_make128(oldl, oldh); +} + +static inline Int128 atomic16_fetch_and(Int128 *ptr, Int128 new) +{ + uint64_t newl = int128_getlo(new), newh = int128_gethi(new); + uint64_t oldl, oldh, tmpl, tmph; + uint32_t tmp; + + asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" + "and %[tmpl], %[oldl], %[newl]\n\t" + "and %[tmph], %[oldh], %[newh]\n\t" + "stlxp %w[tmp], %[tmpl], %[tmph], %[mem]\n\t" + "cbnz %w[tmp], 0b" + : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), + [oldl] "=&r"(oldl), [oldh] "=&r"(oldh), + [tmpl] "=&r"(tmpl), [tmph] "=&r"(tmph) + : [newl] "r"(newl), [newh] "r"(newh) + : "memory"); + + return int128_make128(oldl, oldh); +} + +static inline Int128 atomic16_fetch_or(Int128 *ptr, Int128 new) +{ + uint64_t newl = int128_getlo(new), newh = int128_gethi(new); + 
uint64_t oldl, oldh, tmpl, tmph; + uint32_t tmp; + + asm("0: ldaxp %[oldl], %[oldh], %[mem]\n\t" + "orr %[tmpl], %[oldl], %[newl]\n\t" + "orr %[tmph], %[oldh], %[newh]\n\t" + "stlxp %w[tmp], %[tmpl], %[tmph], %[mem]\n\t" + "cbnz %w[tmp], 0b" + : [mem] "+m"(*ptr), [tmp] "=&r"(tmp), + [oldl] "=&r"(oldl), [oldh] "=&r"(oldh), + [tmpl] "=&r"(tmpl), [tmph] "=&r"(tmph) + : [newl] "r"(newl), [newh] "r"(newh) + : "memory"); + + return int128_make128(oldl, oldh); +} + +# define CONFIG_CMPXCHG128 1 +# define HAVE_CMPXCHG128 1 +#endif + +#endif /* AARCH64_ATOMIC128_CAS_H */ diff --git a/host/include/generic/host/atomic128-cas.h.inc b/host/include/generic/host/atomic128-cas.h.inc index 6b40cc2271046..990162c56fe2b 100644 --- a/host/include/generic/host/atomic128-cas.h.inc +++ b/host/include/generic/host/atomic128-cas.h.inc @@ -23,6 +23,51 @@ atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) r.i = qatomic_cmpxchg__nocheck(ptr_align, c.i, n.i); return r.s; } + +/* + * Since we're looping anyway, use weak compare and swap. + * If the host supports weak, this will eliminate a second loop hidden + * within the atomic operation itself; otherwise the weak parameter is + * ignored.
+ */ +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_xchg(Int128 *ptr, Int128 new) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128 old = *ptr_align; + + while (!__atomic_compare_exchange_n(ptr_align, &old, new, true, + __ATOMIC_SEQ_CST, 0)) { + continue; + } + return old; +} + +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_fetch_and(Int128 *ptr, Int128 val) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128 old = *ptr_align; + + while (!__atomic_compare_exchange_n(ptr_align, &old, old & val, true, + __ATOMIC_SEQ_CST, 0)) { + continue; + } + return old; +} + +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_fetch_or(Int128 *ptr, Int128 val) +{ + __int128_t *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128 old = *ptr_align; + + while (!__atomic_compare_exchange_n(ptr_align, &old, old | val, true, + __ATOMIC_SEQ_CST, 0)) { + continue; + } + return old; +} # define HAVE_CMPXCHG128 1 #elif defined(CONFIG_CMPXCHG128) static inline Int128 ATTRIBUTE_ATOMIC128_OPT @@ -36,6 +81,57 @@ atomic16_cmpxchg(Int128 *ptr, Int128 cmp, Int128 new) r.i = __sync_val_compare_and_swap_16(ptr_align, c.i, n.i); return r.s; } + +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_xchg(Int128 *ptr, Int128 new) +{ + Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128Alias o, n; + + n.s = new; + o.s = *ptr_align; + while (1) { + __int128 c = __sync_val_compare_and_swap_16(ptr_align, o.i, n.i); + if (c == o.i) { + return o.s; + } + o.i = c; + } +} + +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_fetch_and(Int128 *ptr, Int128 val) +{ + Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128Alias o, v; + + v.s = val; + o.s = *ptr_align; + while (1) { + __int128 c = __sync_val_compare_and_swap_16(ptr_align, o.i, o.i & v.i); + if (c == o.i) { + return o.s; + } + o.i = c; + } +} + +static inline Int128 ATTRIBUTE_ATOMIC128_OPT +atomic16_fetch_or(Int128 *ptr, Int128 val) +{ + 
Int128Aligned *ptr_align = __builtin_assume_aligned(ptr, 16); + Int128Alias o, v; + + v.s = val; + o.s = *ptr_align; + while (1) { + __int128 c = __sync_val_compare_and_swap_16(ptr_align, o.i, o.i | v.i); + if (c == o.i) { + return o.s; + } + o.i = c; + } +} # define HAVE_CMPXCHG128 1 #else /* Fallback definition that must be optimized away, or error. */ diff --git a/host/include/i386/host/cpuinfo.h b/host/include/i386/host/cpuinfo.h index 9541a64da6121..93d029d499bbc 100644 --- a/host/include/i386/host/cpuinfo.h +++ b/host/include/i386/host/cpuinfo.h @@ -27,6 +27,7 @@ #define CPUINFO_ATOMIC_VMOVDQU (1u << 17) #define CPUINFO_AES (1u << 18) #define CPUINFO_PCLMUL (1u << 19) +#define CPUINFO_GFNI (1u << 20) /* Initialized with a constructor. */ extern unsigned cpuinfo; diff --git a/hw/9pfs/9p-synth.c b/hw/9pfs/9p-synth.c index 9cd1884224218..b3743f61696d5 100644 --- a/hw/9pfs/9p-synth.c +++ b/hw/9pfs/9p-synth.c @@ -451,7 +451,7 @@ static int synth_statfs(FsContext *s, V9fsPath *fs_path, stbuf->f_bsize = 512; stbuf->f_blocks = 0; stbuf->f_files = synth_node_count; -#ifndef CONFIG_DARWIN +#if !defined(CONFIG_DARWIN) && !defined(CONFIG_FREEBSD) stbuf->f_namelen = NAME_MAX; #endif return 0; diff --git a/hw/9pfs/9p-util-freebsd.c b/hw/9pfs/9p-util-freebsd.c new file mode 100644 index 0000000000000..9dd1d069f67d0 --- /dev/null +++ b/hw/9pfs/9p-util-freebsd.c @@ -0,0 +1,132 @@ +/* + * 9p utilities (FreeBSD Implementation) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +/* + * Not so fast! You might want to read the 9p developer docs first: + * https://wiki.qemu.org/Documentation/9p + */ + +#include "qemu/osdep.h" +#include "qemu/xattr.h" +#include "9p-util.h" + +static int mangle_xattr_name(const char **namep) +{ + const char *name = *namep; + + /* + * ZFS forbids attributes in starting with "user." or "system.". 
+ */ + if (strncmp(name, "system.", 7) == 0) { + *namep = name + 7; + return EXTATTR_NAMESPACE_SYSTEM; + } + if (strncmp(name, "user.", 5) == 0) { + *namep = name + 5; + } + return EXTATTR_NAMESPACE_USER; +} + +ssize_t fgetxattr(int fd, const char *name, void *value, size_t size) +{ + int namespace; + + namespace = mangle_xattr_name(&name); + return extattr_get_fd(fd, namespace, name, value, size); +} + +ssize_t fgetxattrat_nofollow(int dirfd, const char *filename, const char *name, + void *value, size_t size) +{ + ssize_t ret; + int fd, namespace; + + fd = openat_file(dirfd, filename, + O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + namespace = mangle_xattr_name(&name); + ret = extattr_get_fd(fd, namespace, name, value, size); + close_preserve_errno(fd); + return ret; +} + +ssize_t flistxattrat_nofollow(int dirfd, const char *filename, + char *list, size_t size) +{ + ssize_t ret; + int fd; + + fd = openat_file(dirfd, filename, + O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + ret = extattr_list_fd(fd, EXTATTR_NAMESPACE_USER, list, size); + close_preserve_errno(fd); + return ret; +} + +ssize_t fremovexattrat_nofollow(int dirfd, const char *filename, + const char *name) +{ + int fd, namespace, ret; + + fd = openat_file(dirfd, filename, + O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + namespace = mangle_xattr_name(&name); + ret = extattr_delete_fd(fd, namespace, name); + close_preserve_errno(fd); + return ret; +} + +int fsetxattrat_nofollow(int dirfd, const char *filename, const char *name, + void *value, size_t size, int flags) +{ + ssize_t ret; + int fd, namespace; + + namespace = mangle_xattr_name(&name); + if (flags == (XATTR_CREATE | XATTR_REPLACE)) { + errno = EINVAL; + return -1; + } + fd = openat_file(dirfd, filename, + O_RDONLY | O_PATH_9P_UTIL | O_NOFOLLOW, 0); + if (fd == -1) { + return -1; + } + if (flags & (XATTR_CREATE | XATTR_REPLACE)) { + ret = 
extattr_get_fd(fd, namespace, name, NULL, 0); + if (ret == -1 && errno != ENOATTR) { + close_preserve_errno(fd); + return -1; + } + if (ret >= 0 && (flags & XATTR_CREATE)) { + errno = EEXIST; + close_preserve_errno(fd); + return -1; + } + if (ret == -1 && (flags & XATTR_REPLACE)) { + errno = ENOATTR; + close_preserve_errno(fd); + return -1; + } + } + ret = extattr_set_fd(fd, namespace, name, value, size); + close_preserve_errno(fd); + return ret; +} + +int qemu_mknodat(int dirfd, const char *filename, mode_t mode, dev_t dev) +{ + return mknodat(dirfd, filename, mode, dev); +} diff --git a/hw/9pfs/9p-util-generic.c b/hw/9pfs/9p-util-generic.c index 4c1e9c887d00c..b71fa2cb37853 100644 --- a/hw/9pfs/9p-util-generic.c +++ b/hw/9pfs/9p-util-generic.c @@ -2,7 +2,6 @@ #include "qemu/osdep.h" #include "9p-util.h" -#include char *qemu_open_flags_tostr(int flags) { diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h index a1924fe3f05a8..8dfa803dc2c04 100644 --- a/hw/9pfs/9p-util.h +++ b/hw/9pfs/9p-util.h @@ -21,6 +21,15 @@ #define O_PATH_9P_UTIL 0 #endif +#ifdef CONFIG_FREEBSD +/* + * FreeBSD does not have these flags, so we can only emulate their intended + * behaviour (racily). + */ +#define XATTR_CREATE 0x1 +#define XATTR_REPLACE 0x2 +#endif + #if !defined(CONFIG_LINUX) /* @@ -64,9 +73,9 @@ static inline uint64_t host_dev_to_dotl_dev(dev_t dev) static inline int errno_to_dotl(int err) { #if defined(CONFIG_LINUX) /* nothing to translate (Linux -> Linux) */ -#elif defined(CONFIG_DARWIN) +#elif defined(CONFIG_DARWIN) || defined(CONFIG_FREEBSD) /* - * translation mandatory for macOS hosts + * translation mandatory for non-Linux hosts * * FIXME: Only most important errnos translated here yet, this should be * extended to as many errnos being translated as possible in future. 
@@ -155,13 +164,13 @@ static inline int openat_file(int dirfd, const char *name, int flags, { int fd, serrno, ret; -#ifndef CONFIG_DARWIN +#if !defined(CONFIG_DARWIN) && !defined(CONFIG_FREEBSD) again: #endif fd = qemu_openat(dirfd, name, flags | O_NOFOLLOW | O_NOCTTY | O_NONBLOCK, mode); if (fd == -1) { -#ifndef CONFIG_DARWIN +#if !defined(CONFIG_DARWIN) && !defined(CONFIG_FREEBSD) if (errno == EPERM && (flags & O_NOATIME)) { /* * The client passed O_NOATIME but we lack permissions to honor it. @@ -202,6 +211,9 @@ static inline int openat_file(int dirfd, const char *name, int flags, return fd; } +#ifdef CONFIG_FREEBSD +ssize_t fgetxattr(int dirfd, const char *name, void *value, size_t size); +#endif ssize_t fgetxattrat_nofollow(int dirfd, const char *path, const char *name, void *value, size_t size); int fsetxattrat_nofollow(int dirfd, const char *path, const char *name, diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index acfa7db4e19cd..bc4a016ee33a4 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -136,8 +136,10 @@ static int dotl_to_open_flags(int flags) { P9_DOTL_NONBLOCK, O_NONBLOCK } , { P9_DOTL_DSYNC, O_DSYNC }, { P9_DOTL_FASYNC, FASYNC }, -#ifndef CONFIG_DARWIN +#if !defined(CONFIG_DARWIN) && !defined(CONFIG_FREEBSD) { P9_DOTL_NOATIME, O_NOATIME }, +#endif +#ifndef CONFIG_DARWIN /* * On Darwin, we could map to F_NOCACHE, which is * similar, but doesn't quite have the same @@ -3658,7 +3660,7 @@ static int v9fs_fill_statfs(V9fsState *s, V9fsPDU *pdu, struct statfs *stbuf) f_bavail = stbuf->f_bavail / bsize_factor; f_files = stbuf->f_files; f_ffree = stbuf->f_ffree; -#ifdef CONFIG_DARWIN +#if defined(CONFIG_DARWIN) || defined(CONFIG_FREEBSD) fsid_val = (unsigned int)stbuf->f_fsid.val[0] | (unsigned long long)stbuf->f_fsid.val[1] << 32; f_namelen = NAME_MAX; @@ -4050,6 +4052,16 @@ static void coroutine_fn v9fs_xattrwalk(void *opaque) * Linux guests. 
*/ #define P9_XATTR_SIZE_MAX 65536 +#elif defined(CONFIG_FREEBSD) +/* + * FreeBSD similarly doesn't define a maximum xattr size, the limit is + * filesystem dependent. On UFS filesystems it's 2 times the filesystem block + * size, typically 32KB. On ZFS it depends on the value of the xattr property; + * with the default value there is no limit, and with xattr=sa it is 64KB. + * + * So, a limit of 64k seems reasonable here too. + */ +#define P9_XATTR_SIZE_MAX 65536 #else #error Missing definition for P9_XATTR_SIZE_MAX for this host system #endif diff --git a/hw/9pfs/meson.build b/hw/9pfs/meson.build index d35d4f44ffa3b..7f4d6e3a45177 100644 --- a/hw/9pfs/meson.build +++ b/hw/9pfs/meson.build @@ -15,6 +15,8 @@ fs_ss.add(files( )) if host_os == 'darwin' fs_ss.add(files('9p-util-darwin.c')) +elif host_os == 'freebsd' + fs_ss.add(files('9p-util-freebsd.c')) elif host_os == 'linux' fs_ss.add(files('9p-util-linux.c')) endif diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig index 1d4e9f0845c07..daabbe6cd11e2 100644 --- a/hw/acpi/Kconfig +++ b/hw/acpi/Kconfig @@ -51,6 +51,11 @@ config ACPI_APEI bool depends on ACPI +config GHES_CPER + bool + depends on ACPI_APEI + default y + config ACPI_PCI bool depends on ACPI && PCI diff --git a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index 1e685f982f311..2d5826a8f134a 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -2629,3 +2629,13 @@ Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source) return var; } + +/* ACPI 5.0b: 18.3.2.6.2 Event Notification For Generic Error Sources */ +Aml *aml_error_device(void) +{ + Aml *dev = aml_device(ACPI_APEI_ERROR_DEVICE); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0C33"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + return dev; +} diff --git a/hw/acpi/core.c b/hw/acpi/core.c index 58f8964e13090..ff1658280338a 100644 --- a/hw/acpi/core.c +++ b/hw/acpi/core.c @@ -547,6 +547,7 @@ void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn 
update_sci, ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar); memory_region_init_io(&ar->tmr.io, memory_region_owner(parent), &acpi_pm_tmr_ops, ar, "acpi-tmr", 4); + memory_region_enable_lockless_io(&ar->tmr.io); memory_region_add_subregion(parent, 8, &ar->tmr.io); } diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index 95682b79a2df9..e7b773d84d50d 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -30,6 +30,7 @@ static const uint32_t ged_supported_events[] = { ACPI_GED_NVDIMM_HOTPLUG_EVT, ACPI_GED_CPU_HOTPLUG_EVT, ACPI_GED_PCI_HOTPLUG_EVT, + ACPI_GED_ERROR_EVT, }; /* @@ -120,6 +121,16 @@ void build_ged_aml(Aml *table, const char *name, HotplugHandler *hotplug_dev, aml_notify(aml_name(ACPI_POWER_BUTTON_DEVICE), aml_int(0x80))); break; + case ACPI_GED_ERROR_EVT: + /* + * ACPI 5.0b: 5.6.6 Device Object Notifications + * Table 5-135 Error Device Notification Values + * Defines 0x80 as the value to be used on notifications + */ + aml_append(if_ctx, + aml_notify(aml_name(ACPI_APEI_ERROR_DEVICE), + aml_int(0x80))); + break; case ACPI_GED_NVDIMM_HOTPLUG_EVT: aml_append(if_ctx, aml_notify(aml_name("\\_SB.NVDR"), @@ -320,6 +331,8 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) sel = ACPI_GED_MEM_HOTPLUG_EVT; } else if (ev & ACPI_POWER_DOWN_STATUS) { sel = ACPI_GED_PWR_DOWN_EVT; + } else if (ev & ACPI_GENERIC_ERROR) { + sel = ACPI_GED_ERROR_EVT; } else if (ev & ACPI_NVDIMM_HOTPLUG_STATUS) { sel = ACPI_GED_NVDIMM_HOTPLUG_EVT; } else if (ev & ACPI_CPU_HOTPLUG_STATUS) { @@ -349,6 +362,8 @@ static const Property acpi_ged_properties[] = { pcihp_state.use_acpi_hotplug_bridge, 0), DEFINE_PROP_LINK("bus", AcpiGedState, pcihp_state.root, TYPE_PCI_BUS, PCIBus *), + DEFINE_PROP_BOOL("x-has-hest-addr", AcpiGedState, + ghes_state.use_hest_addr, true), }; static const VMStateDescription vmstate_memhp_state = { @@ -436,6 +451,34 @@ static const VMStateDescription 
vmstate_pcihp_state = { } }; +static const VMStateDescription vmstate_hest = { + .name = "acpi-hest", + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(hest_addr_le, AcpiGhesState), + VMSTATE_END_OF_LIST() + }, +}; + +static bool hest_needed(void *opaque) +{ + AcpiGedState *s = opaque; + return s->ghes_state.hest_addr_le; +} + +static const VMStateDescription vmstate_hest_state = { + .name = "acpi-ged/hest", + .version_id = 1, + .minimum_version_id = 1, + .needed = hest_needed, + .fields = (const VMStateField[]) { + VMSTATE_STRUCT(ghes_state, AcpiGedState, 1, + vmstate_hest, AcpiGhesState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_acpi_ged = { .name = "acpi-ged", .version_id = 1, @@ -449,6 +492,7 @@ static const VMStateDescription vmstate_acpi_ged = { &vmstate_cpuhp_state, &vmstate_ghes_state, &vmstate_pcihp_state, + &vmstate_hest_state, NULL } }; diff --git a/hw/acpi/ghes-stub.c b/hw/acpi/ghes-stub.c index 7cec1812dad95..40f660c246fe2 100644 --- a/hw/acpi/ghes-stub.c +++ b/hw/acpi/ghes-stub.c @@ -11,12 +11,13 @@ #include "qemu/osdep.h" #include "hw/acpi/ghes.h" -int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address) +int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id, + uint64_t physical_address) { return -1; } -bool acpi_ghes_present(void) +AcpiGhesState *acpi_ghes_get_state(void) { - return false; + return NULL; } diff --git a/hw/acpi/ghes.c b/hw/acpi/ghes.c index b85bb48195a0c..06555905cebbf 100644 --- a/hw/acpi/ghes.c +++ b/hw/acpi/ghes.c @@ -30,6 +30,7 @@ #define ACPI_HW_ERROR_FW_CFG_FILE "etc/hardware_errors" #define ACPI_HW_ERROR_ADDR_FW_CFG_FILE "etc/hardware_errors_addr" +#define ACPI_HEST_ADDR_FW_CFG_FILE "etc/acpi_table_hest_addr" /* The max size in bytes for one error block */ #define ACPI_GHES_MAX_RAW_DATA_LENGTH (1 * KiB) @@ -40,6 +41,12 @@ /* Address offset in Generic Address Structure(GAS) */ #define GAS_ADDR_OFFSET 4 +/* + * ACPI 
spec 1.0b + * 5.2.3 System Description Table Header + */ +#define ACPI_DESC_HEADER_OFFSET 36 + /* * The total size of Generic Error Data Entry * ACPI 6.1/6.2: 18.3.2.7.1 Generic Error Data, @@ -60,6 +67,30 @@ */ #define ACPI_GHES_GESB_SIZE 20 +/* + * See the memory layout map at docs/specs/acpi_hest_ghes.rst. + */ + +/* + * ACPI 6.1: 18.3.2.8 Generic Hardware Error Source version 2 + * Table 18-344 Generic Hardware Error Source version 2 (GHESv2) Structure + */ +#define HEST_GHES_V2_ENTRY_SIZE 92 + +/* + * ACPI 6.1: 18.3.2.8 Generic Hardware Error Source version 2 + * Table 18-344 Generic Hardware Error Source version 2 (GHESv2) Structure + * Read Ack Register + */ +#define GHES_READ_ACK_ADDR_OFF 64 + +/* + * ACPI 6.1: 18.3.2.7: Generic Hardware Error Source + * Table 18-341 Generic Hardware Error Source Structure + * Error Status Address + */ +#define GHES_ERR_STATUS_ADDR_OFF 20 + /* * Values for error_severity field */ @@ -206,17 +237,18 @@ ghes_gen_err_data_uncorrectable_recoverable(GArray *block, * Initialize "etc/hardware_errors" and "etc/hardware_errors_addr" fw_cfg blobs. * See docs/specs/acpi_hest_ghes.rst for blobs format. */ -static void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) +static void build_ghes_error_table(AcpiGhesState *ags, GArray *hardware_errors, + BIOSLinker *linker, int num_sources) { int i, error_status_block_offset; /* Build error_block_address */ - for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) { + for (i = 0; i < num_sources; i++) { build_append_int_noprefix(hardware_errors, 0, sizeof(uint64_t)); } /* Build read_ack_register */ - for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) { + for (i = 0; i < num_sources; i++) { /* * Initialize the value of read_ack_register to 1, so GHES can be * writable after (re)boot. 
@@ -231,13 +263,13 @@ static void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) /* Reserve space for Error Status Data Block */ acpi_data_push(hardware_errors, - ACPI_GHES_MAX_RAW_DATA_LENGTH * ACPI_GHES_ERROR_SOURCE_COUNT); + ACPI_GHES_MAX_RAW_DATA_LENGTH * num_sources); /* Tell guest firmware to place hardware_errors blob into RAM */ bios_linker_loader_alloc(linker, ACPI_HW_ERROR_FW_CFG_FILE, hardware_errors, sizeof(uint64_t), false); - for (i = 0; i < ACPI_GHES_ERROR_SOURCE_COUNT; i++) { + for (i = 0; i < num_sources; i++) { /* * Tell firmware to patch error_block_address entries to point to * corresponding "Generic Error Status Block" @@ -251,22 +283,26 @@ static void build_ghes_error_table(GArray *hardware_errors, BIOSLinker *linker) i * ACPI_GHES_MAX_RAW_DATA_LENGTH); } - /* - * tell firmware to write hardware_errors GPA into - * hardware_errors_addr fw_cfg, once the former has been initialized. - */ - bios_linker_loader_write_pointer(linker, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, 0, - sizeof(uint64_t), - ACPI_HW_ERROR_FW_CFG_FILE, 0); + if (!ags->use_hest_addr) { + /* + * Tell firmware to write hardware_errors GPA into + * hardware_errors_addr fw_cfg, once the former has been initialized. 
+ */ + bios_linker_loader_write_pointer(linker, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, + 0, sizeof(uint64_t), + ACPI_HW_ERROR_FW_CFG_FILE, 0); + } } /* Build Generic Hardware Error Source version 2 (GHESv2) */ -static void build_ghes_v2(GArray *table_data, - BIOSLinker *linker, - enum AcpiGhesNotifyType notify, - uint16_t source_id) +static void build_ghes_v2_entry(GArray *table_data, + BIOSLinker *linker, + const AcpiNotificationSourceId *notif_src, + uint16_t index, int num_sources) { uint64_t address_offset; + const uint16_t notify = notif_src->notify; + const uint16_t source_id = notif_src->source_id; /* * Type: @@ -297,7 +333,7 @@ static void build_ghes_v2(GArray *table_data, address_offset + GAS_ADDR_OFFSET, sizeof(uint64_t), ACPI_HW_ERROR_FW_CFG_FILE, - source_id * sizeof(uint64_t)); + index * sizeof(uint64_t)); /* Notification Structure */ build_ghes_hw_error_notification(table_data, notify); @@ -317,8 +353,7 @@ static void build_ghes_v2(GArray *table_data, address_offset + GAS_ADDR_OFFSET, sizeof(uint64_t), ACPI_HW_ERROR_FW_CFG_FILE, - (ACPI_GHES_ERROR_SOURCE_COUNT + source_id) - * sizeof(uint64_t)); + (num_sources + index) * sizeof(uint64_t)); /* * Read Ack Preserve field @@ -331,23 +366,42 @@ static void build_ghes_v2(GArray *table_data, } /* Build Hardware Error Source Table */ -void acpi_build_hest(GArray *table_data, GArray *hardware_errors, +void acpi_build_hest(AcpiGhesState *ags, GArray *table_data, + GArray *hardware_errors, BIOSLinker *linker, + const AcpiNotificationSourceId *notif_source, + int num_sources, const char *oem_id, const char *oem_table_id) { AcpiTable table = { .sig = "HEST", .rev = 1, .oem_id = oem_id, .oem_table_id = oem_table_id }; + uint32_t hest_offset; + int i; - build_ghes_error_table(hardware_errors, linker); + hest_offset = table_data->len; + + build_ghes_error_table(ags, hardware_errors, linker, num_sources); acpi_table_begin(&table, table_data); /* Error Source Count */ - build_append_int_noprefix(table_data, 
ACPI_GHES_ERROR_SOURCE_COUNT, 4); - build_ghes_v2(table_data, linker, - ACPI_GHES_NOTIFY_SEA, ACPI_HEST_SRC_ID_SEA); + build_append_int_noprefix(table_data, num_sources, 4); + for (i = 0; i < num_sources; i++) { + build_ghes_v2_entry(table_data, linker, ¬if_source[i], i, num_sources); + } acpi_table_end(linker, &table); + + if (ags->use_hest_addr) { + /* + * Tell firmware to write into GPA the address of HEST via fw_cfg, + * once initialized. + */ + bios_linker_loader_write_pointer(linker, + ACPI_HEST_ADDR_FW_CFG_FILE, 0, + sizeof(uint64_t), + ACPI_BUILD_TABLE_FILE, hest_offset); + } } void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s, @@ -357,21 +411,20 @@ void acpi_ghes_add_fw_cfg(AcpiGhesState *ags, FWCfgState *s, fw_cfg_add_file(s, ACPI_HW_ERROR_FW_CFG_FILE, hardware_error->data, hardware_error->len); - /* Create a read-write fw_cfg file for Address */ - fw_cfg_add_file_callback(s, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, NULL, NULL, - NULL, &(ags->hw_error_le), sizeof(ags->hw_error_le), false); - - ags->present = true; + if (ags->use_hest_addr) { + fw_cfg_add_file_callback(s, ACPI_HEST_ADDR_FW_CFG_FILE, NULL, NULL, + NULL, &(ags->hest_addr_le), sizeof(ags->hest_addr_le), false); + } else { + /* Create a read-write fw_cfg file for Address */ + fw_cfg_add_file_callback(s, ACPI_HW_ERROR_ADDR_FW_CFG_FILE, NULL, NULL, + NULL, &(ags->hw_error_le), sizeof(ags->hw_error_le), false); + } } static void get_hw_error_offsets(uint64_t ghes_addr, uint64_t *cper_addr, uint64_t *read_ack_register_addr) { - if (!ghes_addr) { - return; - } - /* * non-HEST version supports only one source, so no need to change * the start offset based on the source ID. 
Also, we can't validate @@ -390,35 +443,94 @@ static void get_hw_error_offsets(uint64_t ghes_addr, *read_ack_register_addr = ghes_addr + sizeof(uint64_t); } -static void ghes_record_cper_errors(const void *cper, size_t len, - uint16_t source_id, Error **errp) +static void get_ghes_source_offsets(uint16_t source_id, + uint64_t hest_addr, + uint64_t *cper_addr, + uint64_t *read_ack_start_addr, + Error **errp) { - uint64_t cper_addr = 0, read_ack_register_addr = 0, read_ack_register; - AcpiGedState *acpi_ged_state; - AcpiGhesState *ags; + uint64_t hest_err_block_addr, hest_read_ack_addr; + uint64_t err_source_entry, error_block_addr; + uint32_t num_sources, i; - if (len > ACPI_GHES_MAX_RAW_DATA_LENGTH) { - error_setg(errp, "GHES CPER record is too big: %zd", len); - return; - } + hest_addr += ACPI_DESC_HEADER_OFFSET; - acpi_ged_state = ACPI_GED(object_resolve_path_type("", TYPE_ACPI_GED, - NULL)); - if (!acpi_ged_state) { - error_setg(errp, "Can't find ACPI_GED object"); + cpu_physical_memory_read(hest_addr, &num_sources, + sizeof(num_sources)); + num_sources = le32_to_cpu(num_sources); + + err_source_entry = hest_addr + sizeof(num_sources); + + /* + * Currently, HEST Error source navigates only for GHESv2 tables + */ + for (i = 0; i < num_sources; i++) { + uint64_t addr = err_source_entry; + uint16_t type, src_id; + + cpu_physical_memory_read(addr, &type, sizeof(type)); + type = le16_to_cpu(type); + + /* For now, we only know the size of GHESv2 table */ + if (type != ACPI_GHES_SOURCE_GENERIC_ERROR_V2) { + error_setg(errp, "HEST: type %d not supported.", type); + return; + } + + /* Compare CPER source ID at the GHESv2 structure */ + addr += sizeof(type); + cpu_physical_memory_read(addr, &src_id, sizeof(src_id)); + if (le16_to_cpu(src_id) == source_id) { + break; + } + + err_source_entry += HEST_GHES_V2_ENTRY_SIZE; + } + if (i == num_sources) { + error_setg(errp, "HEST: Source %d not found.", source_id); return; } - ags = &acpi_ged_state->ghes_state; - 
assert(ACPI_GHES_ERROR_SOURCE_COUNT == 1); - get_hw_error_offsets(le64_to_cpu(ags->hw_error_le), - &cper_addr, &read_ack_register_addr); + /* Navigate through table address pointers */ + hest_err_block_addr = err_source_entry + GHES_ERR_STATUS_ADDR_OFF + + GAS_ADDR_OFFSET; + + cpu_physical_memory_read(hest_err_block_addr, &error_block_addr, + sizeof(error_block_addr)); + error_block_addr = le64_to_cpu(error_block_addr); - if (!cper_addr) { - error_setg(errp, "can not find Generic Error Status Block"); + cpu_physical_memory_read(error_block_addr, cper_addr, + sizeof(*cper_addr)); + *cper_addr = le64_to_cpu(*cper_addr); + + hest_read_ack_addr = err_source_entry + GHES_READ_ACK_ADDR_OFF + + GAS_ADDR_OFFSET; + cpu_physical_memory_read(hest_read_ack_addr, read_ack_start_addr, + sizeof(*read_ack_start_addr)); + *read_ack_start_addr = le64_to_cpu(*read_ack_start_addr); +} + +NotifierList acpi_generic_error_notifiers = + NOTIFIER_LIST_INITIALIZER(acpi_generic_error_notifiers); + +void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len, + uint16_t source_id, Error **errp) +{ + uint64_t cper_addr = 0, read_ack_register_addr = 0, read_ack_register; + + if (len > ACPI_GHES_MAX_RAW_DATA_LENGTH) { + error_setg(errp, "GHES CPER record is too big: %zd", len); return; } + if (!ags->use_hest_addr) { + get_hw_error_offsets(le64_to_cpu(ags->hw_error_le), + &cper_addr, &read_ack_register_addr); + } else { + get_ghes_source_offsets(source_id, le64_to_cpu(ags->hest_addr_le), + &cper_addr, &read_ack_register_addr, errp); + } + cpu_physical_memory_read(read_ack_register_addr, &read_ack_register, sizeof(read_ack_register)); @@ -440,9 +552,12 @@ static void ghes_record_cper_errors(const void *cper, size_t len, /* Write the generic error data entry into guest memory */ cpu_physical_memory_write(cper_addr, cper, len); + + notifier_list_notify(&acpi_generic_error_notifiers, &source_id); } -int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address) +int 
acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id, + uint64_t physical_address) { /* Memory Error Section Type */ const uint8_t guid[] = @@ -468,7 +583,7 @@ int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address) acpi_ghes_build_append_mem_cper(block, physical_address); /* Report the error */ - ghes_record_cper_errors(block->data, block->len, source_id, &errp); + ghes_record_cper_errors(ags, block->data, block->len, source_id, &errp); g_array_free(block, true); @@ -480,7 +595,7 @@ int acpi_ghes_memory_errors(uint16_t source_id, uint64_t physical_address) return 0; } -bool acpi_ghes_present(void) +AcpiGhesState *acpi_ghes_get_state(void) { AcpiGedState *acpi_ged_state; AcpiGhesState *ags; @@ -489,8 +604,12 @@ bool acpi_ghes_present(void) NULL)); if (!acpi_ged_state) { - return false; + return NULL; } ags = &acpi_ged_state->ghes_state; - return ags->present; + + if (!ags->hw_error_le && !ags->hest_addr_le) { + return NULL; + } + return ags; } diff --git a/hw/acpi/ghes_cper.c b/hw/acpi/ghes_cper.c new file mode 100644 index 0000000000000..31cb2ffabeb48 --- /dev/null +++ b/hw/acpi/ghes_cper.c @@ -0,0 +1,40 @@ +/* + * CPER payload parser for error injection + * + * Copyright(C) 2024-2025 Huawei LTD. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" + +#include "qemu/base64.h" +#include "qemu/error-report.h" +#include "qemu/uuid.h" +#include "qapi/qapi-commands-acpi-hest.h" +#include "hw/acpi/ghes.h" + +void qmp_inject_ghes_v2_error(const char *qmp_cper, Error **errp) +{ + AcpiGhesState *ags; + uint8_t *cper; + size_t len; + + ags = acpi_ghes_get_state(); + if (!ags) { + return; + } + + cper = qbase64_decode(qmp_cper, -1, &len, errp); + if (!cper) { + error_setg(errp, "missing GHES CPER payload"); + return; + } + + ghes_record_cper_errors(ags, cper, len, ACPI_HEST_SRC_ID_QMP, errp); + + g_free(cper); +} diff --git a/hw/acpi/ghes_cper_stub.c b/hw/acpi/ghes_cper_stub.c new file mode 100644 index 0000000000000..b16be73502dbe --- /dev/null +++ b/hw/acpi/ghes_cper_stub.c @@ -0,0 +1,20 @@ +/* + * Stub interface for CPER payload parser for error injection + * + * Copyright(C) 2024-2025 Huawei LTD. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qapi-commands-acpi-hest.h" +#include "hw/acpi/ghes.h" + +void qmp_inject_ghes_v2_error(const char *cper, Error **errp) +{ + error_setg(errp, "GHES QMP error inject is not compiled in"); +} diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index 73f02b96912ba..56b5d1ec9691b 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -34,4 +34,6 @@ endif system_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c', 'acpi_interface.c')) system_ss.add(when: 'CONFIG_ACPI_PCI_BRIDGE', if_false: files('pci-bridge-stub.c')) system_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) +system_ss.add(when: 'CONFIG_GHES_CPER', if_true: files('ghes_cper.c')) +system_ss.add(when: 'CONFIG_GHES_CPER', if_false: files('ghes_cper_stub.c')) system_ss.add(files('acpi-qmp-cmds.c')) diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 2aa4b5d77864f..b44b85f436121 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -5,9 +5,6 @@ config ARM_VIRT depends on TCG || KVM || HVF imply PCI_DEVICES imply TEST_DEVICES - imply VFIO_AMD_XGBE - imply VFIO_PLATFORM - imply VFIO_XGMAC imply TPM_TIS_SYSBUS imply TPM_TIS_I2C imply NVDIMM @@ -544,6 +541,7 @@ config ASPEED_SOC bool default y depends on TCG && ARM + imply PCI_DEVICES select DS1338 select FTGMAC100 select I2C @@ -564,6 +562,8 @@ config ASPEED_SOC select MAX31785 select FSI_APB2OPB_ASPEED select AT24C + select PCI_EXPRESS + select PCI_EXPRESS_ASPEED config MPS2 bool diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c index c31bbe7701381..1bc9e534ba49b 100644 --- a/hw/arm/aspeed.c +++ b/hw/arm/aspeed.c @@ -26,9 +26,7 @@ #include "hw/qdev-properties.h" #include "system/block-backend.h" #include "system/reset.h" -#include "hw/loader.h" #include "qemu/error-report.h" -#include "qemu/datadir.h" #include "qemu/units.h" #include "hw/qdev-clock.h" #include "system/system.h" @@ -263,102 +261,6 
@@ static void aspeed_reset_secondary(ARMCPU *cpu, cpu_set_pc(cs, info->smp_loader_start); } -static void write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, - Error **errp) -{ - g_autofree void *storage = NULL; - int64_t size; - - /* - * The block backend size should have already been 'validated' by - * the creation of the m25p80 object. - */ - size = blk_getlength(blk); - if (size <= 0) { - error_setg(errp, "failed to get flash size"); - return; - } - - if (rom_size > size) { - rom_size = size; - } - - storage = g_malloc0(rom_size); - if (blk_pread(blk, 0, rom_size, storage, 0) < 0) { - error_setg(errp, "failed to read the initial flash content"); - return; - } - - rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr); -} - -/* - * Create a ROM and copy the flash contents at the expected address - * (0x0). Boots faster than execute-in-place. - */ -static void aspeed_install_boot_rom(AspeedMachineState *bmc, BlockBackend *blk, - uint64_t rom_size) -{ - AspeedSoCState *soc = bmc->soc; - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(soc); - - memory_region_init_rom(&bmc->boot_rom, NULL, "aspeed.boot_rom", rom_size, - &error_abort); - memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, - &bmc->boot_rom, 1); - write_boot_rom(blk, sc->memmap[ASPEED_DEV_SPI_BOOT], - rom_size, &error_abort); -} - -#define VBOOTROM_FILE_NAME "ast27x0_bootrom.bin" - -/* - * This function locates the vbootrom image file specified via the command line - * using the -bios option. It loads the specified image into the vbootrom - * memory region and handles errors if the file cannot be found or loaded. 
- */ -static void aspeed_load_vbootrom(AspeedMachineState *bmc, const char *bios_name, - Error **errp) -{ - g_autofree char *filename = NULL; - AspeedSoCState *soc = bmc->soc; - int ret; - - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); - if (!filename) { - error_setg(errp, "Could not find vbootrom image '%s'", bios_name); - return; - } - - ret = load_image_mr(filename, &soc->vbootrom); - if (ret < 0) { - error_setg(errp, "Failed to load vbootrom image '%s'", bios_name); - return; - } -} - -void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, - unsigned int count, int unit0) -{ - int i; - - if (!flashtype) { - return; - } - - for (i = 0; i < count; ++i) { - DriveInfo *dinfo = drive_get(IF_MTD, 0, unit0 + i); - DeviceState *dev; - - dev = qdev_new(flashtype); - if (dinfo) { - qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo)); - } - qdev_prop_set_uint8(dev, "cs", i); - qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); - } -} - static void sdhci_attach_drive(SDHCIState *sdhci, DriveInfo *dinfo, bool emmc, bool boot_emmc) { @@ -397,12 +299,14 @@ static void connect_serial_hds_to_uarts(AspeedMachineState *bmc) AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); int uart_chosen = bmc->uart_chosen ? 
bmc->uart_chosen : amc->uart_default; - aspeed_soc_uart_set_chr(s, uart_chosen, serial_hd(0)); + aspeed_soc_uart_set_chr(s->uart, uart_chosen, sc->uarts_base, + sc->uarts_num, serial_hd(0)); for (int i = 1, uart = sc->uarts_base; i < sc->uarts_num; uart++) { if (uart == uart_chosen) { continue; } - aspeed_soc_uart_set_chr(s, uart, serial_hd(i++)); + aspeed_soc_uart_set_chr(s->uart, uart, sc->uarts_base, sc->uarts_num, + serial_hd(i++)); } } @@ -511,15 +415,16 @@ static void aspeed_machine_init(MachineState *machine) if (fmc0 && !boot_emmc) { uint64_t rom_size = memory_region_size(&bmc->soc->spi_boot); - aspeed_install_boot_rom(bmc, fmc0, rom_size); + aspeed_install_boot_rom(bmc->soc, fmc0, &bmc->boot_rom, rom_size); } else if (emmc0) { - aspeed_install_boot_rom(bmc, blk_by_legacy_dinfo(emmc0), 64 * KiB); + aspeed_install_boot_rom(bmc->soc, blk_by_legacy_dinfo(emmc0), + &bmc->boot_rom, 64 * KiB); } } if (amc->vbootrom) { bios_name = machine->firmware ?: VBOOTROM_FILE_NAME; - aspeed_load_vbootrom(bmc, bios_name, &error_abort); + aspeed_load_vbootrom(bmc->soc, bios_name, &error_abort); } arm_load_kernel(ARM_CPU(first_cpu), machine, &aspeed_board_binfo); @@ -1407,8 +1312,8 @@ static void aspeed_set_bmc_console(Object *obj, const char *value, Error **errp) AspeedMachineClass *amc = ASPEED_MACHINE_GET_CLASS(bmc); AspeedSoCClass *sc = ASPEED_SOC_CLASS(object_class_by_name(amc->soc_name)); int val; - int uart_first = aspeed_uart_first(sc); - int uart_last = aspeed_uart_last(sc); + int uart_first = aspeed_uart_first(sc->uarts_base); + int uart_last = aspeed_uart_last(sc->uarts_base, sc->uarts_num); if (sscanf(value, "uart%u", &val) != 1) { error_setg(errp, "Bad value for \"uart\" property"); @@ -1515,7 +1420,6 @@ static void aspeed_machine_palmetto_class_init(ObjectClass *oc, amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = palmetto_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 256 * MiB; aspeed_machine_class_init_cpus_defaults(mc); 
}; @@ -1533,7 +1437,6 @@ static void aspeed_machine_quanta_q71l_class_init(ObjectClass *oc, amc->spi_model = "mx25l25635e"; amc->num_cs = 1; amc->i2c_init = quanta_q71l_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 128 * MiB; aspeed_machine_class_init_cpus_defaults(mc); } @@ -1552,7 +1455,6 @@ static void aspeed_machine_supermicrox11_bmc_class_init(ObjectClass *oc, amc->num_cs = 1; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = palmetto_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 256 * MiB; aspeed_machine_class_init_cpus_defaults(mc); } @@ -1571,7 +1473,6 @@ static void aspeed_machine_supermicro_x11spi_bmc_class_init(ObjectClass *oc, amc->num_cs = 1; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = palmetto_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); } @@ -1589,7 +1490,6 @@ static void aspeed_machine_ast2500_evb_class_init(ObjectClass *oc, amc->spi_model = "mx25l25635f"; amc->num_cs = 1; amc->i2c_init = ast2500_evb_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1608,7 +1508,6 @@ static void aspeed_machine_yosemitev2_class_init(ObjectClass *oc, amc->spi_model = "mx25l25635e"; amc->num_cs = 2; amc->i2c_init = yosemitev2_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1626,7 +1525,6 @@ static void aspeed_machine_romulus_class_init(ObjectClass *oc, amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = romulus_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1645,7 +1543,6 @@ static void aspeed_machine_tiogapass_class_init(ObjectClass *oc, amc->spi_model = "mx25l25635e"; amc->num_cs = 2; amc->i2c_init = tiogapass_bmc_i2c_init; - 
mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1657,13 +1554,13 @@ static void aspeed_machine_sonorapass_class_init(ObjectClass *oc, AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "OCP SonoraPass BMC (ARM1176)"; + mc->deprecation_reason = "use 'ast2500-evb' instead"; amc->soc_name = "ast2500-a1"; amc->hw_strap1 = SONORAPASS_BMC_HW_STRAP1; amc->fmc_model = "mx66l1g45g"; amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = sonorapass_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1681,7 +1578,6 @@ static void aspeed_machine_witherspoon_class_init(ObjectClass *oc, amc->spi_model = "mx66l1g45g"; amc->num_cs = 2; amc->i2c_init = witherspoon_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1696,14 +1592,13 @@ static void aspeed_machine_ast2600_evb_class_init(ObjectClass *oc, amc->soc_name = "ast2600-a3"; amc->hw_strap1 = AST2600_EVB_HW_STRAP1; amc->hw_strap2 = AST2600_EVB_HW_STRAP2; - amc->fmc_model = "mx66u51235f"; - amc->spi_model = "mx66u51235f"; + amc->fmc_model = "w25q512jv"; + amc->spi_model = "w25q512jv"; amc->num_cs = 1; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON | ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->sdhci_wp_inverted = true; amc->i2c_init = ast2600_evb_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); aspeed_machine_ast2600_class_emmc_init(oc); @@ -1722,7 +1617,6 @@ static void aspeed_machine_g220a_class_init(ObjectClass *oc, const void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = g220a_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 1024 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1734,6 +1628,7 @@ static void 
aspeed_machine_fp5280g2_class_init(ObjectClass *oc, AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "Inspur FP5280G2 BMC (ARM1176)"; + mc->deprecation_reason = "use 'ast2500-evb' instead"; amc->soc_name = "ast2500-a1"; amc->hw_strap1 = FP5280G2_BMC_HW_STRAP1; amc->fmc_model = "n25q512a"; @@ -1741,7 +1636,6 @@ static void aspeed_machine_fp5280g2_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC0_ON | ASPEED_MAC1_ON; amc->i2c_init = fp5280g2_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 512 * MiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1760,7 +1654,6 @@ static void aspeed_machine_rainier_class_init(ObjectClass *oc, const void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = rainier_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); aspeed_machine_ast2600_class_emmc_init(oc); @@ -1783,7 +1676,6 @@ static void aspeed_machine_fuji_class_init(ObjectClass *oc, const void *data) amc->macs_mask = ASPEED_MAC3_ON; amc->i2c_init = fuji_bmc_i2c_init; amc->uart_default = ASPEED_DEV_UART1; - mc->auto_create_sdcard = true; mc->default_ram_size = FUJI_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -1805,7 +1697,6 @@ static void aspeed_machine_bletchley_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON; amc->i2c_init = bletchley_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = BLETCHLEY_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); } @@ -1825,7 +1716,6 @@ static void aspeed_machine_catalina_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON; amc->i2c_init = catalina_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = CATALINA_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); aspeed_machine_ast2600_class_emmc_init(oc); @@ -1893,7 +1783,6 @@ static void 
aspeed_machine_fby35_class_init(ObjectClass *oc, const void *data) amc->num_cs = 2; amc->macs_mask = ASPEED_MAC3_ON; amc->i2c_init = fby35_i2c_init; - mc->auto_create_sdcard = true; /* FIXME: Replace this macro with something more general */ mc->default_ram_size = FUJI_BMC_RAM_SIZE; aspeed_machine_class_init_cpus_defaults(mc); @@ -1995,7 +1884,6 @@ static void aspeed_machine_ast2700a0_evb_class_init(ObjectClass *oc, MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); - mc->alias = "ast2700-evb"; mc->desc = "Aspeed AST2700 A0 EVB (Cortex-A35)"; amc->soc_name = "ast2700-a0"; amc->hw_strap1 = AST2700_EVB_HW_STRAP1; @@ -2007,7 +1895,6 @@ static void aspeed_machine_ast2700a0_evb_class_init(ObjectClass *oc, amc->uart_default = ASPEED_DEV_UART12; amc->i2c_init = ast2700_evb_i2c_init; amc->vbootrom = true; - mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); } @@ -2018,6 +1905,7 @@ static void aspeed_machine_ast2700a1_evb_class_init(ObjectClass *oc, MachineClass *mc = MACHINE_CLASS(oc); AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); + mc->alias = "ast2700-evb"; mc->desc = "Aspeed AST2700 A1 EVB (Cortex-A35)"; amc->soc_name = "ast2700-a1"; amc->hw_strap1 = AST2700_EVB_HW_STRAP1; @@ -2029,7 +1917,6 @@ static void aspeed_machine_ast2700a1_evb_class_init(ObjectClass *oc, amc->uart_default = ASPEED_DEV_UART12; amc->i2c_init = ast2700_evb_i2c_init; amc->vbootrom = true; - mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); } @@ -2042,6 +1929,7 @@ static void aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "Qualcomm DC-SCM V1 BMC (Cortex A7)"; + mc->deprecation_reason = "use 'ast2600-evb' instead"; amc->soc_name = "ast2600-a3"; amc->hw_strap1 = QCOM_DC_SCM_V1_BMC_HW_STRAP1; amc->hw_strap2 = QCOM_DC_SCM_V1_BMC_HW_STRAP2; @@ -2050,7 +1938,6 @@ static void 
aspeed_machine_qcom_dc_scm_v1_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = qcom_dc_scm_bmc_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); }; @@ -2062,6 +1949,7 @@ static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, AspeedMachineClass *amc = ASPEED_MACHINE_CLASS(oc); mc->desc = "Qualcomm DC-SCM V1/Firework BMC (Cortex A7)"; + mc->deprecation_reason = "use 'ast2600-evb' instead"; amc->soc_name = "ast2600-a3"; amc->hw_strap1 = QCOM_DC_SCM_V1_BMC_HW_STRAP1; amc->hw_strap2 = QCOM_DC_SCM_V1_BMC_HW_STRAP2; @@ -2070,7 +1958,6 @@ static void aspeed_machine_qcom_firework_class_init(ObjectClass *oc, amc->num_cs = 2; amc->macs_mask = ASPEED_MAC2_ON | ASPEED_MAC3_ON; amc->i2c_init = qcom_dc_scm_firework_i2c_init; - mc->auto_create_sdcard = true; mc->default_ram_size = 1 * GiB; aspeed_machine_class_init_cpus_defaults(mc); }; diff --git a/hw/arm/aspeed_ast10x0.c b/hw/arm/aspeed_ast10x0.c index e6e1ee63c1c4e..7f49c13391be0 100644 --- a/hw/arm/aspeed_ast10x0.c +++ b/hw/arm/aspeed_ast10x0.c @@ -154,7 +154,7 @@ static void aspeed_soc_ast1030_init(Object *obj) object_initialize_child(obj, "peci", &s->peci, TYPE_ASPEED_PECI); - object_initialize_child(obj, "sbc", &s->sbc, TYPE_ASPEED_SBC); + object_initialize_child(obj, "sbc", &s->sbc, TYPE_ASPEED_AST10X0_SBC); for (i = 0; i < sc->wdts_num; i++) { snprintf(typename, sizeof(typename), "aspeed.wdt-%s", socname); @@ -192,6 +192,7 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); DeviceState *armv7m; Error *err = NULL; + int uart; int i; g_autofree char *sram_name = NULL; @@ -201,17 +202,20 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) } /* General I/O memory space to catch all unimplemented device */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", + 
aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->iomem), + "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], ASPEED_SOC_IOMEM_SIZE); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->sbc_unimplemented), + aspeed_mmio_map_unimplemented(s->memory, + SYS_BUS_DEVICE(&s->sbc_unimplemented), "aspeed.sbc", sc->memmap[ASPEED_DEV_SBC], 0x40000); /* AST1030 CPU Core */ armv7m = DEVICE(&a->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 256); - qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc)); + qdev_prop_set_string(armv7m, "cpu-type", + aspeed_soc_cpu_type(sc->valid_cpu_types)); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); object_property_set_link(OBJECT(&a->armv7m), "memory", OBJECT(s->memory), &error_abort); @@ -241,7 +245,8 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->scu), 0, + sc->memmap[ASPEED_DEV_SCU]); /* I2C */ @@ -250,7 +255,8 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->i2c), 0, + sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { qemu_irq irq = qdev_get_gpio_in(DEVICE(&a->armv7m), sc->irqmap[ASPEED_DEV_I2C] + i); @@ -262,7 +268,8 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i3c), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->i3c), 0, + sc->memmap[ASPEED_DEV_I3C]); for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) { qemu_irq irq = qdev_get_gpio_in(DEVICE(&a->armv7m), 
sc->irqmap[ASPEED_DEV_I3C] + i); @@ -274,20 +281,21 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->peci), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->peci), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->peci), 0, sc->memmap[ASPEED_DEV_PECI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->peci), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_PECI)); + aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_PECI)); /* LPC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->lpc), 0, + sc->memmap[ASPEED_DEV_LPC]); /* Connect the LPC IRQ to the GIC. It is otherwise unused. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_LPC)); + aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_LPC)); /* * On the AST1030 LPC subdevice IRQs are connected straight to the GIC. @@ -309,8 +317,13 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) sc->irqmap[ASPEED_DEV_KCS] + aspeed_lpc_kcs_4)); /* UART */ - if (!aspeed_soc_uart_realize(s, errp)) { - return; + for (i = 0, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { + if (!aspeed_soc_uart_realize(s->memory, &s->uart[i], + sc->memmap[uart], errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + aspeed_soc_ast1030_get_irq(s, uart)); } /* Timer */ @@ -319,10 +332,10 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { - qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + qemu_irq irq = aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_TIMER1 + i); 
sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } @@ -330,9 +343,10 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->adc), 0, + sc->memmap[ASPEED_DEV_ADC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_ADC)); /* FMC, The number of CS is set at the board level */ object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(&s->sram), @@ -340,11 +354,12 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 0, + sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 1, ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_FMC)); /* SPI */ for (i = 0; i < sc->spis_num; i++) { @@ -353,9 +368,9 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 1, ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } @@ -363,7 +378,8 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sbc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sbc), 
0, sc->memmap[ASPEED_DEV_SBC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sbc), 0, + sc->memmap[ASPEED_DEV_SBC]); /* HACE */ object_property_set_link(OBJECT(&s->hace), "dram", OBJECT(&s->sram), @@ -371,10 +387,10 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); + aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_HACE)); /* Watch dog */ for (i = 0; i < sc->wdts_num; i++) { @@ -386,32 +402,38 @@ static void aspeed_soc_ast1030_realize(DeviceState *dev_soc, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); } /* GPIO */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + aspeed_soc_ast1030_get_irq(s, ASPEED_DEV_GPIO)); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->pwm), "aspeed.pwm", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->pwm), + "aspeed.pwm", sc->memmap[ASPEED_DEV_PWM], 0x100); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->espi), "aspeed.espi", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->espi), + "aspeed.espi", sc->memmap[ASPEED_DEV_ESPI], 0x800); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->udc), "aspeed.udc", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->udc), + "aspeed.udc", sc->memmap[ASPEED_DEV_UDC], 0x1000); - aspeed_mmio_map_unimplemented(s, 
SYS_BUS_DEVICE(&s->sgpiom), "aspeed.sgpiom", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->sgpiom), + "aspeed.sgpiom", sc->memmap[ASPEED_DEV_SGPIOM], 0x100); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->jtag[0]), "aspeed.jtag", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->jtag[0]), + "aspeed.jtag", sc->memmap[ASPEED_DEV_JTAG0], 0x20); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->jtag[1]), "aspeed.jtag", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->jtag[1]), + "aspeed.jtag", sc->memmap[ASPEED_DEV_JTAG1], 0x20); } @@ -441,7 +463,6 @@ static void aspeed_soc_ast1030_class_init(ObjectClass *klass, const void *data) sc->irqmap = aspeed_soc_ast1030_irqmap; sc->memmap = aspeed_soc_ast1030_memmap; sc->num_cpus = 1; - sc->get_irq = aspeed_soc_ast1030_get_irq; } static const TypeInfo aspeed_soc_ast10x0_types[] = { diff --git a/hw/arm/aspeed_ast2400.c b/hw/arm/aspeed_ast2400.c index c7b0f21887b5d..b1b826b7e0b13 100644 --- a/hw/arm/aspeed_ast2400.c +++ b/hw/arm/aspeed_ast2400.c @@ -157,7 +157,7 @@ static void aspeed_ast2400_soc_init(Object *obj) for (i = 0; i < sc->num_cpus; i++) { object_initialize_child(obj, "cpu[*]", &a->cpu[i], - aspeed_soc_cpu_type(sc)); + aspeed_soc_cpu_type(sc->valid_cpu_types)); } snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); @@ -251,6 +251,7 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) AspeedSoCState *s = ASPEED_SOC(dev); AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); g_autofree char *sram_name = NULL; + int uart; /* Default boot region (SPI memory or ROMs) */ memory_region_init(&s->spi_boot_container, OBJECT(s), @@ -259,12 +260,14 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) &s->spi_boot_container); /* IO space */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->iomem), + "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], 
ASPEED_SOC_IOMEM_SIZE); /* Video engine stub */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->video), "aspeed.video", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->video), + "aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], 0x1000); /* CPU */ @@ -289,13 +292,15 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->scu), 0, + sc->memmap[ASPEED_DEV_SCU]); /* VIC */ if (!sysbus_realize(SYS_BUS_DEVICE(&a->vic), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->vic), 0, sc->memmap[ASPEED_DEV_VIC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->vic), 0, + sc->memmap[ASPEED_DEV_VIC]); sysbus_connect_irq(SYS_BUS_DEVICE(&a->vic), 0, qdev_get_gpio_in(DEVICE(&a->cpu), ARM_CPU_IRQ)); sysbus_connect_irq(SYS_BUS_DEVICE(&a->vic), 1, @@ -305,9 +310,10 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->rtc), 0, + sc->memmap[ASPEED_DEV_RTC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_RTC)); /* Timer */ object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), @@ -315,10 +321,10 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { - qemu_irq irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + qemu_irq irq = aspeed_soc_ast2400_get_irq(s, 
ASPEED_DEV_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } @@ -326,13 +332,19 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->adc), 0, + sc->memmap[ASPEED_DEV_ADC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_ADC)); /* UART */ - if (!aspeed_soc_uart_realize(s, errp)) { - return; + for (i = 0, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { + if (!aspeed_soc_uart_realize(s->memory, &s->uart[i], + sc->memmap[uart], errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + aspeed_soc_ast2400_get_irq(s, uart)); } /* I2C */ @@ -341,18 +353,19 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->i2c), 0, + sc->memmap[ASPEED_DEV_I2C]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->i2c), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_I2C)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_I2C)); /* PECI */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->peci), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->peci), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->peci), 0, sc->memmap[ASPEED_DEV_PECI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->peci), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_PECI)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_PECI)); /* FMC, The number of CS is set at the board level */ object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), @@ -360,11 +373,12 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } - 
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 0, + sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 1, ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_FMC)); /* Set up an alias on the FMC CE0 region (boot default) */ MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; @@ -377,9 +391,9 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 1, ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } @@ -388,17 +402,18 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->ehci[i]), 0, sc->memmap[ASPEED_DEV_EHCI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); + aspeed_soc_ast2400_get_irq(s, + ASPEED_DEV_EHCI1 + i)); } /* SDMC - SDRAM Memory Controller */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdmc), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); /* Watch dog */ @@ -411,7 +426,7 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); + 
aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); } /* RAM */ @@ -426,48 +441,49 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, sc->memmap[ASPEED_DEV_ETH1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_ETH1 + i)); } /* XDMA */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->xdma), 0, sc->memmap[ASPEED_DEV_XDMA]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_XDMA)); /* GPIO */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_GPIO)); /* SDHCI */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sdhci), 0, sc->memmap[ASPEED_DEV_SDHCI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_SDHCI)); /* LPC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->lpc), 0, + sc->memmap[ASPEED_DEV_LPC]); /* Connect the LPC IRQ to the VIC */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, - aspeed_soc_get_irq(s, 
ASPEED_DEV_LPC)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_LPC)); /* * On the AST2400 and AST2500 the one LPC IRQ is shared between all of the @@ -496,10 +512,10 @@ static void aspeed_ast2400_soc_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); + aspeed_soc_ast2400_get_irq(s, ASPEED_DEV_HACE)); } static void aspeed_soc_ast2400_class_init(ObjectClass *oc, const void *data) @@ -527,7 +543,6 @@ static void aspeed_soc_ast2400_class_init(ObjectClass *oc, const void *data) sc->irqmap = aspeed_soc_ast2400_irqmap; sc->memmap = aspeed_soc_ast2400_memmap; sc->num_cpus = 1; - sc->get_irq = aspeed_soc_ast2400_get_irq; } static void aspeed_soc_ast2500_class_init(ObjectClass *oc, const void *data) @@ -555,7 +570,6 @@ static void aspeed_soc_ast2500_class_init(ObjectClass *oc, const void *data) sc->irqmap = aspeed_soc_ast2500_irqmap; sc->memmap = aspeed_soc_ast2500_memmap; sc->num_cpus = 1; - sc->get_irq = aspeed_soc_ast2400_get_irq; } static const TypeInfo aspeed_soc_ast2400_types[] = { diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index d12707f0abee0..498d1ecc078b7 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -48,11 +48,13 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_XDMA] = 0x1E6E7000, [ASPEED_DEV_ADC] = 0x1E6E9000, [ASPEED_DEV_DP] = 0x1E6EB000, + [ASPEED_DEV_PCIE_PHY1] = 0x1E6ED200, [ASPEED_DEV_SBC] = 0x1E6F2000, [ASPEED_DEV_EMMC_BC] = 0x1E6f5000, [ASPEED_DEV_VIDEO] = 0x1E700000, [ASPEED_DEV_SDHCI] = 0x1E740000, [ASPEED_DEV_EMMC] = 0x1E750000, + [ASPEED_DEV_PCIE0] = 0x1E770000, [ASPEED_DEV_GPIO] = 0x1E780000, [ASPEED_DEV_GPIO_1_8V] = 0x1E780800, [ASPEED_DEV_RTC] = 0x1E781000, @@ -79,6 +81,7 @@ static const hwaddr 
aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_FSI1] = 0x1E79B000, [ASPEED_DEV_FSI2] = 0x1E79B100, [ASPEED_DEV_I3C] = 0x1E7A0000, + [ASPEED_DEV_PCIE_MMIO1] = 0x70000000, [ASPEED_DEV_SDRAM] = 0x80000000, }; @@ -127,6 +130,7 @@ static const int aspeed_soc_ast2600_irqmap[] = { [ASPEED_DEV_LPC] = 35, [ASPEED_DEV_IBT] = 143, [ASPEED_DEV_I2C] = 110, /* 110 -> 125 */ + [ASPEED_DEV_PCIE0] = 168, [ASPEED_DEV_PECI] = 38, [ASPEED_DEV_ETH1] = 2, [ASPEED_DEV_ETH2] = 3, @@ -163,7 +167,7 @@ static void aspeed_soc_ast2600_init(Object *obj) for (i = 0; i < sc->num_cpus; i++) { object_initialize_child(obj, "cpu[*]", &a->cpu[i], - aspeed_soc_cpu_type(sc)); + aspeed_soc_cpu_type(sc->valid_cpu_types)); } snprintf(typename, sizeof(typename), "aspeed.scu-%s", socname); @@ -191,6 +195,10 @@ static void aspeed_soc_ast2600_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.i2c-%s", socname); object_initialize_child(obj, "i2c", &s->i2c, typename); + object_initialize_child(obj, "pcie-cfg", &s->pcie[0], TYPE_ASPEED_PCIE_CFG); + object_initialize_child(obj, "pcie-phy[*]", &s->pcie_phy[0], + TYPE_ASPEED_PCIE_PHY); + object_initialize_child(obj, "peci", &s->peci, TYPE_ASPEED_PECI); snprintf(typename, sizeof(typename), "aspeed.fmc-%s", socname); @@ -261,7 +269,7 @@ static void aspeed_soc_ast2600_init(Object *obj) object_initialize_child(obj, "i3c", &s->i3c, TYPE_ASPEED_I3C); - object_initialize_child(obj, "sbc", &s->sbc, TYPE_ASPEED_SBC); + object_initialize_child(obj, "sbc", &s->sbc, TYPE_ASPEED_AST2600_SBC); object_initialize_child(obj, "iomem", &s->iomem, TYPE_UNIMPLEMENTED_DEVICE); object_initialize_child(obj, "video", &s->video, TYPE_UNIMPLEMENTED_DEVICE); @@ -285,6 +293,67 @@ static uint64_t aspeed_calc_affinity(int cpu) return (0xf << ARM_AFF1_SHIFT) | cpu; } +/* + * PCIe Root Complex (RC) + * + * H2X register space (single block 0x00-0xFF): + * 0x00-0x7F : shared by RC_L (PCIe0) and RC_H (PCIe1) + * 0x80-0xBF : RC_L only + * 0xC0-0xFF : RC_H only + * + * Model scope / 
limitations: + * - Firmware supports RC_H only; this QEMU model does not support RC_L. + * - RC_H uses PHY1 and the MMIO window [0x70000000, 0x80000000] + * (aka MMIO1). + * + * Indexing convention (this model): + * - Expose a single logical instance at index 0. + * - pcie[0] -> hardware RC_H (PCIe1) + * - phy[0] -> hardware PHY1 + * - mmio.0 -> guest address range MMIO1: 0x70000000-0x80000000 + * - RC_L / PCIe0 is not created and mapped. + */ +static bool aspeed_soc_ast2600_pcie_realize(DeviceState *dev, Error **errp) +{ + Aspeed2600SoCState *a = ASPEED2600_SOC(dev); + AspeedSoCState *s = ASPEED_SOC(dev); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + MemoryRegion *mmio_mr = NULL; + qemu_irq irq; + + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie_phy[0]), errp)) { + return false; + } + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->pcie_phy[0]), 0, + sc->memmap[ASPEED_DEV_PCIE_PHY1]); + + object_property_set_int(OBJECT(&s->pcie[0]), "dram-base", + sc->memmap[ASPEED_DEV_SDRAM], + &error_abort); + object_property_set_link(OBJECT(&s->pcie[0]), "dram", OBJECT(s->dram_mr), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie[0]), errp)) { + return false; + } + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->pcie[0]), 0, + sc->memmap[ASPEED_DEV_PCIE0]); + + irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore), + sc->irqmap[ASPEED_DEV_PCIE0]); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie[0].rc), 0, irq); + + mmio_mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->pcie[0].rc), 1); + memory_region_init_alias(&s->pcie_mmio_alias[0], OBJECT(&s->pcie[0].rc), + "aspeed.pcie-mmio", mmio_mr, + sc->memmap[ASPEED_DEV_PCIE_MMIO1], + 0x10000000); + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_PCIE_MMIO1], + &s->pcie_mmio_alias[0]); + + return true; +} + static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) { int i; @@ -293,6 +362,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); 
qemu_irq irq; g_autofree char *sram_name = NULL; + int uart; /* Default boot region (SPI memory or ROMs) */ memory_region_init(&s->spi_boot_container, OBJECT(s), @@ -301,16 +371,19 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) &s->spi_boot_container); /* IO space */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->iomem), + "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], ASPEED_SOC_IOMEM_SIZE); /* Video engine stub */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->video), "aspeed.video", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->video), + "aspeed.video", sc->memmap[ASPEED_DEV_VIDEO], 0x1000); /* eMMC Boot Controller stub */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->emmc_boot_controller), + aspeed_mmio_map_unimplemented(s->memory, + SYS_BUS_DEVICE(&s->emmc_boot_controller), "aspeed.emmc-boot-controller", sc->memmap[ASPEED_DEV_EMMC_BC], 0x1000); @@ -345,7 +418,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) &error_abort); sysbus_realize(SYS_BUS_DEVICE(&a->a7mpcore), &error_abort); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->a7mpcore), 0, ASPEED_A7MPCORE_ADDR); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->a7mpcore), 0, + ASPEED_A7MPCORE_ADDR); for (i = 0; i < sc->num_cpus; i++) { SysBusDevice *sbd = SYS_BUS_DEVICE(&a->a7mpcore); @@ -371,7 +445,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) sc->memmap[ASPEED_DEV_SRAM], &s->sram); /* DPMCU */ - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->dpmcu), "aspeed.dpmcu", + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->dpmcu), + "aspeed.dpmcu", sc->memmap[ASPEED_DEV_DPMCU], ASPEED_SOC_DPMCU_SIZE); @@ -379,15 +454,17 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, 
sc->memmap[ASPEED_DEV_SCU]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->scu), 0, + sc->memmap[ASPEED_DEV_SCU]); /* RTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->rtc), 0, + sc->memmap[ASPEED_DEV_RTC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_RTC)); /* Timer */ object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), @@ -395,10 +472,10 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { - irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + irq = aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } @@ -406,13 +483,19 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->adc), 0, + sc->memmap[ASPEED_DEV_ADC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_ADC)); /* UART */ - if (!aspeed_soc_uart_realize(s, errp)) { - return; + for (i = 0, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { + if (!aspeed_soc_uart_realize(s->memory, &s->uart[i], + sc->memmap[uart], errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + aspeed_soc_ast2600_get_irq(s, uart)); } /* I2C */ @@ -421,7 +504,8 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error 
**errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, sc->memmap[ASPEED_DEV_I2C]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->i2c), 0, + sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_I2C] + i); @@ -433,10 +517,15 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->peci), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->peci), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->peci), 0, sc->memmap[ASPEED_DEV_PECI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->peci), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_PECI)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_PECI)); + + /* PCIe Root Complex (RC) */ + if (!aspeed_soc_ast2600_pcie_realize(dev, errp)) { + return; + } /* FMC, The number of CS is set at the board level */ object_property_set_link(OBJECT(&s->fmc), "dram", OBJECT(s->dram_mr), @@ -444,11 +533,12 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 0, + sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 1, ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_FMC)); /* Set up an alias on the FMC CE0 region (boot default) */ MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; @@ -463,9 +553,9 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, + 
aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI1 + i]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 1, ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } @@ -474,17 +564,18 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->ehci[i]), 0, sc->memmap[ASPEED_DEV_EHCI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); + aspeed_soc_ast2600_get_irq(s, + ASPEED_DEV_EHCI1 + i)); } /* SDMC - SDRAM Memory Controller */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdmc), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); /* Watch dog */ @@ -497,7 +588,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); } /* RAM */ @@ -512,10 +603,10 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, sc->memmap[ASPEED_DEV_ETH1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_ETH1 + i)); object_property_set_link(OBJECT(&s->mii[i]), "nic", OBJECT(&s->ftgmac100[i]), &error_abort); @@ -523,7 +614,7 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) return; } - aspeed_mmio_map(s, 
SYS_BUS_DEVICE(&s->mii[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->mii[i]), 0, sc->memmap[ASPEED_DEV_MII1 + i]); } @@ -531,55 +622,56 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->xdma), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->xdma), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->xdma), 0, sc->memmap[ASPEED_DEV_XDMA]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->xdma), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_XDMA)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_XDMA)); /* GPIO */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_GPIO)); if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio_1_8v), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio_1_8v), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->gpio_1_8v), 0, sc->memmap[ASPEED_DEV_GPIO_1_8V]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio_1_8v), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_GPIO_1_8V)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_GPIO_1_8V)); /* SDHCI */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sdhci), 0, sc->memmap[ASPEED_DEV_SDHCI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_SDHCI)); /* eMMC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->emmc), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->emmc), 0, sc->memmap[ASPEED_DEV_EMMC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_EMMC)); + 
aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_EMMC)); /* LPC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->lpc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->lpc), 0, sc->memmap[ASPEED_DEV_LPC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->lpc), 0, + sc->memmap[ASPEED_DEV_LPC]); /* Connect the LPC IRQ to the GIC. It is otherwise unused. */ sysbus_connect_irq(SYS_BUS_DEVICE(&s->lpc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_LPC)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_LPC)); /* * On the AST2600 LPC subdevice IRQs are connected straight to the GIC. @@ -611,16 +703,17 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_HACE)); /* I3C */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->i3c), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i3c), 0, sc->memmap[ASPEED_DEV_I3C]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->i3c), 0, + sc->memmap[ASPEED_DEV_I3C]); for (i = 0; i < ASPEED_I3C_NR_DEVICES; i++) { irq = qdev_get_gpio_in(DEVICE(&a->a7mpcore), sc->irqmap[ASPEED_DEV_I3C] + i); @@ -632,17 +725,18 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sbc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sbc), 0, sc->memmap[ASPEED_DEV_SBC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sbc), 0, + sc->memmap[ASPEED_DEV_SBC]); /* FSI */ for (i = 0; i < ASPEED_FSI_NUM; i++) { if (!sysbus_realize(SYS_BUS_DEVICE(&s->fsi[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fsi[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fsi[i]), 0, sc->memmap[ASPEED_DEV_FSI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fsi[i]), 
0, - aspeed_soc_get_irq(s, ASPEED_DEV_FSI1 + i)); + aspeed_soc_ast2600_get_irq(s, ASPEED_DEV_FSI1 + i)); } } @@ -678,7 +772,6 @@ static void aspeed_soc_ast2600_class_init(ObjectClass *oc, const void *data) sc->irqmap = aspeed_soc_ast2600_irqmap; sc->memmap = aspeed_soc_ast2600_memmap; sc->num_cpus = 2; - sc->get_irq = aspeed_soc_ast2600_get_irq; sc->boot_from_emmc = aspeed_soc_ast2600_boot_from_emmc; } diff --git a/hw/arm/aspeed_ast27x0-fc.c b/hw/arm/aspeed_ast27x0-fc.c index 7087be4288bbc..580ac5f7a1249 100644 --- a/hw/arm/aspeed_ast27x0-fc.c +++ b/hw/arm/aspeed_ast27x0-fc.c @@ -21,7 +21,7 @@ #include "hw/loader.h" #include "hw/arm/boot.h" #include "hw/block/flash.h" - +#include "hw/arm/aspeed_coprocessor.h" #define TYPE_AST2700A1FC MACHINE_TYPE_NAME("ast2700fc") OBJECT_DECLARE_SIMPLE_TYPE(Ast2700FCState, AST2700A1FC); @@ -35,6 +35,7 @@ struct Ast2700FCState { MemoryRegion ca35_memory; MemoryRegion ca35_dram; + MemoryRegion ca35_boot_rom; MemoryRegion ssp_memory; MemoryRegion tsp_memory; @@ -42,10 +43,8 @@ struct Ast2700FCState { Clock *tsp_sysclk; Aspeed27x0SoCState ca35; - Aspeed27x0SSPSoCState ssp; - Aspeed27x0TSPSoCState tsp; - - bool mmio_exec; + Aspeed27x0CoprocessorState ssp; + Aspeed27x0CoprocessorState tsp; }; #define AST2700FC_BMC_RAM_SIZE (1 * GiB) @@ -56,11 +55,15 @@ struct Ast2700FCState { #define AST2700FC_FMC_MODEL "w25q01jvq" #define AST2700FC_SPI_MODEL "w25q512jv" -static void ast2700fc_ca35_init(MachineState *machine) +static bool ast2700fc_ca35_init(MachineState *machine, Error **errp) { Ast2700FCState *s = AST2700A1FC(machine); AspeedSoCState *soc; AspeedSoCClass *sc; + const char *bios_name = NULL; + BlockBackend *fmc0 = NULL; + DeviceState *dev = NULL; + uint64_t rom_size; object_initialize_child(OBJECT(s), "ca35", &s->ca35, "ast2700-a1"); soc = ASPEED_SOC(&s->ca35); @@ -71,22 +74,15 @@ static void ast2700fc_ca35_init(MachineState *machine) memory_region_add_subregion(get_system_memory(), 0, &s->ca35_memory); if 
(!memory_region_init_ram(&s->ca35_dram, OBJECT(&s->ca35), "ca35-dram", - AST2700FC_BMC_RAM_SIZE, &error_abort)) { - return; - } - if (!object_property_set_link(OBJECT(&s->ca35), "memory", - OBJECT(&s->ca35_memory), - &error_abort)) { - return; - }; - if (!object_property_set_link(OBJECT(&s->ca35), "dram", - OBJECT(&s->ca35_dram), &error_abort)) { - return; - } - if (!object_property_set_int(OBJECT(&s->ca35), "ram-size", - AST2700FC_BMC_RAM_SIZE, &error_abort)) { - return; + AST2700FC_BMC_RAM_SIZE, errp)) { + return false; } + object_property_set_link(OBJECT(&s->ca35), "memory", + OBJECT(&s->ca35_memory), &error_abort); + object_property_set_link(OBJECT(&s->ca35), "dram", OBJECT(&s->ca35_dram), + &error_abort); + object_property_set_int(OBJECT(&s->ca35), "ram-size", + AST2700FC_BMC_RAM_SIZE, &error_abort); for (int i = 0; i < sc->macs_num; i++) { if (!qemu_configure_nic_device(DEVICE(&soc->ftgmac100[i]), @@ -94,17 +90,18 @@ static void ast2700fc_ca35_init(MachineState *machine) break; } } - if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap1", - AST2700FC_HW_STRAP1, &error_abort)) { - return; - } - if (!object_property_set_int(OBJECT(&s->ca35), "hw-strap2", - AST2700FC_HW_STRAP2, &error_abort)) { - return; - } - aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART12, serial_hd(0)); - if (!qdev_realize(DEVICE(&s->ca35), NULL, &error_abort)) { - return; + object_property_set_int(OBJECT(&s->ca35), "hw-strap1", + AST2700FC_HW_STRAP1, &error_abort); + object_property_set_int(OBJECT(&s->ca35), "hw-strap2", + AST2700FC_HW_STRAP2, &error_abort); + aspeed_soc_uart_set_chr(soc->uart, ASPEED_DEV_UART12, sc->uarts_base, + sc->uarts_num, serial_hd(0)); + aspeed_soc_uart_set_chr(soc->uart, ASPEED_DEV_UART4, sc->uarts_base, + sc->uarts_num, serial_hd(1)); + aspeed_soc_uart_set_chr(soc->uart, ASPEED_DEV_UART7, sc->uarts_base, + sc->uarts_num, serial_hd(2)); + if (!qdev_realize(DEVICE(&s->ca35), NULL, errp)) { + return false; } /* @@ -118,69 +115,98 @@ static void 
ast2700fc_ca35_init(MachineState *machine) ast2700fc_board_info.ram_size = machine->ram_size; ast2700fc_board_info.loader_start = sc->memmap[ASPEED_DEV_SDRAM]; + dev = ssi_get_cs(soc->fmc.spi, 0); + fmc0 = dev ? m25p80_get_blk(dev) : NULL; + + if (fmc0) { + rom_size = memory_region_size(&soc->spi_boot); + aspeed_install_boot_rom(soc, fmc0, &s->ca35_boot_rom, rom_size); + } + + /* VBOOTROM */ + bios_name = machine->firmware ?: VBOOTROM_FILE_NAME; + aspeed_load_vbootrom(soc, bios_name, errp); + arm_load_kernel(ARM_CPU(first_cpu), machine, &ast2700fc_board_info); + + return true; } -static void ast2700fc_ssp_init(MachineState *machine) +static bool ast2700fc_ssp_init(MachineState *machine, Error **errp) { - AspeedSoCState *soc; Ast2700FCState *s = AST2700A1FC(machine); + AspeedSoCState *psp = ASPEED_SOC(&s->ca35); + s->ssp_sysclk = clock_new(OBJECT(s), "SSP_SYSCLK"); clock_set_hz(s->ssp_sysclk, 200000000ULL); - object_initialize_child(OBJECT(s), "ssp", &s->ssp, TYPE_ASPEED27X0SSP_SOC); + object_initialize_child(OBJECT(s), "ssp", &s->ssp, + TYPE_ASPEED27X0SSP_COPROCESSOR); memory_region_init(&s->ssp_memory, OBJECT(&s->ssp), "ssp-memory", UINT64_MAX); qdev_connect_clock_in(DEVICE(&s->ssp), "sysclk", s->ssp_sysclk); - if (!object_property_set_link(OBJECT(&s->ssp), "memory", - OBJECT(&s->ssp_memory), &error_abort)) { - return; + object_property_set_link(OBJECT(&s->ssp), "memory", + OBJECT(&s->ssp_memory), &error_abort); + + object_property_set_link(OBJECT(&s->ssp), "uart", + OBJECT(&psp->uart[4]), &error_abort); + object_property_set_int(OBJECT(&s->ssp), "uart-dev", ASPEED_DEV_UART4, + &error_abort); + object_property_set_link(OBJECT(&s->ssp), "sram", + OBJECT(&psp->sram), &error_abort); + object_property_set_link(OBJECT(&s->ssp), "scu", + OBJECT(&psp->scu), &error_abort); + if (!qdev_realize(DEVICE(&s->ssp), NULL, errp)) { + return false; } - soc = ASPEED_SOC(&s->ssp); - aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART4, serial_hd(1)); - if (!qdev_realize(DEVICE(&s->ssp), 
NULL, &error_abort)) { - return; - } + return true; } -static void ast2700fc_tsp_init(MachineState *machine) +static bool ast2700fc_tsp_init(MachineState *machine, Error **errp) { - AspeedSoCState *soc; Ast2700FCState *s = AST2700A1FC(machine); + AspeedSoCState *psp = ASPEED_SOC(&s->ca35); + s->tsp_sysclk = clock_new(OBJECT(s), "TSP_SYSCLK"); clock_set_hz(s->tsp_sysclk, 200000000ULL); - object_initialize_child(OBJECT(s), "tsp", &s->tsp, TYPE_ASPEED27X0TSP_SOC); + object_initialize_child(OBJECT(s), "tsp", &s->tsp, + TYPE_ASPEED27X0TSP_COPROCESSOR); memory_region_init(&s->tsp_memory, OBJECT(&s->tsp), "tsp-memory", UINT64_MAX); qdev_connect_clock_in(DEVICE(&s->tsp), "sysclk", s->tsp_sysclk); - if (!object_property_set_link(OBJECT(&s->tsp), "memory", - OBJECT(&s->tsp_memory), &error_abort)) { - return; + object_property_set_link(OBJECT(&s->tsp), "memory", + OBJECT(&s->tsp_memory), &error_abort); + + object_property_set_link(OBJECT(&s->tsp), "uart", + OBJECT(&psp->uart[7]), &error_abort); + object_property_set_int(OBJECT(&s->tsp), "uart-dev", ASPEED_DEV_UART7, + &error_abort); + object_property_set_link(OBJECT(&s->tsp), "sram", + OBJECT(&psp->sram), &error_abort); + object_property_set_link(OBJECT(&s->tsp), "scu", + OBJECT(&psp->scu), &error_abort); + if (!qdev_realize(DEVICE(&s->tsp), NULL, errp)) { + return false; } - soc = ASPEED_SOC(&s->tsp); - aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART7, serial_hd(2)); - if (!qdev_realize(DEVICE(&s->tsp), NULL, &error_abort)) { - return; - } + return true; } static void ast2700fc_init(MachineState *machine) { - ast2700fc_ca35_init(machine); - ast2700fc_ssp_init(machine); - ast2700fc_tsp_init(machine); + ast2700fc_ca35_init(machine, &error_abort); + ast2700fc_ssp_init(machine, &error_abort); + ast2700fc_tsp_init(machine, &error_abort); } static void ast2700fc_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); - mc->alias = "ast2700fc"; mc->desc = "ast2700 full core support"; mc->init = 
ast2700fc_init; mc->no_floppy = 1; diff --git a/hw/arm/aspeed_ast27x0-ssp.c b/hw/arm/aspeed_ast27x0-ssp.c index 80ec5996c1d1d..d12a9b8459c23 100644 --- a/hw/arm/aspeed_ast27x0-ssp.c +++ b/hw/arm/aspeed_ast27x0-ssp.c @@ -1,5 +1,5 @@ /* - * ASPEED Ast27x0 SSP SoC + * ASPEED Ast27x0 SSP Coprocessor * * Copyright (C) 2025 ASPEED Technology Inc. * @@ -14,11 +14,13 @@ #include "hw/qdev-clock.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" +#include "hw/arm/aspeed_coprocessor.h" -#define AST2700_SSP_RAM_SIZE (32 * MiB) +#define AST2700_SSP_SDRAM_SIZE (512 * MiB) static const hwaddr aspeed_soc_ast27x0ssp_memmap[] = { - [ASPEED_DEV_SRAM] = 0x00000000, + [ASPEED_DEV_SDRAM] = 0x00000000, + [ASPEED_DEV_SRAM] = 0x70000000, [ASPEED_DEV_INTC] = 0x72100000, [ASPEED_DEV_SCU] = 0x72C02000, [ASPEED_DEV_SCUIO] = 0x74C02000, @@ -104,10 +106,11 @@ static struct nvic_intc_irq_info ast2700_ssp_intcmap[] = { {136, 0, 9, NULL}, }; -static qemu_irq aspeed_soc_ast27x0ssp_get_irq(AspeedSoCState *s, int dev) +static qemu_irq aspeed_soc_ast27x0ssp_get_irq(AspeedCoprocessorState *s, + int dev) { - Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(s); - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + Aspeed27x0CoprocessorState *a = ASPEED27X0SSP_COPROCESSOR(s); + AspeedCoprocessorClass *sc = ASPEED_COPROCESSOR_GET_CLASS(s); int or_idx; int idx; @@ -128,19 +131,11 @@ static qemu_irq aspeed_soc_ast27x0ssp_get_irq(AspeedSoCState *s, int dev) static void aspeed_soc_ast27x0ssp_init(Object *obj) { - Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(obj); - AspeedSoCState *s = ASPEED_SOC(obj); - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - int i; + Aspeed27x0CoprocessorState *a = ASPEED27X0SSP_COPROCESSOR(obj); + AspeedCoprocessorState *s = ASPEED_COPROCESSOR(obj); object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M); - object_initialize_child(obj, "scu", &s->scu, TYPE_ASPEED_2700_SCU); s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); - 
qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev); - - for (i = 0; i < sc->uarts_num; i++) { - object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); - } object_initialize_child(obj, "intc0", &a->intc[0], TYPE_ASPEED_2700SSP_INTC); @@ -159,11 +154,11 @@ static void aspeed_soc_ast27x0ssp_init(Object *obj) static void aspeed_soc_ast27x0ssp_realize(DeviceState *dev_soc, Error **errp) { - Aspeed27x0SSPSoCState *a = ASPEED27X0SSP_SOC(dev_soc); - AspeedSoCState *s = ASPEED_SOC(dev_soc); - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + Aspeed27x0CoprocessorState *a = ASPEED27X0SSP_COPROCESSOR(dev_soc); + AspeedCoprocessorState *s = ASPEED_COPROCESSOR(dev_soc); + AspeedCoprocessorClass *sc = ASPEED_COPROCESSOR_GET_CLASS(s); DeviceState *armv7m; - g_autofree char *sram_name = NULL; + g_autofree char *sdram_name = NULL; int i; if (!clock_has_source(s->sysclk)) { @@ -174,35 +169,43 @@ static void aspeed_soc_ast27x0ssp_realize(DeviceState *dev_soc, Error **errp) /* AST27X0 SSP Core */ armv7m = DEVICE(&a->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 256); - qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc)); + qdev_prop_set_string(armv7m, "cpu-type", + aspeed_soc_cpu_type(sc->valid_cpu_types)); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); object_property_set_link(OBJECT(&a->armv7m), "memory", OBJECT(s->memory), &error_abort); sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort); - sram_name = g_strdup_printf("aspeed.dram.%d", - CPU(a->armv7m.cpu)->cpu_index); - - if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, - errp)) { + /* SDRAM */ + sdram_name = g_strdup_printf("aspeed.sdram.%d", + CPU(a->armv7m.cpu)->cpu_index); + if (!memory_region_init_ram(&s->sdram, OBJECT(s), sdram_name, + AST2700_SSP_SDRAM_SIZE, errp)) { return; } memory_region_add_subregion(s->memory, - sc->memmap[ASPEED_DEV_SRAM], - &s->sram); + sc->memmap[ASPEED_DEV_SDRAM], + &s->sdram); + + /* SRAM */ + 
memory_region_init_alias(&s->sram_alias, OBJECT(s), "sram.alias", + s->sram, 0, memory_region_size(s->sram)); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SRAM], + &s->sram_alias); /* SCU */ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { - return; - } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + memory_region_init_alias(&s->scu_alias, OBJECT(s), "scu.alias", + &s->scu->iomem, 0, + memory_region_size(&s->scu->iomem)); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SCU], + &s->scu_alias); /* INTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->intc[0]), 0, sc->memmap[ASPEED_DEV_INTC]); /* INTCIO */ @@ -210,7 +213,7 @@ static void aspeed_soc_ast27x0ssp_realize(DeviceState *dev_soc, Error **errp) return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->intc[1]), 0, sc->memmap[ASPEED_DEV_INTCIO]); /* irq source orgates -> INTC0 */ @@ -234,58 +237,58 @@ static void aspeed_soc_ast27x0ssp_realize(DeviceState *dev_soc, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i, qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i)); } - /* UART */ - if (!aspeed_soc_uart_realize(s, errp)) { - return; - } - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->timerctrl), + /* UART */ + memory_region_init_alias(&s->uart_alias, OBJECT(s), "uart.alias", + &s->uart->serial.io, 0, + memory_region_size(&s->uart->serial.io)); + memory_region_add_subregion(s->memory, sc->memmap[s->uart_dev], + &s->uart_alias); + /* + * Redirect the UART interrupt to the NVIC, replacing the default routing + * to the PSP's GIC. 
+ */ + sysbus_connect_irq(SYS_BUS_DEVICE(s->uart), 0, + aspeed_soc_ast27x0ssp_get_irq(s, s->uart_dev)); + + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->timerctrl), "aspeed.timerctrl", sc->memmap[ASPEED_DEV_TIMER1], 0x200); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[0]), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&a->ipc[0]), "aspeed.ipc0", sc->memmap[ASPEED_DEV_IPC0], 0x1000); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[1]), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&a->ipc[1]), "aspeed.ipc1", sc->memmap[ASPEED_DEV_IPC1], 0x1000); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->scuio), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&a->scuio), "aspeed.scuio", sc->memmap[ASPEED_DEV_SCUIO], 0x1000); } -static void aspeed_soc_ast27x0ssp_class_init(ObjectClass *klass, const void *data) +static void aspeed_soc_ast27x0ssp_class_init(ObjectClass *klass, + const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO: cortex-m4f */ NULL }; DeviceClass *dc = DEVICE_CLASS(klass); - AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc); + AspeedCoprocessorClass *sc = ASPEED_COPROCESSOR_CLASS(dc); - /* Reason: The Aspeed SoC can only be instantiated from a board */ + /* Reason: The Aspeed Coprocessor can only be instantiated from a board */ dc->user_creatable = false; dc->realize = aspeed_soc_ast27x0ssp_realize; sc->valid_cpu_types = valid_cpu_types; - sc->silicon_rev = AST2700_A1_SILICON_REV; - sc->sram_size = AST2700_SSP_RAM_SIZE; - sc->spis_num = 0; - sc->ehcis_num = 0; - sc->wdts_num = 0; - sc->macs_num = 0; - sc->uarts_num = 13; - sc->uarts_base = ASPEED_DEV_UART0; sc->irqmap = aspeed_soc_ast27x0ssp_irqmap; sc->memmap = aspeed_soc_ast27x0ssp_memmap; - sc->num_cpus = 1; - sc->get_irq = aspeed_soc_ast27x0ssp_get_irq; } static const TypeInfo aspeed_soc_ast27x0ssp_types[] = { { - .name = TYPE_ASPEED27X0SSP_SOC, - .parent = TYPE_ASPEED_SOC, - 
.instance_size = sizeof(Aspeed27x0SSPSoCState), + .name = TYPE_ASPEED27X0SSP_COPROCESSOR, + .parent = TYPE_ASPEED_COPROCESSOR, + .instance_size = sizeof(Aspeed27x0CoprocessorState), .instance_init = aspeed_soc_ast27x0ssp_init, .class_init = aspeed_soc_ast27x0ssp_class_init, }, diff --git a/hw/arm/aspeed_ast27x0-tsp.c b/hw/arm/aspeed_ast27x0-tsp.c index 4e0efaef07c04..5b75e14206399 100644 --- a/hw/arm/aspeed_ast27x0-tsp.c +++ b/hw/arm/aspeed_ast27x0-tsp.c @@ -1,5 +1,5 @@ /* - * ASPEED Ast27x0 TSP SoC + * ASPEED Ast27x0 TSP Coprocessor * * Copyright (C) 2025 ASPEED Technology Inc. * @@ -14,11 +14,13 @@ #include "hw/qdev-clock.h" #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" +#include "hw/arm/aspeed_coprocessor.h" -#define AST2700_TSP_RAM_SIZE (32 * MiB) +#define AST2700_TSP_SDRAM_SIZE (512 * MiB) static const hwaddr aspeed_soc_ast27x0tsp_memmap[] = { - [ASPEED_DEV_SRAM] = 0x00000000, + [ASPEED_DEV_SDRAM] = 0x00000000, + [ASPEED_DEV_SRAM] = 0x70000000, [ASPEED_DEV_INTC] = 0x72100000, [ASPEED_DEV_SCU] = 0x72C02000, [ASPEED_DEV_SCUIO] = 0x74C02000, @@ -104,10 +106,11 @@ static struct nvic_intc_irq_info ast2700_tsp_intcmap[] = { {136, 0, 9, NULL}, }; -static qemu_irq aspeed_soc_ast27x0tsp_get_irq(AspeedSoCState *s, int dev) +static qemu_irq aspeed_soc_ast27x0tsp_get_irq(AspeedCoprocessorState *s, + int dev) { - Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(s); - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + Aspeed27x0CoprocessorState *a = ASPEED27X0TSP_COPROCESSOR(s); + AspeedCoprocessorClass *sc = ASPEED_COPROCESSOR_GET_CLASS(s); int or_idx; int idx; @@ -128,19 +131,11 @@ static qemu_irq aspeed_soc_ast27x0tsp_get_irq(AspeedSoCState *s, int dev) static void aspeed_soc_ast27x0tsp_init(Object *obj) { - Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(obj); - AspeedSoCState *s = ASPEED_SOC(obj); - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - int i; + Aspeed27x0CoprocessorState *a = ASPEED27X0TSP_COPROCESSOR(obj); + AspeedCoprocessorState *s = 
ASPEED_COPROCESSOR(obj); object_initialize_child(obj, "armv7m", &a->armv7m, TYPE_ARMV7M); - object_initialize_child(obj, "scu", &s->scu, TYPE_ASPEED_2700_SCU); s->sysclk = qdev_init_clock_in(DEVICE(s), "sysclk", NULL, NULL, 0); - qdev_prop_set_uint32(DEVICE(&s->scu), "silicon-rev", sc->silicon_rev); - - for (i = 0; i < sc->uarts_num; i++) { - object_initialize_child(obj, "uart[*]", &s->uart[i], TYPE_SERIAL_MM); - } object_initialize_child(obj, "intc0", &a->intc[0], TYPE_ASPEED_2700TSP_INTC); @@ -159,11 +154,11 @@ static void aspeed_soc_ast27x0tsp_init(Object *obj) static void aspeed_soc_ast27x0tsp_realize(DeviceState *dev_soc, Error **errp) { - Aspeed27x0TSPSoCState *a = ASPEED27X0TSP_SOC(dev_soc); - AspeedSoCState *s = ASPEED_SOC(dev_soc); - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + Aspeed27x0CoprocessorState *a = ASPEED27X0TSP_COPROCESSOR(dev_soc); + AspeedCoprocessorState *s = ASPEED_COPROCESSOR(dev_soc); + AspeedCoprocessorClass *sc = ASPEED_COPROCESSOR_GET_CLASS(s); DeviceState *armv7m; - g_autofree char *sram_name = NULL; + g_autofree char *sdram_name = NULL; int i; if (!clock_has_source(s->sysclk)) { @@ -174,35 +169,43 @@ static void aspeed_soc_ast27x0tsp_realize(DeviceState *dev_soc, Error **errp) /* AST27X0 TSP Core */ armv7m = DEVICE(&a->armv7m); qdev_prop_set_uint32(armv7m, "num-irq", 256); - qdev_prop_set_string(armv7m, "cpu-type", aspeed_soc_cpu_type(sc)); + qdev_prop_set_string(armv7m, "cpu-type", + aspeed_soc_cpu_type(sc->valid_cpu_types)); qdev_connect_clock_in(armv7m, "cpuclk", s->sysclk); object_property_set_link(OBJECT(&a->armv7m), "memory", OBJECT(s->memory), &error_abort); sysbus_realize(SYS_BUS_DEVICE(&a->armv7m), &error_abort); - sram_name = g_strdup_printf("aspeed.dram.%d", - CPU(a->armv7m.cpu)->cpu_index); - - if (!memory_region_init_ram(&s->sram, OBJECT(s), sram_name, sc->sram_size, - errp)) { + /* SDRAM */ + sdram_name = g_strdup_printf("aspeed.sdram.%d", + CPU(a->armv7m.cpu)->cpu_index); + if (!memory_region_init_ram(&s->sdram, 
OBJECT(s), sdram_name, + AST2700_TSP_SDRAM_SIZE, errp)) { return; } memory_region_add_subregion(s->memory, - sc->memmap[ASPEED_DEV_SRAM], - &s->sram); + sc->memmap[ASPEED_DEV_SDRAM], + &s->sdram); + + /* SRAM */ + memory_region_init_alias(&s->sram_alias, OBJECT(s), "sram.alias", + s->sram, 0, memory_region_size(s->sram)); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SRAM], + &s->sram_alias); /* SCU */ - if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { - return; - } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + memory_region_init_alias(&s->scu_alias, OBJECT(s), "scu.alias", + &s->scu->iomem, 0, + memory_region_size(&s->scu->iomem)); + memory_region_add_subregion(s->memory, sc->memmap[ASPEED_DEV_SCU], + &s->scu_alias); /* INTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&a->intc[0]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->intc[0]), 0, sc->memmap[ASPEED_DEV_INTC]); /* INTCIO */ @@ -210,7 +213,7 @@ static void aspeed_soc_ast27x0tsp_realize(DeviceState *dev_soc, Error **errp) return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->intc[1]), 0, sc->memmap[ASPEED_DEV_INTCIO]); /* irq source orgates -> INTC */ @@ -234,58 +237,58 @@ static void aspeed_soc_ast27x0tsp_realize(DeviceState *dev_soc, Error **errp) sysbus_connect_irq(SYS_BUS_DEVICE(&a->intc[1]), i, qdev_get_gpio_in(DEVICE(&a->intc[0].orgates[0]), i)); } - /* UART */ - if (!aspeed_soc_uart_realize(s, errp)) { - return; - } - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->timerctrl), + /* UART */ + memory_region_init_alias(&s->uart_alias, OBJECT(s), "uart.alias", + &s->uart->serial.io, 0, + memory_region_size(&s->uart->serial.io)); + memory_region_add_subregion(s->memory, sc->memmap[s->uart_dev], + &s->uart_alias); + /* + * Redirect the UART interrupt to the NVIC, replacing the default routing + * to the PSP's GIC. 
+ */ + sysbus_connect_irq(SYS_BUS_DEVICE(s->uart), 0, + aspeed_soc_ast27x0tsp_get_irq(s, s->uart_dev)); + + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->timerctrl), "aspeed.timerctrl", sc->memmap[ASPEED_DEV_TIMER1], 0x200); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[0]), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&a->ipc[0]), "aspeed.ipc0", sc->memmap[ASPEED_DEV_IPC0], 0x1000); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->ipc[1]), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&a->ipc[1]), "aspeed.ipc1", sc->memmap[ASPEED_DEV_IPC1], 0x1000); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&a->scuio), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&a->scuio), "aspeed.scuio", sc->memmap[ASPEED_DEV_SCUIO], 0x1000); } -static void aspeed_soc_ast27x0tsp_class_init(ObjectClass *klass, const void *data) +static void aspeed_soc_ast27x0tsp_class_init(ObjectClass *klass, + const void *data) { static const char * const valid_cpu_types[] = { ARM_CPU_TYPE_NAME("cortex-m4"), /* TODO cortex-m4f */ NULL }; DeviceClass *dc = DEVICE_CLASS(klass); - AspeedSoCClass *sc = ASPEED_SOC_CLASS(dc); + AspeedCoprocessorClass *sc = ASPEED_COPROCESSOR_CLASS(dc); - /* Reason: The Aspeed SoC can only be instantiated from a board */ + /* Reason: The Aspeed Coprocessor can only be instantiated from a board */ dc->user_creatable = false; dc->realize = aspeed_soc_ast27x0tsp_realize; sc->valid_cpu_types = valid_cpu_types; - sc->silicon_rev = AST2700_A1_SILICON_REV; - sc->sram_size = AST2700_TSP_RAM_SIZE; - sc->spis_num = 0; - sc->ehcis_num = 0; - sc->wdts_num = 0; - sc->macs_num = 0; - sc->uarts_num = 13; - sc->uarts_base = ASPEED_DEV_UART0; sc->irqmap = aspeed_soc_ast27x0tsp_irqmap; sc->memmap = aspeed_soc_ast27x0tsp_memmap; - sc->num_cpus = 1; - sc->get_irq = aspeed_soc_ast27x0tsp_get_irq; } static const TypeInfo aspeed_soc_ast27x0tsp_types[] = { { - .name = TYPE_ASPEED27X0TSP_SOC, - .parent = TYPE_ASPEED_SOC, - 
.instance_size = sizeof(Aspeed27x0TSPSoCState), + .name = TYPE_ASPEED27X0TSP_COPROCESSOR, + .parent = TYPE_ASPEED_COPROCESSOR, + .instance_size = sizeof(Aspeed27x0CoprocessorState), .instance_init = aspeed_soc_ast27x0tsp_init, .class_init = aspeed_soc_ast27x0tsp_class_init, }, diff --git a/hw/arm/aspeed_ast27x0.c b/hw/arm/aspeed_ast27x0.c index 6aa3841b6911f..c484bcd4e22fb 100644 --- a/hw/arm/aspeed_ast27x0.c +++ b/hw/arm/aspeed_ast27x0.c @@ -38,6 +38,8 @@ static const hwaddr aspeed_soc_ast2700_memmap[] = { [ASPEED_DEV_EHCI2] = 0x12063000, [ASPEED_DEV_HACE] = 0x12070000, [ASPEED_DEV_EMMC] = 0x12090000, + [ASPEED_DEV_PCIE0] = 0x120E0000, + [ASPEED_DEV_PCIE1] = 0x120F0000, [ASPEED_DEV_INTC] = 0x12100000, [ASPEED_GIC_DIST] = 0x12200000, [ASPEED_GIC_REDIST] = 0x12280000, @@ -45,6 +47,8 @@ static const hwaddr aspeed_soc_ast2700_memmap[] = { [ASPEED_DEV_SCU] = 0x12C02000, [ASPEED_DEV_RTC] = 0x12C0F000, [ASPEED_DEV_TIMER1] = 0x12C10000, + [ASPEED_DEV_PCIE_PHY0] = 0x12C15000, + [ASPEED_DEV_PCIE_PHY1] = 0x12C15800, [ASPEED_DEV_SLI] = 0x12C17000, [ASPEED_DEV_UART4] = 0x12C1A000, [ASPEED_DEV_IOMEM1] = 0x14000000, @@ -59,6 +63,7 @@ static const hwaddr aspeed_soc_ast2700_memmap[] = { [ASPEED_DEV_ETH2] = 0x14060000, [ASPEED_DEV_ETH3] = 0x14070000, [ASPEED_DEV_SDHCI] = 0x14080000, + [ASPEED_DEV_PCIE2] = 0x140D0000, [ASPEED_DEV_EHCI3] = 0x14121000, [ASPEED_DEV_EHCI4] = 0x14123000, [ASPEED_DEV_ADC] = 0x14C00000, @@ -66,6 +71,7 @@ static const hwaddr aspeed_soc_ast2700_memmap[] = { [ASPEED_DEV_GPIO] = 0x14C0B000, [ASPEED_DEV_I2C] = 0x14C0F000, [ASPEED_DEV_INTCIO] = 0x14C18000, + [ASPEED_DEV_PCIE_PHY2] = 0x14C1C000, [ASPEED_DEV_SLIIO] = 0x14C1E000, [ASPEED_DEV_VUART] = 0x14C30000, [ASPEED_DEV_UART0] = 0x14C33000, @@ -81,6 +87,9 @@ static const hwaddr aspeed_soc_ast2700_memmap[] = { [ASPEED_DEV_UART11] = 0x14C33A00, [ASPEED_DEV_UART12] = 0x14C33B00, [ASPEED_DEV_WDT] = 0x14C37000, + [ASPEED_DEV_PCIE_MMIO0] = 0x60000000, + [ASPEED_DEV_PCIE_MMIO1] = 0x80000000, + [ASPEED_DEV_PCIE_MMIO2] 
= 0xA0000000, [ASPEED_DEV_SPI_BOOT] = 0x100000000, [ASPEED_DEV_LTPI] = 0x300000000, [ASPEED_DEV_SDRAM] = 0x400000000, @@ -156,6 +165,8 @@ static const int aspeed_soc_ast2700a1_irqmap[] = { [ASPEED_DEV_DP] = 28, [ASPEED_DEV_EHCI1] = 33, [ASPEED_DEV_EHCI2] = 37, + [ASPEED_DEV_PCIE0] = 56, + [ASPEED_DEV_PCIE1] = 57, [ASPEED_DEV_LPC] = 192, [ASPEED_DEV_IBT] = 192, [ASPEED_DEV_KCS] = 192, @@ -166,6 +177,7 @@ static const int aspeed_soc_ast2700a1_irqmap[] = { [ASPEED_DEV_WDT] = 195, [ASPEED_DEV_PWM] = 195, [ASPEED_DEV_I3C] = 195, + [ASPEED_DEV_PCIE2] = 196, [ASPEED_DEV_UART0] = 196, [ASPEED_DEV_UART1] = 196, [ASPEED_DEV_UART2] = 196, @@ -233,6 +245,7 @@ static const int ast2700_gic132_gic196_intcmap[] = { [ASPEED_DEV_UART12] = 18, [ASPEED_DEV_EHCI3] = 28, [ASPEED_DEV_EHCI4] = 29, + [ASPEED_DEV_PCIE2] = 31, }; /* GICINT 133 */ @@ -423,7 +436,7 @@ static void aspeed_soc_ast2700_init(Object *obj) for (i = 0; i < sc->num_cpus; i++) { object_initialize_child(obj, "cpu[*]", &a->cpu[i], - aspeed_soc_cpu_type(sc)); + aspeed_soc_cpu_type(sc->valid_cpu_types)); } object_initialize_child(obj, "gic", &a->gic, gicv3_class_name()); @@ -519,6 +532,17 @@ static void aspeed_soc_ast2700_init(Object *obj) snprintf(typename, sizeof(typename), "aspeed.hace-%s", socname); object_initialize_child(obj, "hace", &s->hace, typename); + + for (i = 0; i < sc->pcie_num; i++) { + snprintf(typename, sizeof(typename), "aspeed.pcie-phy-%s", socname); + object_initialize_child(obj, "pcie-phy[*]", &s->pcie_phy[i], typename); + object_property_set_int(OBJECT(&s->pcie_phy[i]), "id", i, &error_abort); + + snprintf(typename, sizeof(typename), "aspeed.pcie-cfg-%s", socname); + object_initialize_child(obj, "pcie-cfg[*]", &s->pcie[i], typename); + object_property_set_int(OBJECT(&s->pcie[i]), "id", i, &error_abort); + } + object_initialize_child(obj, "dpmcu", &s->dpmcu, TYPE_UNIMPLEMENTED_DEVICE); object_initialize_child(obj, "ltpi", &s->ltpi, @@ -565,9 +589,9 @@ static bool 
aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) return false; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->gic), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->gic), 0, sc->memmap[ASPEED_GIC_DIST]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->gic), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->gic), 1, sc->memmap[ASPEED_GIC_REDIST]); for (i = 0; i < sc->num_cpus; i++) { @@ -610,6 +634,49 @@ static bool aspeed_soc_ast2700_gic_realize(DeviceState *dev, Error **errp) return true; } +static bool aspeed_soc_ast2700_pcie_realize(DeviceState *dev, Error **errp) +{ + AspeedSoCState *s = ASPEED_SOC(dev); + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); + MemoryRegion *mmio_mr = NULL; + char name[64]; + qemu_irq irq; + int i; + + for (i = 0; i < sc->pcie_num; i++) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie_phy[i]), errp)) { + return false; + } + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->pcie_phy[i]), 0, + sc->memmap[ASPEED_DEV_PCIE_PHY0 + i]); + + object_property_set_int(OBJECT(&s->pcie[i]), "dram-base", + sc->memmap[ASPEED_DEV_SDRAM], + &error_abort); + object_property_set_link(OBJECT(&s->pcie[i]), "dram", + OBJECT(s->dram_mr), &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->pcie[i]), errp)) { + return false; + } + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->pcie[i]), 0, + sc->memmap[ASPEED_DEV_PCIE0 + i]); + irq = aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_PCIE0 + i); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->pcie[i].rc), 0, irq); + + mmio_mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->pcie[i].rc), 1); + snprintf(name, sizeof(name), "aspeed.pcie-mmio.%d", i); + memory_region_init_alias(&s->pcie_mmio_alias[i], OBJECT(&s->pcie[i].rc), + name, mmio_mr, + sc->memmap[ASPEED_DEV_PCIE_MMIO0 + i], + 0x20000000); + memory_region_add_subregion(s->memory, + sc->memmap[ASPEED_DEV_PCIE_MMIO0 + i], + &s->pcie_mmio_alias[i]); + } + + return true; +} + static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) { int i; @@ -620,6 +687,7 @@ static 
void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) AspeedINTCClass *icio = ASPEED_INTC_GET_CLASS(&a->intc[1]); g_autofree char *name = NULL; qemu_irq irq; + int uart; /* Default boot region (SPI memory or ROMs) */ memory_region_init(&s->spi_boot_container, OBJECT(s), @@ -652,7 +720,7 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[0]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->intc[0]), 0, sc->memmap[ASPEED_DEV_INTC]); /* INTCIO */ @@ -660,7 +728,7 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&a->intc[1]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&a->intc[1]), 0, sc->memmap[ASPEED_DEV_INTCIO]); /* irq sources -> orgates -> INTC */ @@ -710,18 +778,24 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scu), 0, sc->memmap[ASPEED_DEV_SCU]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->scu), 0, + sc->memmap[ASPEED_DEV_SCU]); /* SCU1 */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scuio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->scuio), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->scuio), 0, sc->memmap[ASPEED_DEV_SCUIO]); /* UART */ - if (!aspeed_soc_uart_realize(s, errp)) { - return; + for (i = 0, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { + if (!aspeed_soc_uart_realize(s->memory, &s->uart[i], + sc->memmap[uart], errp)) { + return; + } + sysbus_connect_irq(SYS_BUS_DEVICE(&s->uart[i]), 0, + aspeed_soc_ast2700_get_irq(s, uart)); } /* FMC, The number of CS is set at the board level */ @@ -733,11 +807,12 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->fmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 0, sc->memmap[ASPEED_DEV_FMC]); - 
aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->fmc), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 0, + sc->memmap[ASPEED_DEV_FMC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->fmc), 1, ASPEED_SMC_GET_CLASS(&s->fmc)->flash_window_base); sysbus_connect_irq(SYS_BUS_DEVICE(&s->fmc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_FMC)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_FMC)); /* Set up an alias on the FMC CE0 region (boot default) */ MemoryRegion *fmc0_mmio = &s->fmc.flashes[0].mmio; @@ -752,9 +827,9 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->spi[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 0, sc->memmap[ASPEED_DEV_SPI0 + i]); - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->spi[i]), 1, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->spi[i]), 1, ASPEED_SMC_GET_CLASS(&s->spi[i])->flash_window_base); } @@ -763,10 +838,11 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ehci[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ehci[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->ehci[i]), 0, sc->memmap[ASPEED_DEV_EHCI1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ehci[i]), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_EHCI1 + i)); + aspeed_soc_ast2700_get_irq(s, + ASPEED_DEV_EHCI1 + i)); } /* @@ -781,7 +857,7 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdmc), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sdmc), 0, sc->memmap[ASPEED_DEV_SDMC]); /* RAM */ @@ -798,10 +874,10 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->ftgmac100[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, + aspeed_mmio_map(s->memory, 
SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, sc->memmap[ASPEED_DEV_ETH1 + i]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->ftgmac100[i]), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ETH1 + i)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_ETH1 + i)); object_property_set_link(OBJECT(&s->mii[i]), "nic", OBJECT(&s->ftgmac100[i]), &error_abort); @@ -809,7 +885,7 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->mii[i]), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->mii[i]), 0, sc->memmap[ASPEED_DEV_MII1 + i]); } @@ -823,28 +899,30 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->wdt[i]), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->wdt[i]), 0, wdt_offset); } /* SLI */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sli), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sli), 0, sc->memmap[ASPEED_DEV_SLI]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sli), 0, + sc->memmap[ASPEED_DEV_SLI]); if (!sysbus_realize(SYS_BUS_DEVICE(&s->sliio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sliio), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sliio), 0, sc->memmap[ASPEED_DEV_SLIIO]); /* ADC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->adc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->adc), 0, sc->memmap[ASPEED_DEV_ADC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->adc), 0, + sc->memmap[ASPEED_DEV_ADC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->adc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_ADC)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_ADC)); /* I2C */ object_property_set_link(OBJECT(&s->i2c), "dram", OBJECT(s->dram_mr), @@ -852,7 +930,8 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->i2c), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->i2c), 0, 
sc->memmap[ASPEED_DEV_I2C]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->i2c), 0, + sc->memmap[ASPEED_DEV_I2C]); for (i = 0; i < ASPEED_I2C_GET_CLASS(&s->i2c)->num_busses; i++) { /* * The AST2700 I2C controller has one source INTC per bus. @@ -881,36 +960,37 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->gpio), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->gpio), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->gpio), 0, sc->memmap[ASPEED_DEV_GPIO]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->gpio), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_GPIO)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_GPIO)); /* RTC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->rtc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->rtc), 0, sc->memmap[ASPEED_DEV_RTC]); + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->rtc), 0, + sc->memmap[ASPEED_DEV_RTC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->rtc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_RTC)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_RTC)); /* SDHCI */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->sdhci), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->sdhci), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->sdhci), 0, sc->memmap[ASPEED_DEV_SDHCI]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->sdhci), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_SDHCI)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_SDHCI)); /* eMMC */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->emmc), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->emmc), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->emmc), 0, sc->memmap[ASPEED_DEV_EMMC]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->emmc), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_EMMC)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_EMMC)); /* Timer */ object_property_set_link(OBJECT(&s->timerctrl), "scu", OBJECT(&s->scu), @@ -918,10 +998,10 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if 
(!sysbus_realize(SYS_BUS_DEVICE(&s->timerctrl), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->timerctrl), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->timerctrl), 0, sc->memmap[ASPEED_DEV_TIMER1]); for (i = 0; i < ASPEED_TIMER_NR_TIMERS; i++) { - irq = aspeed_soc_get_irq(s, ASPEED_DEV_TIMER1 + i); + irq = aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_TIMER1 + i); sysbus_connect_irq(SYS_BUS_DEVICE(&s->timerctrl), i, irq); } @@ -931,28 +1011,33 @@ static void aspeed_soc_ast2700_realize(DeviceState *dev, Error **errp) if (!sysbus_realize(SYS_BUS_DEVICE(&s->hace), errp)) { return; } - aspeed_mmio_map(s, SYS_BUS_DEVICE(&s->hace), 0, + aspeed_mmio_map(s->memory, SYS_BUS_DEVICE(&s->hace), 0, sc->memmap[ASPEED_DEV_HACE]); sysbus_connect_irq(SYS_BUS_DEVICE(&s->hace), 0, - aspeed_soc_get_irq(s, ASPEED_DEV_HACE)); + aspeed_soc_ast2700_get_irq(s, ASPEED_DEV_HACE)); + + /* PCIe Root Complex (RC) */ + if (!aspeed_soc_ast2700_pcie_realize(dev, errp)) { + return; + } - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->dpmcu), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->dpmcu), "aspeed.dpmcu", sc->memmap[ASPEED_DEV_DPMCU], AST2700_SOC_DPMCU_SIZE); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->ltpi), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->ltpi), "aspeed.ltpi", sc->memmap[ASPEED_DEV_LTPI], AST2700_SOC_LTPI_SIZE); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->iomem), "aspeed.io", sc->memmap[ASPEED_DEV_IOMEM], AST2700_SOC_IO_SIZE); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem0), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->iomem0), "aspeed.iomem0", sc->memmap[ASPEED_DEV_IOMEM0], AST2700_SOC_IOMEM_SIZE); - aspeed_mmio_map_unimplemented(s, SYS_BUS_DEVICE(&s->iomem1), + aspeed_mmio_map_unimplemented(s->memory, SYS_BUS_DEVICE(&s->iomem1), "aspeed.iomem1", sc->memmap[ASPEED_DEV_IOMEM1], AST2700_SOC_IOMEM_SIZE); @@ 
-974,6 +1059,7 @@ static void aspeed_soc_ast2700a0_class_init(ObjectClass *oc, const void *data) sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2700_A0_SILICON_REV; sc->sram_size = 0x20000; + sc->pcie_num = 0; sc->spis_num = 3; sc->ehcis_num = 2; sc->wdts_num = 8; @@ -983,7 +1069,6 @@ static void aspeed_soc_ast2700a0_class_init(ObjectClass *oc, const void *data) sc->uarts_base = ASPEED_DEV_UART0; sc->irqmap = aspeed_soc_ast2700a0_irqmap; sc->memmap = aspeed_soc_ast2700_memmap; - sc->get_irq = aspeed_soc_ast2700_get_irq; } static void aspeed_soc_ast2700a1_class_init(ObjectClass *oc, const void *data) @@ -1002,6 +1087,7 @@ static void aspeed_soc_ast2700a1_class_init(ObjectClass *oc, const void *data) sc->valid_cpu_types = valid_cpu_types; sc->silicon_rev = AST2700_A1_SILICON_REV; sc->sram_size = 0x20000; + sc->pcie_num = 3; sc->spis_num = 3; sc->ehcis_num = 4; sc->wdts_num = 8; @@ -1011,7 +1097,6 @@ static void aspeed_soc_ast2700a1_class_init(ObjectClass *oc, const void *data) sc->uarts_base = ASPEED_DEV_UART0; sc->irqmap = aspeed_soc_ast2700a1_irqmap; sc->memmap = aspeed_soc_ast2700_memmap; - sc->get_irq = aspeed_soc_ast2700_get_irq; } static const TypeInfo aspeed_soc_ast27x0_types[] = { diff --git a/hw/arm/aspeed_coprocessor_common.c b/hw/arm/aspeed_coprocessor_common.c new file mode 100644 index 0000000000000..f037d5b573fd4 --- /dev/null +++ b/hw/arm/aspeed_coprocessor_common.c @@ -0,0 +1,56 @@ +/* + * ASPEED Coprocessor + * + * Copyright (C) 2025 ASPEED Technology Inc. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "system/memory.h" +#include "hw/qdev-properties.h" +#include "hw/arm/aspeed_coprocessor.h" + +static void aspeed_coprocessor_realize(DeviceState *dev, Error **errp) +{ + AspeedCoprocessorState *s = ASPEED_COPROCESSOR(dev); + + if (!s->memory) { + error_setg(errp, "'memory' link is not set"); + return; + } +} + +static const Property aspeed_coprocessor_properties[] = { + DEFINE_PROP_LINK("memory", AspeedCoprocessorState, memory, + TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_LINK("sram", AspeedCoprocessorState, sram, TYPE_MEMORY_REGION, + MemoryRegion *), + DEFINE_PROP_LINK("scu", AspeedCoprocessorState, scu, TYPE_ASPEED_SCU, + AspeedSCUState *), + DEFINE_PROP_LINK("uart", AspeedCoprocessorState, uart, TYPE_SERIAL_MM, + SerialMM *), + DEFINE_PROP_INT32("uart-dev", AspeedCoprocessorState, uart_dev, 0), +}; + +static void aspeed_coprocessor_class_init(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + + dc->realize = aspeed_coprocessor_realize; + device_class_set_props(dc, aspeed_coprocessor_properties); +} + +static const TypeInfo aspeed_coprocessor_types[] = { + { + .name = TYPE_ASPEED_COPROCESSOR, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AspeedCoprocessorState), + .class_size = sizeof(AspeedCoprocessorClass), + .class_init = aspeed_coprocessor_class_init, + .abstract = true, + }, +}; + +DEFINE_TYPES(aspeed_coprocessor_types) diff --git a/hw/arm/aspeed_soc_common.c b/hw/arm/aspeed_soc_common.c index 1c4ac93a0ff81..78b6ae18f87ec 100644 --- a/hw/arm/aspeed_soc_common.c +++ b/hw/arm/aspeed_soc_common.c @@ -16,54 +16,45 @@ #include "hw/misc/unimp.h" #include "hw/arm/aspeed_soc.h" #include "hw/char/serial-mm.h" +#include "system/blockdev.h" +#include "system/block-backend.h" +#include "hw/loader.h" +#include "qemu/datadir.h" -const char *aspeed_soc_cpu_type(AspeedSoCClass *sc) +const char 
*aspeed_soc_cpu_type(const char * const *valid_cpu_types) { - assert(sc->valid_cpu_types); - assert(sc->valid_cpu_types[0]); - assert(!sc->valid_cpu_types[1]); - return sc->valid_cpu_types[0]; + assert(valid_cpu_types); + assert(valid_cpu_types[0]); + assert(!valid_cpu_types[1]); + return valid_cpu_types[0]; } -qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev) +bool aspeed_soc_uart_realize(MemoryRegion *memory, SerialMM *smm, + const hwaddr addr, Error **errp) { - return ASPEED_SOC_GET_CLASS(s)->get_irq(s, dev); -} - -bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp) -{ - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - SerialMM *smm; - - for (int i = 0, uart = sc->uarts_base; i < sc->uarts_num; i++, uart++) { - smm = &s->uart[i]; - - /* Chardev property is set by the machine. */ - qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); - qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400); - qdev_set_legacy_instance_id(DEVICE(smm), sc->memmap[uart], 2); - qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); - if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) { - return false; - } - - sysbus_connect_irq(SYS_BUS_DEVICE(smm), 0, aspeed_soc_get_irq(s, uart)); - aspeed_mmio_map(s, SYS_BUS_DEVICE(smm), 0, sc->memmap[uart]); + /* Chardev property is set by the machine. 
*/ + qdev_prop_set_uint8(DEVICE(smm), "regshift", 2); + qdev_prop_set_uint32(DEVICE(smm), "baudbase", 38400); + qdev_set_legacy_instance_id(DEVICE(smm), addr, 2); + qdev_prop_set_uint8(DEVICE(smm), "endianness", DEVICE_LITTLE_ENDIAN); + if (!sysbus_realize(SYS_BUS_DEVICE(smm), errp)) { + return false; } + aspeed_mmio_map(memory, SYS_BUS_DEVICE(smm), 0, addr); return true; } -void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr) +void aspeed_soc_uart_set_chr(SerialMM *uart, int dev, int uarts_base, + int uarts_num, Chardev *chr) { - AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(s); - int uart_first = aspeed_uart_first(sc); + int uart_first = aspeed_uart_first(uarts_base); int uart_index = aspeed_uart_index(dev); int i = uart_index - uart_first; - g_assert(0 <= i && i < ARRAY_SIZE(s->uart) && i < sc->uarts_num); - qdev_prop_set_chr(DEVICE(&s->uart[i]), "chardev", chr); + g_assert(0 <= i && i < ASPEED_UARTS_NUM && i < uarts_num); + qdev_prop_set_chr(DEVICE(&uart[i]), "chardev", chr); } /* @@ -107,23 +98,115 @@ bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp) return true; } -void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr) +void aspeed_mmio_map(MemoryRegion *memory, SysBusDevice *dev, int n, + hwaddr addr) { - memory_region_add_subregion(s->memory, addr, - sysbus_mmio_get_region(dev, n)); + memory_region_add_subregion(memory, addr, sysbus_mmio_get_region(dev, n)); } -void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, +void aspeed_mmio_map_unimplemented(MemoryRegion *memory, SysBusDevice *dev, const char *name, hwaddr addr, uint64_t size) { qdev_prop_set_string(DEVICE(dev), "name", name); qdev_prop_set_uint64(DEVICE(dev), "size", size); sysbus_realize(dev, &error_abort); - memory_region_add_subregion_overlap(s->memory, addr, + memory_region_add_subregion_overlap(memory, addr, sysbus_mmio_get_region(dev, 0), -1000); } +void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, + 
unsigned int count, int unit0) +{ + int i; + + if (!flashtype) { + return; + } + + for (i = 0; i < count; ++i) { + DriveInfo *dinfo = drive_get(IF_MTD, 0, unit0 + i); + DeviceState *dev; + + dev = qdev_new(flashtype); + if (dinfo) { + qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo)); + } + qdev_prop_set_uint8(dev, "cs", i); + qdev_realize_and_unref(dev, BUS(s->spi), &error_fatal); + } +} + +void aspeed_write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, + Error **errp) +{ + g_autofree void *storage = NULL; + int64_t size; + + /* + * The block backend size should have already been 'validated' by + * the creation of the m25p80 object. + */ + size = blk_getlength(blk); + if (size <= 0) { + error_setg(errp, "failed to get flash size"); + return; + } + + if (rom_size > size) { + rom_size = size; + } + + storage = g_malloc0(rom_size); + if (blk_pread(blk, 0, rom_size, storage, 0) < 0) { + error_setg(errp, "failed to read the initial flash content"); + return; + } + + rom_add_blob_fixed("aspeed.boot_rom", storage, rom_size, addr); +} + +/* + * Create a ROM and copy the flash contents at the expected address + * (0x0). Boots faster than execute-in-place. + */ +void aspeed_install_boot_rom(AspeedSoCState *soc, BlockBackend *blk, + MemoryRegion *boot_rom, uint64_t rom_size) +{ + AspeedSoCClass *sc = ASPEED_SOC_GET_CLASS(soc); + + memory_region_init_rom(boot_rom, NULL, "aspeed.boot_rom", rom_size, + &error_abort); + memory_region_add_subregion_overlap(&soc->spi_boot_container, 0, + boot_rom, 1); + aspeed_write_boot_rom(blk, sc->memmap[ASPEED_DEV_SPI_BOOT], rom_size, + &error_abort); +} + +/* + * This function locates the vbootrom image file specified via the command line + * using the -bios option. It loads the specified image into the vbootrom + * memory region and handles errors if the file cannot be found or loaded. 
+ */ +void aspeed_load_vbootrom(AspeedSoCState *soc, const char *bios_name, + Error **errp) +{ + g_autofree char *filename = NULL; + int ret; + + filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name); + if (!filename) { + error_setg(errp, "Could not find vbootrom image '%s'", bios_name); + return; + } + + ret = load_image_mr(filename, &soc->vbootrom); + if (ret < 0) { + error_setg(errp, "Failed to load vbootrom image '%s'", bios_name); + return; + } +} + static void aspeed_soc_realize(DeviceState *dev, Error **errp) { AspeedSoCState *s = ASPEED_SOC(dev); diff --git a/hw/arm/boot.c b/hw/arm/boot.c index d391cd01bb1b9..e77d8679d8872 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -25,6 +25,7 @@ #include "hw/boards.h" #include "system/reset.h" #include "hw/loader.h" +#include "hw/mem/memory-device.h" #include "elf.h" #include "system/device_tree.h" #include "qemu/config-file.h" @@ -336,81 +337,6 @@ static void set_kernel_args(const struct arm_boot_info *info, AddressSpace *as) WRITE_WORD(p, 0); } -static void set_kernel_args_old(const struct arm_boot_info *info, - AddressSpace *as) -{ - hwaddr p; - const char *s; - int initrd_size = info->initrd_size; - hwaddr base = info->loader_start; - - /* see linux/include/asm-arm/setup.h */ - p = base + KERNEL_ARGS_ADDR; - /* page_size */ - WRITE_WORD(p, 4096); - /* nr_pages */ - WRITE_WORD(p, info->ram_size / 4096); - /* ramdisk_size */ - WRITE_WORD(p, 0); -#define FLAG_READONLY 1 -#define FLAG_RDLOAD 4 -#define FLAG_RDPROMPT 8 - /* flags */ - WRITE_WORD(p, FLAG_READONLY | FLAG_RDLOAD | FLAG_RDPROMPT); - /* rootdev */ - WRITE_WORD(p, (31 << 8) | 0); /* /dev/mtdblock0 */ - /* video_num_cols */ - WRITE_WORD(p, 0); - /* video_num_rows */ - WRITE_WORD(p, 0); - /* video_x */ - WRITE_WORD(p, 0); - /* video_y */ - WRITE_WORD(p, 0); - /* memc_control_reg */ - WRITE_WORD(p, 0); - /* unsigned char sounddefault */ - /* unsigned char adfsdrives */ - /* unsigned char bytes_per_char_h */ - /* unsigned char bytes_per_char_v */ - 
WRITE_WORD(p, 0); - /* pages_in_bank[4] */ - WRITE_WORD(p, 0); - WRITE_WORD(p, 0); - WRITE_WORD(p, 0); - WRITE_WORD(p, 0); - /* pages_in_vram */ - WRITE_WORD(p, 0); - /* initrd_start */ - if (initrd_size) { - WRITE_WORD(p, info->initrd_start); - } else { - WRITE_WORD(p, 0); - } - /* initrd_size */ - WRITE_WORD(p, initrd_size); - /* rd_start */ - WRITE_WORD(p, 0); - /* system_rev */ - WRITE_WORD(p, 0); - /* system_serial_low */ - WRITE_WORD(p, 0); - /* system_serial_high */ - WRITE_WORD(p, 0); - /* mem_fclk_21285 */ - WRITE_WORD(p, 0); - /* zero unused fields */ - while (p < base + KERNEL_ARGS_ADDR + 256 + 1024) { - WRITE_WORD(p, 0); - } - s = info->kernel_cmdline; - if (s) { - address_space_write(as, p, MEMTXATTRS_UNSPECIFIED, s, strlen(s) + 1); - } else { - WRITE_WORD(p, 0); - } -} - static int fdt_add_memory_node(void *fdt, uint32_t acells, hwaddr mem_base, uint32_t scells, hwaddr mem_len, int numa_node_id) @@ -515,6 +441,29 @@ static void fdt_add_psci_node(void *fdt, ARMCPU *armcpu) qemu_fdt_setprop_cell(fdt, "/psci", "migrate", migrate_fn); } +static int fdt_add_pmem_node(void *fdt, uint32_t acells, uint32_t scells, + int64_t mem_base, int64_t size, int64_t node) +{ + int ret; + + g_autofree char *nodename = g_strdup_printf("/pmem@%" PRIx64, mem_base); + + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "compatible", "pmem-region"); + ret = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", acells, + mem_base, scells, size); + if (ret) { + return ret; + } + + if (node >= 0) { + return qemu_fdt_setprop_cell(fdt, nodename, "numa-node-id", + node); + } + + return 0; +} + int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, hwaddr addr_limit, AddressSpace *as, MachineState *ms, ARMCPU *cpu) @@ -525,6 +474,7 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, unsigned int i; hwaddr mem_base, mem_len; char **node_path; + g_autoptr(MemoryDeviceInfoList) md_list = NULL; Error *err = NULL; if 
(binfo->dtb_filename) { @@ -628,6 +578,23 @@ int arm_load_dtb(hwaddr addr, const struct arm_boot_info *binfo, } } + md_list = qmp_memory_device_list(); + for (MemoryDeviceInfoList *m = md_list; m != NULL; m = m->next) { + MemoryDeviceInfo *mi = m->value; + + if (mi->type == MEMORY_DEVICE_INFO_KIND_NVDIMM) { + PCDIMMDeviceInfo *di = mi->u.nvdimm.data; + + rc = fdt_add_pmem_node(fdt, acells, scells, + di->addr, di->size, di->node); + if (rc < 0) { + fprintf(stderr, "couldn't add NVDIMM /pmem@%"PRIx64" node\n", + di->addr); + goto fail; + } + } + } + rc = fdt_path_offset(fdt, "/chosen"); if (rc < 0) { qemu_fdt_add_subnode(fdt, "/chosen"); @@ -760,11 +727,7 @@ static void do_cpu_reset(void *opaque) cpu_set_pc(cs, info->loader_start); if (!have_dtb(info)) { - if (old_param) { - set_kernel_args_old(info, as); - } else { - set_kernel_args(info, as); - } + set_kernel_args(info, as); } } else if (info->secondary_cpu_reset_hook) { info->secondary_cpu_reset_hook(cpu, info); diff --git a/hw/arm/fby35.c b/hw/arm/fby35.c index c14fc2efe9bb7..5a94c847d365e 100644 --- a/hw/arm/fby35.c +++ b/hw/arm/fby35.c @@ -71,9 +71,11 @@ static void fby35_bmc_write_boot_rom(DriveInfo *dinfo, MemoryRegion *mr, static void fby35_bmc_init(Fby35State *s) { AspeedSoCState *soc; + AspeedSoCClass *sc; object_initialize_child(OBJECT(s), "bmc", &s->bmc, "ast2600-a3"); soc = ASPEED_SOC(&s->bmc); + sc = ASPEED_SOC_GET_CLASS(soc); memory_region_init(&s->bmc_memory, OBJECT(&s->bmc), "bmc-memory", UINT64_MAX); @@ -91,7 +93,8 @@ static void fby35_bmc_init(Fby35State *s) &error_abort); object_property_set_int(OBJECT(&s->bmc), "hw-strap2", 0x00000003, &error_abort); - aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART5, serial_hd(0)); + aspeed_soc_uart_set_chr(soc->uart, ASPEED_DEV_UART5, sc->uarts_base, + sc->uarts_num, serial_hd(0)); qdev_realize(DEVICE(&s->bmc), NULL, &error_abort); aspeed_board_init_flashes(&soc->fmc, "n25q00", 2, 0); @@ -118,12 +121,14 @@ static void fby35_bmc_init(Fby35State *s) static void 
fby35_bic_init(Fby35State *s) { AspeedSoCState *soc; + AspeedSoCClass *sc; s->bic_sysclk = clock_new(OBJECT(s), "SYSCLK"); clock_set_hz(s->bic_sysclk, 200000000ULL); object_initialize_child(OBJECT(s), "bic", &s->bic, "ast1030-a1"); soc = ASPEED_SOC(&s->bic); + sc = ASPEED_SOC_GET_CLASS(soc); memory_region_init(&s->bic_memory, OBJECT(&s->bic), "bic-memory", UINT64_MAX); @@ -131,7 +136,8 @@ static void fby35_bic_init(Fby35State *s) qdev_connect_clock_in(DEVICE(&s->bic), "sysclk", s->bic_sysclk); object_property_set_link(OBJECT(&s->bic), "memory", OBJECT(&s->bic_memory), &error_abort); - aspeed_soc_uart_set_chr(soc, ASPEED_DEV_UART5, serial_hd(1)); + aspeed_soc_uart_set_chr(soc->uart, ASPEED_DEV_UART5, sc->uarts_base, + sc->uarts_num, serial_hd(1)); qdev_realize(DEVICE(&s->bic), NULL, &error_abort); aspeed_board_init_flashes(&soc->fmc, "sst25vf032b", 2, 2); diff --git a/hw/arm/meson.build b/hw/arm/meson.build index dc68391305fe1..b88b5b06d7ed0 100644 --- a/hw/arm/meson.build +++ b/hw/arm/meson.build @@ -45,14 +45,15 @@ arm_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( 'aspeed_soc_common.c', 'aspeed_ast2400.c', 'aspeed_ast2600.c', - 'aspeed_ast27x0-ssp.c', - 'aspeed_ast27x0-tsp.c', 'aspeed_ast10x0.c', 'aspeed_eeprom.c', 'fby35.c')) arm_common_ss.add(when: ['CONFIG_ASPEED_SOC', 'TARGET_AARCH64'], if_true: files( 'aspeed_ast27x0.c', - 'aspeed_ast27x0-fc.c',)) + 'aspeed_ast27x0-fc.c', + 'aspeed_ast27x0-ssp.c', + 'aspeed_ast27x0-tsp.c', + 'aspeed_coprocessor_common.c')) arm_common_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2.c')) arm_common_ss.add(when: 'CONFIG_MPS2', if_true: files('mps2-tz.c')) arm_common_ss.add(when: 'CONFIG_MSF2', if_true: files('msf2-soc.c')) diff --git a/hw/arm/raspi4b.c b/hw/arm/raspi4b.c index 20082d52667f7..4df951a0d827a 100644 --- a/hw/arm/raspi4b.c +++ b/hw/arm/raspi4b.c @@ -36,9 +36,8 @@ struct Raspi4bMachineState { * (see https://datasheets.raspberrypi.com/bcm2711/bcm2711-peripherals.pdf * 1.2 Address Map) */ -static int 
raspi_add_memory_node(void *fdt, hwaddr mem_base, hwaddr mem_len) +static void raspi_add_memory_node(void *fdt, hwaddr mem_base, hwaddr mem_len) { - int ret; uint32_t acells, scells; char *nodename = g_strdup_printf("/memory@%" PRIx64, mem_base); @@ -46,19 +45,16 @@ static int raspi_add_memory_node(void *fdt, hwaddr mem_base, hwaddr mem_len) NULL, &error_fatal); scells = qemu_fdt_getprop_cell(fdt, "/", "#size-cells", NULL, &error_fatal); - if (acells == 0 || scells == 0) { - fprintf(stderr, "dtb file invalid (#address-cells or #size-cells 0)\n"); - ret = -1; - } else { - qemu_fdt_add_subnode(fdt, nodename); - qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); - ret = qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", - acells, mem_base, - scells, mem_len); - } + /* validated by arm_load_dtb */ + g_assert(acells && scells); + + qemu_fdt_add_subnode(fdt, nodename); + qemu_fdt_setprop_string(fdt, nodename, "device_type", "memory"); + qemu_fdt_setprop_sized_cells(fdt, nodename, "reg", + acells, mem_base, + scells, mem_len); g_free(nodename); - return ret; } static void raspi4_modify_dtb(const struct arm_boot_info *info, void *fdt) diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index f39b99e5269df..62a7612184198 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -20,6 +20,7 @@ #include "trace.h" #include "exec/target_page.h" #include "hw/core/cpu.h" +#include "hw/pci/pci_bridge.h" #include "hw/qdev-properties.h" #include "qapi/error.h" #include "qemu/jhash.h" @@ -319,7 +320,7 @@ void smmu_iotlb_inv_vmid(SMMUState *s, int vmid) g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid, &vmid); } -inline void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid) +void smmu_iotlb_inv_vmid_s1(SMMUState *s, int vmid) { trace_smmu_iotlb_inv_vmid_s1(vmid); g_hash_table_foreach_remove(s->iotlb, smmu_hash_remove_by_vmid_s1, &vmid); @@ -925,6 +926,7 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) { SMMUState *s = ARM_SMMU(dev); 
SMMUBaseClass *sbc = ARM_SMMU_GET_CLASS(dev); + PCIBus *pci_bus = s->primary_bus; Error *local_err = NULL; sbc->parent_realize(dev, &local_err); @@ -937,11 +939,39 @@ static void smmu_base_realize(DeviceState *dev, Error **errp) g_free, g_free); s->smmu_pcibus_by_busptr = g_hash_table_new(NULL, NULL); - if (s->primary_bus) { - pci_setup_iommu(s->primary_bus, &smmu_ops, s); - } else { + if (!pci_bus) { error_setg(errp, "SMMU is not attached to any PCI bus!"); + return; + } + + /* + * We only allow default PCIe Root Complex(pcie.0) or pxb-pcie based extra + * root complexes to be associated with SMMU. + */ + if (pci_bus_is_express(pci_bus) && pci_bus_is_root(pci_bus) && + object_dynamic_cast(OBJECT(pci_bus)->parent, TYPE_PCI_HOST_BRIDGE)) { + /* + * This condition matches either the default pcie.0, pxb-pcie, or + * pxb-cxl. For both pxb-pcie and pxb-cxl, parent_dev will be set. + * Currently, we don't allow pxb-cxl as it requires further + * verification. Therefore, make sure this is indeed pxb-pcie. 
+ */ + if (pci_bus->parent_dev) { + if (!object_dynamic_cast(OBJECT(pci_bus), TYPE_PXB_PCIE_BUS)) { + goto out_err; + } + } + + if (s->smmu_per_bus) { + pci_setup_iommu_per_bus(pci_bus, &smmu_ops, s); + } else { + pci_setup_iommu(pci_bus, &smmu_ops, s); + } + return; } +out_err: + error_setg(errp, "SMMU should be attached to a default PCIe root complex" + "(pcie.0) or a pxb-pcie based root complex"); } /* @@ -961,6 +991,7 @@ static void smmu_base_reset_exit(Object *obj, ResetType type) static const Property smmu_dev_properties[] = { DEFINE_PROP_UINT8("bus_num", SMMUState, bus_num, 0), + DEFINE_PROP_BOOL("smmu_per_bus", SMMUState, smmu_per_bus, false), DEFINE_PROP_LINK("primary-bus", SMMUState, primary_bus, TYPE_PCI_BUS, PCIBus *), }; diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index ab67972353315..bcf8af8dc731b 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -1996,6 +1996,8 @@ static void smmuv3_class_init(ObjectClass *klass, const void *data) device_class_set_parent_realize(dc, smmu_realize, &c->parent_realize); device_class_set_props(dc, smmuv3_properties); + dc->hotpluggable = false; + dc->user_creatable = true; } static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, diff --git a/hw/arm/stm32f205_soc.c b/hw/arm/stm32f205_soc.c index 229af7fb108b2..e3c7203c6e7fc 100644 --- a/hw/arm/stm32f205_soc.c +++ b/hw/arm/stm32f205_soc.c @@ -66,7 +66,7 @@ static void stm32f205_soc_initfn(Object *obj) TYPE_STM32F2XX_TIMER); } - s->adc_irqs = OR_IRQ(object_new(TYPE_OR_IRQ)); + object_initialize_child(obj, "adc-irq-orgate", &s->adc_irqs, TYPE_OR_IRQ); for (i = 0; i < STM_NUM_ADCS; i++) { object_initialize_child(obj, "adc[*]", &s->adc[i], TYPE_STM32F2XX_ADC); @@ -171,12 +171,12 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) } /* ADC 1 to 3 */ - object_property_set_int(OBJECT(s->adc_irqs), "num-lines", STM_NUM_ADCS, + object_property_set_int(OBJECT(&s->adc_irqs), "num-lines", STM_NUM_ADCS, &error_abort); - if 
(!qdev_realize(DEVICE(s->adc_irqs), NULL, errp)) { + if (!qdev_realize(DEVICE(&s->adc_irqs), NULL, errp)) { return; } - qdev_connect_gpio_out(DEVICE(s->adc_irqs), 0, + qdev_connect_gpio_out(DEVICE(&s->adc_irqs), 0, qdev_get_gpio_in(armv7m, ADC_IRQ)); for (i = 0; i < STM_NUM_ADCS; i++) { @@ -187,7 +187,7 @@ static void stm32f205_soc_realize(DeviceState *dev_soc, Error **errp) busdev = SYS_BUS_DEVICE(dev); sysbus_mmio_map(busdev, 0, adc_addr[i]); sysbus_connect_irq(busdev, 0, - qdev_get_gpio_in(DEVICE(s->adc_irqs), i)); + qdev_get_gpio_in(DEVICE(&s->adc_irqs), i)); } /* SPI 1 and 2 */ diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c index b01fc4f8ef08b..8bb6b60515484 100644 --- a/hw/arm/virt-acpi-build.c +++ b/hw/arm/virt-acpi-build.c @@ -45,6 +45,7 @@ #include "hw/acpi/generic_event_device.h" #include "hw/acpi/tpm.h" #include "hw/acpi/hmat.h" +#include "hw/arm/smmuv3.h" #include "hw/cxl/cxl.h" #include "hw/pci/pcie_host.h" #include "hw/pci/pci.h" @@ -305,29 +306,126 @@ static int iort_idmap_compare(gconstpointer a, gconstpointer b) return idmap_a->input_base - idmap_b->input_base; } -/* Compute ID ranges (RIDs) from RC that are directed to the ITS Group node */ -static void create_rc_its_idmaps(GArray *its_idmaps, GArray *smmu_idmaps) +typedef struct AcpiIortSMMUv3Dev { + int irq; + hwaddr base; + GArray *rc_smmu_idmaps; + /* Offset of the SMMUv3 IORT Node relative to the start of the IORT */ + size_t offset; +} AcpiIortSMMUv3Dev; + +/* + * Populate the struct AcpiIortSMMUv3Dev for the legacy SMMUv3 and + * return the total number of associated idmaps. 
+ */ +static int populate_smmuv3_legacy_dev(GArray *sdev_blob) { - AcpiIortIdMapping *idmap; - AcpiIortIdMapping next_range = {0}; + VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); + AcpiIortSMMUv3Dev sdev; + sdev.rc_smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + object_child_foreach_recursive(object_get_root(), iort_host_bridges, + sdev.rc_smmu_idmaps); /* - * Based on the RID ranges that are directed to the SMMU, determine the - * bypassed RID ranges, i.e., the ones that are directed to the ITS Group - * node and do not pass through the SMMU, by subtracting the SMMU-bound - * ranges from the full RID range (0x0000–0xFFFF). + * There can be only one legacy SMMUv3("iommu=smmuv3") as it is a machine + * wide one. Since it may cover multiple PCIe RCs(based on "bypass_iommu" + * property), may have multiple SMMUv3 idmaps. Sort it by input_base. */ - for (int i = 0; i < smmu_idmaps->len; i++) { - idmap = &g_array_index(smmu_idmaps, AcpiIortIdMapping, i); + g_array_sort(sdev.rc_smmu_idmaps, iort_idmap_compare); - if (next_range.input_base < idmap->input_base) { - next_range.id_count = idmap->input_base - next_range.input_base; - g_array_append_val(its_idmaps, next_range); - } + sdev.base = vms->memmap[VIRT_SMMU].base; + sdev.irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; + g_array_append_val(sdev_blob, sdev); + return sdev.rc_smmu_idmaps->len; +} - next_range.input_base = idmap->input_base + idmap->id_count; +static int smmuv3_dev_idmap_compare(gconstpointer a, gconstpointer b) +{ + AcpiIortSMMUv3Dev *sdev_a = (AcpiIortSMMUv3Dev *)a; + AcpiIortSMMUv3Dev *sdev_b = (AcpiIortSMMUv3Dev *)b; + AcpiIortIdMapping *map_a = &g_array_index(sdev_a->rc_smmu_idmaps, + AcpiIortIdMapping, 0); + AcpiIortIdMapping *map_b = &g_array_index(sdev_b->rc_smmu_idmaps, + AcpiIortIdMapping, 0); + return map_a->input_base - map_b->input_base; +} + +static int iort_smmuv3_devices(Object *obj, void *opaque) +{ + VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine()); 
+ GArray *sdev_blob = opaque; + AcpiIortIdMapping idmap; + PlatformBusDevice *pbus; + AcpiIortSMMUv3Dev sdev; + int min_bus, max_bus; + SysBusDevice *sbdev; + PCIBus *bus; + + if (!object_dynamic_cast(obj, TYPE_ARM_SMMUV3)) { + return 0; } + bus = PCI_BUS(object_property_get_link(obj, "primary-bus", &error_abort)); + pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + sbdev = SYS_BUS_DEVICE(obj); + sdev.base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + sdev.base += vms->memmap[VIRT_PLATFORM_BUS].base; + sdev.irq = platform_bus_get_irqn(pbus, sbdev, 0); + sdev.irq += vms->irqmap[VIRT_PLATFORM_BUS]; + sdev.irq += ARM_SPI_BASE; + + pci_bus_range(bus, &min_bus, &max_bus); + sdev.rc_smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + idmap.input_base = min_bus << 8, + idmap.id_count = (max_bus - min_bus + 1) << 8, + g_array_append_val(sdev.rc_smmu_idmaps, idmap); + g_array_append_val(sdev_blob, sdev); + return 0; +} + +/* + * Populate the struct AcpiIortSMMUv3Dev for all SMMUv3 devices and + * return the total number of idmaps. + */ +static int populate_smmuv3_dev(GArray *sdev_blob) +{ + object_child_foreach_recursive(object_get_root(), + iort_smmuv3_devices, sdev_blob); + /* Sort the smmuv3 devices(if any) by smmu idmap input_base */ + g_array_sort(sdev_blob, smmuv3_dev_idmap_compare); + /* + * Since each SMMUv3 dev is assocaited with specific host bridge, + * total number of idmaps equals to total number of smmuv3 devices. 
+ */ + return sdev_blob->len; +} + +/* Compute ID ranges (RIDs) from RC that are directed to the ITS Group node */ +static void create_rc_its_idmaps(GArray *its_idmaps, GArray *smmuv3_devs) +{ + AcpiIortIdMapping *idmap; + AcpiIortIdMapping next_range = {0}; + AcpiIortSMMUv3Dev *sdev; + + for (int i = 0; i < smmuv3_devs->len; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + /* + * Based on the RID ranges that are directed to the SMMU, determine the + * bypassed RID ranges, i.e., the ones that are directed to the ITS + * Group node and do not pass through the SMMU, by subtracting the + * SMMU-bound ranges from the full RID range (0x0000–0xFFFF). + */ + for (int j = 0; j < sdev->rc_smmu_idmaps->len; j++) { + idmap = &g_array_index(sdev->rc_smmu_idmaps, AcpiIortIdMapping, j); + + if (next_range.input_base < idmap->input_base) { + next_range.id_count = idmap->input_base - next_range.input_base; + g_array_append_val(its_idmaps, next_range); + } + + next_range.input_base = idmap->input_base + idmap->id_count; + } + } /* * Append the last RC -> ITS ID mapping. 
* @@ -341,7 +439,6 @@ static void create_rc_its_idmaps(GArray *its_idmaps, GArray *smmu_idmaps) } } - /* * Input Output Remapping Table (IORT) * Conforms to "IO Remapping Table System Software on ARM Platforms", @@ -351,9 +448,12 @@ static void build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { int i, nb_nodes, rc_mapping_count; - size_t node_size, smmu_offset = 0; + AcpiIortSMMUv3Dev *sdev; + size_t node_size; + int num_smmus = 0; uint32_t id = 0; - GArray *rc_smmu_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); + int rc_smmu_idmaps_len = 0; + GArray *smmuv3_devs = g_array_new(false, true, sizeof(AcpiIortSMMUv3Dev)); GArray *rc_its_idmaps = g_array_new(false, true, sizeof(AcpiIortIdMapping)); AcpiTable table = { .sig = "IORT", .rev = 3, .oem_id = vms->oem_id, @@ -361,22 +461,23 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) /* Table 2 The IORT */ acpi_table_begin(&table, table_data); - if (vms->iommu == VIRT_IOMMU_SMMUV3) { - object_child_foreach_recursive(object_get_root(), - iort_host_bridges, rc_smmu_idmaps); - - /* Sort the smmu idmap by input_base */ - g_array_sort(rc_smmu_idmaps, iort_idmap_compare); + if (vms->legacy_smmuv3_present) { + rc_smmu_idmaps_len = populate_smmuv3_legacy_dev(smmuv3_devs); + } else { + rc_smmu_idmaps_len = populate_smmuv3_dev(smmuv3_devs); + } - nb_nodes = 2; /* RC and SMMUv3 */ - rc_mapping_count = rc_smmu_idmaps->len; + num_smmus = smmuv3_devs->len; + if (num_smmus) { + nb_nodes = num_smmus + 1; /* RC and SMMUv3 */ + rc_mapping_count = rc_smmu_idmaps_len; if (vms->its) { /* * Knowing the ID ranges from the RC to the SMMU, it's possible to * determine the ID ranges from RC that go directly to ITS. 
*/ - create_rc_its_idmaps(rc_its_idmaps, rc_smmu_idmaps); + create_rc_its_idmaps(rc_its_idmaps, smmuv3_devs); nb_nodes++; /* ITS */ rc_mapping_count += rc_its_idmaps->len; @@ -411,9 +512,10 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, 0 /* MADT translation_id */, 4); } - if (vms->iommu == VIRT_IOMMU_SMMUV3) { - int irq = vms->irqmap[VIRT_SMMU] + ARM_SPI_BASE; + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); int smmu_mapping_count, offset_to_id_array; + int irq = sdev->irq; if (vms->its) { smmu_mapping_count = 1; /* ITS Group node */ @@ -422,7 +524,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) smmu_mapping_count = 0; /* No ID mappings */ offset_to_id_array = 0; /* No ID mappings array */ } - smmu_offset = table_data->len - table.table_offset; + sdev->offset = table_data->len - table.table_offset; /* Table 9 SMMUv3 Format */ build_append_int_noprefix(table_data, 4 /* SMMUv3 */, 1); /* Type */ node_size = SMMU_V3_ENTRY_SIZE + @@ -435,7 +537,7 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) /* Reference to ID Array */ build_append_int_noprefix(table_data, offset_to_id_array, 4); /* Base address */ - build_append_int_noprefix(table_data, vms->memmap[VIRT_SMMU].base, 8); + build_append_int_noprefix(table_data, sdev->base, 8); /* Flags */ build_append_int_noprefix(table_data, 1 /* COHACC Override */, 4); build_append_int_noprefix(table_data, 0, 4); /* Reserved */ @@ -486,21 +588,26 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, 0, 3); /* Reserved */ /* Output Reference */ - if (vms->iommu == VIRT_IOMMU_SMMUV3) { + if (num_smmus) { AcpiIortIdMapping *range; - /* - * Map RIDs (input) from RC to SMMUv3 nodes: RC -> SMMUv3. 
- * - * N.B.: The mapping from SMMUv3 to ITS Group node (SMMUv3 -> ITS) is - * defined in the SMMUv3 table, where all SMMUv3 IDs are mapped to the - * ITS Group node, if ITS is available. - */ - for (i = 0; i < rc_smmu_idmaps->len; i++) { - range = &g_array_index(rc_smmu_idmaps, AcpiIortIdMapping, i); - /* Output IORT node is the SMMUv3 node. */ - build_iort_id_mapping(table_data, range->input_base, - range->id_count, smmu_offset); + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + + /* + * Map RIDs (input) from RC to SMMUv3 nodes: RC -> SMMUv3. + * + * N.B.: The mapping from SMMUv3 to ITS Group node (SMMUv3 -> ITS) + * is defined in the SMMUv3 table, where all SMMUv3 IDs are mapped + * to the ITS Group node, if ITS is available. + */ + for (int j = 0; j < sdev->rc_smmu_idmaps->len; j++) { + range = &g_array_index(sdev->rc_smmu_idmaps, + AcpiIortIdMapping, j); + /* Output IORT node is the SMMUv3 node. */ + build_iort_id_mapping(table_data, range->input_base, + range->id_count, sdev->offset); + } } if (vms->its) { @@ -525,8 +632,12 @@ build_iort(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } acpi_table_end(linker, &table); - g_array_free(rc_smmu_idmaps, true); g_array_free(rc_its_idmaps, true); + for (i = 0; i < num_smmus; i++) { + sdev = &g_array_index(smmuv3_devs, AcpiIortSMMUv3Dev, i); + g_array_free(sdev->rc_smmu_idmaps, true); + } + g_array_free(smmuv3_devs, true); } /* @@ -955,6 +1066,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) } acpi_dsdt_add_power_button(scope); + aml_append(scope, aml_error_device()); #ifdef CONFIG_TPM acpi_dsdt_add_tpm(scope, vms); #endif @@ -1014,6 +1126,15 @@ static void acpi_align_size(GArray *blob, unsigned align) g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); } +static const AcpiNotificationSourceId hest_ghes_notify[] = { + { ACPI_HEST_SRC_ID_SYNC, ACPI_GHES_NOTIFY_SEA }, + { ACPI_HEST_SRC_ID_QMP, ACPI_GHES_NOTIFY_GPIO }, 
+}; + +static const AcpiNotificationSourceId hest_ghes_notify_10_0[] = { + { ACPI_HEST_SRC_ID_SYNC, ACPI_GHES_NOTIFY_SEA }, +}; + static void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) { @@ -1070,9 +1191,28 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) build_dbg2(tables_blob, tables->linker, vms); if (vms->ras) { - acpi_add_table(table_offsets, tables_blob); - acpi_build_hest(tables_blob, tables->hardware_errors, tables->linker, - vms->oem_id, vms->oem_table_id); + AcpiGedState *acpi_ged_state; + static const AcpiNotificationSourceId *notify; + unsigned int notify_sz; + AcpiGhesState *ags; + + acpi_ged_state = ACPI_GED(vms->acpi_dev); + ags = &acpi_ged_state->ghes_state; + if (ags) { + acpi_add_table(table_offsets, tables_blob); + + if (!ags->use_hest_addr) { + notify = hest_ghes_notify_10_0; + notify_sz = ARRAY_SIZE(hest_ghes_notify_10_0); + } else { + notify = hest_ghes_notify; + notify_sz = ARRAY_SIZE(hest_ghes_notify); + } + + acpi_build_hest(ags, tables_blob, tables->hardware_errors, + tables->linker, notify, notify_sz, + vms->oem_id, vms->oem_table_id); + } } if (ms->numa_state->num_nodes > 0) { diff --git a/hw/arm/virt.c b/hw/arm/virt.c index ef6be3660f5fb..175023897a735 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -38,8 +38,6 @@ #include "hw/arm/primecell.h" #include "hw/arm/virt.h" #include "hw/block/flash.h" -#include "hw/vfio/vfio-calxeda-xgmac.h" -#include "hw/vfio/vfio-amd-xgbe.h" #include "hw/display/ramfb.h" #include "net/net.h" #include "system/device_tree.h" @@ -50,12 +48,14 @@ #include "system/kvm.h" #include "system/hvf.h" #include "system/qtest.h" +#include "system/system.h" #include "hw/loader.h" #include "qapi/error.h" #include "qemu/bitops.h" #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/module.h" +#include "hw/pci/pci_bus.h" #include "hw/pci-host/gpex.h" #include "hw/pci-bridge/pci_expander_bridge.h" #include "hw/virtio/virtio-pci.h" @@ -150,6 +150,9 @@ static void 
arm_virt_compat_set(MachineClass *mc) #define LEGACY_RAMLIMIT_GB 255 #define LEGACY_RAMLIMIT_BYTES (LEGACY_RAMLIMIT_GB * GiB) +/* MMIO region size for SMMUv3 */ +#define SMMU_IO_LEN 0x20000 + /* Addresses and sizes of our components. * 0..128MB is space for a flash device so we can run bootrom code such as UEFI. * 128MB..256MB is used for miscellaneous device I/O. @@ -181,7 +184,7 @@ static const MemMapEntry base_memmap[] = { [VIRT_FW_CFG] = { 0x09020000, 0x00000018 }, [VIRT_GPIO] = { 0x09030000, 0x00001000 }, [VIRT_UART1] = { 0x09040000, 0x00001000 }, - [VIRT_SMMU] = { 0x09050000, 0x00020000 }, + [VIRT_SMMU] = { 0x09050000, SMMU_IO_LEN }, [VIRT_PCDIMM_ACPI] = { 0x09070000, MEMORY_HOTPLUG_IO_LEN }, [VIRT_ACPI_GED] = { 0x09080000, ACPI_GED_EVT_SEL_LEN }, [VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN}, @@ -690,7 +693,7 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms) MachineState *ms = MACHINE(vms); SysBusDevice *sbdev; int irq = vms->irqmap[VIRT_ACPI_GED]; - uint32_t event = ACPI_GED_PWR_DOWN_EVT; + uint32_t event = ACPI_GED_PWR_DOWN_EVT | ACPI_GED_ERROR_EVT; bool acpi_pcihp; if (ms->ram_slots) { @@ -1047,6 +1050,20 @@ static void virt_powerdown_req(Notifier *n, void *opaque) } } +static void virt_generic_error_req(Notifier *n, void *opaque) +{ + uint16_t *source_id = opaque; + + /* Currently, only QMP source ID is async */ + if (*source_id != ACPI_HEST_SRC_ID_QMP) { + return; + } + + VirtMachineState *s = container_of(n, VirtMachineState, generic_error_notifier); + + acpi_send_event(s->acpi_dev, ACPI_GENERIC_ERROR); +} + static void create_gpio_keys(char *fdt, DeviceState *pl061_dev, uint32_t phandle) { @@ -1443,19 +1460,66 @@ static void create_pcie_irq_map(const MachineState *ms, 0x7 /* PCI irq */); } +static void create_smmuv3_dt_bindings(const VirtMachineState *vms, hwaddr base, + hwaddr size, int irq) +{ + char *node; + const char compat[] = "arm,smmu-v3"; + const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror"; + MachineState 
*ms = MACHINE(vms); + + node = g_strdup_printf("/smmuv3@%" PRIx64, base); + qemu_fdt_add_subnode(ms->fdt, node); + qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); + qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", 2, base, 2, size); + + qemu_fdt_setprop_cells(ms->fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq , GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, + GIC_FDT_IRQ_TYPE_SPI, irq + 1, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, + GIC_FDT_IRQ_TYPE_SPI, irq + 2, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, + GIC_FDT_IRQ_TYPE_SPI, irq + 3, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); + + qemu_fdt_setprop(ms->fdt, node, "interrupt-names", irq_names, + sizeof(irq_names)); + + qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0); + qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1); + qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle); + g_free(node); +} + +static void create_smmuv3_dev_dtb(VirtMachineState *vms, + DeviceState *dev, PCIBus *bus) +{ + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + SysBusDevice *sbdev = SYS_BUS_DEVICE(dev); + int irq = platform_bus_get_irqn(pbus, sbdev, 0); + hwaddr base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + MachineState *ms = MACHINE(vms); + + if (!(vms->bootinfo.firmware_loaded && virt_is_acpi_enabled(vms)) && + strcmp("pcie.0", bus->qbus.name)) { + warn_report("SMMUv3 device only supported with pcie.0 for DT"); + return; + } + base += vms->memmap[VIRT_PLATFORM_BUS].base; + irq += vms->irqmap[VIRT_PLATFORM_BUS]; + + vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt); + create_smmuv3_dt_bindings(vms, base, SMMU_IO_LEN, irq); + qemu_fdt_setprop_cells(ms->fdt, vms->pciehb_nodename, "iommu-map", + 0x0, vms->iommu_phandle, 0x0, 0x10000); +} + static void create_smmu(const VirtMachineState *vms, PCIBus *bus) { VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); - char *node; - const char compat[] = "arm,smmu-v3"; int irq = vms->irqmap[VIRT_SMMU]; int i; hwaddr base = vms->memmap[VIRT_SMMU].base; hwaddr 
size = vms->memmap[VIRT_SMMU].size; - const char irq_names[] = "eventq\0priq\0cmdq-sync\0gerror"; DeviceState *dev; - MachineState *ms = MACHINE(vms); if (vms->iommu != VIRT_IOMMU_SMMUV3 || !vms->iommu_phandle) { return; @@ -1474,27 +1538,7 @@ static void create_smmu(const VirtMachineState *vms, sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, qdev_get_gpio_in(vms->gic, irq + i)); } - - node = g_strdup_printf("/smmuv3@%" PRIx64, base); - qemu_fdt_add_subnode(ms->fdt, node); - qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); - qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", 2, base, 2, size); - - qemu_fdt_setprop_cells(ms->fdt, node, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, irq , GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, - GIC_FDT_IRQ_TYPE_SPI, irq + 1, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, - GIC_FDT_IRQ_TYPE_SPI, irq + 2, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI, - GIC_FDT_IRQ_TYPE_SPI, irq + 3, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); - - qemu_fdt_setprop(ms->fdt, node, "interrupt-names", irq_names, - sizeof(irq_names)); - - qemu_fdt_setprop(ms->fdt, node, "dma-coherent", NULL, 0); - - qemu_fdt_setprop_cell(ms->fdt, node, "#iommu-cells", 1); - - qemu_fdt_setprop_cell(ms->fdt, node, "phandle", vms->iommu_phandle); - g_free(node); + create_smmuv3_dt_bindings(vms, base, size, irq); } static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) @@ -1650,6 +1694,7 @@ static void create_pcie(VirtMachineState *vms) qemu_fdt_setprop_cells(ms->fdt, nodename, "iommu-map", 0x0, vms->iommu_phandle, 0x0, 0x10000); } + vms->legacy_smmuv3_present = true; break; default: g_assert_not_reached(); @@ -2469,6 +2514,9 @@ static void machvirt_init(MachineState *machine) if (has_ged && aarch64 && firmware_loaded && virt_is_acpi_enabled(vms)) { vms->acpi_dev = create_acpi_ged(vms); + vms->generic_error_notifier.notify = virt_generic_error_req; + notifier_list_add(&acpi_generic_error_notifiers, + &vms->generic_error_notifier); } else { create_gpio_devices(vms, VIRT_GPIO, sysmem); } @@ -2917,7 +2965,7 @@ 
static void virt_memory_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev, const MachineState *ms = MACHINE(hotplug_dev); const bool is_nvdimm = object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM); - if (!vms->acpi_dev) { + if (!vms->acpi_dev && !(is_nvdimm && !dev->hotplugged)) { error_setg(errp, "memory hotplug is not enabled: missing acpi-ged device"); return; @@ -2949,8 +2997,10 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev, nvdimm_plug(ms->nvdimms_state); } - hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev), - dev, &error_abort); + if (vms->acpi_dev) { + hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev), + dev, &error_abort); + } } static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, @@ -2995,6 +3045,16 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, qlist_append_str(reserved_regions, resv_prop_str); qdev_prop_set_array(dev, "reserved-regions", reserved_regions); g_free(resv_prop_str); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_ARM_SMMUV3)) { + if (vms->legacy_smmuv3_present || vms->iommu == VIRT_IOMMU_VIRTIO) { + error_setg(errp, "virt machine already has %s set. " + "Doesn't support incompatible iommus", + (vms->legacy_smmuv3_present) ? 
+ "iommu=smmuv3" : "virtio-iommu"); + } else if (vms->iommu == VIRT_IOMMU_NONE) { + /* The new SMMUv3 device is specific to the PCI bus */ + object_property_set_bool(OBJECT(dev), "smmu_per_bus", true, NULL); + } } } @@ -3018,6 +3078,22 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, virtio_md_pci_plug(VIRTIO_MD_PCI(dev), MACHINE(hotplug_dev), errp); } + if (object_dynamic_cast(OBJECT(dev), TYPE_ARM_SMMUV3)) { + if (!vms->legacy_smmuv3_present && vms->platform_bus_dev) { + PCIBus *bus; + + bus = PCI_BUS(object_property_get_link(OBJECT(dev), "primary-bus", + &error_abort)); + if (pci_bus_bypass_iommu(bus)) { + error_setg(errp, "Bypass option cannot be set for SMMUv3 " + "associated PCIe RC"); + return; + } + + create_smmuv3_dev_dtb(vms, dev, bus); + } + } + if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { PCIDevice *pdev = PCI_DEVICE(dev); @@ -3215,11 +3291,9 @@ static void virt_machine_class_init(ObjectClass *oc, const void *data) * configuration of the particular instance. 
*/ mc->max_cpus = 512; - machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_CALXEDA_XGMAC); - machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_AMD_XGBE); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); - machine_class_allow_dynamic_sysbus_dev(mc, TYPE_VFIO_PLATFORM); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS); + machine_class_allow_dynamic_sysbus_dev(mc, TYPE_ARM_SMMUV3); #ifdef CONFIG_TPM machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS); #endif @@ -3455,10 +3529,18 @@ static void machvirt_machine_init(void) } type_init(machvirt_machine_init); +static void virt_machine_10_2_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(10, 2) + static void virt_machine_10_1_options(MachineClass *mc) { + virt_machine_10_2_options(mc); + mc->smbios_memory_device_size = 2047 * TiB; + compat_props_add(mc->compat_props, hw_compat_10_1, hw_compat_10_1_len); } -DEFINE_VIRT_MACHINE_AS_LATEST(10, 1) +DEFINE_VIRT_MACHINE(10, 1) static void virt_machine_10_0_options(MachineClass *mc) { diff --git a/hw/arm/xlnx-versal-virt.c b/hw/arm/xlnx-versal-virt.c index adadbb7290258..149b448546ee2 100644 --- a/hw/arm/xlnx-versal-virt.c +++ b/hw/arm/xlnx-versal-virt.c @@ -1,7 +1,8 @@ /* - * Xilinx Versal Virtual board. + * AMD/Xilinx Versal family Virtual board. * * Copyright (c) 2018 Xilinx Inc. + * Copyright (c) 2025 Advanced Micro Devices, Inc. * Written by Edgar E. 
Iglesias * * This program is free software; you can redistribute it and/or modify @@ -13,18 +14,22 @@ #include "qemu/error-report.h" #include "qapi/error.h" #include "system/device_tree.h" +#include "system/address-spaces.h" #include "hw/block/flash.h" #include "hw/boards.h" #include "hw/sysbus.h" #include "hw/arm/fdt.h" -#include "hw/qdev-properties.h" #include "hw/arm/xlnx-versal.h" #include "hw/arm/boot.h" -#include "target/arm/multiprocessing.h" #include "qom/object.h" +#include "target/arm/cpu.h" -#define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("xlnx-versal-virt") -OBJECT_DECLARE_SIMPLE_TYPE(VersalVirt, XLNX_VERSAL_VIRT_MACHINE) +#define TYPE_XLNX_VERSAL_VIRT_BASE_MACHINE \ + MACHINE_TYPE_NAME("amd-versal-virt-base") +OBJECT_DECLARE_TYPE(VersalVirt, VersalVirtClass, XLNX_VERSAL_VIRT_BASE_MACHINE) + +#define TYPE_XLNX_VERSAL_VIRT_MACHINE MACHINE_TYPE_NAME("amd-versal-virt") +#define TYPE_XLNX_VERSAL2_VIRT_MACHINE MACHINE_TYPE_NAME("amd-versal2-virt") #define XLNX_VERSAL_NUM_OSPI_FLASH 4 @@ -35,28 +40,27 @@ struct VersalVirt { void *fdt; int fdt_size; - struct { - uint32_t gic; - uint32_t ethernet_phy[2]; - uint32_t clk_125Mhz; - uint32_t clk_25Mhz; - uint32_t usb; - uint32_t dwc; - uint32_t canfd[2]; - } phandle; struct arm_boot_info binfo; - CanBusState *canbus[XLNX_VERSAL_NR_CANFD]; + CanBusState **canbus; + struct { - bool secure; + char *ospi_model; } cfg; - char *ospi_model; +}; + +struct VersalVirtClass { + MachineClass parent_class; + + VersalVersion version; }; static void fdt_create(VersalVirt *s) { MachineClass *mc = MACHINE_GET_CLASS(s); - int i; + VersalVirtClass *vvc = XLNX_VERSAL_VIRT_BASE_MACHINE_GET_CLASS(s); + const char versal_compat[] = "amd-versal-virt\0xlnx-versal-virt"; + const char versal2_compat[] = "amd-versal2-virt"; s->fdt = create_device_tree(&s->fdt_size); if (!s->fdt) { @@ -64,392 +68,26 @@ static void fdt_create(VersalVirt *s) exit(1); } - /* Allocate all phandles. 
*/ - s->phandle.gic = qemu_fdt_alloc_phandle(s->fdt); - for (i = 0; i < ARRAY_SIZE(s->phandle.ethernet_phy); i++) { - s->phandle.ethernet_phy[i] = qemu_fdt_alloc_phandle(s->fdt); - } - s->phandle.clk_25Mhz = qemu_fdt_alloc_phandle(s->fdt); - s->phandle.clk_125Mhz = qemu_fdt_alloc_phandle(s->fdt); - - s->phandle.usb = qemu_fdt_alloc_phandle(s->fdt); - s->phandle.dwc = qemu_fdt_alloc_phandle(s->fdt); /* Create /chosen node for load_dtb. */ qemu_fdt_add_subnode(s->fdt, "/chosen"); + qemu_fdt_add_subnode(s->fdt, "/aliases"); /* Header */ - qemu_fdt_setprop_cell(s->fdt, "/", "interrupt-parent", s->phandle.gic); - qemu_fdt_setprop_cell(s->fdt, "/", "#size-cells", 0x2); - qemu_fdt_setprop_cell(s->fdt, "/", "#address-cells", 0x2); qemu_fdt_setprop_string(s->fdt, "/", "model", mc->desc); - qemu_fdt_setprop_string(s->fdt, "/", "compatible", "xlnx-versal-virt"); -} - -static void fdt_add_clk_node(VersalVirt *s, const char *name, - unsigned int freq_hz, uint32_t phandle) -{ - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop_cell(s->fdt, name, "phandle", phandle); - qemu_fdt_setprop_cell(s->fdt, name, "clock-frequency", freq_hz); - qemu_fdt_setprop_cell(s->fdt, name, "#clock-cells", 0x0); - qemu_fdt_setprop_string(s->fdt, name, "compatible", "fixed-clock"); - qemu_fdt_setprop(s->fdt, name, "u-boot,dm-pre-reloc", NULL, 0); -} - -static void fdt_add_cpu_nodes(VersalVirt *s, uint32_t psci_conduit) -{ - int i; - - qemu_fdt_add_subnode(s->fdt, "/cpus"); - qemu_fdt_setprop_cell(s->fdt, "/cpus", "#size-cells", 0x0); - qemu_fdt_setprop_cell(s->fdt, "/cpus", "#address-cells", 1); - - for (i = XLNX_VERSAL_NR_ACPUS - 1; i >= 0; i--) { - char *name = g_strdup_printf("/cpus/cpu@%d", i); - ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); - - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop_cell(s->fdt, name, "reg", - arm_cpu_mp_affinity(armcpu)); - if (psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { - qemu_fdt_setprop_string(s->fdt, name, "enable-method", "psci"); - } - 
qemu_fdt_setprop_string(s->fdt, name, "device_type", "cpu"); - qemu_fdt_setprop_string(s->fdt, name, "compatible", - armcpu->dtb_compatible); - g_free(name); - } -} - -static void fdt_add_gic_nodes(VersalVirt *s) -{ - char *nodename; - - nodename = g_strdup_printf("/gic@%x", MM_GIC_APU_DIST_MAIN); - qemu_fdt_add_subnode(s->fdt, nodename); - qemu_fdt_setprop_cell(s->fdt, nodename, "phandle", s->phandle.gic); - qemu_fdt_setprop_cells(s->fdt, nodename, "interrupts", - GIC_FDT_IRQ_TYPE_PPI, VERSAL_GIC_MAINT_IRQ, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop(s->fdt, nodename, "interrupt-controller", NULL, 0); - qemu_fdt_setprop_sized_cells(s->fdt, nodename, "reg", - 2, MM_GIC_APU_DIST_MAIN, - 2, MM_GIC_APU_DIST_MAIN_SIZE, - 2, MM_GIC_APU_REDIST_0, - 2, MM_GIC_APU_REDIST_0_SIZE); - qemu_fdt_setprop_cell(s->fdt, nodename, "#interrupt-cells", 3); - qemu_fdt_setprop_string(s->fdt, nodename, "compatible", "arm,gic-v3"); - g_free(nodename); -} - -static void fdt_add_timer_nodes(VersalVirt *s) -{ - const char compat[] = "arm,armv8-timer"; - uint32_t irqflags = GIC_FDT_IRQ_FLAGS_LEVEL_HI; - - qemu_fdt_add_subnode(s->fdt, "/timer"); - qemu_fdt_setprop_cells(s->fdt, "/timer", "interrupts", - GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_S_EL1_IRQ, irqflags, - GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_NS_EL1_IRQ, irqflags, - GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_VIRT_IRQ, irqflags, - GIC_FDT_IRQ_TYPE_PPI, VERSAL_TIMER_NS_EL2_IRQ, irqflags); - qemu_fdt_setprop(s->fdt, "/timer", "compatible", - compat, sizeof(compat)); -} - -static void fdt_add_usb_xhci_nodes(VersalVirt *s) -{ - const char clocknames[] = "bus_clk\0ref_clk"; - const char irq_name[] = "dwc_usb3"; - const char compatVersalDWC3[] = "xlnx,versal-dwc3"; - const char compatDWC3[] = "snps,dwc3"; - char *name = g_strdup_printf("/usb@%" PRIx32, MM_USB2_CTRL_REGS); - - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop(s->fdt, name, "compatible", - compatVersalDWC3, sizeof(compatVersalDWC3)); - qemu_fdt_setprop_sized_cells(s->fdt, name, 
"reg", - 2, MM_USB2_CTRL_REGS, - 2, MM_USB2_CTRL_REGS_SIZE); - qemu_fdt_setprop(s->fdt, name, "clock-names", - clocknames, sizeof(clocknames)); - qemu_fdt_setprop_cells(s->fdt, name, "clocks", - s->phandle.clk_25Mhz, s->phandle.clk_125Mhz); - qemu_fdt_setprop(s->fdt, name, "ranges", NULL, 0); - qemu_fdt_setprop_cell(s->fdt, name, "#address-cells", 2); - qemu_fdt_setprop_cell(s->fdt, name, "#size-cells", 2); - qemu_fdt_setprop_cell(s->fdt, name, "phandle", s->phandle.usb); - g_free(name); - - name = g_strdup_printf("/usb@%" PRIx32 "/dwc3@%" PRIx32, - MM_USB2_CTRL_REGS, MM_USB_0); - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop(s->fdt, name, "compatible", - compatDWC3, sizeof(compatDWC3)); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, MM_USB_0, 2, MM_USB_0_SIZE); - qemu_fdt_setprop(s->fdt, name, "interrupt-names", - irq_name, sizeof(irq_name)); - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, VERSAL_USB0_IRQ_0, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop_cell(s->fdt, name, - "snps,quirk-frame-length-adjustment", 0x20); - qemu_fdt_setprop_cells(s->fdt, name, "#stream-id-cells", 1); - qemu_fdt_setprop_string(s->fdt, name, "dr_mode", "host"); - qemu_fdt_setprop_string(s->fdt, name, "phy-names", "usb3-phy"); - qemu_fdt_setprop(s->fdt, name, "snps,dis_u2_susphy_quirk", NULL, 0); - qemu_fdt_setprop(s->fdt, name, "snps,dis_u3_susphy_quirk", NULL, 0); - qemu_fdt_setprop(s->fdt, name, "snps,refclk_fladj", NULL, 0); - qemu_fdt_setprop(s->fdt, name, "snps,mask_phy_reset", NULL, 0); - qemu_fdt_setprop_cell(s->fdt, name, "phandle", s->phandle.dwc); - qemu_fdt_setprop_string(s->fdt, name, "maximum-speed", "high-speed"); - g_free(name); -} - -static void fdt_add_uart_nodes(VersalVirt *s) -{ - uint64_t addrs[] = { MM_UART1, MM_UART0 }; - unsigned int irqs[] = { VERSAL_UART1_IRQ_0, VERSAL_UART0_IRQ_0 }; - const char compat[] = "arm,pl011\0arm,sbsa-uart"; - const char clocknames[] = "uartclk\0apb_pclk"; - int i; - - for (i = 0; i < 
ARRAY_SIZE(addrs); i++) { - char *name = g_strdup_printf("/uart@%" PRIx64, addrs[i]); - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop_cell(s->fdt, name, "current-speed", 115200); - qemu_fdt_setprop_cells(s->fdt, name, "clocks", - s->phandle.clk_125Mhz, s->phandle.clk_125Mhz); - qemu_fdt_setprop(s->fdt, name, "clock-names", - clocknames, sizeof(clocknames)); - - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, irqs[i], - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, addrs[i], 2, 0x1000); - qemu_fdt_setprop(s->fdt, name, "compatible", - compat, sizeof(compat)); - qemu_fdt_setprop(s->fdt, name, "u-boot,dm-pre-reloc", NULL, 0); - - if (addrs[i] == MM_UART0) { - /* Select UART0. */ - qemu_fdt_setprop_string(s->fdt, "/chosen", "stdout-path", name); - } - g_free(name); - } -} - -static void fdt_add_canfd_nodes(VersalVirt *s) -{ - uint64_t addrs[] = { MM_CANFD1, MM_CANFD0 }; - uint32_t size[] = { MM_CANFD1_SIZE, MM_CANFD0_SIZE }; - unsigned int irqs[] = { VERSAL_CANFD1_IRQ_0, VERSAL_CANFD0_IRQ_0 }; - const char clocknames[] = "can_clk\0s_axi_aclk"; - int i; - - /* Create and connect CANFD0 and CANFD1 nodes to canbus0. 
*/ - for (i = 0; i < ARRAY_SIZE(addrs); i++) { - char *name = g_strdup_printf("/canfd@%" PRIx64, addrs[i]); - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_cell(s->fdt, name, "rx-fifo-depth", 0x40); - qemu_fdt_setprop_cell(s->fdt, name, "tx-mailbox-count", 0x20); - - qemu_fdt_setprop_cells(s->fdt, name, "clocks", - s->phandle.clk_25Mhz, s->phandle.clk_25Mhz); - qemu_fdt_setprop(s->fdt, name, "clock-names", - clocknames, sizeof(clocknames)); - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, irqs[i], - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, addrs[i], 2, size[i]); - qemu_fdt_setprop_string(s->fdt, name, "compatible", - "xlnx,canfd-2.0"); - - g_free(name); - } -} - -static void fdt_add_fixed_link_nodes(VersalVirt *s, char *gemname, - uint32_t phandle) -{ - char *name = g_strdup_printf("%s/fixed-link", gemname); - - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop_cell(s->fdt, name, "phandle", phandle); - qemu_fdt_setprop(s->fdt, name, "full-duplex", NULL, 0); - qemu_fdt_setprop_cell(s->fdt, name, "speed", 1000); - g_free(name); -} - -static void fdt_add_gem_nodes(VersalVirt *s) -{ - uint64_t addrs[] = { MM_GEM1, MM_GEM0 }; - unsigned int irqs[] = { VERSAL_GEM1_IRQ_0, VERSAL_GEM0_IRQ_0 }; - const char clocknames[] = "pclk\0hclk\0tx_clk\0rx_clk"; - const char compat_gem[] = "cdns,zynqmp-gem\0cdns,gem"; - int i; - - for (i = 0; i < ARRAY_SIZE(addrs); i++) { - char *name = g_strdup_printf("/ethernet@%" PRIx64, addrs[i]); - qemu_fdt_add_subnode(s->fdt, name); - - fdt_add_fixed_link_nodes(s, name, s->phandle.ethernet_phy[i]); - qemu_fdt_setprop_string(s->fdt, name, "phy-mode", "rgmii-id"); - qemu_fdt_setprop_cell(s->fdt, name, "phy-handle", - s->phandle.ethernet_phy[i]); - qemu_fdt_setprop_cells(s->fdt, name, "clocks", - s->phandle.clk_25Mhz, s->phandle.clk_25Mhz, - s->phandle.clk_125Mhz, s->phandle.clk_125Mhz); - qemu_fdt_setprop(s->fdt, name, "clock-names", - clocknames, 
sizeof(clocknames)); - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, irqs[i], - GIC_FDT_IRQ_FLAGS_LEVEL_HI, - GIC_FDT_IRQ_TYPE_SPI, irqs[i], - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, addrs[i], 2, 0x1000); - qemu_fdt_setprop(s->fdt, name, "compatible", - compat_gem, sizeof(compat_gem)); - qemu_fdt_setprop_cell(s->fdt, name, "#address-cells", 1); - qemu_fdt_setprop_cell(s->fdt, name, "#size-cells", 0); - g_free(name); - } -} -static void fdt_add_zdma_nodes(VersalVirt *s) -{ - const char clocknames[] = "clk_main\0clk_apb"; - const char compat[] = "xlnx,zynqmp-dma-1.0"; - int i; - - for (i = XLNX_VERSAL_NR_ADMAS - 1; i >= 0; i--) { - uint64_t addr = MM_ADMA_CH0 + MM_ADMA_CH0_SIZE * i; - char *name = g_strdup_printf("/dma@%" PRIx64, addr); - - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_cell(s->fdt, name, "xlnx,bus-width", 64); - qemu_fdt_setprop_cells(s->fdt, name, "clocks", - s->phandle.clk_25Mhz, s->phandle.clk_25Mhz); - qemu_fdt_setprop(s->fdt, name, "clock-names", - clocknames, sizeof(clocknames)); - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, VERSAL_ADMA_IRQ_0 + i, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, addr, 2, 0x1000); - qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); - g_free(name); - } -} - -static void fdt_add_sd_nodes(VersalVirt *s) -{ - const char clocknames[] = "clk_xin\0clk_ahb"; - const char compat[] = "arasan,sdhci-8.9a"; - int i; + switch (vvc->version) { + case VERSAL_VER_VERSAL: + qemu_fdt_setprop(s->fdt, "/", "compatible", versal_compat, + sizeof(versal_compat)); + break; - for (i = ARRAY_SIZE(s->soc.pmc.iou.sd) - 1; i >= 0; i--) { - uint64_t addr = MM_PMC_SD0 + MM_PMC_SD0_SIZE * i; - char *name = g_strdup_printf("/sdhci@%" PRIx64, addr); - - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_cells(s->fdt, name, "clocks", - s->phandle.clk_25Mhz, 
s->phandle.clk_25Mhz); - qemu_fdt_setprop(s->fdt, name, "clock-names", - clocknames, sizeof(clocknames)); - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, VERSAL_SD0_IRQ_0 + i * 2, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, addr, 2, MM_PMC_SD0_SIZE); - qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); - g_free(name); + case VERSAL_VER_VERSAL2: + qemu_fdt_setprop(s->fdt, "/", "compatible", versal2_compat, + sizeof(versal2_compat)); + break; } } -static void fdt_add_rtc_node(VersalVirt *s) -{ - const char compat[] = "xlnx,zynqmp-rtc"; - const char interrupt_names[] = "alarm\0sec"; - char *name = g_strdup_printf("/rtc@%x", MM_PMC_RTC); - - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_ALARM_IRQ, - GIC_FDT_IRQ_FLAGS_LEVEL_HI, - GIC_FDT_IRQ_TYPE_SPI, VERSAL_RTC_SECONDS_IRQ, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop(s->fdt, name, "interrupt-names", - interrupt_names, sizeof(interrupt_names)); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, MM_PMC_RTC, 2, MM_PMC_RTC_SIZE); - qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); - g_free(name); -} - -static void fdt_add_bbram_node(VersalVirt *s) -{ - const char compat[] = TYPE_XLNX_BBRAM; - const char interrupt_names[] = "bbram-error"; - char *name = g_strdup_printf("/bbram@%x", MM_PMC_BBRAM_CTRL); - - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, VERSAL_PMC_APB_IRQ, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop(s->fdt, name, "interrupt-names", - interrupt_names, sizeof(interrupt_names)); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, MM_PMC_BBRAM_CTRL, - 2, MM_PMC_BBRAM_CTRL_SIZE); - qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); - g_free(name); -} - -static void fdt_add_efuse_ctrl_node(VersalVirt *s) -{ - const 
char compat[] = TYPE_XLNX_VERSAL_EFUSE_CTRL; - const char interrupt_names[] = "pmc_efuse"; - char *name = g_strdup_printf("/pmc_efuse@%x", MM_PMC_EFUSE_CTRL); - - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, VERSAL_EFUSE_IRQ, - GIC_FDT_IRQ_FLAGS_LEVEL_HI); - qemu_fdt_setprop(s->fdt, name, "interrupt-names", - interrupt_names, sizeof(interrupt_names)); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, MM_PMC_EFUSE_CTRL, - 2, MM_PMC_EFUSE_CTRL_SIZE); - qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); - g_free(name); -} - -static void fdt_add_efuse_cache_node(VersalVirt *s) -{ - const char compat[] = TYPE_XLNX_VERSAL_EFUSE_CACHE; - char *name = g_strdup_printf("/xlnx_pmc_efuse_cache@%x", - MM_PMC_EFUSE_CACHE); - - qemu_fdt_add_subnode(s->fdt, name); - - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", - 2, MM_PMC_EFUSE_CACHE, - 2, MM_PMC_EFUSE_CACHE_SIZE); - qemu_fdt_setprop(s->fdt, name, "compatible", compat, sizeof(compat)); - g_free(name); -} - static void fdt_nop_memory_nodes(void *fdt, Error **errp) { Error *err = NULL; @@ -470,88 +108,13 @@ static void fdt_nop_memory_nodes(void *fdt, Error **errp) g_strfreev(node_path); } -static void fdt_add_memory_nodes(VersalVirt *s, void *fdt, uint64_t ram_size) -{ - /* Describes the various split DDR access regions. */ - static const struct { - uint64_t base; - uint64_t size; - } addr_ranges[] = { - { MM_TOP_DDR, MM_TOP_DDR_SIZE }, - { MM_TOP_DDR_2, MM_TOP_DDR_2_SIZE }, - { MM_TOP_DDR_3, MM_TOP_DDR_3_SIZE }, - { MM_TOP_DDR_4, MM_TOP_DDR_4_SIZE } - }; - uint64_t mem_reg_prop[8] = {0}; - uint64_t size = ram_size; - Error *err = NULL; - char *name; - int i; - - fdt_nop_memory_nodes(fdt, &err); - if (err) { - error_report_err(err); - return; - } - - name = g_strdup_printf("/memory@%x", MM_TOP_DDR); - for (i = 0; i < ARRAY_SIZE(addr_ranges) && size; i++) { - uint64_t mapsize; - - mapsize = size < addr_ranges[i].size ? 
size : addr_ranges[i].size; - - mem_reg_prop[i * 2] = addr_ranges[i].base; - mem_reg_prop[i * 2 + 1] = mapsize; - size -= mapsize; - } - qemu_fdt_add_subnode(fdt, name); - qemu_fdt_setprop_string(fdt, name, "device_type", "memory"); - - switch (i) { - case 1: - qemu_fdt_setprop_sized_cells(fdt, name, "reg", - 2, mem_reg_prop[0], - 2, mem_reg_prop[1]); - break; - case 2: - qemu_fdt_setprop_sized_cells(fdt, name, "reg", - 2, mem_reg_prop[0], - 2, mem_reg_prop[1], - 2, mem_reg_prop[2], - 2, mem_reg_prop[3]); - break; - case 3: - qemu_fdt_setprop_sized_cells(fdt, name, "reg", - 2, mem_reg_prop[0], - 2, mem_reg_prop[1], - 2, mem_reg_prop[2], - 2, mem_reg_prop[3], - 2, mem_reg_prop[4], - 2, mem_reg_prop[5]); - break; - case 4: - qemu_fdt_setprop_sized_cells(fdt, name, "reg", - 2, mem_reg_prop[0], - 2, mem_reg_prop[1], - 2, mem_reg_prop[2], - 2, mem_reg_prop[3], - 2, mem_reg_prop[4], - 2, mem_reg_prop[5], - 2, mem_reg_prop[6], - 2, mem_reg_prop[7]); - break; - default: - g_assert_not_reached(); - } - g_free(name); -} - static void versal_virt_modify_dtb(const struct arm_boot_info *binfo, void *fdt) { VersalVirt *s = container_of(binfo, VersalVirt, binfo); - fdt_add_memory_nodes(s, fdt, binfo->ram_size); + fdt_nop_memory_nodes(s->fdt, &error_abort); + versal_fdt_add_memory_nodes(&s->soc, binfo->ram_size); } static void *versal_virt_get_dtb(const struct arm_boot_info *binfo, @@ -570,41 +133,34 @@ static void create_virtio_regions(VersalVirt *s) int i; for (i = 0; i < NUM_VIRTIO_TRANSPORT; i++) { - char *name = g_strdup_printf("virtio%d", i); - hwaddr base = MM_TOP_RSVD + i * virtio_mmio_size; - int irq = VERSAL_RSVD_IRQ_FIRST + i; + hwaddr base = versal_get_reserved_mmio_addr(&s->soc) + + i * virtio_mmio_size; + g_autofree char *node = g_strdup_printf("/virtio_mmio@%" PRIx64, base); + int dtb_irq; MemoryRegion *mr; DeviceState *dev; qemu_irq pic_irq; - pic_irq = qdev_get_gpio_in(DEVICE(&s->soc.fpd.apu.gic), irq); + pic_irq = versal_get_reserved_irq(&s->soc, i, &dtb_irq); 
dev = qdev_new("virtio-mmio"); - object_property_add_child(OBJECT(&s->soc), name, OBJECT(dev)); + object_property_add_child(OBJECT(s), "virtio-mmio[*]", OBJECT(dev)); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic_irq); mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); memory_region_add_subregion(&s->soc.mr_ps, base, mr); - g_free(name); - } - for (i = 0; i < NUM_VIRTIO_TRANSPORT; i++) { - hwaddr base = MM_TOP_RSVD + i * virtio_mmio_size; - int irq = VERSAL_RSVD_IRQ_FIRST + i; - char *name = g_strdup_printf("/virtio_mmio@%" PRIx64, base); - - qemu_fdt_add_subnode(s->fdt, name); - qemu_fdt_setprop(s->fdt, name, "dma-coherent", NULL, 0); - qemu_fdt_setprop_cells(s->fdt, name, "interrupts", - GIC_FDT_IRQ_TYPE_SPI, irq, + qemu_fdt_add_subnode(s->fdt, node); + qemu_fdt_setprop(s->fdt, node, "dma-coherent", NULL, 0); + qemu_fdt_setprop_cells(s->fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, dtb_irq, GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); - qemu_fdt_setprop_sized_cells(s->fdt, name, "reg", + qemu_fdt_setprop_sized_cells(s->fdt, node, "reg", 2, base, 2, virtio_mmio_size); - qemu_fdt_setprop_string(s->fdt, name, "compatible", "virtio,mmio"); - g_free(name); + qemu_fdt_setprop_string(s->fdt, node, "compatible", "virtio,mmio"); } } -static void bbram_attach_drive(XlnxBBRam *dev) +static void bbram_attach_drive(VersalVirt *s) { DriveInfo *dinfo; BlockBackend *blk; @@ -612,11 +168,11 @@ static void bbram_attach_drive(XlnxBBRam *dev) dinfo = drive_get_by_index(IF_PFLASH, 0); blk = dinfo ? blk_by_legacy_dinfo(dinfo) : NULL; if (blk) { - qdev_prop_set_drive(DEVICE(dev), "drive", blk); + versal_bbram_attach_drive(&s->soc, blk); } } -static void efuse_attach_drive(XlnxEFuse *dev) +static void efuse_attach_drive(VersalVirt *s) { DriveInfo *dinfo; BlockBackend *blk; @@ -624,41 +180,37 @@ static void efuse_attach_drive(XlnxEFuse *dev) dinfo = drive_get_by_index(IF_PFLASH, 1); blk = dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL; if (blk) { - qdev_prop_set_drive(DEVICE(dev), "drive", blk); + versal_efuse_attach_drive(&s->soc, blk); } } -static void sd_plugin_card(SDHCIState *sd, DriveInfo *di) +static void sd_plug_card(VersalVirt *s, int idx, DriveInfo *di) { BlockBackend *blk = di ? blk_by_legacy_dinfo(di) : NULL; - DeviceState *card; - card = qdev_new(TYPE_SD_CARD); - object_property_add_child(OBJECT(sd), "card[*]", OBJECT(card)); - qdev_prop_set_drive_err(card, "drive", blk, &error_fatal); - qdev_realize_and_unref(card, qdev_get_child_bus(DEVICE(sd), "sd-bus"), - &error_fatal); + versal_sdhci_plug_card(&s->soc, idx, blk); } static char *versal_get_ospi_model(Object *obj, Error **errp) { - VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + VersalVirt *s = XLNX_VERSAL_VIRT_BASE_MACHINE(obj); - return g_strdup(s->ospi_model); + return g_strdup(s->cfg.ospi_model); } static void versal_set_ospi_model(Object *obj, const char *value, Error **errp) { - VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + VersalVirt *s = XLNX_VERSAL_VIRT_BASE_MACHINE(obj); - g_free(s->ospi_model); - s->ospi_model = g_strdup(value); + g_free(s->cfg.ospi_model); + s->cfg.ospi_model = g_strdup(value); } static void versal_virt_init(MachineState *machine) { - VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(machine); + VersalVirt *s = XLNX_VERSAL_VIRT_BASE_MACHINE(machine); + VersalVirtClass *vvc = XLNX_VERSAL_VIRT_BASE_MACHINE_GET_CLASS(machine); int psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; int i; @@ -690,48 +242,38 @@ static void versal_virt_init(MachineState *machine) } object_initialize_child(OBJECT(machine), "xlnx-versal", &s->soc, - TYPE_XLNX_VERSAL); + versal_get_class(vvc->version)); object_property_set_link(OBJECT(&s->soc), "ddr", OBJECT(machine->ram), &error_abort); - object_property_set_link(OBJECT(&s->soc), "canbus0", OBJECT(s->canbus[0]), - &error_abort); - object_property_set_link(OBJECT(&s->soc), "canbus1", OBJECT(s->canbus[1]), - &error_abort); - 
sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); + + for (i = 0; i < versal_get_num_can(vvc->version); i++) { + g_autofree char *prop_name = g_strdup_printf("canbus%d", i); + + object_property_set_link(OBJECT(&s->soc), prop_name, + OBJECT(s->canbus[i]), + &error_abort); + } fdt_create(s); + versal_set_fdt(&s->soc, s->fdt); + sysbus_realize(SYS_BUS_DEVICE(&s->soc), &error_fatal); create_virtio_regions(s); - fdt_add_gem_nodes(s); - fdt_add_uart_nodes(s); - fdt_add_canfd_nodes(s); - fdt_add_gic_nodes(s); - fdt_add_timer_nodes(s); - fdt_add_zdma_nodes(s); - fdt_add_usb_xhci_nodes(s); - fdt_add_sd_nodes(s); - fdt_add_rtc_node(s); - fdt_add_bbram_node(s); - fdt_add_efuse_ctrl_node(s); - fdt_add_efuse_cache_node(s); - fdt_add_cpu_nodes(s, psci_conduit); - fdt_add_clk_node(s, "/clk125", 125000000, s->phandle.clk_125Mhz); - fdt_add_clk_node(s, "/clk25", 25000000, s->phandle.clk_25Mhz); - - /* Make the APU cpu address space visible to virtio and other - * modules unaware of multiple address-spaces. */ - memory_region_add_subregion_overlap(get_system_memory(), - 0, &s->soc.fpd.apu.mr, 0); + + /* + * Map the SoC address space onto system memory. This will allow virtio and + * other modules unaware of multiple address-spaces to work. + */ + memory_region_add_subregion(get_system_memory(), 0, &s->soc.mr_ps); /* Attach bbram backend, if given */ - bbram_attach_drive(&s->soc.pmc.bbram); + bbram_attach_drive(s); /* Attach efuse backend, if given */ - efuse_attach_drive(&s->soc.pmc.efuse); + efuse_attach_drive(s); - /* Plugin SD cards. 
*/ - for (i = 0; i < ARRAY_SIZE(s->soc.pmc.iou.sd); i++) { - sd_plugin_card(&s->soc.pmc.iou.sd[i], - drive_get(IF_SD, 0, i)); + /* Plug SD cards */ + for (i = 0; i < versal_get_num_sdhci(vvc->version); i++) { + sd_plug_card(s, i, drive_get(IF_SD, 0, i)); } s->binfo.ram_size = machine->ram_size; @@ -745,100 +287,133 @@ static void versal_virt_init(MachineState *machine) s->binfo.loader_start = 0x1000; s->binfo.dtb_limit = 0x1000000; } - arm_load_kernel(&s->soc.fpd.apu.cpu[0], machine, &s->binfo); + arm_load_kernel(ARM_CPU(versal_get_boot_cpu(&s->soc)), machine, &s->binfo); for (i = 0; i < XLNX_VERSAL_NUM_OSPI_FLASH; i++) { - BusState *spi_bus; - DeviceState *flash_dev; ObjectClass *flash_klass; - qemu_irq cs_line; DriveInfo *dinfo = drive_get(IF_MTD, 0, i); + BlockBackend *blk; + const char *mdl; - spi_bus = qdev_get_child_bus(DEVICE(&s->soc.pmc.iou.ospi), "spi0"); - - if (s->ospi_model) { - flash_klass = object_class_by_name(s->ospi_model); + if (s->cfg.ospi_model) { + flash_klass = object_class_by_name(s->cfg.ospi_model); if (!flash_klass || object_class_is_abstract(flash_klass) || !object_class_dynamic_cast(flash_klass, TYPE_M25P80)) { error_report("'%s' is either abstract or" - " not a subtype of m25p80", s->ospi_model); + " not a subtype of m25p80", s->cfg.ospi_model); exit(1); } + mdl = s->cfg.ospi_model; + } else { + mdl = "mt35xu01g"; } - flash_dev = qdev_new(s->ospi_model ? s->ospi_model : "mt35xu01g"); - - if (dinfo) { - qdev_prop_set_drive_err(flash_dev, "drive", - blk_by_legacy_dinfo(dinfo), &error_fatal); - } - qdev_prop_set_uint8(flash_dev, "cs", i); - qdev_realize_and_unref(flash_dev, spi_bus, &error_fatal); - - cs_line = qdev_get_gpio_in_named(flash_dev, SSI_GPIO_CS, 0); - - sysbus_connect_irq(SYS_BUS_DEVICE(&s->soc.pmc.iou.ospi), - i + 1, cs_line); + blk = dinfo ? 
blk_by_legacy_dinfo(dinfo) : NULL; + versal_ospi_create_flash(&s->soc, i, mdl, blk); } } static void versal_virt_machine_instance_init(Object *obj) { - VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + VersalVirt *s = XLNX_VERSAL_VIRT_BASE_MACHINE(obj); + VersalVirtClass *vvc = XLNX_VERSAL_VIRT_BASE_MACHINE_GET_CLASS(s); + size_t i, num_can; + + num_can = versal_get_num_can(vvc->version); + s->canbus = g_new0(CanBusState *, num_can); /* - * User can set canbus0 and canbus1 properties to can-bus object and connect - * to socketcan(optional) interface via command line. + * User can set canbusx properties to can-bus object and optionally connect + * to socketcan interface via command line. */ - object_property_add_link(obj, "canbus0", TYPE_CAN_BUS, - (Object **)&s->canbus[0], - object_property_allow_set_link, - 0); - object_property_add_link(obj, "canbus1", TYPE_CAN_BUS, - (Object **)&s->canbus[1], - object_property_allow_set_link, - 0); + for (i = 0; i < num_can; i++) { + g_autofree char *prop_name = g_strdup_printf("canbus%zu", i); + + object_property_add_link(obj, prop_name, TYPE_CAN_BUS, + (Object **) &s->canbus[i], + object_property_allow_set_link, 0); + } } static void versal_virt_machine_finalize(Object *obj) { - VersalVirt *s = XLNX_VERSAL_VIRT_MACHINE(obj); + VersalVirt *s = XLNX_VERSAL_VIRT_BASE_MACHINE(obj); - g_free(s->ospi_model); + g_free(s->cfg.ospi_model); + g_free(s->canbus); } -static void versal_virt_machine_class_init(ObjectClass *oc, const void *data) +static void versal_virt_machine_class_init_common(ObjectClass *oc) { MachineClass *mc = MACHINE_CLASS(oc); + VersalVirtClass *vvc = XLNX_VERSAL_VIRT_BASE_MACHINE_CLASS(mc); + int num_cpu = versal_get_num_cpu(vvc->version); - mc->desc = "Xilinx Versal Virtual development board"; - mc->init = versal_virt_init; - mc->min_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; - mc->max_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; - mc->default_cpus = XLNX_VERSAL_NR_ACPUS + XLNX_VERSAL_NR_RCPUS; 
mc->no_cdrom = true; mc->auto_create_sdcard = true; mc->default_ram_id = "ddr"; + mc->min_cpus = num_cpu; + mc->max_cpus = num_cpu; + mc->default_cpus = num_cpu; + mc->init = versal_virt_init; + object_class_property_add_str(oc, "ospi-flash", versal_get_ospi_model, versal_set_ospi_model); object_class_property_set_description(oc, "ospi-flash", "Change the OSPI Flash model"); } -static const TypeInfo versal_virt_machine_init_typeinfo = { - .name = TYPE_XLNX_VERSAL_VIRT_MACHINE, +static void versal_virt_machine_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + VersalVirtClass *vvc = XLNX_VERSAL_VIRT_BASE_MACHINE_CLASS(oc); + + mc->desc = "AMD Versal Virtual development board"; + mc->alias = "xlnx-versal-virt"; + vvc->version = VERSAL_VER_VERSAL; + + versal_virt_machine_class_init_common(oc); +} + +static void versal2_virt_machine_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + VersalVirtClass *vvc = XLNX_VERSAL_VIRT_BASE_MACHINE_CLASS(oc); + + mc->desc = "AMD Versal Gen 2 Virtual development board"; + vvc->version = VERSAL_VER_VERSAL2; + + versal_virt_machine_class_init_common(oc); +} + +static const TypeInfo versal_virt_base_machine_init_typeinfo = { + .name = TYPE_XLNX_VERSAL_VIRT_BASE_MACHINE, .parent = TYPE_MACHINE, - .class_init = versal_virt_machine_class_init, + .class_size = sizeof(VersalVirtClass), .instance_init = versal_virt_machine_instance_init, .instance_size = sizeof(VersalVirt), .instance_finalize = versal_virt_machine_finalize, + .abstract = true, +}; + +static const TypeInfo versal_virt_machine_init_typeinfo = { + .name = TYPE_XLNX_VERSAL_VIRT_MACHINE, + .parent = TYPE_XLNX_VERSAL_VIRT_BASE_MACHINE, + .class_init = versal_virt_machine_class_init, +}; + +static const TypeInfo versal2_virt_machine_init_typeinfo = { + .name = TYPE_XLNX_VERSAL2_VIRT_MACHINE, + .parent = TYPE_XLNX_VERSAL_VIRT_BASE_MACHINE, + .class_init = versal2_virt_machine_class_init, }; static void 
versal_virt_machine_init_register_types(void) { + type_register_static(&versal_virt_base_machine_init_typeinfo); type_register_static(&versal_virt_machine_init_typeinfo); + type_register_static(&versal2_virt_machine_init_typeinfo); } type_init(versal_virt_machine_init_register_types) - diff --git a/hw/arm/xlnx-versal.c b/hw/arm/xlnx-versal.c index a42b9e7140b61..81cb6294cfa64 100644 --- a/hw/arm/xlnx-versal.c +++ b/hw/arm/xlnx-versal.c @@ -1,7 +1,8 @@ /* - * Xilinx Versal SoC model. + * AMD/Xilinx Versal family SoC model. * * Copyright (c) 2018 Xilinx Inc. + * Copyright (c) 2025 Advanced Micro Devices, Inc. * Written by Edgar E. Iglesias * * This program is free software; you can redistribute it and/or modify @@ -17,827 +18,1745 @@ #include "hw/sysbus.h" #include "net/net.h" #include "system/system.h" -#include "hw/arm/boot.h" #include "hw/misc/unimp.h" #include "hw/arm/xlnx-versal.h" #include "qemu/log.h" #include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" +#include "system/device_tree.h" +#include "hw/arm/fdt.h" +#include "hw/char/pl011.h" +#include "hw/net/xlnx-versal-canfd.h" +#include "hw/sd/sdhci.h" +#include "hw/net/cadence_gem.h" +#include "hw/dma/xlnx-zdma.h" +#include "hw/misc/xlnx-versal-xramc.h" +#include "hw/usb/xlnx-usb-subsystem.h" +#include "hw/nvram/xlnx-versal-efuse.h" +#include "hw/ssi/xlnx-versal-ospi.h" +#include "hw/misc/xlnx-versal-pmc-iou-slcr.h" +#include "hw/nvram/xlnx-bbram.h" +#include "hw/misc/xlnx-versal-trng.h" +#include "hw/rtc/xlnx-zynqmp-rtc.h" +#include "hw/misc/xlnx-versal-cfu.h" +#include "hw/misc/xlnx-versal-cframe-reg.h" +#include "hw/or-irq.h" +#include "hw/misc/xlnx-versal-crl.h" +#include "hw/intc/arm_gicv3_common.h" +#include "hw/intc/arm_gicv3_its_common.h" +#include "hw/intc/arm_gic.h" +#include "hw/core/split-irq.h" +#include "target/arm/cpu.h" +#include "hw/cpu/cluster.h" +#include "hw/arm/bsa.h" -#define XLNX_VERSAL_ACPU_TYPE ARM_CPU_TYPE_NAME("cortex-a72") -#define XLNX_VERSAL_RCPU_TYPE 
ARM_CPU_TYPE_NAME("cortex-r5f") -#define GEM_REVISION 0x40070106 +/* + * IRQ descriptor to catch the following cases: + * - An IRQ can either connect to the GICs, to the PPU1 intc, or the the EAM + * - Multiple devices can connect to the same IRQ. They are OR'ed together. + */ +FIELD(VERSAL_IRQ, IRQ, 0, 16) +FIELD(VERSAL_IRQ, TARGET, 16, 2) +FIELD(VERSAL_IRQ, ORED, 18, 1) +FIELD(VERSAL_IRQ, OR_IDX, 19, 4) /* input index on the IRQ OR gate */ + +typedef enum VersalIrqTarget { + IRQ_TARGET_GIC, + IRQ_TARGET_PPU1, + IRQ_TARGET_EAM, +} VersalIrqTarget; + +#define PPU1_IRQ(irq) ((IRQ_TARGET_PPU1 << R_VERSAL_IRQ_TARGET_SHIFT) | (irq)) +#define EAM_IRQ(irq) ((IRQ_TARGET_EAM << R_VERSAL_IRQ_TARGET_SHIFT) | (irq)) +#define OR_IRQ(irq, or_idx) \ + (R_VERSAL_IRQ_ORED_MASK | ((or_idx) << R_VERSAL_IRQ_OR_IDX_SHIFT) | (irq)) +#define PPU1_OR_IRQ(irq, or_idx) \ + ((IRQ_TARGET_PPU1 << R_VERSAL_IRQ_TARGET_SHIFT) | OR_IRQ(irq, or_idx)) + +typedef struct VersalSimplePeriphMap { + uint64_t addr; + int irq; +} VersalSimplePeriphMap; + +typedef struct VersalMemMap { + uint64_t addr; + uint64_t size; +} VersalMemMap; + +typedef struct VersalGicMap { + int version; + uint64_t dist; + uint64_t redist; + uint64_t cpu_iface; + uint64_t its; + size_t num_irq; + bool has_its; +} VersalGicMap; + +enum StartPoweredOffMode { + SPO_SECONDARIES, + SPO_ALL, +}; + +typedef struct VersalCpuClusterMap { + VersalGicMap gic; + /* + * true: one GIC per cluster. 
+ * false: one GIC for all CPUs + */ + bool per_cluster_gic; + + const char *name; + const char *cpu_model; + size_t num_core; + size_t num_cluster; + uint32_t qemu_cluster_id; + bool dtb_expose; -#define VERSAL_NUM_PMC_APB_IRQS 18 -#define NUM_OSPI_IRQ_LINES 3 + struct { + uint64_t base; + uint64_t core_shift; + uint64_t cluster_shift; + } mp_affinity; + + enum StartPoweredOffMode start_powered_off; +} VersalCpuClusterMap; + +typedef struct VersalMap { + VersalMemMap ocm; + + struct VersalDDRMap { + VersalMemMap chan[4]; + size_t num_chan; + } ddr; + + VersalCpuClusterMap apu; + VersalCpuClusterMap rpu; + + VersalSimplePeriphMap uart[2]; + size_t num_uart; + + VersalSimplePeriphMap canfd[4]; + size_t num_canfd; + + VersalSimplePeriphMap sdhci[2]; + size_t num_sdhci; + + struct VersalGemMap { + VersalSimplePeriphMap map; + size_t num_prio_queue; + const char *phy_mode; + const uint32_t speed; + } gem[3]; + size_t num_gem; + + struct VersalZDMAMap { + const char *name; + VersalSimplePeriphMap map; + size_t num_chan; + uint64_t chan_stride; + int irq_stride; + } zdma[2]; + size_t num_zdma; + + struct VersalXramMap { + uint64_t mem; + uint64_t mem_stride; + uint64_t ctrl; + uint64_t ctrl_stride; + int irq; + size_t num; + } xram; + + struct VersalUsbMap { + uint64_t xhci; + uint64_t ctrl; + int irq; + } usb[2]; + size_t num_usb; + + struct VersalEfuseMap { + uint64_t ctrl; + uint64_t cache; + int irq; + } efuse; + + struct VersalOspiMap { + uint64_t ctrl; + uint64_t dac; + uint64_t dac_sz; + uint64_t dma_src; + uint64_t dma_dst; + int irq; + } ospi; + + VersalSimplePeriphMap pmc_iou_slcr; + VersalSimplePeriphMap bbram; + VersalSimplePeriphMap trng; + + struct VersalRtcMap { + VersalSimplePeriphMap map; + int alarm_irq; + int second_irq; + } rtc; + + struct VersalCfuMap { + uint64_t cframe_base; + uint64_t cframe_stride; + uint64_t cfu_fdro; + uint64_t cframe_bcast_reg; + uint64_t cframe_bcast_fdri; + uint64_t cfu_apb; + uint64_t cfu_stream; + uint64_t cfu_stream_2; + 
uint64_t cfu_sfr; + int cfu_apb_irq; + int cframe_irq; + size_t num_cframe; + struct VersalCfuCframeCfg { + uint32_t blktype_frames[7]; + } cframe_cfg[15]; + } cfu; + + VersalSimplePeriphMap crl; + + /* reserved MMIO/IRQ space that can safely be used for virtio devices */ + struct VersalReserved { + uint64_t mmio_start; + int irq_start; + int irq_num; + } reserved; +} VersalMap; + +static const VersalMap VERSAL_MAP = { + .ocm = { + .addr = 0xfffc0000, + .size = 0x40000, + }, + + .ddr = { + .chan[0] = { .addr = 0x0, .size = 2 * GiB }, + .chan[1] = { .addr = 0x800000000ull, .size = 32 * GiB }, + .chan[2] = { .addr = 0xc00000000ull, .size = 256 * GiB }, + .chan[3] = { .addr = 0x10000000000ull, .size = 734 * GiB }, + .num_chan = 4, + }, + + .apu = { + .name = "apu", + .cpu_model = ARM_CPU_TYPE_NAME("cortex-a72"), + .num_cluster = 1, + .num_core = 2, + .qemu_cluster_id = 0, + .mp_affinity = { + .core_shift = ARM_AFF0_SHIFT, + .cluster_shift = ARM_AFF1_SHIFT, + }, + .start_powered_off = SPO_SECONDARIES, + .dtb_expose = true, + .gic = { + .version = 3, + .dist = 0xf9000000, + .redist = 0xf9080000, + .num_irq = 192, + .has_its = true, + .its = 0xf9020000, + }, + }, + + .rpu = { + .name = "rpu", + .cpu_model = ARM_CPU_TYPE_NAME("cortex-r5f"), + .num_cluster = 1, + .num_core = 2, + .qemu_cluster_id = 1, + .mp_affinity = { + .base = 0x100, + .core_shift = ARM_AFF0_SHIFT, + .cluster_shift = ARM_AFF1_SHIFT, + }, + .start_powered_off = SPO_ALL, + .dtb_expose = false, + .gic = { + .version = 2, + .dist = 0xf9000000, + .cpu_iface = 0xf9001000, + .num_irq = 192, + }, + }, + + .uart[0] = { 0xff000000, 18 }, + .uart[1] = { 0xff010000, 19 }, + .num_uart = 2, + + .canfd[0] = { 0xff060000, 20 }, + .canfd[1] = { 0xff070000, 21 }, + .num_canfd = 2, + + .sdhci[0] = { 0xf1040000, 126 }, + .sdhci[1] = { 0xf1050000, 128 }, + .num_sdhci = 2, + + .gem[0] = { { 0xff0c0000, 56 }, 2, "rgmii-id", 1000 }, + .gem[1] = { { 0xff0d0000, 58 }, 2, "rgmii-id", 1000 }, + .num_gem = 2, + + .zdma[0] = { 
"adma", { 0xffa80000, 60 }, 8, 0x10000, 1 }, + .num_zdma = 1, + + .xram = { + .num = 4, + .mem = 0xfe800000, .mem_stride = 1 * MiB, + .ctrl = 0xff8e0000, .ctrl_stride = 0x10000, + .irq = 79, + }, + + .usb[0] = { .xhci = 0xfe200000, .ctrl = 0xff9d0000, .irq = 22 }, + .num_usb = 1, + + .efuse = { .ctrl = 0xf1240000, .cache = 0xf1250000, .irq = 139 }, + + .ospi = { + .ctrl = 0xf1010000, + .dac = 0xc0000000, .dac_sz = 0x20000000, + .dma_src = 0xf1011000, .dma_dst = 0xf1011800, + .irq = 124, + }, + + .pmc_iou_slcr = { 0xf1060000, OR_IRQ(121, 0) }, + .bbram = { 0xf11f0000, OR_IRQ(121, 1) }, + .trng = { 0xf1230000, 141 }, + .rtc = { + { 0xf12a0000, OR_IRQ(121, 2) }, + .alarm_irq = 142, .second_irq = 143 + }, + + .cfu = { + .cframe_base = 0xf12d0000, .cframe_stride = 0x1000, + .cframe_bcast_reg = 0xf12ee000, .cframe_bcast_fdri = 0xf12ef000, + .cfu_apb = 0xf12b0000, .cfu_sfr = 0xf12c1000, + .cfu_stream = 0xf12c0000, .cfu_stream_2 = 0xf1f80000, + .cfu_fdro = 0xf12c2000, + .cfu_apb_irq = 120, .cframe_irq = OR_IRQ(121, 3), + .num_cframe = 15, + .cframe_cfg = { + { { 34111, 3528, 12800, 11, 5, 1, 1 } }, + { { 38498, 3841, 15361, 13, 7, 3, 1 } }, + { { 38498, 3841, 15361, 13, 7, 3, 1 } }, + { { 38498, 3841, 15361, 13, 7, 3, 1 } }, + }, + }, + + .crl = { 0xff5e0000, 10 }, + + .reserved = { 0xa0000000, 111, 8 }, +}; + +static const VersalMap VERSAL2_MAP = { + .ocm = { + .addr = 0xbbe00000, + .size = 2 * MiB, + }, + + .ddr = { + .chan[0] = { .addr = 0x0, .size = 2046 * MiB }, + .chan[1] = { .addr = 0x800000000ull, .size = 32 * GiB }, + .chan[2] = { .addr = 0xc00000000ull, .size = 256 * GiB }, + .chan[3] = { .addr = 0x10000000000ull, .size = 734 * GiB }, + .num_chan = 4, + }, + + .apu = { + .name = "apu", + .cpu_model = ARM_CPU_TYPE_NAME("cortex-a78ae"), + .num_cluster = 4, + .num_core = 2, + .qemu_cluster_id = 0, + .mp_affinity = { + .base = 0x0, /* TODO: the MT bit should be set */ + .core_shift = ARM_AFF1_SHIFT, + .cluster_shift = ARM_AFF2_SHIFT, + }, + .start_powered_off = 
SPO_SECONDARIES, + .dtb_expose = true, + .gic = { + .version = 3, + .dist = 0xe2000000, + .redist = 0xe2060000, + .num_irq = 544, + .has_its = true, + .its = 0xe2040000, + }, + }, + + .rpu = { + .name = "rpu", + .cpu_model = ARM_CPU_TYPE_NAME("cortex-r52"), + .num_cluster = 5, + .num_core = 2, + .qemu_cluster_id = 1, + .mp_affinity = { + .core_shift = ARM_AFF0_SHIFT, + .cluster_shift = ARM_AFF1_SHIFT, + }, + .start_powered_off = SPO_ALL, + .dtb_expose = false, + .per_cluster_gic = true, + .gic = { + .version = 3, + .dist = 0x0, + .redist = 0x100000, + .num_irq = 288, + }, + }, + + .uart[0] = { 0xf1920000, 25 }, + .uart[1] = { 0xf1930000, 26 }, + .num_uart = 2, + + .canfd[0] = { 0xf19e0000, 27 }, + .canfd[1] = { 0xf19f0000, 28 }, + .canfd[2] = { 0xf1a00000, 95 }, + .canfd[3] = { 0xf1a10000, 96 }, + .num_canfd = 4, + + .gem[0] = { { 0xf1a60000, 39 }, 2, "rgmii-id", 1000 }, + .gem[1] = { { 0xf1a70000, 41 }, 2, "rgmii-id", 1000 }, + .gem[2] = { { 0xed920000, 164 }, 4, "usxgmii", 10000 }, /* MMI 10Gb GEM */ + .num_gem = 3, + + .zdma[0] = { "adma", { 0xebd00000, 72 }, 8, 0x10000, 1 }, + .zdma[1] = { "sdma", { 0xebd80000, 112 }, 8, 0x10000, 1 }, + .num_zdma = 2, + + .usb[0] = { .xhci = 0xf1b00000, .ctrl = 0xf1ee0000, .irq = 29 }, + .usb[1] = { .xhci = 0xf1c00000, .ctrl = 0xf1ef0000, .irq = 34 }, + .num_usb = 2, + + .efuse = { .ctrl = 0xf1240000, .cache = 0xf1250000, .irq = 230 }, + + .ospi = { + .ctrl = 0xf1010000, + .dac = 0xc0000000, .dac_sz = 0x20000000, + .dma_src = 0xf1011000, .dma_dst = 0xf1011800, + .irq = 216, + }, + + .sdhci[0] = { 0xf1040000, 218 }, + .sdhci[1] = { 0xf1050000, 220 }, /* eMMC */ + .num_sdhci = 2, + + .pmc_iou_slcr = { 0xf1060000, 222 }, + .bbram = { 0xf11f0000, PPU1_OR_IRQ(18, 0) }, + .crl = { 0xeb5e0000 }, + .trng = { 0xf1230000, 233 }, + .rtc = { + { 0xf12a0000, PPU1_OR_IRQ(18, 1) }, + .alarm_irq = 200, .second_irq = 201 + }, + + .cfu = { + .cframe_base = 0xf12d0000, .cframe_stride = 0x1000, + .cframe_bcast_reg = 0xf12ee000, .cframe_bcast_fdri 
= 0xf12ef000, + .cfu_apb = 0xf12b0000, .cfu_sfr = 0xf12c1000, + .cfu_stream = 0xf12c0000, .cfu_stream_2 = 0xf1f80000, + .cfu_fdro = 0xf12c2000, + .cfu_apb_irq = 235, .cframe_irq = EAM_IRQ(7), + }, + + .reserved = { 0xf5e00000, 270, 8 }, +}; -static void versal_create_apu_cpus(Versal *s) +static const VersalMap *VERSION_TO_MAP[] = { + [VERSAL_VER_VERSAL] = &VERSAL_MAP, + [VERSAL_VER_VERSAL2] = &VERSAL2_MAP, +}; + +static inline VersalVersion versal_get_version(Versal *s) { - int i; + return XLNX_VERSAL_BASE_GET_CLASS(s)->version; +} - object_initialize_child(OBJECT(s), "apu-cluster", &s->fpd.apu.cluster, - TYPE_CPU_CLUSTER); - qdev_prop_set_uint32(DEVICE(&s->fpd.apu.cluster), "cluster-id", 0); - - for (i = 0; i < ARRAY_SIZE(s->fpd.apu.cpu); i++) { - Object *obj; - - object_initialize_child(OBJECT(&s->fpd.apu.cluster), - "apu-cpu[*]", &s->fpd.apu.cpu[i], - XLNX_VERSAL_ACPU_TYPE); - obj = OBJECT(&s->fpd.apu.cpu[i]); - if (i) { - /* Secondary CPUs start in powered-down state */ - object_property_set_bool(obj, "start-powered-off", true, - &error_abort); - } +static inline const VersalMap *versal_get_map(Versal *s) +{ + return VERSION_TO_MAP[versal_get_version(s)]; +} - object_property_set_int(obj, "core-count", ARRAY_SIZE(s->fpd.apu.cpu), - &error_abort); - object_property_set_link(obj, "memory", OBJECT(&s->fpd.apu.mr), - &error_abort); - qdev_realize(DEVICE(obj), NULL, &error_fatal); +static inline Object *versal_get_child(Versal *s, const char *child) +{ + return object_resolve_path_at(OBJECT(s), child); +} + +static inline Object *versal_get_child_idx(Versal *s, const char *child, + size_t idx) +{ + g_autofree char *n = g_strdup_printf("%s[%zu]", child, idx); + + return versal_get_child(s, n); +} + +/* + * The SoC embeds multiple GICs. They all receives the same IRQ lines at the + * same index. This function creates a TYPE_SPLIT_IRQ device to fan out the + * given IRQ input to all the GICs. 
+ * + * The TYPE_SPLIT_IRQ devices lie in the /soc/irq-splits QOM container + */ +static qemu_irq versal_get_gic_irq(Versal *s, int irq_idx) +{ + DeviceState *split; + Object *container = versal_get_child(s, "irq-splits"); + int idx = FIELD_EX32(irq_idx, VERSAL_IRQ, IRQ); + g_autofree char *name = g_strdup_printf("irq[%d]", idx); + + split = DEVICE(object_resolve_path_at(container, name)); + + if (split == NULL) { + size_t i; + + split = qdev_new(TYPE_SPLIT_IRQ); + qdev_prop_set_uint16(split, "num-lines", s->intc->len); + object_property_add_child(container, name, OBJECT(split)); + qdev_realize_and_unref(split, NULL, &error_abort); + + for (i = 0; i < s->intc->len; i++) { + DeviceState *gic; + + gic = g_array_index(s->intc, DeviceState *, i); + qdev_connect_gpio_out(split, i, qdev_get_gpio_in(gic, idx)); + } + } else { + g_assert(FIELD_EX32(irq_idx, VERSAL_IRQ, ORED)); } - qdev_realize(DEVICE(&s->fpd.apu.cluster), NULL, &error_fatal); + return qdev_get_gpio_in(split, 0); } -static void versal_create_apu_gic(Versal *s, qemu_irq *pic) +/* + * When the R_VERSAL_IRQ_ORED flag is set on an IRQ descriptor, this function is + * used to return the corresponding or gate input IRQ. The or gate is created if + * not already existant. + * + * Or gates are placed under the /soc/irq-or-gates QOM container. 
+ */ +static qemu_irq versal_get_irq_or_gate_in(Versal *s, int irq_idx, + qemu_irq target_irq) { - static const uint64_t addrs[] = { - MM_GIC_APU_DIST_MAIN, - MM_GIC_APU_REDIST_0 + static const char *TARGET_STR[] = { + [IRQ_TARGET_GIC] = "gic", + [IRQ_TARGET_PPU1] = "ppu1", + [IRQ_TARGET_EAM] = "eam", }; - SysBusDevice *gicbusdev; - DeviceState *gicdev; - QList *redist_region_count; - int nr_apu_cpus = ARRAY_SIZE(s->fpd.apu.cpu); - int i; - object_initialize_child(OBJECT(s), "apu-gic", &s->fpd.apu.gic, - gicv3_class_name()); - gicbusdev = SYS_BUS_DEVICE(&s->fpd.apu.gic); - gicdev = DEVICE(&s->fpd.apu.gic); - qdev_prop_set_uint32(gicdev, "revision", 3); - qdev_prop_set_uint32(gicdev, "num-cpu", nr_apu_cpus); - qdev_prop_set_uint32(gicdev, "num-irq", XLNX_VERSAL_NR_IRQS + 32); + VersalIrqTarget target; + Object *container = versal_get_child(s, "irq-or-gates"); + DeviceState *dev; + g_autofree char *name; + int idx, or_idx; + + idx = FIELD_EX32(irq_idx, VERSAL_IRQ, IRQ); + or_idx = FIELD_EX32(irq_idx, VERSAL_IRQ, OR_IDX); + target = FIELD_EX32(irq_idx, VERSAL_IRQ, TARGET); + + name = g_strdup_printf("%s-irq[%d]", TARGET_STR[target], idx); + dev = DEVICE(object_resolve_path_at(container, name)); + + if (dev == NULL) { + dev = qdev_new(TYPE_OR_IRQ); + object_property_add_child(container, name, OBJECT(dev)); + qdev_prop_set_uint16(dev, "num-lines", 1 << R_VERSAL_IRQ_OR_IDX_LENGTH); + qdev_realize_and_unref(dev, NULL, &error_abort); + qdev_connect_gpio_out(dev, 0, target_irq); + } - redist_region_count = qlist_new(); - qlist_append_int(redist_region_count, nr_apu_cpus); - qdev_prop_set_array(gicdev, "redist-region-count", redist_region_count); + return qdev_get_gpio_in(dev, or_idx); +} + +static qemu_irq versal_get_irq(Versal *s, int irq_idx) +{ + VersalIrqTarget target; + qemu_irq irq; + bool ored; + + target = FIELD_EX32(irq_idx, VERSAL_IRQ, TARGET); + ored = FIELD_EX32(irq_idx, VERSAL_IRQ, ORED); - qdev_prop_set_bit(gicdev, "has-security-extensions", true); + switch 
(target) { + case IRQ_TARGET_EAM: + /* EAM not implemented */ + return NULL; - sysbus_realize(SYS_BUS_DEVICE(&s->fpd.apu.gic), &error_fatal); + case IRQ_TARGET_PPU1: + /* PPU1 CPU not implemented */ + return NULL; - for (i = 0; i < ARRAY_SIZE(addrs); i++) { - MemoryRegion *mr; + case IRQ_TARGET_GIC: + irq = versal_get_gic_irq(s, irq_idx); + break; - mr = sysbus_mmio_get_region(gicbusdev, i); - memory_region_add_subregion(&s->fpd.apu.mr, addrs[i], mr); + default: + g_assert_not_reached(); } - for (i = 0; i < nr_apu_cpus; i++) { - DeviceState *cpudev = DEVICE(&s->fpd.apu.cpu[i]); - int ppibase = XLNX_VERSAL_NR_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; - qemu_irq maint_irq; - int ti; - /* Mapping from the output timer irq lines from the CPU to the - * GIC PPI inputs. - */ - const int timer_irq[] = { - [GTIMER_PHYS] = VERSAL_TIMER_NS_EL1_IRQ, - [GTIMER_VIRT] = VERSAL_TIMER_VIRT_IRQ, - [GTIMER_HYP] = VERSAL_TIMER_NS_EL2_IRQ, - [GTIMER_SEC] = VERSAL_TIMER_S_EL1_IRQ, - }; + if (ored) { + irq = versal_get_irq_or_gate_in(s, irq_idx, irq); + } + + return irq; +} + +static void versal_sysbus_connect_irq(Versal *s, SysBusDevice *sbd, + int sbd_idx, int irq_idx) +{ + qemu_irq irq = versal_get_irq(s, irq_idx); + + if (irq == NULL) { + return; + } + + sysbus_connect_irq(sbd, sbd_idx, irq); +} + +static void versal_qdev_connect_gpio_out(Versal *s, DeviceState *dev, + int dev_idx, int irq_idx) +{ + qemu_irq irq = versal_get_irq(s, irq_idx); + + if (irq == NULL) { + return; + } + + qdev_connect_gpio_out(dev, dev_idx, irq); +} + +static inline char *versal_fdt_add_subnode(Versal *s, const char *path, + uint64_t at, const char *compat, + size_t compat_sz) +{ + char *p; + + p = g_strdup_printf("%s@%" PRIx64, path, at); + qemu_fdt_add_subnode(s->cfg.fdt, p); + + if (!strncmp(compat, "memory", compat_sz)) { + qemu_fdt_setprop(s->cfg.fdt, p, "device_type", compat, compat_sz); + } else { + qemu_fdt_setprop(s->cfg.fdt, p, "compatible", compat, compat_sz); + } + + return p; +} + +static inline 
char *versal_fdt_add_simple_subnode(Versal *s, const char *path, + uint64_t addr, uint64_t len, + const char *compat, + size_t compat_sz) +{ + char *p = versal_fdt_add_subnode(s, path, addr, compat, compat_sz); + + qemu_fdt_setprop_sized_cells(s->cfg.fdt, p, "reg", 2, addr, 2, len); + return p; +} + +static inline DeviceState *create_or_gate(Versal *s, Object *parent, + const char *name, uint16_t num_lines, + int irq_idx) +{ + DeviceState *or; + + or = qdev_new(TYPE_OR_IRQ); + qdev_prop_set_uint16(or, "num-lines", num_lines); + object_property_add_child(parent, name, OBJECT(or)); + qdev_realize_and_unref(or, NULL, &error_abort); + versal_qdev_connect_gpio_out(s, or, 0, irq_idx); + + return or; +} + +static MemoryRegion *create_cpu_mr(Versal *s, DeviceState *cluster, + const VersalCpuClusterMap *map) +{ + MemoryRegion *mr, *root_alias; + char *name; + + mr = g_new(MemoryRegion, 1); + name = g_strdup_printf("%s-mr", map->name); + memory_region_init(mr, OBJECT(cluster), name, UINT64_MAX); + g_free(name); + + root_alias = g_new(MemoryRegion, 1); + name = g_strdup_printf("ps-alias-for-%s", map->name); + memory_region_init_alias(root_alias, OBJECT(cluster), name, + &s->mr_ps, 0, UINT64_MAX); + g_free(name); + memory_region_add_subregion(mr, 0, root_alias); + + return mr; +} + +static void versal_create_gic_its(Versal *s, + const VersalCpuClusterMap *map, + DeviceState *gic, + MemoryRegion *mr, + char *gic_node) +{ + DeviceState *dev; + SysBusDevice *sbd; + g_autofree char *node_pat = NULL, *node = NULL; + const char compatible[] = "arm,gic-v3-its"; + + if (map->gic.version != 3) { + return; + } + + if (!map->gic.has_its) { + return; + } + + dev = qdev_new(TYPE_ARM_GICV3_ITS); + sbd = SYS_BUS_DEVICE(dev); + + object_property_add_child(OBJECT(gic), "its", OBJECT(dev)); + object_property_set_link(OBJECT(dev), "parent-gicv3", OBJECT(gic), + &error_abort); + + sysbus_realize_and_unref(sbd, &error_abort); + + memory_region_add_subregion(mr, map->gic.its, + 
sysbus_mmio_get_region(sbd, 0)); + + if (!map->dtb_expose) { + return; + } + + qemu_fdt_setprop(s->cfg.fdt, gic_node, "ranges", NULL, 0); + qemu_fdt_setprop_cell(s->cfg.fdt, gic_node, "#address-cells", 2); + qemu_fdt_setprop_cell(s->cfg.fdt, gic_node, "#size-cells", 2); + + node_pat = g_strdup_printf("%s/its", gic_node); + node = versal_fdt_add_simple_subnode(s, node_pat, map->gic.its, 0x20000, + compatible, sizeof(compatible)); + qemu_fdt_setprop(s->cfg.fdt, node, "msi-controller", NULL, 0); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "#msi-cells", 1); +} + +static DeviceState *versal_create_gic(Versal *s, + const VersalCpuClusterMap *map, + MemoryRegion *mr, + int first_cpu_idx, + size_t num_cpu) +{ + DeviceState *dev; + SysBusDevice *sbd; + g_autofree char *node = NULL; + g_autofree char *name = NULL; + const char gicv3_compat[] = "arm,gic-v3"; + const char gicv2_compat[] = "arm,cortex-a15-gic"; + + switch (map->gic.version) { + case 2: + dev = qdev_new(gic_class_name()); + break; + + case 3: + dev = qdev_new(gicv3_class_name()); + break; + + default: + g_assert_not_reached(); + } + + name = g_strdup_printf("%s-gic[*]", map->name); + object_property_add_child(OBJECT(s), name, OBJECT(dev)); + sbd = SYS_BUS_DEVICE(dev); + qdev_prop_set_uint32(dev, "revision", map->gic.version); + qdev_prop_set_uint32(dev, "num-cpu", num_cpu); + qdev_prop_set_uint32(dev, "num-irq", map->gic.num_irq + 32); + qdev_prop_set_bit(dev, "has-security-extensions", true); + qdev_prop_set_uint32(dev, "first-cpu-index", first_cpu_idx); + + if (map->gic.version == 3) { + QList *redist_region_count; + + redist_region_count = qlist_new(); + qlist_append_int(redist_region_count, num_cpu); + qdev_prop_set_array(dev, "redist-region-count", redist_region_count); + qdev_prop_set_bit(dev, "has-lpi", map->gic.has_its); + object_property_set_link(OBJECT(dev), "sysmem", OBJECT(mr), + &error_abort); + } + + sysbus_realize_and_unref(sbd, &error_fatal); + + memory_region_add_subregion(mr, map->gic.dist, + 
sysbus_mmio_get_region(sbd, 0)); + + if (map->gic.version == 3) { + memory_region_add_subregion(mr, map->gic.redist, + sysbus_mmio_get_region(sbd, 1)); + } else { + memory_region_add_subregion(mr, map->gic.cpu_iface, + sysbus_mmio_get_region(sbd, 1)); + } + + if (map->dtb_expose) { + if (map->gic.version == 3) { + node = versal_fdt_add_subnode(s, "/gic", map->gic.dist, + gicv3_compat, + sizeof(gicv3_compat)); + qemu_fdt_setprop_sized_cells(s->cfg.fdt, node, "reg", + 2, map->gic.dist, + 2, 0x10000, + 2, map->gic.redist, + 2, GICV3_REDIST_SIZE * num_cpu); + } else { + node = versal_fdt_add_subnode(s, "/gic", map->gic.dist, + gicv2_compat, + sizeof(gicv2_compat)); + qemu_fdt_setprop_sized_cells(s->cfg.fdt, node, "reg", + 2, map->gic.dist, + 2, 0x1000, + 2, map->gic.cpu_iface, + 2, 0x1000); + } + + qemu_fdt_setprop_cell(s->cfg.fdt, node, "phandle", s->phandle.gic); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "#interrupt-cells", 3); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_GIC_MAINT_IRQ), + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->cfg.fdt, node, "interrupt-controller", NULL, 0); + } + + versal_create_gic_its(s, map, dev, mr, node); + + g_array_append_val(s->intc, dev); + + return dev; +} + +static void connect_gic_to_cpu(const VersalCpuClusterMap *map, + DeviceState *gic, DeviceState *cpu, size_t idx, + size_t num_cpu) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(gic); + int ppibase = map->gic.num_irq + idx * GIC_INTERNAL + GIC_NR_SGIS; + int ti; + bool has_gtimer; + /* + * Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs. 
+ */ + const int timer_irq[] = { + [GTIMER_PHYS] = INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), + [GTIMER_VIRT] = INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), + [GTIMER_HYP] = INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), + [GTIMER_SEC] = INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), + }; + + has_gtimer = arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_GENERIC_TIMER); + + if (has_gtimer) { for (ti = 0; ti < ARRAY_SIZE(timer_irq); ti++) { - qdev_connect_gpio_out(cpudev, ti, - qdev_get_gpio_in(gicdev, + qdev_connect_gpio_out(cpu, ti, + qdev_get_gpio_in(gic, ppibase + timer_irq[ti])); } - maint_irq = qdev_get_gpio_in(gicdev, - ppibase + VERSAL_GIC_MAINT_IRQ); - qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", - 0, maint_irq); - sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); - sysbus_connect_irq(gicbusdev, i + nr_apu_cpus, - qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); - sysbus_connect_irq(gicbusdev, i + 2 * nr_apu_cpus, - qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); - sysbus_connect_irq(gicbusdev, i + 3 * nr_apu_cpus, - qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); } - for (i = 0; i < XLNX_VERSAL_NR_IRQS; i++) { - pic[i] = qdev_get_gpio_in(gicdev, i); + if (map->gic.version == 3) { + qemu_irq maint_irq; + int maint_idx = ppibase + INTID_TO_PPI(ARCH_GIC_MAINT_IRQ); + + maint_irq = qdev_get_gpio_in(gic, maint_idx); + qdev_connect_gpio_out_named(cpu, "gicv3-maintenance-interrupt", + 0, maint_irq); } + + sysbus_connect_irq(sbd, idx, qdev_get_gpio_in(cpu, ARM_CPU_IRQ)); + sysbus_connect_irq(sbd, idx + num_cpu, + qdev_get_gpio_in(cpu, ARM_CPU_FIQ)); + sysbus_connect_irq(sbd, idx + 2 * num_cpu, + qdev_get_gpio_in(cpu, ARM_CPU_VIRQ)); + sysbus_connect_irq(sbd, idx + 3 * num_cpu, + qdev_get_gpio_in(cpu, ARM_CPU_VFIQ)); } -static void versal_create_rpu_cpus(Versal *s) +static inline void versal_create_and_connect_gic(Versal *s, + const VersalCpuClusterMap *map, + MemoryRegion *mr, + DeviceState **cpus, + size_t num_cpu) { - int i; + DeviceState *gic; + int first_cpu_idx; + size_t i; - 
object_initialize_child(OBJECT(s), "rpu-cluster", &s->lpd.rpu.cluster, - TYPE_CPU_CLUSTER); - qdev_prop_set_uint32(DEVICE(&s->lpd.rpu.cluster), "cluster-id", 1); + first_cpu_idx = CPU(cpus[0])->cpu_index; + gic = versal_create_gic(s, map, mr, first_cpu_idx, num_cpu); - for (i = 0; i < ARRAY_SIZE(s->lpd.rpu.cpu); i++) { - Object *obj; + for (i = 0; i < num_cpu; i++) { + connect_gic_to_cpu(map, gic, cpus[i], i, num_cpu); + } +} - object_initialize_child(OBJECT(&s->lpd.rpu.cluster), - "rpu-cpu[*]", &s->lpd.rpu.cpu[i], - XLNX_VERSAL_RCPU_TYPE); - obj = OBJECT(&s->lpd.rpu.cpu[i]); - object_property_set_bool(obj, "start-powered-off", true, - &error_abort); +static DeviceState *versal_create_cpu(Versal *s, + const VersalCpuClusterMap *map, + DeviceState *qemu_cluster, + MemoryRegion *cpu_mr, + size_t cluster_idx, + size_t core_idx) +{ + DeviceState *cpu = qdev_new(map->cpu_model); + ARMCPU *arm_cpu = ARM_CPU(cpu); + Object *obj = OBJECT(cpu); + uint64_t affinity; + bool start_off; + size_t idx = cluster_idx * map->num_core + core_idx; + g_autofree char *name; + g_autofree char *node = NULL; + + affinity = map->mp_affinity.base; + affinity |= (cluster_idx & 0xff) << map->mp_affinity.cluster_shift; + affinity |= (core_idx & 0xff) << map->mp_affinity.core_shift; + + start_off = map->start_powered_off == SPO_ALL + || ((map->start_powered_off == SPO_SECONDARIES) + && (cluster_idx || core_idx)); + + name = g_strdup_printf("%s[*]", map->name); + object_property_add_child(OBJECT(qemu_cluster), name, obj); + object_property_set_bool(obj, "start-powered-off", start_off, + &error_abort); + qdev_prop_set_uint64(cpu, "mp-affinity", affinity); + qdev_prop_set_int32(cpu, "core-count", map->num_core); + object_property_set_link(obj, "memory", OBJECT(cpu_mr), &error_abort); + qdev_realize_and_unref(cpu, NULL, &error_fatal); - object_property_set_int(obj, "mp-affinity", 0x100 | i, &error_abort); - object_property_set_int(obj, "core-count", ARRAY_SIZE(s->lpd.rpu.cpu), - &error_abort); - 
object_property_set_link(obj, "memory", OBJECT(&s->lpd.rpu.mr), - &error_abort); - qdev_realize(DEVICE(obj), NULL, &error_fatal); + if (!map->dtb_expose) { + return cpu; } - qdev_realize(DEVICE(&s->lpd.rpu.cluster), NULL, &error_fatal); + node = versal_fdt_add_subnode(s, "/cpus/cpu", idx, + arm_cpu->dtb_compatible, + strlen(arm_cpu->dtb_compatible) + 1); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "reg", + arm_cpu_mp_affinity(arm_cpu) & ARM64_AFFINITY_MASK); + qemu_fdt_setprop_string(s->cfg.fdt, node, "device_type", "cpu"); + qemu_fdt_setprop_string(s->cfg.fdt, node, "enable-method", "psci"); + + return cpu; } -static void versal_create_uarts(Versal *s, qemu_irq *pic) +static void versal_create_cpu_cluster(Versal *s, const VersalCpuClusterMap *map) { - int i; + size_t i, j; + DeviceState *cluster; + MemoryRegion *mr; + char *name; + g_autofree DeviceState **cpus; + const char compatible[] = "arm,armv8-timer"; + bool has_gtimer; - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.uart); i++) { - static const int irqs[] = { VERSAL_UART0_IRQ_0, VERSAL_UART1_IRQ_0}; - static const uint64_t addrs[] = { MM_UART0, MM_UART1 }; - char *name = g_strdup_printf("uart%d", i); - DeviceState *dev; - MemoryRegion *mr; + cluster = qdev_new(TYPE_CPU_CLUSTER); + name = g_strdup_printf("%s-cluster", map->name); + object_property_add_child(OBJECT(s), name, OBJECT(cluster)); + g_free(name); + qdev_prop_set_uint32(cluster, "cluster-id", map->qemu_cluster_id); - object_initialize_child(OBJECT(s), name, &s->lpd.iou.uart[i], - TYPE_PL011); - dev = DEVICE(&s->lpd.iou.uart[i]); - qdev_prop_set_chr(dev, "chardev", serial_hd(i)); - sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + mr = create_cpu_mr(s, cluster, map); - mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); - memory_region_add_subregion(&s->mr_ps, addrs[i], mr); + cpus = g_new(DeviceState *, map->num_cluster * map->num_core); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[irqs[i]]); - g_free(name); + if (map->dtb_expose) { + 
qemu_fdt_add_subnode(s->cfg.fdt, "/cpus"); + qemu_fdt_setprop_cell(s->cfg.fdt, "/cpus", "#size-cells", 0); + qemu_fdt_setprop_cell(s->cfg.fdt, "/cpus", "#address-cells", 1); + } + + for (i = 0; i < map->num_cluster; i++) { + for (j = 0; j < map->num_core; j++) { + DeviceState *cpu = versal_create_cpu(s, map, cluster, mr, i, j); + + cpus[i * map->num_core + j] = cpu; + } + + if (map->per_cluster_gic) { + versal_create_and_connect_gic(s, map, mr, &cpus[i * map->num_core], + map->num_core); + } + } + + qdev_realize_and_unref(cluster, NULL, &error_fatal); + + if (!map->per_cluster_gic) { + versal_create_and_connect_gic(s, map, mr, cpus, + map->num_cluster * map->num_core); + } + + has_gtimer = arm_feature(&ARM_CPU(cpus[0])->env, ARM_FEATURE_GENERIC_TIMER); + if (map->dtb_expose && has_gtimer) { + qemu_fdt_add_subnode(s->cfg.fdt, "/timer"); + qemu_fdt_setprop_cells(s->cfg.fdt, "/timer", "interrupts", + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_S_EL1_IRQ), + GIC_FDT_IRQ_FLAGS_LEVEL_HI, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL1_IRQ), + GIC_FDT_IRQ_FLAGS_LEVEL_HI, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_VIRT_IRQ), + GIC_FDT_IRQ_FLAGS_LEVEL_HI, + GIC_FDT_IRQ_TYPE_PPI, + INTID_TO_PPI(ARCH_TIMER_NS_EL2_IRQ), + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->cfg.fdt, "/timer", "compatible", + compatible, sizeof(compatible)); } } -static void versal_create_canfds(Versal *s, qemu_irq *pic) +static void versal_create_uart(Versal *s, + const VersalSimplePeriphMap *map, + int chardev_idx) { - int i; - uint32_t irqs[] = { VERSAL_CANFD0_IRQ_0, VERSAL_CANFD1_IRQ_0}; - uint64_t addrs[] = { MM_CANFD0, MM_CANFD1 }; + DeviceState *dev; + MemoryRegion *mr; + g_autofree char *node; + g_autofree char *alias; + const char compatible[] = "arm,pl011\0arm,sbsa-uart"; + const char clocknames[] = "uartclk\0apb_pclk"; + + dev = qdev_new(TYPE_PL011); + object_property_add_child(OBJECT(s), "uart[*]", OBJECT(dev)); + qdev_prop_set_chr(dev, "chardev", 
serial_hd(chardev_idx)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.canfd); i++) { - char *name = g_strdup_printf("canfd%d", i); - SysBusDevice *sbd; - MemoryRegion *mr; + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, map->addr, mr); + + versal_sysbus_connect_irq(s, SYS_BUS_DEVICE(dev), 0, map->irq); + + node = versal_fdt_add_simple_subnode(s, "/uart", map->addr, 0x1000, + compatible, sizeof(compatible)); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "current-speed", 115200); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "clocks", + s->phandle.clk_125mhz, s->phandle.clk_125mhz); + qemu_fdt_setprop(s->cfg.fdt, node, "clock-names", clocknames, + sizeof(clocknames)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, map->irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->cfg.fdt, node, "u-boot,dm-pre-reloc", NULL, 0); + + alias = g_strdup_printf("serial%d", chardev_idx); + qemu_fdt_setprop_string(s->cfg.fdt, "/aliases", alias, node); + + if (chardev_idx == 0) { + qemu_fdt_setprop_string(s->cfg.fdt, "/chosen", "stdout-path", node); + } +} - object_initialize_child(OBJECT(s), name, &s->lpd.iou.canfd[i], - TYPE_XILINX_CANFD); - sbd = SYS_BUS_DEVICE(&s->lpd.iou.canfd[i]); +static void versal_create_canfd(Versal *s, const VersalSimplePeriphMap *map, + CanBusState *bus) +{ + SysBusDevice *sbd; + MemoryRegion *mr; + g_autofree char *node; + const char compatible[] = "xlnx,canfd-2.0"; + const char clocknames[] = "can_clk\0s_axi_aclk"; - object_property_set_int(OBJECT(&s->lpd.iou.canfd[i]), "ext_clk_freq", - XLNX_VERSAL_CANFD_REF_CLK , &error_abort); + sbd = SYS_BUS_DEVICE(qdev_new(TYPE_XILINX_CANFD)); + object_property_add_child(OBJECT(s), "canfd[*]", OBJECT(sbd)); - object_property_set_link(OBJECT(&s->lpd.iou.canfd[i]), "canfdbus", - OBJECT(s->lpd.iou.canbus[i]), - &error_abort); + object_property_set_int(OBJECT(sbd), "ext_clk_freq", + 25 
* 1000 * 1000 , &error_abort); - sysbus_realize(sbd, &error_fatal); + object_property_set_link(OBJECT(sbd), "canfdbus", OBJECT(bus), + &error_abort); - mr = sysbus_mmio_get_region(sbd, 0); - memory_region_add_subregion(&s->mr_ps, addrs[i], mr); + sysbus_realize_and_unref(sbd, &error_fatal); - sysbus_connect_irq(sbd, 0, pic[irqs[i]]); - g_free(name); - } + mr = sysbus_mmio_get_region(sbd, 0); + memory_region_add_subregion(&s->mr_ps, map->addr, mr); + + versal_sysbus_connect_irq(s, sbd, 0, map->irq); + + node = versal_fdt_add_simple_subnode(s, "/canfd", map->addr, 0x10000, + compatible, sizeof(compatible)); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "rx-fifo-depth", 0x40); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "tx-mailbox-count", 0x20); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "clocks", + s->phandle.clk_25mhz, s->phandle.clk_25mhz); + qemu_fdt_setprop(s->cfg.fdt, node, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, map->irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); } -static void versal_create_usbs(Versal *s, qemu_irq *pic) +static void versal_create_usb(Versal *s, + const struct VersalUsbMap *map) { DeviceState *dev; MemoryRegion *mr; + g_autofree char *node, *subnode; + const char clocknames[] = "bus_clk\0ref_clk"; + const char irq_name[] = "dwc_usb3"; + const char compat_versal_dwc3[] = "xlnx,versal-dwc3"; + const char compat_dwc3[] = "snps,dwc3"; - object_initialize_child(OBJECT(s), "usb2", &s->lpd.iou.usb, - TYPE_XILINX_VERSAL_USB2); - dev = DEVICE(&s->lpd.iou.usb); + dev = qdev_new(TYPE_XILINX_VERSAL_USB2); + object_property_add_child(OBJECT(s), "usb[*]", OBJECT(dev)); object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps), &error_abort); qdev_prop_set_uint32(dev, "intrs", 1); qdev_prop_set_uint32(dev, "slots", 2); - sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); mr = 
sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); - memory_region_add_subregion(&s->mr_ps, MM_USB_0, mr); + memory_region_add_subregion(&s->mr_ps, map->xhci, mr); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_USB0_IRQ_0]); + versal_sysbus_connect_irq(s, SYS_BUS_DEVICE(dev), 0, map->irq); mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); - memory_region_add_subregion(&s->mr_ps, MM_USB2_CTRL_REGS, mr); + memory_region_add_subregion(&s->mr_ps, map->ctrl, mr); + + node = versal_fdt_add_simple_subnode(s, "/usb", map->ctrl, 0x10000, + compat_versal_dwc3, + sizeof(compat_versal_dwc3)); + qemu_fdt_setprop(s->cfg.fdt, node, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "clocks", + s->phandle.clk_25mhz, s->phandle.clk_125mhz); + qemu_fdt_setprop(s->cfg.fdt, node, "ranges", NULL, 0); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "#address-cells", 2); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "#size-cells", 2); + + subnode = g_strdup_printf("/%s/dwc3", node); + g_free(node); + + node = versal_fdt_add_simple_subnode(s, subnode, map->xhci, 0x10000, + compat_dwc3, + sizeof(compat_dwc3)); + qemu_fdt_setprop(s->cfg.fdt, node, "interrupt-names", + irq_name, sizeof(irq_name)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, map->irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop_cell(s->cfg.fdt, node, + "snps,quirk-frame-length-adjustment", 0x20); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "#stream-id-cells", 1); + qemu_fdt_setprop_string(s->cfg.fdt, node, "dr_mode", "host"); + qemu_fdt_setprop_string(s->cfg.fdt, node, "phy-names", "usb3-phy"); + qemu_fdt_setprop(s->cfg.fdt, node, "snps,dis_u2_susphy_quirk", NULL, 0); + qemu_fdt_setprop(s->cfg.fdt, node, "snps,dis_u3_susphy_quirk", NULL, 0); + qemu_fdt_setprop(s->cfg.fdt, node, "snps,refclk_fladj", NULL, 0); + qemu_fdt_setprop(s->cfg.fdt, node, "snps,mask_phy_reset", NULL, 0); + qemu_fdt_setprop_string(s->cfg.fdt, node, "maximum-speed", 
"high-speed"); } -static void versal_create_gems(Versal *s, qemu_irq *pic) +static void versal_create_gem(Versal *s, + const struct VersalGemMap *map) { + DeviceState *dev; + MemoryRegion *mr; + DeviceState *or; int i; - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.gem); i++) { - static const int irqs[] = { VERSAL_GEM0_IRQ_0, VERSAL_GEM1_IRQ_0}; - static const uint64_t addrs[] = { MM_GEM0, MM_GEM1 }; - char *name = g_strdup_printf("gem%d", i); - DeviceState *dev; - MemoryRegion *mr; - OrIRQState *or_irq; - - object_initialize_child(OBJECT(s), name, &s->lpd.iou.gem[i], - TYPE_CADENCE_GEM); - or_irq = &s->lpd.iou.gem_irq_orgate[i]; - object_initialize_child(OBJECT(s), "gem-irq-orgate[*]", - or_irq, TYPE_OR_IRQ); - dev = DEVICE(&s->lpd.iou.gem[i]); - qemu_configure_nic_device(dev, true, NULL); - object_property_set_int(OBJECT(dev), "phy-addr", 23, &error_abort); - object_property_set_int(OBJECT(dev), "num-priority-queues", 2, - &error_abort); - object_property_set_int(OBJECT(or_irq), - "num-lines", 2, &error_fatal); - qdev_realize(DEVICE(or_irq), NULL, &error_fatal); - qdev_connect_gpio_out(DEVICE(or_irq), 0, pic[irqs[i]]); - - object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps), - &error_abort); - sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + dev = qdev_new(TYPE_CADENCE_GEM); + object_property_add_child(OBJECT(s), "gem[*]", OBJECT(dev)); - mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); - memory_region_add_subregion(&s->mr_ps, addrs[i], mr); + qemu_configure_nic_device(dev, true, NULL); + object_property_set_int(OBJECT(dev), "phy-addr", 23, &error_abort); + object_property_set_int(OBJECT(dev), "num-priority-queues", + map->num_prio_queue, &error_abort); + + object_property_set_link(OBJECT(dev), "dma", OBJECT(&s->mr_ps), + &error_abort); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, map->map.addr, mr); + + /* + * The GEM controller exposes 
one IRQ line per priority queue. In Versal + * family devices, those are OR'ed together. + */ + or = create_or_gate(s, OBJECT(dev), "irq-orgate", + map->num_prio_queue, map->map.irq); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(DEVICE(or_irq), 0)); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 1, qdev_get_gpio_in(DEVICE(or_irq), 1)); - g_free(name); + for (i = 0; i < map->num_prio_queue; i++) { + sysbus_connect_irq(SYS_BUS_DEVICE(dev), i, qdev_get_gpio_in(or, i)); } } -static void versal_create_admas(Versal *s, qemu_irq *pic) +static void versal_create_gem_fdt(Versal *s, + const struct VersalGemMap *map) { int i; - - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.adma); i++) { - char *name = g_strdup_printf("adma%d", i); - DeviceState *dev; - MemoryRegion *mr; - - object_initialize_child(OBJECT(s), name, &s->lpd.iou.adma[i], - TYPE_XLNX_ZDMA); - dev = DEVICE(&s->lpd.iou.adma[i]); - object_property_set_int(OBJECT(dev), "bus-width", 128, &error_abort); - object_property_set_link(OBJECT(dev), "dma", - OBJECT(get_system_memory()), &error_fatal); - sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); - - mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); - memory_region_add_subregion(&s->mr_ps, - MM_ADMA_CH0 + i * MM_ADMA_CH0_SIZE, mr); - - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, pic[VERSAL_ADMA_IRQ_0 + i]); - g_free(name); + g_autofree char *node; + g_autofree char *phy_node; + int phy_phandle; + const char compatible[] = "cdns,zynqmp-gem\0cdns,gem"; + const char clocknames[] = "pclk\0hclk\0tx_clk\0rx_clk"; + g_autofree uint32_t *irq_prop; + + node = versal_fdt_add_simple_subnode(s, "/ethernet", map->map.addr, 0x1000, + compatible, sizeof(compatible)); + phy_node = g_strdup_printf("%s/fixed-link", node); + phy_phandle = qemu_fdt_alloc_phandle(s->cfg.fdt); + + /* Fixed link PHY node */ + qemu_fdt_add_subnode(s->cfg.fdt, phy_node); + qemu_fdt_setprop_cell(s->cfg.fdt, phy_node, "phandle", phy_phandle); + qemu_fdt_setprop(s->cfg.fdt, phy_node, "full-duplex", NULL, 
0); + qemu_fdt_setprop_cell(s->cfg.fdt, phy_node, "speed", map->speed); + + qemu_fdt_setprop_string(s->cfg.fdt, node, "phy-mode", map->phy_mode); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "phy-handle", phy_phandle); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "clocks", + s->phandle.clk_25mhz, s->phandle.clk_25mhz, + s->phandle.clk_125mhz, s->phandle.clk_125mhz); + qemu_fdt_setprop(s->cfg.fdt, node, "clock-names", + clocknames, sizeof(clocknames)); + + irq_prop = g_new(uint32_t, map->num_prio_queue * 3); + for (i = 0; i < map->num_prio_queue; i++) { + irq_prop[3 * i] = cpu_to_be32(GIC_FDT_IRQ_TYPE_SPI); + irq_prop[3 * i + 1] = cpu_to_be32(map->map.irq); + irq_prop[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_LEVEL_HI); } + qemu_fdt_setprop(s->cfg.fdt, node, "interrupts", irq_prop, + sizeof(uint32_t) * map->num_prio_queue * 3); } -#define SDHCI_CAPABILITIES 0x280737ec6481 /* Same as on ZynqMP. */ -static void versal_create_sds(Versal *s, qemu_irq *pic) +static void versal_create_zdma(Versal *s, + const struct VersalZDMAMap *map) { - int i; + DeviceState *dev; + MemoryRegion *mr; + g_autofree char *name; + const char compatible[] = "xlnx,zynqmp-dma-1.0"; + const char clocknames[] = "clk_main\0clk_apb"; + size_t i; - for (i = 0; i < ARRAY_SIZE(s->pmc.iou.sd); i++) { - DeviceState *dev; - MemoryRegion *mr; + name = g_strdup_printf("%s[*]", map->name); - object_initialize_child(OBJECT(s), "sd[*]", &s->pmc.iou.sd[i], - TYPE_SYSBUS_SDHCI); - dev = DEVICE(&s->pmc.iou.sd[i]); + for (i = 0; i < map->num_chan; i++) { + uint64_t addr = map->map.addr + map->chan_stride * i; + int irq = map->map.irq + map->irq_stride * i; + g_autofree char *node; - object_property_set_uint(OBJECT(dev), "sd-spec-version", 3, - &error_fatal); - object_property_set_uint(OBJECT(dev), "capareg", SDHCI_CAPABILITIES, - &error_fatal); - object_property_set_uint(OBJECT(dev), "uhs", UHS_I, &error_fatal); - sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + dev = qdev_new(TYPE_XLNX_ZDMA); + 
object_property_add_child(OBJECT(s), name, OBJECT(dev)); + object_property_set_int(OBJECT(dev), "bus-width", 128, &error_abort); + object_property_set_link(OBJECT(dev), "dma", + OBJECT(get_system_memory()), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); - memory_region_add_subregion(&s->mr_ps, - MM_PMC_SD0 + i * MM_PMC_SD0_SIZE, mr); - - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, - pic[VERSAL_SD0_IRQ_0 + i * 2]); + memory_region_add_subregion(&s->mr_ps, addr, mr); + + versal_sysbus_connect_irq(s, SYS_BUS_DEVICE(dev), 0, irq); + + node = versal_fdt_add_simple_subnode(s, "/dma", addr, 0x1000, + compatible, sizeof(compatible)); + qemu_fdt_setprop_cell(s->cfg.fdt, node, "xlnx,bus-width", 64); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "clocks", + s->phandle.clk_25mhz, s->phandle.clk_25mhz); + qemu_fdt_setprop(s->cfg.fdt, node, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); } } -static void versal_create_pmc_apb_irq_orgate(Versal *s, qemu_irq *pic) +#define SDHCI_CAPABILITIES 0x280737ec6481 /* Same as on ZynqMP. 
*/ +static void versal_create_sdhci(Versal *s, + const VersalSimplePeriphMap *map) { - DeviceState *orgate; + DeviceState *dev; + MemoryRegion *mr; + g_autofree char *node; + const char compatible[] = "arasan,sdhci-8.9a"; + const char clocknames[] = "clk_xin\0clk_ahb"; + + dev = qdev_new(TYPE_SYSBUS_SDHCI); + object_property_add_child(OBJECT(s), "sdhci[*]", OBJECT(dev)); + + object_property_set_uint(OBJECT(dev), "sd-spec-version", 3, + &error_fatal); + object_property_set_uint(OBJECT(dev), "capareg", SDHCI_CAPABILITIES, + &error_fatal); + object_property_set_uint(OBJECT(dev), "uhs", UHS_I, &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - /* - * The VERSAL_PMC_APB_IRQ is an 'or' of the interrupts from the following - * models: - * - RTC - * - BBRAM - * - PMC SLCR - * - CFRAME regs (input 3 - 17 to the orgate) - */ - object_initialize_child(OBJECT(s), "pmc-apb-irq-orgate", - &s->pmc.apb_irq_orgate, TYPE_OR_IRQ); - orgate = DEVICE(&s->pmc.apb_irq_orgate); - object_property_set_int(OBJECT(orgate), - "num-lines", VERSAL_NUM_PMC_APB_IRQS, &error_fatal); - qdev_realize(orgate, NULL, &error_fatal); - qdev_connect_gpio_out(orgate, 0, pic[VERSAL_PMC_APB_IRQ]); + mr = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0); + memory_region_add_subregion(&s->mr_ps, map->addr, mr); + + versal_sysbus_connect_irq(s, SYS_BUS_DEVICE(dev), 0, map->irq); + + node = versal_fdt_add_simple_subnode(s, "/sdhci", map->addr, 0x10000, + compatible, sizeof(compatible)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "clocks", + s->phandle.clk_25mhz, s->phandle.clk_25mhz); + qemu_fdt_setprop(s->cfg.fdt, node, "clock-names", + clocknames, sizeof(clocknames)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, map->irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); } -static void versal_create_rtc(Versal *s, qemu_irq *pic) +static void versal_create_rtc(Versal *s, const struct VersalRtcMap *map) { SysBusDevice *sbd; MemoryRegion *mr; + g_autofree char *node; + 
const char compatible[] = "xlnx,zynqmp-rtc"; + const char interrupt_names[] = "alarm\0sec"; - object_initialize_child(OBJECT(s), "rtc", &s->pmc.rtc, - TYPE_XLNX_ZYNQMP_RTC); - sbd = SYS_BUS_DEVICE(&s->pmc.rtc); - sysbus_realize(sbd, &error_fatal); + sbd = SYS_BUS_DEVICE(qdev_new(TYPE_XLNX_ZYNQMP_RTC)); + object_property_add_child(OBJECT(s), "rtc", OBJECT(sbd)); + sysbus_realize_and_unref(sbd, &error_abort); mr = sysbus_mmio_get_region(sbd, 0); - memory_region_add_subregion(&s->mr_ps, MM_PMC_RTC, mr); + memory_region_add_subregion(&s->mr_ps, map->map.addr, mr); /* * TODO: Connect the ALARM and SECONDS interrupts once our RTC model * supports them. */ - sysbus_connect_irq(sbd, 1, - qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 0)); + versal_sysbus_connect_irq(s, sbd, 0, map->map.irq); + + node = versal_fdt_add_simple_subnode(s, "/rtc", map->map.addr, 0x10000, + compatible, sizeof(compatible)); + qemu_fdt_setprop_cells(s->cfg.fdt, node, "interrupts", + GIC_FDT_IRQ_TYPE_SPI, map->alarm_irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI, + GIC_FDT_IRQ_TYPE_SPI, map->second_irq, + GIC_FDT_IRQ_FLAGS_LEVEL_HI); + qemu_fdt_setprop(s->cfg.fdt, node, "interrupt-names", + interrupt_names, sizeof(interrupt_names)); } -static void versal_create_trng(Versal *s, qemu_irq *pic) +static void versal_create_trng(Versal *s, const VersalSimplePeriphMap *map) { SysBusDevice *sbd; MemoryRegion *mr; - object_initialize_child(OBJECT(s), "trng", &s->pmc.trng, - TYPE_XLNX_VERSAL_TRNG); - sbd = SYS_BUS_DEVICE(&s->pmc.trng); - sysbus_realize(sbd, &error_fatal); + sbd = SYS_BUS_DEVICE(qdev_new(TYPE_XLNX_VERSAL_TRNG)); + object_property_add_child(OBJECT(s), "trng", OBJECT(sbd)); + sysbus_realize_and_unref(sbd, &error_abort); mr = sysbus_mmio_get_region(sbd, 0); - memory_region_add_subregion(&s->mr_ps, MM_PMC_TRNG, mr); - sysbus_connect_irq(sbd, 0, pic[VERSAL_TRNG_IRQ]); + memory_region_add_subregion(&s->mr_ps, map->addr, mr); + versal_sysbus_connect_irq(s, sbd, 0, map->irq); } -static void 
versal_create_xrams(Versal *s, qemu_irq *pic) +static void versal_create_xrams(Versal *s, const struct VersalXramMap *map) { - int nr_xrams = ARRAY_SIZE(s->lpd.xram.ctrl); - DeviceState *orgate; - int i; + SysBusDevice *sbd; + MemoryRegion *mr; + DeviceState *or; + size_t i; + + or = create_or_gate(s, OBJECT(s), "xram-orgate", map->num, map->irq); - /* XRAM IRQs get ORed into a single line. */ - object_initialize_child(OBJECT(s), "xram-irq-orgate", - &s->lpd.xram.irq_orgate, TYPE_OR_IRQ); - orgate = DEVICE(&s->lpd.xram.irq_orgate); - object_property_set_int(OBJECT(orgate), - "num-lines", nr_xrams, &error_fatal); - qdev_realize(orgate, NULL, &error_fatal); - qdev_connect_gpio_out(orgate, 0, pic[VERSAL_XRAM_IRQ_0]); + for (i = 0; i < map->num; i++) { + hwaddr ctrl, mem; - for (i = 0; i < ARRAY_SIZE(s->lpd.xram.ctrl); i++) { - SysBusDevice *sbd; - MemoryRegion *mr; + sbd = SYS_BUS_DEVICE(qdev_new(TYPE_XLNX_XRAM_CTRL)); + object_property_add_child(OBJECT(s), "xram[*]", OBJECT(sbd)); + sysbus_realize_and_unref(sbd, &error_fatal); - object_initialize_child(OBJECT(s), "xram[*]", &s->lpd.xram.ctrl[i], - TYPE_XLNX_XRAM_CTRL); - sbd = SYS_BUS_DEVICE(&s->lpd.xram.ctrl[i]); - sysbus_realize(sbd, &error_fatal); + ctrl = map->ctrl + map->ctrl_stride * i; + mem = map->mem + map->mem_stride * i; mr = sysbus_mmio_get_region(sbd, 0); - memory_region_add_subregion(&s->mr_ps, - MM_XRAMC + i * MM_XRAMC_SIZE, mr); + memory_region_add_subregion(&s->mr_ps, ctrl, mr); mr = sysbus_mmio_get_region(sbd, 1); - memory_region_add_subregion(&s->mr_ps, MM_XRAM + i * MiB, mr); + memory_region_add_subregion(&s->mr_ps, mem, mr); - sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(orgate, i)); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(or, i)); } } -static void versal_create_bbram(Versal *s, qemu_irq *pic) +static void versal_create_bbram(Versal *s, + const VersalSimplePeriphMap *map) { + DeviceState *dev; SysBusDevice *sbd; - object_initialize_child_with_props(OBJECT(s), "bbram", &s->pmc.bbram, - 
sizeof(s->pmc.bbram), TYPE_XLNX_BBRAM, - &error_fatal, - "crc-zpads", "0", - NULL); - sbd = SYS_BUS_DEVICE(&s->pmc.bbram); + dev = qdev_new(TYPE_XLNX_BBRAM); + sbd = SYS_BUS_DEVICE(dev); - sysbus_realize(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_BBRAM_CTRL, + object_property_add_child(OBJECT(s), "bbram", OBJECT(dev)); + qdev_prop_set_uint32(dev, "crc-zpads", 0); + sysbus_realize_and_unref(sbd, &error_abort); + memory_region_add_subregion(&s->mr_ps, map->addr, sysbus_mmio_get_region(sbd, 0)); - sysbus_connect_irq(sbd, 0, - qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 1)); + versal_sysbus_connect_irq(s, sbd, 0, map->irq); } -static void versal_realize_efuse_part(Versal *s, Object *dev, hwaddr base) +static void versal_create_efuse(Versal *s, + const struct VersalEfuseMap *map) { - SysBusDevice *part = SYS_BUS_DEVICE(dev); + DeviceState *bits; + DeviceState *ctrl; + DeviceState *cache; - object_property_set_link(OBJECT(part), "efuse", - OBJECT(&s->pmc.efuse), &error_abort); + if (versal_get_version(s) != VERSAL_VER_VERSAL) { + /* TODO for versal2 */ + return; + } - sysbus_realize(part, &error_abort); - memory_region_add_subregion(&s->mr_ps, base, - sysbus_mmio_get_region(part, 0)); -} + ctrl = qdev_new(TYPE_XLNX_VERSAL_EFUSE_CTRL); + cache = qdev_new(TYPE_XLNX_VERSAL_EFUSE_CACHE); + bits = qdev_new(TYPE_XLNX_EFUSE); -static void versal_create_efuse(Versal *s, qemu_irq *pic) -{ - Object *bits = OBJECT(&s->pmc.efuse); - Object *ctrl = OBJECT(&s->pmc.efuse_ctrl); - Object *cache = OBJECT(&s->pmc.efuse_cache); + qdev_prop_set_uint32(bits, "efuse-nr", 3); + qdev_prop_set_uint32(bits, "efuse-size", 8192); + + object_property_add_child(OBJECT(s), "efuse", OBJECT(bits)); + qdev_realize_and_unref(bits, NULL, &error_abort); - object_initialize_child(OBJECT(s), "efuse-ctrl", &s->pmc.efuse_ctrl, - TYPE_XLNX_VERSAL_EFUSE_CTRL); + object_property_set_link(OBJECT(ctrl), "efuse", OBJECT(bits), &error_abort); - object_initialize_child(OBJECT(s), 
"efuse-cache", &s->pmc.efuse_cache, - TYPE_XLNX_VERSAL_EFUSE_CACHE); + object_property_set_link(OBJECT(cache), "efuse", OBJECT(bits), + &error_abort); - object_initialize_child_with_props(ctrl, "xlnx-efuse@0", bits, - sizeof(s->pmc.efuse), - TYPE_XLNX_EFUSE, &error_abort, - "efuse-nr", "3", - "efuse-size", "8192", - NULL); + object_property_add_child(OBJECT(s), "efuse-cache", OBJECT(cache)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(cache), &error_abort); - qdev_realize(DEVICE(bits), NULL, &error_abort); - versal_realize_efuse_part(s, ctrl, MM_PMC_EFUSE_CTRL); - versal_realize_efuse_part(s, cache, MM_PMC_EFUSE_CACHE); + object_property_add_child(OBJECT(s), "efuse-ctrl", OBJECT(ctrl)); + sysbus_realize_and_unref(SYS_BUS_DEVICE(ctrl), &error_abort); - sysbus_connect_irq(SYS_BUS_DEVICE(ctrl), 0, pic[VERSAL_EFUSE_IRQ]); + memory_region_add_subregion(&s->mr_ps, map->ctrl, + sysbus_mmio_get_region(SYS_BUS_DEVICE(ctrl), + 0)); + memory_region_add_subregion(&s->mr_ps, map->cache, + sysbus_mmio_get_region(SYS_BUS_DEVICE(cache), + 0)); + versal_sysbus_connect_irq(s, SYS_BUS_DEVICE(ctrl), 0, map->irq); } -static void versal_create_pmc_iou_slcr(Versal *s, qemu_irq *pic) +static DeviceState *versal_create_pmc_iou_slcr(Versal *s, + const VersalSimplePeriphMap *map) { SysBusDevice *sbd; + DeviceState *dev; - object_initialize_child(OBJECT(s), "versal-pmc-iou-slcr", &s->pmc.iou.slcr, - TYPE_XILINX_VERSAL_PMC_IOU_SLCR); + dev = qdev_new(TYPE_XILINX_VERSAL_PMC_IOU_SLCR); + object_property_add_child(OBJECT(s), "pmc-iou-slcr", OBJECT(dev)); - sbd = SYS_BUS_DEVICE(&s->pmc.iou.slcr); - sysbus_realize(sbd, &error_fatal); + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_PMC_IOU_SLCR, + memory_region_add_subregion(&s->mr_ps, map->addr, sysbus_mmio_get_region(sbd, 0)); - sysbus_connect_irq(sbd, 0, - qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), 2)); + versal_sysbus_connect_irq(s, sbd, 0, map->irq); + + return dev; 
} -static void versal_create_ospi(Versal *s, qemu_irq *pic) +static DeviceState *versal_create_ospi(Versal *s, + const struct VersalOspiMap *map) { SysBusDevice *sbd; MemoryRegion *mr_dac; - qemu_irq ospi_mux_sel; - DeviceState *orgate; + DeviceState *dev, *dma_dst, *dma_src, *orgate; + MemoryRegion *linear_mr = g_new(MemoryRegion, 1); - memory_region_init(&s->pmc.iou.ospi.linear_mr, OBJECT(s), - "versal-ospi-linear-mr" , MM_PMC_OSPI_DAC_SIZE); + dev = qdev_new(TYPE_XILINX_VERSAL_OSPI); + object_property_add_child(OBJECT(s), "ospi", OBJECT(dev)); - object_initialize_child(OBJECT(s), "versal-ospi", &s->pmc.iou.ospi.ospi, - TYPE_XILINX_VERSAL_OSPI); + memory_region_init(linear_mr, OBJECT(dev), "linear-mr", map->dac_sz); - mr_dac = sysbus_mmio_get_region(SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi), 1); - memory_region_add_subregion(&s->pmc.iou.ospi.linear_mr, 0x0, mr_dac); + mr_dac = sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 1); + memory_region_add_subregion(linear_mr, 0x0, mr_dac); /* Create the OSPI destination DMA */ - object_initialize_child(OBJECT(s), "versal-ospi-dma-dst", - &s->pmc.iou.ospi.dma_dst, - TYPE_XLNX_CSU_DMA); + dma_dst = qdev_new(TYPE_XLNX_CSU_DMA); + object_property_add_child(OBJECT(dev), "dma-dst-dev", OBJECT(dma_dst)); + object_property_set_link(OBJECT(dma_dst), "dma", + OBJECT(get_system_memory()), &error_abort); - object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_dst), - "dma", OBJECT(get_system_memory()), - &error_abort); + sbd = SYS_BUS_DEVICE(dma_dst); + sysbus_realize_and_unref(sbd, &error_fatal); - sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_dst); - sysbus_realize(sbd, &error_fatal); - - memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DMA_DST, + memory_region_add_subregion(&s->mr_ps, map->dma_dst, sysbus_mmio_get_region(sbd, 0)); /* Create the OSPI source DMA */ - object_initialize_child(OBJECT(s), "versal-ospi-dma-src", - &s->pmc.iou.ospi.dma_src, - TYPE_XLNX_CSU_DMA); - - object_property_set_bool(OBJECT(&s->pmc.iou.ospi.dma_src), 
"is-dst", - false, &error_abort); + dma_src = qdev_new(TYPE_XLNX_CSU_DMA); + object_property_add_child(OBJECT(dev), "dma-src-dev", OBJECT(dma_src)); - object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_src), - "dma", OBJECT(mr_dac), &error_abort); + object_property_set_bool(OBJECT(dma_src), "is-dst", false, &error_abort); - object_property_set_link(OBJECT(&s->pmc.iou.ospi.dma_src), - "stream-connected-dma", - OBJECT(&s->pmc.iou.ospi.dma_dst), + object_property_set_link(OBJECT(dma_src), "dma", OBJECT(mr_dac), &error_abort); - sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_src); - sysbus_realize(sbd, &error_fatal); + object_property_set_link(OBJECT(dma_src), "stream-connected-dma", + OBJECT(dma_dst), &error_abort); + + sbd = SYS_BUS_DEVICE(dma_src); + sysbus_realize_and_unref(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DMA_SRC, + memory_region_add_subregion(&s->mr_ps, map->dma_src, sysbus_mmio_get_region(sbd, 0)); /* Realize the OSPI */ - object_property_set_link(OBJECT(&s->pmc.iou.ospi.ospi), "dma-src", - OBJECT(&s->pmc.iou.ospi.dma_src), &error_abort); + object_property_set_link(OBJECT(dev), "dma-src", + OBJECT(dma_src), &error_abort); - sbd = SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi); - sysbus_realize(sbd, &error_fatal); + sbd = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI, + memory_region_add_subregion(&s->mr_ps, map->ctrl, sysbus_mmio_get_region(sbd, 0)); - memory_region_add_subregion(&s->mr_ps, MM_PMC_OSPI_DAC, - &s->pmc.iou.ospi.linear_mr); - - /* ospi_mux_sel */ - ospi_mux_sel = qdev_get_gpio_in_named(DEVICE(&s->pmc.iou.ospi.ospi), - "ospi-mux-sel", 0); - qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "ospi-mux-sel", 0, - ospi_mux_sel); + memory_region_add_subregion(&s->mr_ps, map->dac, + linear_mr); /* OSPI irq */ - object_initialize_child(OBJECT(s), "ospi-irq-orgate", - &s->pmc.iou.ospi.irq_orgate, TYPE_OR_IRQ); - 
object_property_set_int(OBJECT(&s->pmc.iou.ospi.irq_orgate), - "num-lines", NUM_OSPI_IRQ_LINES, &error_fatal); - - orgate = DEVICE(&s->pmc.iou.ospi.irq_orgate); - qdev_realize(orgate, NULL, &error_fatal); + orgate = create_or_gate(s, OBJECT(dev), "irq-orgate", 3, + map->irq); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.ospi), 0, - qdev_get_gpio_in(orgate, 0)); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_src), 0, - qdev_get_gpio_in(orgate, 1)); - sysbus_connect_irq(SYS_BUS_DEVICE(&s->pmc.iou.ospi.dma_dst), 0, - qdev_get_gpio_in(orgate, 2)); + sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(orgate, 0)); + sysbus_connect_irq(SYS_BUS_DEVICE(dma_src), 0, qdev_get_gpio_in(orgate, 1)); + sysbus_connect_irq(SYS_BUS_DEVICE(dma_dst), 0, qdev_get_gpio_in(orgate, 2)); - qdev_connect_gpio_out(orgate, 0, pic[VERSAL_OSPI_IRQ]); + return dev; } -static void versal_create_cfu(Versal *s, qemu_irq *pic) +static void versal_create_cfu(Versal *s, const struct VersalCfuMap *map) { SysBusDevice *sbd; - DeviceState *dev; + Object *container; + DeviceState *cfu_fdro, *cfu_apb, *cfu_sfr, *cframe_bcast; + DeviceState *cframe_irq_or; int i; - const struct { - uint64_t reg_base; - uint64_t fdri_base; - } cframe_addr[] = { - { MM_PMC_CFRAME0_REG, MM_PMC_CFRAME0_FDRI }, - { MM_PMC_CFRAME1_REG, MM_PMC_CFRAME1_FDRI }, - { MM_PMC_CFRAME2_REG, MM_PMC_CFRAME2_FDRI }, - { MM_PMC_CFRAME3_REG, MM_PMC_CFRAME3_FDRI }, - { MM_PMC_CFRAME4_REG, MM_PMC_CFRAME4_FDRI }, - { MM_PMC_CFRAME5_REG, MM_PMC_CFRAME5_FDRI }, - { MM_PMC_CFRAME6_REG, MM_PMC_CFRAME6_FDRI }, - { MM_PMC_CFRAME7_REG, MM_PMC_CFRAME7_FDRI }, - { MM_PMC_CFRAME8_REG, MM_PMC_CFRAME8_FDRI }, - { MM_PMC_CFRAME9_REG, MM_PMC_CFRAME9_FDRI }, - { MM_PMC_CFRAME10_REG, MM_PMC_CFRAME10_FDRI }, - { MM_PMC_CFRAME11_REG, MM_PMC_CFRAME11_FDRI }, - { MM_PMC_CFRAME12_REG, MM_PMC_CFRAME12_FDRI }, - { MM_PMC_CFRAME13_REG, MM_PMC_CFRAME13_FDRI }, - { MM_PMC_CFRAME14_REG, MM_PMC_CFRAME14_FDRI }, - }; - const struct { - uint32_t 
blktype0_frames; - uint32_t blktype1_frames; - uint32_t blktype2_frames; - uint32_t blktype3_frames; - uint32_t blktype4_frames; - uint32_t blktype5_frames; - uint32_t blktype6_frames; - } cframe_cfg[] = { - [0] = { 34111, 3528, 12800, 11, 5, 1, 1 }, - [1] = { 38498, 3841, 15361, 13, 7, 3, 1 }, - [2] = { 38498, 3841, 15361, 13, 7, 3, 1 }, - [3] = { 38498, 3841, 15361, 13, 7, 3, 1 }, - }; + + container = object_new(TYPE_CONTAINER); + object_property_add_child(OBJECT(s), "cfu", container); + object_unref(container); /* CFU FDRO */ - object_initialize_child(OBJECT(s), "cfu-fdro", &s->pmc.cfu_fdro, - TYPE_XLNX_VERSAL_CFU_FDRO); - sbd = SYS_BUS_DEVICE(&s->pmc.cfu_fdro); + cfu_fdro = qdev_new(TYPE_XLNX_VERSAL_CFU_FDRO); + object_property_add_child(container, "cfu-fdro", OBJECT(cfu_fdro)); + sbd = SYS_BUS_DEVICE(cfu_fdro); - sysbus_realize(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_FDRO, + sysbus_realize_and_unref(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, map->cfu_fdro, sysbus_mmio_get_region(sbd, 0)); - /* CFRAME REG */ - for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) { - g_autofree char *name = g_strdup_printf("cframe%d", i); + /* cframe bcast */ + cframe_bcast = qdev_new(TYPE_XLNX_VERSAL_CFRAME_BCAST_REG); + object_property_add_child(container, "cframe-bcast", OBJECT(cframe_bcast)); - object_initialize_child(OBJECT(s), name, &s->pmc.cframe[i], - TYPE_XLNX_VERSAL_CFRAME_REG); + /* CFU APB */ + cfu_apb = qdev_new(TYPE_XLNX_VERSAL_CFU_APB); + object_property_add_child(container, "cfu-apb", OBJECT(cfu_apb)); + + /* IRQ or gate for cframes */ + cframe_irq_or = qdev_new(TYPE_OR_IRQ); + object_property_add_child(container, "cframe-irq-or-gate", + OBJECT(cframe_irq_or)); + qdev_prop_set_uint16(cframe_irq_or, "num-lines", map->num_cframe); + qdev_realize_and_unref(cframe_irq_or, NULL, &error_abort); + versal_qdev_connect_gpio_out(s, cframe_irq_or, 0, map->cframe_irq); + + /* cframe reg */ + for (i = 0; i < map->num_cframe; i++) { 
+ uint64_t reg_base; + uint64_t fdri_base; + DeviceState *dev; + g_autofree char *prop_name; + size_t j; - sbd = SYS_BUS_DEVICE(&s->pmc.cframe[i]); - dev = DEVICE(&s->pmc.cframe[i]); + dev = qdev_new(TYPE_XLNX_VERSAL_CFRAME_REG); + object_property_add_child(container, "cframe[*]", OBJECT(dev)); - if (i < ARRAY_SIZE(cframe_cfg)) { - object_property_set_int(OBJECT(dev), "blktype0-frames", - cframe_cfg[i].blktype0_frames, - &error_abort); - object_property_set_int(OBJECT(dev), "blktype1-frames", - cframe_cfg[i].blktype1_frames, - &error_abort); - object_property_set_int(OBJECT(dev), "blktype2-frames", - cframe_cfg[i].blktype2_frames, - &error_abort); - object_property_set_int(OBJECT(dev), "blktype3-frames", - cframe_cfg[i].blktype3_frames, - &error_abort); - object_property_set_int(OBJECT(dev), "blktype4-frames", - cframe_cfg[i].blktype4_frames, - &error_abort); - object_property_set_int(OBJECT(dev), "blktype5-frames", - cframe_cfg[i].blktype5_frames, - &error_abort); - object_property_set_int(OBJECT(dev), "blktype6-frames", - cframe_cfg[i].blktype6_frames, + sbd = SYS_BUS_DEVICE(dev); + + for (j = 0; j < ARRAY_SIZE(map->cframe_cfg[i].blktype_frames); j++) { + g_autofree char *blktype_prop_name; + + blktype_prop_name = g_strdup_printf("blktype%zu-frames", j); + object_property_set_int(OBJECT(dev), blktype_prop_name, + map->cframe_cfg[i].blktype_frames[j], &error_abort); } + object_property_set_link(OBJECT(dev), "cfu-fdro", - OBJECT(&s->pmc.cfu_fdro), &error_fatal); + OBJECT(cfu_fdro), &error_abort); - sysbus_realize(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_abort); - memory_region_add_subregion(&s->mr_ps, cframe_addr[i].reg_base, + reg_base = map->cframe_base + i * map->cframe_stride * 2; + fdri_base = reg_base + map->cframe_stride; + memory_region_add_subregion(&s->mr_ps, reg_base, sysbus_mmio_get_region(sbd, 0)); - memory_region_add_subregion(&s->mr_ps, cframe_addr[i].fdri_base, + 
memory_region_add_subregion(&s->mr_ps, fdri_base, sysbus_mmio_get_region(sbd, 1)); - sysbus_connect_irq(sbd, 0, - qdev_get_gpio_in(DEVICE(&s->pmc.apb_irq_orgate), - 3 + i)); - } - - /* CFRAME BCAST */ - object_initialize_child(OBJECT(s), "cframe_bcast", &s->pmc.cframe_bcast, - TYPE_XLNX_VERSAL_CFRAME_BCAST_REG); - - sbd = SYS_BUS_DEVICE(&s->pmc.cframe_bcast); - dev = DEVICE(&s->pmc.cframe_bcast); + sysbus_connect_irq(sbd, 0, qdev_get_gpio_in(cframe_irq_or, i)); - for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) { - g_autofree char *propname = g_strdup_printf("cframe%d", i); - object_property_set_link(OBJECT(dev), propname, - OBJECT(&s->pmc.cframe[i]), &error_fatal); + prop_name = g_strdup_printf("cframe%d", i); + object_property_set_link(OBJECT(cframe_bcast), prop_name, + OBJECT(dev), &error_abort); + object_property_set_link(OBJECT(cfu_apb), prop_name, + OBJECT(dev), &error_abort); } - sysbus_realize(sbd, &error_fatal); - - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFRAME_BCAST_REG, + sbd = SYS_BUS_DEVICE(cframe_bcast); + sysbus_realize_and_unref(sbd, &error_abort); + memory_region_add_subregion(&s->mr_ps, map->cframe_bcast_reg, sysbus_mmio_get_region(sbd, 0)); - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFRAME_BCAST_FDRI, + memory_region_add_subregion(&s->mr_ps, map->cframe_bcast_fdri, sysbus_mmio_get_region(sbd, 1)); - /* CFU APB */ - object_initialize_child(OBJECT(s), "cfu-apb", &s->pmc.cfu_apb, - TYPE_XLNX_VERSAL_CFU_APB); - sbd = SYS_BUS_DEVICE(&s->pmc.cfu_apb); - dev = DEVICE(&s->pmc.cfu_apb); - - for (i = 0; i < ARRAY_SIZE(s->pmc.cframe); i++) { - g_autofree char *propname = g_strdup_printf("cframe%d", i); - object_property_set_link(OBJECT(dev), propname, - OBJECT(&s->pmc.cframe[i]), &error_fatal); - } - - sysbus_realize(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_APB, + sbd = SYS_BUS_DEVICE(cfu_apb); + sysbus_realize_and_unref(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, map->cfu_apb, 
sysbus_mmio_get_region(sbd, 0)); - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_STREAM, + memory_region_add_subregion(&s->mr_ps, map->cfu_stream, sysbus_mmio_get_region(sbd, 1)); - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_STREAM_2, + memory_region_add_subregion(&s->mr_ps, map->cfu_stream_2, sysbus_mmio_get_region(sbd, 2)); - sysbus_connect_irq(sbd, 0, pic[VERSAL_CFU_IRQ_0]); + versal_sysbus_connect_irq(s, sbd, 0, map->cfu_apb_irq); /* CFU SFR */ - object_initialize_child(OBJECT(s), "cfu-sfr", &s->pmc.cfu_sfr, - TYPE_XLNX_VERSAL_CFU_SFR); - - sbd = SYS_BUS_DEVICE(&s->pmc.cfu_sfr); - - object_property_set_link(OBJECT(&s->pmc.cfu_sfr), - "cfu", OBJECT(&s->pmc.cfu_apb), &error_abort); - - sysbus_realize(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_PMC_CFU_SFR, + cfu_sfr = qdev_new(TYPE_XLNX_VERSAL_CFU_SFR); + object_property_add_child(container, "cfu-sfr", OBJECT(cfu_sfr)); + sbd = SYS_BUS_DEVICE(cfu_sfr); + + object_property_set_link(OBJECT(cfu_sfr), + "cfu", OBJECT(cfu_apb), &error_abort); + sysbus_realize_and_unref(sbd, &error_fatal); + memory_region_add_subregion(&s->mr_ps, map->cfu_sfr, sysbus_mmio_get_region(sbd, 0)); } -static void versal_create_crl(Versal *s, qemu_irq *pic) +static inline void crl_connect_dev(Object *crl, Object *dev) { - SysBusDevice *sbd; - int i; + const char *prop = object_get_canonical_path_component(dev); - object_initialize_child(OBJECT(s), "crl", &s->lpd.crl, - TYPE_XLNX_VERSAL_CRL); - sbd = SYS_BUS_DEVICE(&s->lpd.crl); + /* The component part of the device path matches the CRL property name */ + object_property_set_link(crl, prop, dev, &error_abort); +} - for (i = 0; i < ARRAY_SIZE(s->lpd.rpu.cpu); i++) { - g_autofree gchar *name = g_strdup_printf("cpu_r5[%d]", i); +static inline void crl_connect_dev_by_name(Versal *s, Object *crl, + const char *name, size_t num) +{ + size_t i; - object_property_set_link(OBJECT(&s->lpd.crl), - name, OBJECT(&s->lpd.rpu.cpu[i]), - &error_abort); + for (i = 0; i < num; 
i++) { + Object *dev = versal_get_child_idx(s, name, i); + + crl_connect_dev(crl, dev); } +} - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.gem); i++) { - g_autofree gchar *name = g_strdup_printf("gem[%d]", i); +static inline void versal_create_crl(Versal *s) +{ + const VersalMap *map; + VersalVersion ver; + const char *crl_class; + DeviceState *dev; + size_t num_gem; + Object *obj; - object_property_set_link(OBJECT(&s->lpd.crl), - name, OBJECT(&s->lpd.iou.gem[i]), - &error_abort); - } + map = versal_get_map(s); + ver = versal_get_version(s); - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.adma); i++) { - g_autofree gchar *name = g_strdup_printf("adma[%d]", i); + crl_class = xlnx_versal_crl_class_name(ver); + dev = qdev_new(crl_class); + obj = OBJECT(dev); + object_property_add_child(OBJECT(s), "crl", obj); - object_property_set_link(OBJECT(&s->lpd.crl), - name, OBJECT(&s->lpd.iou.adma[i]), - &error_abort); - } + /* + * The 3rd GEM controller on versal2 is in the MMI subsystem. + * Its reset line is not connected to the CRL. Consider only the first two + * ones. + */ + num_gem = ver == VERSAL_VER_VERSAL2 ? 
2 : map->num_gem; - for (i = 0; i < ARRAY_SIZE(s->lpd.iou.uart); i++) { - g_autofree gchar *name = g_strdup_printf("uart[%d]", i); + crl_connect_dev_by_name(s, obj, "rpu-cluster/rpu", + map->rpu.num_cluster * map->rpu.num_core); + crl_connect_dev_by_name(s, obj, map->zdma[0].name, map->zdma[0].num_chan); + crl_connect_dev_by_name(s, obj, "uart", map->num_uart); + crl_connect_dev_by_name(s, obj, "gem", num_gem); + crl_connect_dev_by_name(s, obj, "usb", map->num_usb); - object_property_set_link(OBJECT(&s->lpd.crl), - name, OBJECT(&s->lpd.iou.uart[i]), - &error_abort); - } + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_abort); - object_property_set_link(OBJECT(&s->lpd.crl), - "usb", OBJECT(&s->lpd.iou.usb), - &error_abort); + memory_region_add_subregion(&s->mr_ps, map->crl.addr, + sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); - sysbus_realize(sbd, &error_fatal); - memory_region_add_subregion(&s->mr_ps, MM_CRL, - sysbus_mmio_get_region(sbd, 0)); - sysbus_connect_irq(sbd, 0, pic[VERSAL_CRL_IRQ]); + if (ver == VERSAL_VER_VERSAL) { + /* CRL IRQ line has been removed in versal2 */ + versal_sysbus_connect_irq(s, SYS_BUS_DEVICE(dev), 0, map->crl.irq); + } } -/* This takes the board allocated linear DDR memory and creates aliases +/* + * This takes the board allocated linear DDR memory and creates aliases * for each split DDR range/aperture on the Versal address map. */ -static void versal_map_ddr(Versal *s) +static void versal_map_ddr(Versal *s, const struct VersalDDRMap *map) { uint64_t size = memory_region_size(s->cfg.mr_ddr); - /* Describes the various split DDR access regions. 
*/ - static const struct { - uint64_t base; - uint64_t size; - } addr_ranges[] = { - { MM_TOP_DDR, MM_TOP_DDR_SIZE }, - { MM_TOP_DDR_2, MM_TOP_DDR_2_SIZE }, - { MM_TOP_DDR_3, MM_TOP_DDR_3_SIZE }, - { MM_TOP_DDR_4, MM_TOP_DDR_4_SIZE } - }; uint64_t offset = 0; int i; - assert(ARRAY_SIZE(addr_ranges) == ARRAY_SIZE(s->noc.mr_ddr_ranges)); - for (i = 0; i < ARRAY_SIZE(addr_ranges) && size; i++) { - char *name; + for (i = 0; i < map->num_chan && size; i++) { uint64_t mapsize; + MemoryRegion *alias; + + mapsize = MIN(size, map->chan[i].size); - mapsize = size < addr_ranges[i].size ? size : addr_ranges[i].size; - name = g_strdup_printf("noc-ddr-range%d", i); /* Create the MR alias. */ - memory_region_init_alias(&s->noc.mr_ddr_ranges[i], OBJECT(s), - name, s->cfg.mr_ddr, - offset, mapsize); + alias = g_new(MemoryRegion, 1); + memory_region_init_alias(alias, OBJECT(s), "noc-ddr-range", + s->cfg.mr_ddr, offset, mapsize); /* Map it onto the NoC MR. */ - memory_region_add_subregion(&s->mr_ps, addr_ranges[i].base, - &s->noc.mr_ddr_ranges[i]); + memory_region_add_subregion(&s->mr_ps, map->chan[i].addr, alias); offset += mapsize; size -= mapsize; - g_free(name); } } +void versal_fdt_add_memory_nodes(Versal *s, uint64_t size) +{ + const struct VersalDDRMap *map = &versal_get_map(s)->ddr; + g_autofree char *node; + g_autofree uint64_t *reg; + int i; + + reg = g_new(uint64_t, map->num_chan * 2); + + for (i = 0; i < map->num_chan && size; i++) { + uint64_t mapsize; + + mapsize = MIN(size, map->chan[i].size); + + reg[i * 2] = cpu_to_be64(map->chan[i].addr); + reg[i * 2 + 1] = cpu_to_be64(mapsize); + + size -= mapsize; + } + + node = versal_fdt_add_subnode(s, "/memory", 0, "memory", sizeof("memory")); + qemu_fdt_setprop(s->cfg.fdt, node, "reg", reg, sizeof(uint64_t) * i * 2); +} + static void versal_unimp_area(Versal *s, const char *name, MemoryRegion *mr, hwaddr base, hwaddr size) @@ -875,22 +1794,12 @@ static void versal_unimp_irq_parity_imr(void *opaque, int n, int level) "is not 
yet implemented\n"); } -static void versal_unimp(Versal *s) +static void versal_unimp_common(Versal *s) { + DeviceState *slcr; qemu_irq gpio_in; - versal_unimp_area(s, "psm", &s->mr_ps, - MM_PSM_START, MM_PSM_END - MM_PSM_START); - versal_unimp_area(s, "crf", &s->mr_ps, - MM_FPD_CRF, MM_FPD_CRF_SIZE); - versal_unimp_area(s, "apu", &s->mr_ps, - MM_FPD_FPD_APU, MM_FPD_FPD_APU_SIZE); - versal_unimp_area(s, "crp", &s->mr_ps, - MM_PMC_CRP, MM_PMC_CRP_SIZE); - versal_unimp_area(s, "iou-scntr", &s->mr_ps, - MM_IOU_SCNTR, MM_IOU_SCNTR_SIZE); - versal_unimp_area(s, "iou-scntr-seucre", &s->mr_ps, - MM_IOU_SCNTRS, MM_IOU_SCNTRS_SIZE); + versal_unimp_area(s, "crp", &s->mr_ps, 0xf1260000, 0x10000); qdev_init_gpio_in_named(DEVICE(s), versal_unimp_sd_emmc_sel, "sd-emmc-sel-dummy", 2); @@ -899,102 +1808,353 @@ static void versal_unimp(Versal *s) qdev_init_gpio_in_named(DEVICE(s), versal_unimp_irq_parity_imr, "irq-parity-imr-dummy", 1); + slcr = DEVICE(versal_get_child(s, "pmc-iou-slcr")); gpio_in = qdev_get_gpio_in_named(DEVICE(s), "sd-emmc-sel-dummy", 0); - qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "sd-emmc-sel", 0, - gpio_in); + qdev_connect_gpio_out_named(slcr, "sd-emmc-sel", 0, gpio_in); gpio_in = qdev_get_gpio_in_named(DEVICE(s), "sd-emmc-sel-dummy", 1); - qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), "sd-emmc-sel", 1, - gpio_in); + qdev_connect_gpio_out_named(slcr, "sd-emmc-sel", 1, gpio_in); gpio_in = qdev_get_gpio_in_named(DEVICE(s), "qspi-ospi-mux-sel-dummy", 0); - qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), - "qspi-ospi-mux-sel", 0, - gpio_in); + qdev_connect_gpio_out_named(slcr, "qspi-ospi-mux-sel", 0, gpio_in); gpio_in = qdev_get_gpio_in_named(DEVICE(s), "irq-parity-imr-dummy", 0); - qdev_connect_gpio_out_named(DEVICE(&s->pmc.iou.slcr), - SYSBUS_DEVICE_GPIO_IRQ, 0, - gpio_in); + qdev_connect_gpio_out_named(slcr, SYSBUS_DEVICE_GPIO_IRQ, 0, gpio_in); +} + +static void versal_unimp(Versal *s) +{ + versal_unimp_area(s, "psm", &s->mr_ps, 
0xffc80000, 0x70000); + versal_unimp_area(s, "crf", &s->mr_ps, 0xfd1a0000, 0x140000); + versal_unimp_area(s, "apu", &s->mr_ps, 0xfd5c0000, 0x100); + versal_unimp_area(s, "iou-scntr", &s->mr_ps, 0xff130000, 0x10000); + versal_unimp_area(s, "iou-scntr-secure", &s->mr_ps, 0xff140000, 0x10000); + + versal_unimp_common(s); +} + +static void versal2_unimp(Versal *s) +{ + versal_unimp_area(s, "fpd-systmr-ctrl", &s->mr_ps, 0xec920000, 0x1000); + versal_unimp_area(s, "crf", &s->mr_ps, 0xec200000, 0x100000); + + versal_unimp_common(s); +} + +static uint32_t fdt_add_clk_node(Versal *s, const char *name, + unsigned int freq_hz) +{ + uint32_t phandle; + + phandle = qemu_fdt_alloc_phandle(s->cfg.fdt); + + qemu_fdt_add_subnode(s->cfg.fdt, name); + qemu_fdt_setprop_cell(s->cfg.fdt, name, "phandle", phandle); + qemu_fdt_setprop_cell(s->cfg.fdt, name, "clock-frequency", freq_hz); + qemu_fdt_setprop_cell(s->cfg.fdt, name, "#clock-cells", 0x0); + qemu_fdt_setprop_string(s->cfg.fdt, name, "compatible", "fixed-clock"); + qemu_fdt_setprop(s->cfg.fdt, name, "u-boot,dm-pre-reloc", NULL, 0); + + return phandle; +} + +static void versal_realize_common(Versal *s) +{ + DeviceState *slcr, *ospi; + MemoryRegion *ocm; + Object *container; + const VersalMap *map = versal_get_map(s); + size_t i; + + g_assert(s->cfg.fdt != NULL); + + s->phandle.clk_25mhz = fdt_add_clk_node(s, "/clk25", 25 * 1000 * 1000); + s->phandle.clk_125mhz = fdt_add_clk_node(s, "/clk125", 125 * 1000 * 1000); + s->phandle.gic = qemu_fdt_alloc_phandle(s->cfg.fdt); + + container = object_new(TYPE_CONTAINER); + object_property_add_child(OBJECT(s), "irq-splits", container); + object_unref(container); + + container = object_new(TYPE_CONTAINER); + object_property_add_child(OBJECT(s), "irq-or-gates", container); + object_unref(container); + + qemu_fdt_setprop_cell(s->cfg.fdt, "/", "interrupt-parent", s->phandle.gic); + qemu_fdt_setprop_cell(s->cfg.fdt, "/", "#size-cells", 0x2); + qemu_fdt_setprop_cell(s->cfg.fdt, "/", "#address-cells", 
0x2); + + versal_create_cpu_cluster(s, &map->apu); + versal_create_cpu_cluster(s, &map->rpu); + + for (i = 0; i < map->num_uart; i++) { + versal_create_uart(s, &map->uart[i], i); + } + + for (i = 0; i < map->num_canfd; i++) { + versal_create_canfd(s, &map->canfd[i], s->cfg.canbus[i]); + } + + for (i = 0; i < map->num_sdhci; i++) { + versal_create_sdhci(s, &map->sdhci[i]); + } + + for (i = 0; i < map->num_gem; i++) { + versal_create_gem(s, &map->gem[i]); + /* + * Create fdt node in reverse order to keep backward compatibility with + * previous versions of the generated FDT. This affects Linux kernel + * interface naming order when persistent naming scheme is not in use. + */ + versal_create_gem_fdt(s, &map->gem[map->num_gem - 1 - i]); + } + + for (i = 0; i < map->num_zdma; i++) { + versal_create_zdma(s, &map->zdma[i]); + } + + versal_create_xrams(s, &map->xram); + + for (i = 0; i < map->num_usb; i++) { + versal_create_usb(s, &map->usb[i]); + } + + versal_create_efuse(s, &map->efuse); + ospi = versal_create_ospi(s, &map->ospi); + slcr = versal_create_pmc_iou_slcr(s, &map->pmc_iou_slcr); + + qdev_connect_gpio_out_named(slcr, "ospi-mux-sel", 0, + qdev_get_gpio_in_named(ospi, + "ospi-mux-sel", 0)); + + versal_create_bbram(s, &map->bbram); + versal_create_trng(s, &map->trng); + versal_create_rtc(s, &map->rtc); + versal_create_cfu(s, &map->cfu); + versal_create_crl(s); + + versal_map_ddr(s, &map->ddr); + + /* Create the On Chip Memory (OCM). 
*/ + ocm = g_new(MemoryRegion, 1); + memory_region_init_ram(ocm, OBJECT(s), "ocm", map->ocm.size, &error_fatal); + memory_region_add_subregion_overlap(&s->mr_ps, map->ocm.addr, ocm, 0); } static void versal_realize(DeviceState *dev, Error **errp) { - Versal *s = XLNX_VERSAL(dev); - qemu_irq pic[XLNX_VERSAL_NR_IRQS]; - - versal_create_apu_cpus(s); - versal_create_apu_gic(s, pic); - versal_create_rpu_cpus(s); - versal_create_uarts(s, pic); - versal_create_canfds(s, pic); - versal_create_usbs(s, pic); - versal_create_gems(s, pic); - versal_create_admas(s, pic); - versal_create_sds(s, pic); - versal_create_pmc_apb_irq_orgate(s, pic); - versal_create_rtc(s, pic); - versal_create_trng(s, pic); - versal_create_xrams(s, pic); - versal_create_bbram(s, pic); - versal_create_efuse(s, pic); - versal_create_pmc_iou_slcr(s, pic); - versal_create_ospi(s, pic); - versal_create_crl(s, pic); - versal_create_cfu(s, pic); - versal_map_ddr(s); + Versal *s = XLNX_VERSAL_BASE(dev); + + versal_realize_common(s); versal_unimp(s); +} - /* Create the On Chip Memory (OCM). 
*/ - memory_region_init_ram(&s->lpd.mr_ocm, OBJECT(s), "ocm", - MM_OCM_SIZE, &error_fatal); +static void versal2_realize(DeviceState *dev, Error **errp) +{ + Versal *s = XLNX_VERSAL_BASE(dev); + + versal_realize_common(s); + versal2_unimp(s); +} + +DeviceState *versal_get_boot_cpu(Versal *s) +{ + return DEVICE(versal_get_child_idx(s, "apu-cluster/apu", 0)); +} + +void versal_sdhci_plug_card(Versal *s, int sd_idx, BlockBackend *blk) +{ + DeviceState *sdhci, *card; + + sdhci = DEVICE(versal_get_child_idx(s, "sdhci", sd_idx)); + + if (sdhci == NULL) { + return; + } + + card = qdev_new(TYPE_SD_CARD); + object_property_add_child(OBJECT(sdhci), "card[*]", OBJECT(card)); + qdev_prop_set_drive_err(card, "drive", blk, &error_fatal); + qdev_realize_and_unref(card, qdev_get_child_bus(DEVICE(sdhci), "sd-bus"), + &error_fatal); +} + +void versal_efuse_attach_drive(Versal *s, BlockBackend *blk) +{ + DeviceState *efuse; + + efuse = DEVICE(versal_get_child(s, "efuse")); + + if (efuse == NULL) { + return; + } + + qdev_prop_set_drive(efuse, "drive", blk); +} + +void versal_bbram_attach_drive(Versal *s, BlockBackend *blk) +{ + DeviceState *bbram; + + bbram = DEVICE(versal_get_child(s, "bbram")); + + if (bbram == NULL) { + return; + } + + qdev_prop_set_drive(bbram, "drive", blk); +} + +void versal_ospi_create_flash(Versal *s, int flash_idx, const char *flash_mdl, + BlockBackend *blk) +{ + BusState *spi_bus; + DeviceState *flash, *ospi; + qemu_irq cs_line; + + ospi = DEVICE(versal_get_child(s, "ospi")); + spi_bus = qdev_get_child_bus(ospi, "spi0"); + + flash = qdev_new(flash_mdl); - memory_region_add_subregion_overlap(&s->mr_ps, MM_OCM, &s->lpd.mr_ocm, 0); - memory_region_add_subregion_overlap(&s->fpd.apu.mr, 0, &s->mr_ps, 0); - memory_region_add_subregion_overlap(&s->lpd.rpu.mr, 0, - &s->lpd.rpu.mr_ps_alias, 0); + if (blk) { + qdev_prop_set_drive_err(flash, "drive", blk, &error_fatal); + } + qdev_prop_set_uint8(flash, "cs", flash_idx); + qdev_realize_and_unref(flash, spi_bus, 
&error_fatal); + + cs_line = qdev_get_gpio_in_named(flash, SSI_GPIO_CS, 0); + + sysbus_connect_irq(SYS_BUS_DEVICE(ospi), + flash_idx + 1, cs_line); +} + +qemu_irq versal_get_reserved_irq(Versal *s, int idx, int *dtb_idx) +{ + const VersalMap *map = versal_get_map(s); + + g_assert(idx < map->reserved.irq_num); + + *dtb_idx = map->reserved.irq_start + idx; + return versal_get_irq(s, *dtb_idx); } -static void versal_init(Object *obj) +hwaddr versal_get_reserved_mmio_addr(Versal *s) { - Versal *s = XLNX_VERSAL(obj); + const VersalMap *map = versal_get_map(s); + + return map->reserved.mmio_start; +} + +int versal_get_num_cpu(VersalVersion version) +{ + const VersalMap *map = VERSION_TO_MAP[version]; + + return map->apu.num_cluster * map->apu.num_core + + map->rpu.num_cluster * map->rpu.num_core; +} + +int versal_get_num_can(VersalVersion version) +{ + const VersalMap *map = VERSION_TO_MAP[version]; + + return map->num_canfd; +} + +int versal_get_num_sdhci(VersalVersion version) +{ + const VersalMap *map = VERSION_TO_MAP[version]; + + return map->num_sdhci; +} + +static void versal_base_init(Object *obj) +{ + Versal *s = XLNX_VERSAL_BASE(obj); + size_t i, num_can; - memory_region_init(&s->fpd.apu.mr, obj, "mr-apu", UINT64_MAX); - memory_region_init(&s->lpd.rpu.mr, obj, "mr-rpu", UINT64_MAX); memory_region_init(&s->mr_ps, obj, "mr-ps-switch", UINT64_MAX); - memory_region_init_alias(&s->lpd.rpu.mr_ps_alias, OBJECT(s), - "mr-rpu-ps-alias", &s->mr_ps, 0, UINT64_MAX); + s->intc = g_array_new(false, false, sizeof(DeviceState *)); + + num_can = versal_get_map(s)->num_canfd; + s->cfg.canbus = g_new0(CanBusState *, num_can); + + for (i = 0; i < num_can; i++) { + g_autofree char *prop_name = g_strdup_printf("canbus%zu", i); + + object_property_add_link(obj, prop_name, TYPE_CAN_BUS, + (Object **) &s->cfg.canbus[i], + object_property_allow_set_link, 0); + } +} + +static void versal_base_finalize(Object *obj) +{ + Versal *s = XLNX_VERSAL_BASE(obj); + + g_array_free(s->intc, true); + 
g_free(s->cfg.canbus); } static const Property versal_properties[] = { DEFINE_PROP_LINK("ddr", Versal, cfg.mr_ddr, TYPE_MEMORY_REGION, MemoryRegion *), - DEFINE_PROP_LINK("canbus0", Versal, lpd.iou.canbus[0], - TYPE_CAN_BUS, CanBusState *), - DEFINE_PROP_LINK("canbus1", Versal, lpd.iou.canbus[1], - TYPE_CAN_BUS, CanBusState *), }; -static void versal_class_init(ObjectClass *klass, const void *data) +static void versal_base_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->realize = versal_realize; device_class_set_props(dc, versal_properties); /* No VMSD since we haven't got any top-level SoC state to save. */ } -static const TypeInfo versal_info = { - .name = TYPE_XLNX_VERSAL, +static void versal_class_init(ObjectClass *klass, const void *data) +{ + VersalClass *vc = XLNX_VERSAL_BASE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + vc->version = VERSAL_VER_VERSAL; + dc->realize = versal_realize; +} + +static void versal2_class_init(ObjectClass *klass, const void *data) +{ + VersalClass *vc = XLNX_VERSAL_BASE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + vc->version = VERSAL_VER_VERSAL2; + dc->realize = versal2_realize; +} + +static const TypeInfo versal_base_info = { + .name = TYPE_XLNX_VERSAL_BASE, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(Versal), - .instance_init = versal_init, + .instance_init = versal_base_init, + .instance_finalize = versal_base_finalize, + .class_init = versal_base_class_init, + .class_size = sizeof(VersalClass), + .abstract = true, +}; + +static const TypeInfo versal_info = { + .name = TYPE_XLNX_VERSAL, + .parent = TYPE_XLNX_VERSAL_BASE, .class_init = versal_class_init, }; +static const TypeInfo versal2_info = { + .name = TYPE_XLNX_VERSAL2, + .parent = TYPE_XLNX_VERSAL_BASE, + .class_init = versal2_class_init, +}; + static void versal_register_types(void) { + type_register_static(&versal_base_info); type_register_static(&versal_info); + 
type_register_static(&versal2_info); } type_init(versal_register_types); diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index ec96a46eec3ce..ffed6e5126ed0 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -26,8 +26,6 @@ #include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" -#define GIC_NUM_SPI_INTR 160 - #define ARM_PHYS_TIMER_PPI 30 #define ARM_VIRT_TIMER_PPI 27 #define ARM_HYP_TIMER_PPI 26 @@ -206,17 +204,26 @@ static const XlnxZynqMPGICRegion xlnx_zynqmp_gic_regions[] = { static inline int arm_gic_ppi_index(int cpu_nr, int ppi_index) { - return GIC_NUM_SPI_INTR + cpu_nr * GIC_INTERNAL + ppi_index; + return XLNX_ZYNQMP_GIC_NUM_SPI_INTR + cpu_nr * GIC_INTERNAL + ppi_index; +} + +static unsigned int xlnx_zynqmp_get_rpu_number(MachineState *ms) +{ + /* + * RPUs will be created only if "-smp" is higher than the maximum + * of APUs. Round it up to 0 to avoid dealing with negative values. + */ + return MAX(0, MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), + XLNX_ZYNQMP_NUM_RPU_CPUS)); } static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), - XLNX_ZYNQMP_NUM_RPU_CPUS); + int num_rpus = xlnx_zynqmp_get_rpu_number(ms); - if (num_rpus <= 0) { + if (!num_rpus) { /* Don't create rpu-cluster object if there's nothing to put in it */ return; } @@ -377,6 +384,7 @@ static void xlnx_zynqmp_init(Object *obj) XlnxZynqMPState *s = XLNX_ZYNQMP(obj); int i; int num_apus = MIN(ms->smp.cpus, XLNX_ZYNQMP_NUM_APU_CPUS); + int num_rpus = xlnx_zynqmp_get_rpu_number(ms); object_initialize_child(obj, "apu-cluster", &s->apu_cluster, TYPE_CPU_CLUSTER); @@ -390,6 +398,12 @@ static void xlnx_zynqmp_init(Object *obj) object_initialize_child(obj, "gic", &s->gic, gic_class_name()); + if (num_rpus) { + /* Do not create the rpu_gic if we don't have rpus */ + object_initialize_child(obj, "rpu_gic", &s->rpu_gic, + gic_class_name()); + 
} + for (i = 0; i < XLNX_ZYNQMP_NUM_GEMS; i++) { object_initialize_child(obj, "gem[*]", &s->gem[i], TYPE_CADENCE_GEM); object_initialize_child(obj, "gem-irq-orgate[*]", @@ -439,6 +453,15 @@ static void xlnx_zynqmp_init(Object *obj) object_initialize_child(obj, "qspi-irq-orgate", &s->qspi_irq_orgate, TYPE_OR_IRQ); + if (num_rpus) { + for (i = 0; i < ARRAY_SIZE(s->splitter); i++) { + g_autofree char *name = g_strdup_printf("irq-splitter%d", i); + object_initialize_child(obj, name, &s->splitter[i], TYPE_SPLIT_IRQ); + } + } + + + for (i = 0; i < XLNX_ZYNQMP_NUM_USB; i++) { object_initialize_child(obj, "usb[*]", &s->usb[i], TYPE_USB_DWC3); } @@ -452,9 +475,10 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) uint8_t i; uint64_t ram_size; int num_apus = MIN(ms->smp.cpus, XLNX_ZYNQMP_NUM_APU_CPUS); + int num_rpus = xlnx_zynqmp_get_rpu_number(ms); const char *boot_cpu = s->boot_cpu ? s->boot_cpu : "apu-cpu[0]"; ram_addr_t ddr_low_size, ddr_high_size; - qemu_irq gic_spi[GIC_NUM_SPI_INTR]; + qemu_irq gic_spi[XLNX_ZYNQMP_GIC_NUM_SPI_INTR]; Error *err = NULL; ram_size = memory_region_size(s->ddr_ram); @@ -502,13 +526,22 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) g_free(ocm_name); } - qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", GIC_NUM_SPI_INTR + 32); + qdev_prop_set_uint32(DEVICE(&s->gic), "num-irq", + XLNX_ZYNQMP_GIC_NUM_SPI_INTR + 32); qdev_prop_set_uint32(DEVICE(&s->gic), "revision", 2); qdev_prop_set_uint32(DEVICE(&s->gic), "num-cpu", num_apus); qdev_prop_set_bit(DEVICE(&s->gic), "has-security-extensions", s->secure); qdev_prop_set_bit(DEVICE(&s->gic), "has-virtualization-extensions", s->virt); + if (num_rpus) { + qdev_prop_set_uint32(DEVICE(&s->rpu_gic), "num-irq", + XLNX_ZYNQMP_GIC_NUM_SPI_INTR + 32); + qdev_prop_set_uint32(DEVICE(&s->rpu_gic), "revision", 1); + qdev_prop_set_uint32(DEVICE(&s->rpu_gic), "num-cpu", num_rpus); + qdev_prop_set_uint32(DEVICE(&s->rpu_gic), "first-cpu-index", 4); + } + 
qdev_realize(DEVICE(&s->apu_cluster), NULL, &error_fatal); /* Realize APUs before realizing the GIC. KVM requires this. */ @@ -608,13 +641,63 @@ static void xlnx_zynqmp_realize(DeviceState *dev, Error **errp) return; } + if (num_rpus) { + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rpu_gic), errp)) { + return; + } + + for (i = 0; i < num_rpus; i++) { + qemu_irq irq; + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rpu_gic), i + 1, + GIC_BASE_ADDR + i * 0x1000); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rpu_gic), i, + qdev_get_gpio_in(DEVICE(&s->rpu_cpu[i]), + ARM_CPU_IRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rpu_gic), i + num_rpus, + qdev_get_gpio_in(DEVICE(&s->rpu_cpu[i]), + ARM_CPU_FIQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rpu_gic), i + num_rpus * 2, + qdev_get_gpio_in(DEVICE(&s->rpu_cpu[i]), + ARM_CPU_VIRQ)); + sysbus_connect_irq(SYS_BUS_DEVICE(&s->rpu_gic), i + num_rpus * 3, + qdev_get_gpio_in(DEVICE(&s->rpu_cpu[i]), + ARM_CPU_VFIQ)); + irq = qdev_get_gpio_in(DEVICE(&s->rpu_gic), + arm_gic_ppi_index(i, ARM_PHYS_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->rpu_cpu[i]), GTIMER_PHYS, irq); + irq = qdev_get_gpio_in(DEVICE(&s->rpu_gic), + arm_gic_ppi_index(i, ARM_VIRT_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->rpu_cpu[i]), GTIMER_VIRT, irq); + irq = qdev_get_gpio_in(DEVICE(&s->rpu_gic), + arm_gic_ppi_index(i, ARM_HYP_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->rpu_cpu[i]), GTIMER_HYP, irq); + irq = qdev_get_gpio_in(DEVICE(&s->rpu_gic), + arm_gic_ppi_index(i, ARM_SEC_TIMER_PPI)); + qdev_connect_gpio_out(DEVICE(&s->rpu_cpu[i]), GTIMER_SEC, irq); + } + + sysbus_mmio_map(SYS_BUS_DEVICE(&s->rpu_gic), 0, GIC_BASE_ADDR); + } + if (!s->boot_cpu_ptr) { error_setg(errp, "ZynqMP Boot cpu %s not found", boot_cpu); return; } - for (i = 0; i < GIC_NUM_SPI_INTR; i++) { - gic_spi[i] = qdev_get_gpio_in(DEVICE(&s->gic), i); + for (i = 0; i < XLNX_ZYNQMP_GIC_NUM_SPI_INTR; i++) { + if (num_rpus) { + DeviceState *splitter = DEVICE(&s->splitter[i]); + qdev_prop_set_uint16(splitter, 
"num-lines", 2); + qdev_realize(splitter, NULL, &error_abort); + gic_spi[i] = qdev_get_gpio_in(splitter, 0); + qdev_connect_gpio_out(splitter, 0, + qdev_get_gpio_in(DEVICE(&s->gic), i)); + qdev_connect_gpio_out(splitter, 1, + qdev_get_gpio_in(DEVICE(&s->rpu_gic), i)); + } else { + gic_spi[i] = qdev_get_gpio_in(DEVICE(&s->gic), i); + } } for (i = 0; i < XLNX_ZYNQMP_NUM_GEMS; i++) { diff --git a/hw/audio/ac97.c b/hw/audio/ac97.c index eb7a847080da9..828333b66af40 100644 --- a/hw/audio/ac97.c +++ b/hw/audio/ac97.c @@ -26,6 +26,7 @@ #include "qemu/module.h" #include "system/dma.h" #include "qom/object.h" +#include "qemu/error-report.h" #include "ac97.h" #define SOFT_VOLUME @@ -141,11 +142,12 @@ enum { BUP_LAST = 2 }; -#ifdef DEBUG_AC97 -#define dolog(...) AUD_log("ac97", __VA_ARGS__) -#else -#define dolog(...) -#endif +#define DEBUG_AC97 0 +#define dolog(fmt, ...) do { \ + if (DEBUG_AC97) { \ + error_report("ac97: " fmt, ##__VA_ARGS__); \ + } \ + } while (0) #define MKREGS(prefix, start) \ enum { \ @@ -190,7 +192,7 @@ static void fetch_bd(AC97LinkState *s, AC97BusMasterRegs *r) r->bd.addr = le32_to_cpu(*(uint32_t *) &b[0]) & ~3; r->bd.ctl_len = le32_to_cpu(*(uint32_t *) &b[4]); r->picb = r->bd.ctl_len & 0xffff; - dolog("bd %2d addr=0x%x ctl=0x%06x len=0x%x(%d bytes)\n", + dolog("bd %2d addr=0x%x ctl=0x%06x len=0x%x(%d bytes)", r->civ, r->bd.addr, r->bd.ctl_len >> 16, r->bd.ctl_len & 0xffff, (r->bd.ctl_len & 0xffff) << 1); } @@ -222,7 +224,7 @@ static void update_sr(AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) r->sr = new_sr; - dolog("IOC%d LVB%d sr=0x%x event=%d level=%d\n", + dolog("IOC%d LVB%d sr=0x%x event=%d level=%d", r->sr & SR_BCIS, r->sr & SR_LVBCI, r->sr, event, level); if (!event) { @@ -231,11 +233,11 @@ static void update_sr(AC97LinkState *s, AC97BusMasterRegs *r, uint32_t new_sr) if (level) { s->glob_sta |= masks[r - s->bm_regs]; - dolog("set irq level=1\n"); + dolog("set irq level=1"); pci_irq_assert(&s->dev); } else { s->glob_sta &= ~masks[r - 
s->bm_regs]; - dolog("set irq level=0\n"); + dolog("set irq level=0"); pci_irq_deassert(&s->dev); } } @@ -256,14 +258,14 @@ static void voice_set_active(AC97LinkState *s, int bm_index, int on) break; default: - AUD_log("ac97", "invalid bm_index(%d) in voice_set_active", bm_index); + error_report("ac97: invalid bm_index(%d) in voice_set_active", bm_index); break; } } static void reset_bm_regs(AC97LinkState *s, AC97BusMasterRegs *r) { - dolog("reset_bm_regs\n"); + dolog("reset_bm_regs"); r->bdbar = 0; r->civ = 0; r->lvi = 0; @@ -281,7 +283,7 @@ static void reset_bm_regs(AC97LinkState *s, AC97BusMasterRegs *r) static void mixer_store(AC97LinkState *s, uint32_t i, uint16_t v) { if (i + 2 > sizeof(s->mixer_data)) { - dolog("mixer_store: index %d out of bounds %zd\n", + dolog("mixer_store: index %d out of bounds %zd", i, sizeof(s->mixer_data)); return; } @@ -295,7 +297,7 @@ static uint16_t mixer_load(AC97LinkState *s, uint32_t i) uint16_t val = 0xffff; if (i + 2 > sizeof(s->mixer_data)) { - dolog("mixer_load: index %d out of bounds %zd\n", + dolog("mixer_load: index %d out of bounds %zd", i, sizeof(s->mixer_data)); } else { val = s->mixer_data[i + 0] | (s->mixer_data[i + 1] << 8); @@ -460,7 +462,7 @@ static void mixer_reset(AC97LinkState *s) { uint8_t active[LAST_INDEX]; - dolog("mixer_reset\n"); + dolog("mixer_reset"); memset(s->mixer_data, 0, sizeof(s->mixer_data)); memset(active, 0, sizeof(active)); mixer_store(s, AC97_Reset, 0x0000); /* 6940 */ @@ -508,7 +510,7 @@ static void mixer_reset(AC97LinkState *s) static uint32_t nam_readb(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; - dolog("U nam readb 0x%x\n", addr); + dolog("U nam readb 0x%x", addr); s->cas = 0; return ~0U; } @@ -523,7 +525,7 @@ static uint32_t nam_readw(void *opaque, uint32_t addr) static uint32_t nam_readl(void *opaque, uint32_t addr) { AC97LinkState *s = opaque; - dolog("U nam readl 0x%x\n", addr); + dolog("U nam readl 0x%x", addr); s->cas = 0; return ~0U; } @@ -535,7 +537,7 @@ static 
uint32_t nam_readl(void *opaque, uint32_t addr) static void nam_writeb(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; - dolog("U nam writeb 0x%x <- 0x%x\n", addr, val); + dolog("U nam writeb 0x%x <- 0x%x", addr, val); s->cas = 0; } @@ -563,10 +565,10 @@ static void nam_writew(void *opaque, uint32_t addr, uint32_t val) break; case AC97_Vendor_ID1: case AC97_Vendor_ID2: - dolog("Attempt to write vendor ID to 0x%x\n", val); + dolog("Attempt to write vendor ID to 0x%x", val); break; case AC97_Extended_Audio_ID: - dolog("Attempt to write extended audio ID to 0x%x\n", val); + dolog("Attempt to write extended audio ID to 0x%x", val); break; case AC97_Extended_Audio_Ctrl_Stat: if (!(val & EACS_VRA)) { @@ -579,36 +581,36 @@ static void nam_writew(void *opaque, uint32_t addr, uint32_t val) mixer_store(s, AC97_MIC_ADC_Rate, 0xbb80); open_voice(s, MC_INDEX, 48000); } - dolog("Setting extended audio control to 0x%x\n", val); + dolog("Setting extended audio control to 0x%x", val); mixer_store(s, AC97_Extended_Audio_Ctrl_Stat, val); break; case AC97_PCM_Front_DAC_Rate: if (mixer_load(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { mixer_store(s, addr, val); - dolog("Set front DAC rate to %d\n", val); + dolog("Set front DAC rate to %d", val); open_voice(s, PO_INDEX, val); } else { - dolog("Attempt to set front DAC rate to %d, but VRA is not set\n", + dolog("Attempt to set front DAC rate to %d, but VRA is not set", val); } break; case AC97_MIC_ADC_Rate: if (mixer_load(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRM) { mixer_store(s, addr, val); - dolog("Set MIC ADC rate to %d\n", val); + dolog("Set MIC ADC rate to %d", val); open_voice(s, MC_INDEX, val); } else { - dolog("Attempt to set MIC ADC rate to %d, but VRM is not set\n", + dolog("Attempt to set MIC ADC rate to %d, but VRM is not set", val); } break; case AC97_PCM_LR_ADC_Rate: if (mixer_load(s, AC97_Extended_Audio_Ctrl_Stat) & EACS_VRA) { mixer_store(s, addr, val); - dolog("Set front LR ADC rate to 
%d\n", val); + dolog("Set front LR ADC rate to %d", val); open_voice(s, PI_INDEX, val); } else { - dolog("Attempt to set LR ADC rate to %d, but VRA is not set\n", + dolog("Attempt to set LR ADC rate to %d, but VRA is not set", val); } break; @@ -630,7 +632,7 @@ static void nam_writew(void *opaque, uint32_t addr, uint32_t val) /* None of the features in these regs are emulated, so they are RO */ break; default: - dolog("U nam writew 0x%x <- 0x%x\n", addr, val); + dolog("U nam writew 0x%x <- 0x%x", addr, val); mixer_store(s, addr, val); break; } @@ -639,7 +641,7 @@ static void nam_writew(void *opaque, uint32_t addr, uint32_t val) static void nam_writel(void *opaque, uint32_t addr, uint32_t val) { AC97LinkState *s = opaque; - dolog("U nam writel 0x%x <- 0x%x\n", addr, val); + dolog("U nam writel 0x%x <- 0x%x", addr, val); s->cas = 0; } @@ -655,7 +657,7 @@ static uint32_t nabm_readb(void *opaque, uint32_t addr) switch (addr) { case CAS: - dolog("CAS %d\n", s->cas); + dolog("CAS %d", s->cas); val = s->cas; s->cas = 1; break; @@ -664,38 +666,38 @@ static uint32_t nabm_readb(void *opaque, uint32_t addr) case MC_CIV: r = &s->bm_regs[GET_BM(addr)]; val = r->civ; - dolog("CIV[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("CIV[%d] -> 0x%x", GET_BM(addr), val); break; case PI_LVI: case PO_LVI: case MC_LVI: r = &s->bm_regs[GET_BM(addr)]; val = r->lvi; - dolog("LVI[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("LVI[%d] -> 0x%x", GET_BM(addr), val); break; case PI_PIV: case PO_PIV: case MC_PIV: r = &s->bm_regs[GET_BM(addr)]; val = r->piv; - dolog("PIV[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("PIV[%d] -> 0x%x", GET_BM(addr), val); break; case PI_CR: case PO_CR: case MC_CR: r = &s->bm_regs[GET_BM(addr)]; val = r->cr; - dolog("CR[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("CR[%d] -> 0x%x", GET_BM(addr), val); break; case PI_SR: case PO_SR: case MC_SR: r = &s->bm_regs[GET_BM(addr)]; val = r->sr & 0xff; - dolog("SRb[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("SRb[%d] -> 0x%x", 
GET_BM(addr), val); break; default: - dolog("U nabm readb 0x%x -> 0x%x\n", addr, val); + dolog("U nabm readb 0x%x -> 0x%x", addr, val); break; } return val; @@ -713,17 +715,17 @@ static uint32_t nabm_readw(void *opaque, uint32_t addr) case MC_SR: r = &s->bm_regs[GET_BM(addr)]; val = r->sr; - dolog("SR[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("SR[%d] -> 0x%x", GET_BM(addr), val); break; case PI_PICB: case PO_PICB: case MC_PICB: r = &s->bm_regs[GET_BM(addr)]; val = r->picb; - dolog("PICB[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("PICB[%d] -> 0x%x", GET_BM(addr), val); break; default: - dolog("U nabm readw 0x%x -> 0x%x\n", addr, val); + dolog("U nabm readw 0x%x -> 0x%x", addr, val); break; } return val; @@ -741,14 +743,14 @@ static uint32_t nabm_readl(void *opaque, uint32_t addr) case MC_BDBAR: r = &s->bm_regs[GET_BM(addr)]; val = r->bdbar; - dolog("BMADDR[%d] -> 0x%x\n", GET_BM(addr), val); + dolog("BMADDR[%d] -> 0x%x", GET_BM(addr), val); break; case PI_CIV: case PO_CIV: case MC_CIV: r = &s->bm_regs[GET_BM(addr)]; val = r->civ | (r->lvi << 8) | (r->sr << 16); - dolog("CIV LVI SR[%d] -> 0x%x, 0x%x, 0x%x\n", GET_BM(addr), + dolog("CIV LVI SR[%d] -> 0x%x, 0x%x, 0x%x", GET_BM(addr), r->civ, r->lvi, r->sr); break; case PI_PICB: @@ -756,19 +758,19 @@ static uint32_t nabm_readl(void *opaque, uint32_t addr) case MC_PICB: r = &s->bm_regs[GET_BM(addr)]; val = r->picb | (r->piv << 16) | (r->cr << 24); - dolog("PICB PIV CR[%d] -> 0x%x 0x%x 0x%x 0x%x\n", GET_BM(addr), + dolog("PICB PIV CR[%d] -> 0x%x 0x%x 0x%x 0x%x", GET_BM(addr), val, r->picb, r->piv, r->cr); break; case GLOB_CNT: val = s->glob_cnt; - dolog("glob_cnt -> 0x%x\n", val); + dolog("glob_cnt -> 0x%x", val); break; case GLOB_STA: val = s->glob_sta | GS_S0CR; - dolog("glob_sta -> 0x%x\n", val); + dolog("glob_sta -> 0x%x", val); break; default: - dolog("U nabm readl 0x%x -> 0x%x\n", addr, val); + dolog("U nabm readl 0x%x -> 0x%x", addr, val); break; } return val; @@ -795,7 +797,7 @@ static void nabm_writeb(void 
*opaque, uint32_t addr, uint32_t val) fetch_bd(s, r); } r->lvi = val % 32; - dolog("LVI[%d] <- 0x%x\n", GET_BM(addr), val); + dolog("LVI[%d] <- 0x%x", GET_BM(addr), val); break; case PI_CR: case PO_CR: @@ -816,7 +818,7 @@ static void nabm_writeb(void *opaque, uint32_t addr, uint32_t val) voice_set_active(s, r - s->bm_regs, 1); } } - dolog("CR[%d] <- 0x%x (cr 0x%x)\n", GET_BM(addr), val, r->cr); + dolog("CR[%d] <- 0x%x (cr 0x%x)", GET_BM(addr), val, r->cr); break; case PI_SR: case PO_SR: @@ -824,10 +826,10 @@ static void nabm_writeb(void *opaque, uint32_t addr, uint32_t val) r = &s->bm_regs[GET_BM(addr)]; r->sr |= val & ~(SR_RO_MASK | SR_WCLEAR_MASK); update_sr(s, r, r->sr & ~(val & SR_WCLEAR_MASK)); - dolog("SR[%d] <- 0x%x (sr 0x%x)\n", GET_BM(addr), val, r->sr); + dolog("SR[%d] <- 0x%x (sr 0x%x)", GET_BM(addr), val, r->sr); break; default: - dolog("U nabm writeb 0x%x <- 0x%x\n", addr, val); + dolog("U nabm writeb 0x%x <- 0x%x", addr, val); break; } } @@ -844,10 +846,10 @@ static void nabm_writew(void *opaque, uint32_t addr, uint32_t val) r = &s->bm_regs[GET_BM(addr)]; r->sr |= val & ~(SR_RO_MASK | SR_WCLEAR_MASK); update_sr(s, r, r->sr & ~(val & SR_WCLEAR_MASK)); - dolog("SR[%d] <- 0x%x (sr 0x%x)\n", GET_BM(addr), val, r->sr); + dolog("SR[%d] <- 0x%x (sr 0x%x)", GET_BM(addr), val, r->sr); break; default: - dolog("U nabm writew 0x%x <- 0x%x\n", addr, val); + dolog("U nabm writew 0x%x <- 0x%x", addr, val); break; } } @@ -863,22 +865,22 @@ static void nabm_writel(void *opaque, uint32_t addr, uint32_t val) case MC_BDBAR: r = &s->bm_regs[GET_BM(addr)]; r->bdbar = val & ~3; - dolog("BDBAR[%d] <- 0x%x (bdbar 0x%x)\n", GET_BM(addr), val, r->bdbar); + dolog("BDBAR[%d] <- 0x%x (bdbar 0x%x)", GET_BM(addr), val, r->bdbar); break; case GLOB_CNT: /* TODO: Handle WR or CR being set (warm/cold reset requests) */ if (!(val & (GC_WR | GC_CR))) { s->glob_cnt = val & GC_VALID_MASK; } - dolog("glob_cnt <- 0x%x (glob_cnt 0x%x)\n", val, s->glob_cnt); + dolog("glob_cnt <- 0x%x (glob_cnt 
0x%x)", val, s->glob_cnt); break; case GLOB_STA: s->glob_sta &= ~(val & GS_WCLEAR_MASK); s->glob_sta |= (val & ~(GS_WCLEAR_MASK | GS_RO_MASK)) & GS_VALID_MASK; - dolog("glob_sta <- 0x%x (glob_sta 0x%x)\n", val, s->glob_sta); + dolog("glob_sta <- 0x%x (glob_sta 0x%x)", val, s->glob_sta); break; default: - dolog("U nabm writel 0x%x <- 0x%x\n", addr, val); + dolog("U nabm writel 0x%x <- 0x%x", addr, val); break; } } @@ -903,7 +905,7 @@ static int write_audio(AC97LinkState *s, AC97BusMasterRegs *r, to_copy = MIN(temp, sizeof(tmpbuf)); pci_dma_read(&s->dev, addr, tmpbuf, to_copy); copied = AUD_write(s->voice_po, tmpbuf, to_copy); - dolog("write_audio max=%x to_copy=%x copied=%x\n", + dolog("write_audio max=%x to_copy=%x copied=%x", max, to_copy, copied); if (!copied) { *stop = 1; @@ -916,7 +918,7 @@ static int write_audio(AC97LinkState *s, AC97BusMasterRegs *r, if (!temp) { if (to_copy < 4) { - dolog("whoops\n"); + dolog("whoops"); s->last_samp = 0; } else { s->last_samp = *(uint32_t *)&tmpbuf[to_copy - 4]; @@ -929,7 +931,7 @@ static int write_audio(AC97LinkState *s, AC97BusMasterRegs *r, static void write_bup(AC97LinkState *s, int elapsed) { - dolog("write_bup\n"); + dolog("write_bup"); if (!(s->bup_flag & BUP_SET)) { if (s->bup_flag & BUP_LAST) { int i; @@ -997,7 +999,7 @@ static void transfer_audio(AC97LinkState *s, int index, int elapsed) int stop = 0; if (s->invalid_freq[index]) { - AUD_log("ac97", "attempt to use voice %d with invalid frequency %d\n", + error_report("ac97: attempt to use voice %d with invalid frequency %d", index, s->invalid_freq[index]); return; } @@ -1017,12 +1019,12 @@ static void transfer_audio(AC97LinkState *s, int index, int elapsed) int temp; if (!r->bd_valid) { - dolog("invalid bd\n"); + dolog("invalid bd"); fetch_bd(s, r); } if (!r->picb) { - dolog("fresh bd %d is empty 0x%x 0x%x\n", + dolog("fresh bd %d is empty 0x%x 0x%x", r->civ, r->bd.addr, r->bd.ctl_len); if (r->civ == r->lvi) { r->sr |= SR_DCH; /* CELV? 
*/ @@ -1059,7 +1061,7 @@ static void transfer_audio(AC97LinkState *s, int index, int elapsed) } if (r->civ == r->lvi) { - dolog("Underrun civ (%d) == lvi (%d)\n", r->civ, r->lvi); + dolog("Underrun civ (%d) == lvi (%d)", r->civ, r->lvi); new_sr |= SR_LVBCI | SR_DCH | SR_CELV; stop = 1; diff --git a/hw/audio/adlib.c b/hw/audio/adlib.c index 1f29a7e319df9..772435f04cd55 100644 --- a/hw/audio/adlib.c +++ b/hw/audio/adlib.c @@ -29,24 +29,24 @@ #include "audio/audio.h" #include "hw/isa/isa.h" #include "hw/qdev-properties.h" +#include "qemu/error-report.h" #include "qom/object.h" -//#define DEBUG +#define DEBUG 0 #define ADLIB_KILL_TIMERS 1 #define ADLIB_DESC "Yamaha YM3812 (OPL2)" -#ifdef DEBUG +#if DEBUG #include "qemu/timer.h" #endif -#define dolog(...) AUD_log ("adlib", __VA_ARGS__) -#ifdef DEBUG -#define ldebug(...) dolog (__VA_ARGS__) -#else -#define ldebug(...) -#endif +#define ldebug(fmt, ...) do { \ + if (DEBUG) { \ + error_report("adlib: " fmt, ##__VA_ARGS__); \ + } \ + } while (0) #include "fmopl.h" #define SHIFT 1 @@ -64,7 +64,7 @@ struct AdlibState { int enabled; int active; int bufpos; -#ifdef DEBUG +#if DEBUG int64_t exp[2]; #endif int16_t *mixbuf; @@ -92,7 +92,7 @@ static void adlib_kill_timers (AdlibState *s) delta = AUD_get_elapsed_usec_out (s->voice, &s->ats); ldebug ( - "delta = %f dexp = %f expired => %d\n", + "delta = %f dexp = %f expired => %d", delta / 1000000.0, s->dexp[i] / 1000000.0, delta >= s->dexp[i] @@ -131,7 +131,7 @@ static void timer_handler (void *opaque, int c, double interval_Sec) { AdlibState *s = opaque; unsigned n = c & 1; -#ifdef DEBUG +#if DEBUG double interval; int64_t exp; #endif @@ -142,7 +142,7 @@ static void timer_handler (void *opaque, int c, double interval_Sec) } s->ticking[n] = 1; -#ifdef DEBUG +#if DEBUG interval = NANOSECONDS_PER_SECOND * interval_Sec; exp = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + interval; s->exp[n] = exp; diff --git a/hw/audio/cs4231a.c b/hw/audio/cs4231a.c index 6dfff202ff924..7931fcfec8171 100644 
--- a/hw/audio/cs4231a.c +++ b/hw/audio/cs4231a.c @@ -30,7 +30,7 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qemu/module.h" -#include "qemu/timer.h" +#include "qemu/error-report.h" #include "qapi/error.h" #include "qom/object.h" @@ -43,21 +43,21 @@ More... */ -/* #define DEBUG */ +#define DEBUG 0 /* #define DEBUG_XLAW */ static struct { int aci_counter; } conf = {1}; -#ifdef DEBUG -#define dolog(...) AUD_log ("cs4231a", __VA_ARGS__) -#else -#define dolog(...) -#endif +#define dolog(fmt, ...) do { \ + if (DEBUG) { \ + error_report("cs4231a: " fmt, ##__VA_ARGS__); \ + } \ + } while (0) -#define lwarn(...) AUD_log ("cs4231a", "warning: " __VA_ARGS__) -#define lerr(...) AUD_log ("cs4231a", "error: " __VA_ARGS__) +#define lwarn(fmt, ...) warn_report("cs4231a: " fmt, ##__VA_ARGS__) +#define lerr(fmt, ...) error_report("cs4231a: " fmt, ##__VA_ARGS__) #define CS_REGS 16 #define CS_DREGS 32 @@ -284,7 +284,7 @@ static void cs_reset_voices (CSState *s, uint32_t val) as.freq = freqs[xtal][(val >> 1) & 7]; if (as.freq == -1) { - lerr ("unsupported frequency (val=%#x)\n", val); + lerr("unsupported frequency (val=0x%x)", val); goto error; } @@ -319,11 +319,11 @@ static void cs_reset_voices (CSState *s, uint32_t val) case 7: case 4: - lerr ("attempt to use reserved format value (%#x)\n", val); + lerr("attempt to use reserved format value (0x%x)", val); goto error; case 5: - lerr ("ADPCM 4 bit IMA compatible format is not supported\n"); + lerr("ADPCM 4 bit IMA compatible format is not supported"); goto error; } @@ -393,7 +393,7 @@ static uint64_t cs_read (void *opaque, hwaddr addr, unsigned size) ret = s->regs[saddr]; break; } - dolog ("read %d:%d -> %d\n", saddr, iaddr, ret); + dolog("read %d:%d -> %d", saddr, iaddr, ret); return ret; } @@ -425,7 +425,7 @@ static void cs_write (void *opaque, hwaddr addr, case RESERVED: case RESERVED_2: case RESERVED_3: - lwarn ("attempt to write %#x to reserved indirect register %d\n", + lwarn("attempt to write 0x%x 
to reserved indirect register %d", val, iaddr); break; @@ -439,7 +439,7 @@ static void cs_write (void *opaque, hwaddr addr, cs_reset_voices (s, val); } else { - lwarn ("[P]MCE(%#x, %#x) is not set, val=%#x\n", + lwarn("[P]MCE(0x%x, 0x%x) is not set, val=0x%x", s->regs[Index_Address], s->dregs[Alternate_Feature_Status], val); @@ -453,7 +453,7 @@ static void cs_write (void *opaque, hwaddr addr, val &= ~(1 << 5); /* D5 is reserved */ s->dregs[iaddr] = val; if (val & PPIO) { - lwarn ("PIO is not supported (%#x)\n", val); + lwarn("PIO is not supported (0x%x)", val); break; } if (val & PEN) { @@ -472,11 +472,11 @@ static void cs_write (void *opaque, hwaddr addr, break; case Error_Status_And_Initialization: - lwarn ("attempt to write to read only register %d\n", iaddr); + lwarn("attempt to write to read only register %d", iaddr); break; case MODE_And_ID: - dolog ("val=%#x\n", val); + dolog("val=0x%x", val); if (val & MODE2) s->dregs[iaddr] |= MODE2; else @@ -485,7 +485,7 @@ static void cs_write (void *opaque, hwaddr addr, case Alternate_Feature_Enable_I: if (val & TE) - lerr ("timer is not yet supported\n"); + lerr("timer is not yet supported"); s->dregs[iaddr] = val; break; @@ -499,7 +499,7 @@ static void cs_write (void *opaque, hwaddr addr, break; case Version_Chip_ID: - lwarn ("write to Version_Chip_ID register %#x\n", val); + lwarn("write to Version_Chip_ID register 0x%x", val); s->dregs[iaddr] = val; break; @@ -507,7 +507,7 @@ static void cs_write (void *opaque, hwaddr addr, s->dregs[iaddr] = val; break; } - dolog ("written value %#x to indirect register %d\n", val, iaddr); + dolog("written value 0x%x to indirect register %d", val, iaddr); break; case Status: @@ -519,7 +519,7 @@ static void cs_write (void *opaque, hwaddr addr, break; case PIO_Data: - lwarn ("attempt to write value %#x to PIO register\n", val); + lwarn("attempt to write value 0x%x to PIO register", val); break; } } diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index a6a32a6348bc0..6b0da0746ecba 
100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -32,7 +32,7 @@ #include "migration/vmstate.h" #include "qemu/cutils.h" #include "qemu/module.h" -#include "system/dma.h" +#include "qemu/error-report.h" #include "qom/object.h" #include "trace.h" @@ -190,7 +190,7 @@ static void print_ctl(uint32_t val) a(CDC_EN); a(SERR_DIS); #undef a - AUD_log("es1370", "ctl - PCLKDIV %d(DAC2 freq %d), freq %d,%s\n", + error_report("es1370: ctl - PCLKDIV %d(DAC2 freq %d), freq %d,%s", (val & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV, DAC2_DIVTOSR((val & CTRL_PCLKDIV) >> CTRL_SH_PCLKDIV), dac1_samplerate[(val & CTRL_WTSRSEL) >> CTRL_SH_WTSRSEL], @@ -226,7 +226,7 @@ static void print_sctl(uint32_t val) } #undef b #undef a - AUD_log("es1370", + error_report("es1370: " "%s p2_end_inc %d, p2_st_inc %d," " r1_fmt %s, p2_fmt %s, p1_fmt %s\n", buf, @@ -238,10 +238,10 @@ static void print_sctl(uint32_t val) } } -#define lwarn(...) \ +#define lwarn(fmt, ...) \ do { \ if (VERBOSE_ES1370) { \ - AUD_log("es1370: warning", __VA_ARGS__); \ + error_report("es1370: " fmt, ##__VA_ARGS__); \ } \ } while (0) @@ -502,10 +502,10 @@ static void es1370_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) break; case ES1370_REG_PHANTOM_FRAMECNT: - lwarn("writing to phantom frame count 0x%" PRIx64 "\n", val); + lwarn("writing to phantom frame count 0x%" PRIx64, val); break; case ES1370_REG_PHANTOM_FRAMEADR: - lwarn("writing to phantom frame address 0x%" PRIx64 "\n", val); + lwarn("writing to phantom frame address 0x%" PRIx64, val); break; case ES1370_REG_ADC_FRAMECNT: @@ -522,7 +522,7 @@ static void es1370_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) break; default: - lwarn("writel 0x%" PRIx64 " <- 0x%" PRIx64 "\n", addr, val); + lwarn("writel 0x%" PRIx64 " <- 0x%" PRIx64, addr, val); break; } } @@ -586,16 +586,16 @@ static uint64_t es1370_read(void *opaque, hwaddr addr, unsigned size) case ES1370_REG_PHANTOM_FRAMECNT: val = ~0U; - lwarn("reading from phantom frame count\n"); + 
lwarn("reading from phantom frame count"); break; case ES1370_REG_PHANTOM_FRAMEADR: val = ~0U; - lwarn("reading from phantom frame address\n"); + lwarn("reading from phantom frame address"); break; default: val = ~0U; - lwarn("readl 0x%" PRIx64 " -> 0x%x\n", addr, val); + lwarn("readl 0x%" PRIx64 " -> 0x%x", addr, val); break; } return val; @@ -677,7 +677,7 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, * when the sample count reaches zero) or 1 for stop mode (set * interrupt and stop recording). */ - AUD_log ("es1370: warning", "non looping mode\n"); + warn_report("es1370: non looping mode"); } else { d->frame_cnt = size; diff --git a/hw/audio/gus.c b/hw/audio/gus.c index c36df0240fe23..91d07e0f81ccb 100644 --- a/hw/audio/gus.c +++ b/hw/audio/gus.c @@ -32,15 +32,16 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "gusemu.h" -#include "gustate.h" +#include "qemu/error-report.h" #include "qom/object.h" -#define dolog(...) AUD_log ("audio", __VA_ARGS__) -#ifdef DEBUG -#define ldebug(...) dolog (__VA_ARGS__) -#else -#define ldebug(...) -#endif +#define DEBUG 0 + +#define ldebug(fmt, ...) 
do { \ + if (DEBUG) { \ + error_report("gus: " fmt, ##__VA_ARGS__); \ + } \ + } while (0) #define TYPE_GUS "gus" OBJECT_DECLARE_SIMPLE_TYPE(GUSState, GUS) @@ -154,14 +155,14 @@ int GUS_irqrequest (GUSEmuState *emu, int hwirq, int n) /* qemu_irq_lower (s->pic); */ qemu_irq_raise (s->pic); s->irqs += n; - ldebug ("irqrequest %d %d %d\n", hwirq, n, s->irqs); + ldebug("irqrequest %d %d %d", hwirq, n, s->irqs); return n; } void GUS_irqclear (GUSEmuState *emu, int hwirq) { GUSState *s = emu->opaque; - ldebug ("irqclear %d %d\n", hwirq, s->irqs); + ldebug("irqclear %d %d", hwirq, s->irqs); qemu_irq_lower (s->pic); s->irqs -= 1; #ifdef IRQ_STORM @@ -175,7 +176,7 @@ void GUS_dmarequest (GUSEmuState *emu) { GUSState *s = emu->opaque; IsaDmaClass *k = ISADMA_GET_CLASS(s->isa_dma); - ldebug ("dma request %d\n", der->gusdma); + ldebug("dma request %d", s->emu.gusdma); k->hold_DREQ(s->isa_dma, s->emu.gusdma); } @@ -186,13 +187,13 @@ static int GUS_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len) QEMU_UNINITIALIZED char tmpbuf[4096]; int pos = dma_pos, mode, left = dma_len - dma_pos; - ldebug ("read DMA %#x %d\n", dma_pos, dma_len); + ldebug("read DMA 0x%x %d", dma_pos, dma_len); mode = k->has_autoinitialization(s->isa_dma, s->emu.gusdma); while (left) { int to_copy = MIN ((size_t) left, sizeof (tmpbuf)); int copied; - ldebug ("left=%d to_copy=%d pos=%d\n", left, to_copy, pos); + ldebug("left=%d to_copy=%d pos=%d", left, to_copy, pos); copied = k->read_memory(s->isa_dma, nchan, tmpbuf, pos, to_copy); gus_dma_transferdata (&s->emu, tmpbuf, copied, left == copied); left -= copied; diff --git a/hw/audio/pcspk.c b/hw/audio/pcspk.c index a419161b5b17e..a719912872acc 100644 --- a/hw/audio/pcspk.c +++ b/hw/audio/pcspk.c @@ -34,6 +34,7 @@ #include "hw/audio/pcspk.h" #include "qapi/error.h" #include "qom/object.h" +#include "trace.h" #define PCSPK_BUF_LEN 1792 #define PCSPK_SAMPLE_RATE 32000 @@ -50,7 +51,7 @@ struct PCSpkState { uint8_t sample_buf[PCSPK_BUF_LEN]; QEMUSoundCard 
card; SWVoiceOut *voice; - void *pit; + PITCommonState *pit; unsigned int pit_count; unsigned int samples; unsigned int play_pos; @@ -60,7 +61,6 @@ struct PCSpkState { }; static const char *s_spk = "pcspk"; -static PCSpkState *pcspk_state; static inline void generate_samples(PCSpkState *s) { @@ -125,7 +125,7 @@ static int pcspk_audio_init(PCSpkState *s) s->voice = AUD_open_out(&s->card, s->voice, s_spk, s, pcspk_callback, &as); if (!s->voice) { - AUD_log(s_spk, "Could not open voice\n"); + error_report("pcspk: Could not open voice"); return -1; } @@ -137,13 +137,18 @@ static uint64_t pcspk_io_read(void *opaque, hwaddr addr, { PCSpkState *s = opaque; PITChannelInfo ch; + uint8_t val; pit_get_channel_info(s->pit, 2, &ch); s->dummy_refresh_clock ^= (1 << 4); - return ch.gate | (s->data_on << 1) | s->dummy_refresh_clock | + val = ch.gate | (s->data_on << 1) | s->dummy_refresh_clock | (ch.out << 5); + + trace_pcspk_io_read(s->iobase, val); + + return val; } static void pcspk_io_write(void *opaque, hwaddr addr, uint64_t val, @@ -152,6 +157,8 @@ static void pcspk_io_write(void *opaque, hwaddr addr, uint64_t val, PCSpkState *s = opaque; const int gate = val & 1; + trace_pcspk_io_write(s->iobase, val); + s->data_on = (val >> 1) & 1; pit_set_gate(s->pit, 2, gate); if (s->voice) { @@ -175,11 +182,6 @@ static void pcspk_initfn(Object *obj) PCSpkState *s = PC_SPEAKER(obj); memory_region_init_io(&s->ioport, OBJECT(s), &pcspk_io_ops, s, "pcspk", 1); - - object_property_add_link(obj, "pit", TYPE_PIT_COMMON, - (Object **)&s->pit, - qdev_prop_allow_set_link_before_realize, - 0); } static void pcspk_realizefn(DeviceState *dev, Error **errp) @@ -187,13 +189,16 @@ static void pcspk_realizefn(DeviceState *dev, Error **errp) ISADevice *isadev = ISA_DEVICE(dev); PCSpkState *s = PC_SPEAKER(dev); + if (!s->pit) { + error_setg(errp, "pcspk: No \"pit\" set or available"); + return; + } + isa_register_ioport(isadev, &s->ioport, s->iobase); if (s->card.state && AUD_register_card(s_spk, 
&s->card, errp)) { pcspk_audio_init(s); } - - pcspk_state = s; } static bool migrate_needed(void *opaque) @@ -219,6 +224,7 @@ static const Property pcspk_properties[] = { DEFINE_AUDIO_PROPERTIES(PCSpkState, card), DEFINE_PROP_UINT32("iobase", PCSpkState, iobase, 0x61), DEFINE_PROP_BOOL("migrate", PCSpkState, migrate, true), + DEFINE_PROP_LINK("pit", PCSpkState, pit, TYPE_PIT_COMMON, PITCommonState *), }; static void pcspk_class_initfn(ObjectClass *klass, const void *data) @@ -229,7 +235,6 @@ static void pcspk_class_initfn(ObjectClass *klass, const void *data) set_bit(DEVICE_CATEGORY_SOUND, dc->categories); dc->vmsd = &vmstate_spk; device_class_set_props(dc, pcspk_properties); - /* Reason: realize sets global pcspk_state */ /* Reason: pit object link */ dc->user_creatable = false; } diff --git a/hw/audio/sb16.c b/hw/audio/sb16.c index bac64118fef91..03c82f2777ec6 100644 --- a/hw/audio/sb16.c +++ b/hw/audio/sb16.c @@ -30,22 +30,21 @@ #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "qemu/timer.h" +#include "qemu/error-report.h" #include "qemu/host-utils.h" #include "qemu/log.h" #include "qemu/module.h" #include "qapi/error.h" #include "qom/object.h" -#define dolog(...) AUD_log ("sb16", __VA_ARGS__) - -/* #define DEBUG */ +#define DEBUG 0 /* #define DEBUG_SB16_MOST */ -#ifdef DEBUG -#define ldebug(...) dolog (__VA_ARGS__) -#else -#define ldebug(...) -#endif +#define ldebug(fmt, ...) do { \ + if (DEBUG) { \ + error_report("sb16: " fmt, ##__VA_ARGS__); \ + } \ + } while (0) static const char e3[] = "COPYRIGHT (C) CREATIVE TECHNOLOGY LTD, 1992."; @@ -157,7 +156,7 @@ static int irq_of_magic (int magic) #if 0 static void log_dsp (SB16State *dsp) { - ldebug ("%s:%s:%d:%s:dmasize=%d:freq=%d:const=%d:speaker=%d\n", + ldebug("%s:%s:%d:%s:dmasize=%d:freq=%d:const=%d:speaker=%d", dsp->fmt_stereo ? "Stereo" : "Mono", dsp->fmt_signed ? 
"Signed" : "Unsigned", dsp->fmt_bits, @@ -182,7 +181,7 @@ static void control (SB16State *s, int hold) IsaDmaClass *k = ISADMA_GET_CLASS(isa_dma); s->dma_running = hold; - ldebug ("hold %d high %d dma %d\n", hold, s->use_hdma, dma); + ldebug("hold %d high %d dma %d", hold, s->use_hdma, dma); if (hold) { k->hold_DREQ(isa_dma, dma); @@ -289,8 +288,8 @@ static void dma_cmd8 (SB16State *s, int mask, int dma_len) " alignment %d\n", s->block_size, s->align + 1); } - ldebug ("freq %d, stereo %d, sign %d, bits %d, " - "dma %d, auto %d, fifo %d, high %d\n", + ldebug("freq %d, stereo %d, sign %d, bits %d, " + "dma %d, auto %d, fifo %d, high %d", s->freq, s->fmt_stereo, s->fmt_signed, s->fmt_bits, s->block_size, s->dma_auto, s->fifo, s->highspeed); @@ -337,8 +336,8 @@ static void dma_cmd (SB16State *s, uint8_t cmd, uint8_t d0, int dma_len) s->block_size <<= s->fmt_stereo; } - ldebug ("freq %d, stereo %d, sign %d, bits %d, " - "dma %d, auto %d, fifo %d, high %d\n", + ldebug("freq %d, stereo %d, sign %d, bits %d, " + "dma %d, auto %d, fifo %d, high %d", s->freq, s->fmt_stereo, s->fmt_signed, s->fmt_bits, s->block_size, s->dma_auto, s->fifo, s->highspeed); @@ -395,7 +394,7 @@ static void dma_cmd (SB16State *s, uint8_t cmd, uint8_t d0, int dma_len) static inline void dsp_out_data (SB16State *s, uint8_t val) { - ldebug ("outdata %#x\n", val); + ldebug("outdata 0x%x", val); if ((size_t) s->out_data_len < sizeof (s->out_data)) { s->out_data[s->out_data_len++] = val; } @@ -407,18 +406,18 @@ static inline uint8_t dsp_get_data (SB16State *s) return s->in2_data[--s->in_index]; } else { - dolog ("buffer underflow\n"); + warn_report("sb16: buffer underflow"); return 0; } } static void command (SB16State *s, uint8_t cmd) { - ldebug ("command %#x\n", cmd); + ldebug("command 0x%x", cmd); if (cmd > 0xaf && cmd < 0xd0) { if (cmd & 8) { - qemu_log_mask(LOG_UNIMP, "ADC not yet supported (command %#x)\n", + qemu_log_mask(LOG_UNIMP, "ADC not yet supported (command 0x%x)\n", cmd); } @@ -427,7 
+426,7 @@ static void command (SB16State *s, uint8_t cmd) case 12: break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%#x wrong bits\n", cmd); + qemu_log_mask(LOG_GUEST_ERROR, "0x%x wrong bits\n", cmd); } s->needed_bytes = 3; } @@ -645,13 +644,13 @@ static void command (SB16State *s, uint8_t cmd) goto warn; default: - qemu_log_mask(LOG_UNIMP, "Unrecognized command %#x\n", cmd); + qemu_log_mask(LOG_UNIMP, "Unrecognized command 0x%x\n", cmd); break; } } if (!s->needed_bytes) { - ldebug ("\n"); + ldebug("!needed_bytes"); } exit: @@ -664,7 +663,7 @@ static void command (SB16State *s, uint8_t cmd) return; warn: - qemu_log_mask(LOG_UNIMP, "warning: command %#x,%d is not truly understood" + qemu_log_mask(LOG_UNIMP, "warning: command 0x%x,%d is not truly understood" " yet\n", cmd, s->needed_bytes); goto exit; @@ -687,7 +686,7 @@ static uint16_t dsp_get_hilo (SB16State *s) static void complete (SB16State *s) { int d0, d1, d2; - ldebug ("complete command %#x, in_index %d, needed_bytes %d\n", + ldebug("complete command 0x%x, in_index %d, needed_bytes %d", s->cmd, s->in_index, s->needed_bytes); if (s->cmd > 0xaf && s->cmd < 0xd0) { @@ -696,11 +695,11 @@ static void complete (SB16State *s) d0 = dsp_get_data (s); if (s->cmd & 8) { - dolog ("ADC params cmd = %#x d0 = %d, d1 = %d, d2 = %d\n", + warn_report("sb16: ADC params cmd = 0x%x d0 = %d, d1 = %d, d2 = %d", s->cmd, d0, d1, d2); } else { - ldebug ("cmd = %#x d0 = %d, d1 = %d, d2 = %d\n", + ldebug("cmd = 0x%x d0 = %d, d1 = %d, d2 = %d", s->cmd, d0, d1, d2); dma_cmd (s, s->cmd, d0, d1 + (d2 << 8)); } @@ -711,13 +710,13 @@ static void complete (SB16State *s) s->csp_mode = dsp_get_data (s); s->csp_reg83r = 0; s->csp_reg83w = 0; - ldebug ("CSP command 0x04: mode=%#x\n", s->csp_mode); + ldebug("CSP command 0x04: mode=0x%x", s->csp_mode); break; case 0x05: s->csp_param = dsp_get_data (s); s->csp_value = dsp_get_data (s); - ldebug ("CSP command 0x05: param=%#x value=%#x\n", + ldebug("CSP command 0x05: param=0x%x value=0x%x", 
s->csp_param, s->csp_value); break; @@ -725,9 +724,9 @@ static void complete (SB16State *s) case 0x0e: d0 = dsp_get_data (s); d1 = dsp_get_data (s); - ldebug ("write CSP register %d <- %#x\n", d1, d0); + ldebug("write CSP register %d <- 0x%x", d1, d0); if (d1 == 0x83) { - ldebug ("0x83[%d] <- %#x\n", s->csp_reg83r, d0); + ldebug("0x83[%d] <- 0x%x", s->csp_reg83r, d0); s->csp_reg83[s->csp_reg83r % 4] = d0; s->csp_reg83r += 1; } @@ -738,10 +737,10 @@ static void complete (SB16State *s) case 0x0f: d0 = dsp_get_data (s); - ldebug ("read CSP register %#x -> %#x, mode=%#x\n", + ldebug("read CSP register 0x%x -> 0x%x, mode=0x%x", d0, s->csp_regs[d0], s->csp_mode); if (d0 == 0x83) { - ldebug ("0x83[%d] -> %#x\n", + ldebug("0x83[%d] -> 0x%x", s->csp_reg83w, s->csp_reg83[s->csp_reg83w % 4]); dsp_out_data (s, s->csp_reg83[s->csp_reg83w % 4]); @@ -754,7 +753,7 @@ static void complete (SB16State *s) case 0x10: d0 = dsp_get_data (s); - dolog ("cmd 0x10 d0=%#x\n", d0); + warn_report("sb16: cmd 0x10 d0=0x%x", d0); break; case 0x14: @@ -763,7 +762,7 @@ static void complete (SB16State *s) case 0x40: s->time_const = dsp_get_data (s); - ldebug ("set time const %d\n", s->time_const); + ldebug("set time const %d", s->time_const); break; case 0x41: @@ -776,12 +775,12 @@ static void complete (SB16State *s) * http://homepages.cae.wisc.edu/~brodskye/sb16doc/sb16doc.html#SamplingRate */ s->freq = restrict_sampling_rate(dsp_get_hilo(s)); - ldebug ("set freq %d\n", s->freq); + ldebug("set freq %d", s->freq); break; case 0x48: s->block_size = dsp_get_lohi (s) + 1; - ldebug ("set dma block len %d\n", s->block_size); + ldebug("set dma block len %d", s->block_size); break; case 0x74: @@ -811,21 +810,21 @@ static void complete (SB16State *s) ); } } - ldebug ("mix silence %d %d %" PRId64 "\n", samples, bytes, ticks); + ldebug("mix silence %d %d %" PRId64, samples, bytes, ticks); } break; case 0xe0: d0 = dsp_get_data (s); s->out_data_len = 0; - ldebug ("E0 data = %#x\n", d0); + ldebug("E0 data = 
0x%x", d0); dsp_out_data (s, ~d0); break; case 0xe2: -#ifdef DEBUG +#if DEBUG d0 = dsp_get_data (s); - dolog ("E2 = %#x\n", d0); + warn_report("sb16: E2 = 0x%x", d0); #endif break; @@ -835,7 +834,7 @@ static void complete (SB16State *s) case 0xf9: d0 = dsp_get_data (s); - ldebug ("command 0xf9 with %#x\n", d0); + ldebug("command 0xf9 with 0x%x", d0); switch (d0) { case 0x0e: dsp_out_data (s, 0xff); @@ -856,13 +855,13 @@ static void complete (SB16State *s) break; default: - qemu_log_mask(LOG_UNIMP, "complete: unrecognized command %#x\n", + qemu_log_mask(LOG_UNIMP, "complete: unrecognized command 0x%x\n", s->cmd); return; } } - ldebug ("\n"); + ldebug(""); s->cmd = -1; } @@ -926,7 +925,7 @@ static void dsp_write(void *opaque, uint32_t nport, uint32_t val) iport = nport - s->port; - ldebug ("write %#x <- %#x\n", nport, val); + ldebug("write 0x%x <- 0x%x", nport, val); switch (iport) { case 0x06: switch (val) { @@ -976,7 +975,7 @@ static void dsp_write(void *opaque, uint32_t nport, uint32_t val) } else { if (s->in_index == sizeof (s->in2_data)) { - dolog ("in data overrun\n"); + warn_report("sb16: in data overrun"); } else { s->in2_data[s->in_index++] = val; @@ -992,7 +991,7 @@ static void dsp_write(void *opaque, uint32_t nport, uint32_t val) break; default: - ldebug ("(nport=%#x, val=%#x)\n", nport, val); + ldebug("(nport=0x%x, val=0x%x)", nport, val); break; } } @@ -1016,7 +1015,7 @@ static uint32_t dsp_read(void *opaque, uint32_t nport) } else { if (s->cmd != -1) { - dolog ("empty output buffer for command %#x\n", + warn_report("sb16: empty output buffer for command 0x%x", s->cmd); } retval = s->last_read_byte; @@ -1029,7 +1028,7 @@ static uint32_t dsp_read(void *opaque, uint32_t nport) break; case 0x0d: /* timer interrupt clear */ - /* dolog ("timer interrupt clear\n"); */ + /* warn_report("sb16: timer interrupt clear"); */ retval = 0; break; @@ -1056,13 +1055,13 @@ static uint32_t dsp_read(void *opaque, uint32_t nport) } if (!ack) { - ldebug ("read %#x -> %#x\n", 
nport, retval); + ldebug("read 0x%x -> 0x%x", nport, retval); } return retval; error: - dolog ("warning: dsp_read %#x error\n", nport); + warn_report("sb16: dsp_read 0x%x error", nport); return 0xff; } @@ -1108,7 +1107,7 @@ static void mixer_write_datab(void *opaque, uint32_t nport, uint32_t val) SB16State *s = opaque; (void) nport; - ldebug ("mixer_write [%#x] <- %#x\n", s->mixer_nreg, val); + ldebug("mixer_write [0x%x] <- 0x%x", s->mixer_nreg, val); switch (s->mixer_nreg) { case 0x00: @@ -1118,7 +1117,7 @@ static void mixer_write_datab(void *opaque, uint32_t nport, uint32_t val) case 0x80: { int irq = irq_of_magic (val); - ldebug ("setting irq to %d (val=%#x)\n", irq, val); + ldebug("setting irq to %d (val=0x%x)", irq, val); if (irq > 0) { s->irq = irq; } @@ -1133,7 +1132,7 @@ static void mixer_write_datab(void *opaque, uint32_t nport, uint32_t val) hdma = ctz32 (val & 0xf0); if (dma != s->dma || hdma != s->hdma) { qemu_log_mask(LOG_GUEST_ERROR, "attempt to change DMA 8bit" - " %d(%d), 16bit %d(%d) (val=%#x)\n", dma, s->dma, + " %d(%d), 16bit %d(%d) (val=0x%x)\n", dma, s->dma, hdma, s->hdma, val); } #if 0 @@ -1145,12 +1144,12 @@ static void mixer_write_datab(void *opaque, uint32_t nport, uint32_t val) case 0x82: qemu_log_mask(LOG_GUEST_ERROR, "attempt to write into IRQ status" - " register (val=%#x)\n", val); + " register (val=0x%x)\n", val); return; default: if (s->mixer_nreg >= 0x80) { - ldebug ("attempt to write mixer[%#x] <- %#x\n", s->mixer_nreg, val); + ldebug("attempt to write mixer[0x%x] <- 0x%x", s->mixer_nreg, val); } break; } @@ -1165,11 +1164,11 @@ static uint32_t mixer_read(void *opaque, uint32_t nport) (void) nport; #ifndef DEBUG_SB16_MOST if (s->mixer_nreg != 0x82) { - ldebug ("mixer_read[%#x] -> %#x\n", + ldebug("mixer_read[0x%x] -> 0x%x", s->mixer_nreg, s->mixer_regs[s->mixer_nreg]); } #else - ldebug ("mixer_read[%#x] -> %#x\n", + ldebug("mixer_read[0x%x] -> 0x%x", s->mixer_nreg, s->mixer_regs[s->mixer_nreg]); #endif return 
s->mixer_regs[s->mixer_nreg]; @@ -1241,7 +1240,7 @@ static int SB_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len) till = s->left_till_irq; #ifdef DEBUG_SB16_MOST - dolog ("pos:%06d %d till:%d len:%d\n", + warn_report("sb16: pos:%06d %d till:%d len:%d", dma_pos, free, till, dma_len); #endif @@ -1265,7 +1264,7 @@ static int SB_read_DMA (void *opaque, int nchan, int dma_pos, int dma_len) } #ifdef DEBUG_SB16_MOST - ldebug ("pos %5d free %5d size %5d till % 5d copy %5d written %5d size %5d\n", + ldebug("pos %5d free %5d size %5d till % 5d copy %5d written %5d size %5d", dma_pos, free, dma_len, s->left_till_irq, copy, written, s->block_size); #endif diff --git a/hw/audio/soundhw.c b/hw/audio/soundhw.c index d18fd9fa05153..63a685563520e 100644 --- a/hw/audio/soundhw.c +++ b/hw/audio/soundhw.c @@ -65,18 +65,18 @@ void deprecated_register_soundhw(const char *name, const char *descr, soundhw_count++; } -void show_valid_soundhw(void) +void audio_print_available_models(void) { struct soundhw *c; if (soundhw_count) { - printf("Valid sound card names (comma separated):\n"); - for (c = soundhw; c->name; ++c) { - printf ("%-11s %s\n", c->name, c->descr); - } + printf("Valid audio device model names:\n"); + for (c = soundhw; c->name; ++c) { + printf("%-11s %s\n", c->name, c->descr); + } } else { - printf("Machine has no user-selectable audio hardware " - "(it may or may not have always-present audio hardware).\n"); + printf("Machine has no user-selectable audio hardware " + "(it may or may not have always-present audio hardware).\n"); } } @@ -88,7 +88,7 @@ void select_soundhw(const char *name, const char *audiodev) struct soundhw *c; if (selected) { - error_report("only one -soundhw option is allowed"); + error_report("only one -audio option is allowed"); exit(1); } @@ -101,8 +101,8 @@ void select_soundhw(const char *name, const char *audiodev) } if (!c->name) { - error_report("Unknown sound card name `%s'", name); - show_valid_soundhw(); + error_report("Unknown audio 
device model `%s'", name); + audio_print_available_models(); exit(1); } } @@ -140,4 +140,3 @@ void soundhw_init(void) c->init_pci(pci_bus, audiodev_id); } } - diff --git a/hw/audio/trace-events b/hw/audio/trace-events index b8ef5727678f6..30f5921545308 100644 --- a/hw/audio/trace-events +++ b/hw/audio/trace-events @@ -23,6 +23,10 @@ hda_audio_format(const char *stream, int chan, const char *fmt, int freq) "st %s hda_audio_adjust(const char *stream, int pos) "st %s, pos %d" hda_audio_overrun(const char *stream) "st %s" +# pcspk.c +pcspk_io_read(uint16_t addr, uint8_t val) "[0x%"PRIx16"] -> 0x%"PRIx8 +pcspk_io_write(uint16_t addr, uint8_t val) "[0x%"PRIx16"] <- 0x%"PRIx8 + #via-ac97.c via_ac97_codec_write(uint8_t addr, uint16_t val) "0x%x <- 0x%x" via_ac97_sgd_fetch(uint32_t curr, uint32_t addr, char stop, char eol, char flag, uint32_t len) "curr=0x%x addr=0x%x %c%c%c len=%d" diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index 9bab2716c1456..64efce48462ea 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -62,11 +62,7 @@ void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status) iov_discard_undo(&req->inhdr_undo); iov_discard_undo(&req->outhdr_undo); virtqueue_push(req->vq, &req->elem, req->in_len); - if (qemu_in_iothread()) { - virtio_notify_irqfd(vdev, req->vq); - } else { - virtio_notify(vdev, req->vq); - } + virtio_notify(vdev, req->vq); } static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, diff --git a/hw/char/max78000_uart.c b/hw/char/max78000_uart.c index 19506d52ef995..c76c0e759b6af 100644 --- a/hw/char/max78000_uart.c +++ b/hw/char/max78000_uart.c @@ -247,6 +247,12 @@ static void max78000_uart_init(Object *obj) sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->mmio); } +static void max78000_uart_finalize(Object *obj) +{ + Max78000UartState *s = MAX78000_UART(obj); + fifo8_destroy(&s->rx_fifo); +} + static void max78000_uart_realize(DeviceState *dev, Error **errp) { Max78000UartState *s = MAX78000_UART(dev); 
@@ -274,6 +280,7 @@ static const TypeInfo max78000_uart_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(Max78000UartState), .instance_init = max78000_uart_init, + .instance_finalize = max78000_uart_finalize, .class_init = max78000_uart_class_init, }; diff --git a/hw/char/serial-pci-multi.c b/hw/char/serial-pci-multi.c index 13df272691a64..34f30fb70b80c 100644 --- a/hw/char/serial-pci-multi.c +++ b/hw/char/serial-pci-multi.c @@ -180,7 +180,8 @@ static void multi_serial_init(Object *o) size_t i, nports = multi_serial_get_port_count(PCI_DEVICE_GET_CLASS(dev)); for (i = 0; i < nports; i++) { - qemu_init_irq(&pms->irqs[i], multi_serial_irq_mux, pms, i); + qemu_init_irq_child(o, "irq[*]", &pms->irqs[i], + multi_serial_irq_mux, pms, i); object_initialize_child(o, "serial[*]", &pms->state[i], TYPE_SERIAL); } } diff --git a/hw/char/sifive_uart.c b/hw/char/sifive_uart.c index 9bc697a67b520..e7357d585a1ae 100644 --- a/hw/char/sifive_uart.c +++ b/hw/char/sifive_uart.c @@ -28,23 +28,18 @@ #define TX_INTERRUPT_TRIGGER_DELAY_NS 100 -/* - * Not yet implemented: - * - * Transmit FIFO using "qemu/fifo8.h" - */ - /* Returns the state of the IP (interrupt pending) register */ -static uint64_t sifive_uart_ip(SiFiveUARTState *s) +static uint32_t sifive_uart_ip(SiFiveUARTState *s) { - uint64_t ret = 0; + uint32_t ret = 0; - uint64_t txcnt = SIFIVE_UART_GET_TXCNT(s->txctrl); - uint64_t rxcnt = SIFIVE_UART_GET_RXCNT(s->rxctrl); + uint32_t txcnt = SIFIVE_UART_GET_TXCNT(s->txctrl); + uint32_t rxcnt = SIFIVE_UART_GET_RXCNT(s->rxctrl); - if (txcnt != 0) { + if (fifo8_num_used(&s->tx_fifo) < txcnt) { ret |= SIFIVE_UART_IP_TXWM; } + if (s->rx_fifo_len > rxcnt) { ret |= SIFIVE_UART_IP_RXWM; } @@ -55,15 +50,14 @@ static uint64_t sifive_uart_ip(SiFiveUARTState *s) static void sifive_uart_update_irq(SiFiveUARTState *s) { int cond = 0; - if ((s->ie & SIFIVE_UART_IE_TXWM) || - ((s->ie & SIFIVE_UART_IE_RXWM) && s->rx_fifo_len)) { + uint32_t ip = sifive_uart_ip(s); + + if (((ip & 
SIFIVE_UART_IP_TXWM) && (s->ie & SIFIVE_UART_IE_TXWM)) || + ((ip & SIFIVE_UART_IP_RXWM) && (s->ie & SIFIVE_UART_IE_RXWM))) { cond = 1; } - if (cond) { - qemu_irq_raise(s->irq); - } else { - qemu_irq_lower(s->irq); - } + + qemu_set_irq(s->irq, cond); } static gboolean sifive_uart_xmit(void *do_not_use, GIOCondition cond, @@ -119,10 +113,12 @@ static void sifive_uart_write_tx_fifo(SiFiveUARTState *s, const uint8_t *buf, if (size > fifo8_num_free(&s->tx_fifo)) { size = fifo8_num_free(&s->tx_fifo); - qemu_log_mask(LOG_GUEST_ERROR, "sifive_uart: TX FIFO overflow"); + qemu_log_mask(LOG_GUEST_ERROR, "sifive_uart: TX FIFO overflow.\n"); } - fifo8_push_all(&s->tx_fifo, buf, size); + if (size > 0) { + fifo8_push_all(&s->tx_fifo, buf, size); + } if (fifo8_is_full(&s->tx_fifo)) { s->txfifo |= SIFIVE_UART_TXFIFO_FULL; diff --git a/hw/char/trace-events b/hw/char/trace-events index 05a33036c1207..9e74be2c14f5d 100644 --- a/hw/char/trace-events +++ b/hw/char/trace-events @@ -58,15 +58,15 @@ imx_serial_write(const char *chrname, uint64_t addr, uint64_t value) "%s:[0x%03" imx_serial_put_data(const char *chrname, uint32_t value) "%s: 0x%" PRIx32 # pl011.c -pl011_irq_state(int level) "irq state %d" -pl011_read(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s" -pl011_read_fifo(unsigned rx_fifo_used, size_t rx_fifo_depth) "RX FIFO read, used %u/%zu" -pl011_write(uint32_t addr, uint32_t value, const char *regname) "addr 0x%03x value 0x%08x reg %s" -pl011_can_receive(uint32_t lcr, unsigned rx_fifo_used, size_t rx_fifo_depth, unsigned rx_fifo_available) "LCR 0x%02x, RX FIFO used %u/%zu, can_receive %u chars" -pl011_fifo_rx_put(uint32_t c, unsigned read_count, size_t rx_fifo_depth) "RX FIFO push char [0x%02x] %d/%zu depth used" +pl011_irq_state(bool level) "irq state %d" +pl011_read(uint64_t addr, uint32_t value, const char *regname) "addr 0x%03" PRIx64 " value 0x%08x reg %s" +pl011_read_fifo(unsigned rx_fifo_used, unsigned rx_fifo_depth) "RX FIFO read, 
used %u/%u" +pl011_write(uint64_t addr, uint32_t value, const char *regname) "addr 0x%03" PRIx64 " value 0x%08x reg %s" +pl011_can_receive(uint32_t lcr, unsigned rx_fifo_used, unsigned rx_fifo_depth, unsigned rx_fifo_available) "LCR 0x%02x, RX FIFO used %u/%u, can_receive %u chars" +pl011_fifo_rx_put(uint32_t c, unsigned read_count, unsigned rx_fifo_depth) "RX FIFO push char [0x%02x] %d/%u depth used" pl011_fifo_rx_full(void) "RX FIFO now full, RXFF set" pl011_baudrate_change(unsigned int baudrate, uint64_t clock, uint32_t ibrd, uint32_t fbrd) "new baudrate %u (clk: %" PRIu64 "hz, ibrd: %" PRIu32 ", fbrd: %" PRIu32 ")" -pl011_receive(int size) "recv %d chars" +pl011_receive(size_t size) "recv %zd chars" # cmsdk-apb-uart.c cmsdk_apb_uart_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK APB UART read: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index 39e674aca2154..8c306c89e4530 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -67,27 +67,16 @@ CPUState *cpu_create(const char *typename) return cpu; } -/* Resetting the IRQ comes from across the code base so we take the - * BQL here if we need to. cpu_interrupt assumes it is held.*/ void cpu_reset_interrupt(CPUState *cpu, int mask) { - bool need_lock = !bql_locked(); - - if (need_lock) { - bql_lock(); - } - cpu->interrupt_request &= ~mask; - if (need_lock) { - bql_unlock(); - } + qatomic_and(&cpu->interrupt_request, ~mask); } void cpu_exit(CPUState *cpu) { - qatomic_set(&cpu->exit_request, 1); - /* Ensure cpu_exec will see the exit request after TCG has exited. */ - smp_wmb(); - qatomic_set(&cpu->neg.icount_decr.u16.high, -1); + /* Ensure cpu_exec will see the reason why the exit request was set. 
*/ + qatomic_store_release(&cpu->exit_request, true); + qemu_cpu_kick(cpu); } static int cpu_common_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg) @@ -119,11 +108,6 @@ static void cpu_common_reset_hold(Object *obj, ResetType type) { CPUState *cpu = CPU(obj); - if (qemu_loglevel_mask(CPU_LOG_RESET)) { - qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index); - log_cpu_state(cpu, cpu->cc->reset_dump_flags); - } - cpu->interrupt_request = 0; cpu->halted = cpu->start_powered_off; cpu->mem_io_pc = 0; @@ -137,6 +121,21 @@ static void cpu_common_reset_hold(Object *obj, ResetType type) cpu_exec_reset_hold(cpu); } +static void cpu_common_reset_exit(Object *obj, ResetType type) +{ + if (qemu_loglevel_mask(CPU_LOG_RESET)) { + FILE *f = qemu_log_trylock(); + + if (f) { + CPUState *cpu = CPU(obj); + + fprintf(f, "CPU Reset (CPU %d)\n", cpu->cpu_index); + cpu_dump_state(cpu, f, cpu->cc->reset_dump_flags); + qemu_log_unlock(f); + } + } +} + ObjectClass *cpu_class_by_name(const char *typename, const char *cpu_model) { ObjectClass *oc; @@ -295,6 +294,7 @@ void cpu_exec_unrealizefn(CPUState *cpu) * accel_cpu_common_unrealize, which may free fields using call_rcu. 
*/ accel_cpu_common_unrealize(cpu); + cpu_destroy_address_spaces(cpu); } static void cpu_common_initfn(Object *obj) @@ -380,6 +380,7 @@ static void cpu_common_class_init(ObjectClass *klass, const void *data) dc->realize = cpu_common_realizefn; dc->unrealize = cpu_common_unrealizefn; rc->phases.hold = cpu_common_reset_hold; + rc->phases.exit = cpu_common_reset_exit; cpu_class_init_props(dc); /* * Reason: CPUs still need special care by board code: wiring up diff --git a/hw/core/cpu-system.c b/hw/core/cpu-system.c index a975405d3a0a2..f601a083d143b 100644 --- a/hw/core/cpu-system.c +++ b/hw/core/cpu-system.c @@ -23,7 +23,6 @@ #include "system/address-spaces.h" #include "exec/cputlb.h" #include "system/memory.h" -#include "exec/tb-flush.h" #include "qemu/target-info.h" #include "hw/qdev-core.h" #include "hw/qdev-properties.h" @@ -204,17 +203,9 @@ static int cpu_common_post_load(void *opaque, int version_id) * 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the * version_id is increased. */ - cpu->interrupt_request &= ~0x01; + cpu_reset_interrupt(cpu, 0x01); tlb_flush(cpu); - - /* - * loadvm has just updated the content of RAM, bypassing the - * usual mechanisms that ensure we flush TBs for writes to - * memory we've translated code from. So we must flush all TBs, - * which will now be stale. 
- */ - tb_flush(cpu); } return 0; diff --git a/hw/core/irq.c b/hw/core/irq.c index 6dd8d47bd6ea5..0c768f7704e91 100644 --- a/hw/core/irq.c +++ b/hw/core/irq.c @@ -49,6 +49,14 @@ void qemu_init_irq(IRQState *irq, qemu_irq_handler handler, void *opaque, init_irq_fields(irq, handler, opaque, n); } +void qemu_init_irq_child(Object *parent, const char *propname, + IRQState *irq, qemu_irq_handler handler, + void *opaque, int n) +{ + object_initialize_child(parent, propname, irq, TYPE_IRQ); + init_irq_fields(irq, handler, opaque, n); +} + void qemu_init_irqs(IRQState irq[], size_t count, qemu_irq_handler handler, void *opaque) { diff --git a/hw/core/loader.c b/hw/core/loader.c index e7056ba4bd3b7..477661a0255c9 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -295,10 +295,6 @@ static void *load_at(int fd, off_t offset, size_t size) return ptr; } -#ifdef ELF_CLASS -#undef ELF_CLASS -#endif - #define ELF_CLASS ELFCLASS32 #include "elf.h" @@ -1246,7 +1242,7 @@ static void rom_reset(void *unused) * that the instruction cache for that new region is clear, so that the * CPU definitely fetches its instructions from the just written data. */ - cpu_flush_icache_range(rom->addr, rom->datasize); + address_space_flush_icache_range(rom->as, rom->addr, rom->datasize); trace_loader_write_rom(rom->name, rom->addr, rom->datasize, rom->isrom); } diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c index 3a612e2232d90..74a56600be835 100644 --- a/hw/core/machine-hmp-cmds.c +++ b/hw/core/machine-hmp-cmds.c @@ -163,6 +163,24 @@ void hmp_info_kvm(Monitor *mon, const QDict *qdict) qapi_free_KvmInfo(info); } +void hmp_info_accelerators(Monitor *mon, const QDict *qdict) +{ + AcceleratorInfo *info; + AcceleratorList *accel; + + info = qmp_query_accelerators(NULL); + for (accel = info->present; accel; accel = accel->next) { + char trail = accel->next ? 
' ' : '\n'; + if (info->enabled == accel->value) { + monitor_printf(mon, "[%s]%c", Accelerator_str(accel->value), trail); + } else { + monitor_printf(mon, "%s%c", Accelerator_str(accel->value), trail); + } + } + + qapi_free_AcceleratorInfo(info); +} + void hmp_info_uuid(Monitor *mon, const QDict *qdict) { UuidInfo *info; diff --git a/hw/core/machine-qmp-cmds.c b/hw/core/machine-qmp-cmds.c index 6aca1a626e60e..28dfd3e15bd0a 100644 --- a/hw/core/machine-qmp-cmds.c +++ b/hw/core/machine-qmp-cmds.c @@ -20,6 +20,7 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/type-helpers.h" #include "qemu/uuid.h" +#include "qemu/target-info.h" #include "qemu/target-info-qapi.h" #include "qom/qom-qobject.h" #include "system/hostmem.h" @@ -28,6 +29,30 @@ #include "system/runstate.h" #include "system/system.h" #include "hw/s390x/storage-keys.h" +#include + +/* + * QMP query for enabled and present accelerators + */ +AcceleratorInfo *qmp_query_accelerators(Error **errp) +{ + AcceleratorInfo *info = g_malloc0(sizeof(*info)); + AccelClass *current_class = ACCEL_GET_CLASS(current_accel()); + int i; + + for (i = ACCELERATOR__MAX; i-- > 0; ) { + const char *s = Accelerator_str(i); + AccelClass *this_class = accel_find(s); + + if (this_class) { + QAPI_LIST_PREPEND(info->present, i); + if (this_class == current_class) { + info->enabled = i; + } + } + } + return info; +} /* * fast means: we NEVER interrupt vCPU threads to retrieve @@ -70,9 +95,10 @@ CpuInfoFastList *qmp_query_cpus_fast(Error **errp) MachineInfoList *qmp_query_machines(bool has_compat_props, bool compat_props, Error **errp) { - GSList *el, *machines = object_class_get_list(TYPE_MACHINE, false); + GSList *el, *machines; MachineInfoList *mach_list = NULL; + machines = object_class_get_list(target_machine_typename(), false); for (el = machines; el; el = el->next) { MachineClass *mc = el->data; const char *default_cpu_type = machine_class_default_cpu_type(mc); diff --git a/hw/core/machine.c b/hw/core/machine.c index 
8063271a7439c..cd63803000c40 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -35,11 +35,19 @@ #include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-net.h" #include "hw/virtio/virtio-iommu.h" +#include "hw/acpi/generic_event_device.h" #include "audio/audio.h" +GlobalProperty hw_compat_10_1[] = { + { TYPE_ACPI_GED, "x-has-hest-addr", "false" }, +}; +const size_t hw_compat_10_1_len = G_N_ELEMENTS(hw_compat_10_1); + GlobalProperty hw_compat_10_0[] = { { "scsi-hd", "dpofua", "off" }, { "vfio-pci", "x-migration-load-config-after-iter", "off" }, + { "ramfb", "use-legacy-x86-rom", "true"}, + { "vfio-pci-nohotplug", "use-legacy-x86-rom", "true" }, }; const size_t hw_compat_10_0_len = G_N_ELEMENTS(hw_compat_10_0); @@ -640,9 +648,6 @@ static void machine_set_mem(Object *obj, Visitor *v, const char *name, mem->size = mc->default_ram_size; } mem->size = QEMU_ALIGN_UP(mem->size, 8192); - if (mc->fixup_ram_size) { - mem->size = mc->fixup_ram_size(mem->size); - } if ((ram_addr_t)mem->size != mem->size) { error_setg(errp, "ram size %llu exceeds permitted maximum %llu", (unsigned long long)mem->size, @@ -1110,8 +1115,11 @@ static void machine_class_init(ObjectClass *oc, const void *data) * SMBIOS 3.1.0 7.18.5 Memory Device — Extended Size * use max possible value that could be encoded into * 'Extended Size' field (2047Tb). + * + * Unfortunately (current) Windows Server 2025 and earlier do not handle + * 4Tb+ DIMM size. */ - mc->smbios_memory_device_size = 2047 * TiB; + mc->smbios_memory_device_size = 2 * TiB; /* numa node memory size aligned on 8MB by default. 
* On Linux, each node's border has to be 8MB aligned @@ -1548,6 +1556,16 @@ const char *machine_class_default_cpu_type(MachineClass *mc) return mc->default_cpu_type; } +const char *machine_default_cpu_type(const MachineState *ms) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + + if (mc->get_default_cpu_type) { + return mc->get_default_cpu_type(ms); + } + return machine_class_default_cpu_type(mc); +} + static bool is_cpu_type_supported(const MachineState *machine, Error **errp) { MachineClass *mc = MACHINE_GET_CLASS(machine); @@ -1562,6 +1580,8 @@ static bool is_cpu_type_supported(const MachineState *machine, Error **errp) */ if (mc->valid_cpu_types) { assert(mc->valid_cpu_types[0] != NULL); + assert(!mc->get_valid_cpu_types); + for (i = 0; mc->valid_cpu_types[i]; i++) { if (object_class_dynamic_cast(oc, mc->valid_cpu_types[i])) { break; @@ -1588,6 +1608,32 @@ static bool is_cpu_type_supported(const MachineState *machine, Error **errp) error_append_hint(errp, "\n"); } + return false; + } + } else if (mc->get_valid_cpu_types) { + GPtrArray *vct = mc->get_valid_cpu_types(machine); + bool valid = false; + + for (i = 0; i < vct->len; i++) { + if (object_class_dynamic_cast(oc, vct->pdata[i])) { + valid = true; + break; + } + } + + if (!valid) { + g_autofree char *requested = cpu_model_from_type(machine->cpu_type); + + error_setg(errp, "Invalid CPU model: %s", requested); + error_append_hint(errp, "The valid models are: "); + for (i = 0; i < vct->len; i++) { + g_autofree char *model = cpu_model_from_type(vct->pdata[i]); + error_append_hint(errp, "%s%s", + model, i + 1 == vct->len ? 
"\n" : ", "); + } + } + g_ptr_array_free(vct, true); + if (!valid) { return false; } } diff --git a/hw/core/qdev.c b/hw/core/qdev.c index f600226176871..fab42a727059b 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -411,6 +411,35 @@ char *qdev_get_dev_path(DeviceState *dev) return NULL; } +const char *qdev_get_printable_name(DeviceState *vdev) +{ + /* + * Return device ID if explicity set + * (e.g. -device virtio-blk-pci,id=foo) + * This allows users to correlate errors with their custom device + * names. + */ + if (vdev->id) { + return vdev->id; + } + /* + * Fall back to the canonical QOM device path (eg. ID for PCI + * devices). + * This ensures the device is still uniquely and meaningfully + * identified. + */ + const char *path = qdev_get_dev_path(vdev); + if (path) { + return path; + } + + /* + * Final fallback: if all else fails, return a placeholder string. + * This ensures the error message always contains a valid string. + */ + return ""; +} + void qdev_add_unplug_blocker(DeviceState *dev, Error *reason) { dev->unplug_blockers = g_slist_prepend(dev->unplug_blockers, reason); diff --git a/hw/core/register.c b/hw/core/register.c index 8f63d9f227c45..81316d485979d 100644 --- a/hw/core/register.c +++ b/hw/core/register.c @@ -245,10 +245,16 @@ static RegisterInfoArray *register_init_block(DeviceState *owner, size_t data_size_bits) { const char *device_prefix = object_get_typename(OBJECT(owner)); - RegisterInfoArray *r_array = g_new0(RegisterInfoArray, 1); + Object *obj; + RegisterInfoArray *r_array; int data_size = data_size_bits >> 3; int i; + obj = object_new(TYPE_REGISTER_ARRAY); + object_property_add_child(OBJECT(owner), "reg-array[*]", obj); + object_unref(obj); + + r_array = REGISTER_ARRAY(obj); r_array->r = g_new0(RegisterInfo *, num); r_array->num_elements = num; r_array->debug = debug_enabled; @@ -258,9 +264,6 @@ static RegisterInfoArray *register_init_block(DeviceState *owner, int index = rae[i].addr / data_size; RegisterInfo *r = &ri[index]; - /* 
Init the register, this will zero it. */ - object_initialize((void *)r, sizeof(*r), TYPE_REGISTER); - /* Set the properties of the register */ r->data = data + data_size * index; r->data_size = data_size; @@ -270,7 +273,7 @@ static RegisterInfoArray *register_init_block(DeviceState *owner, r_array->r[i] = r; } - memory_region_init_io(&r_array->mem, OBJECT(owner), ops, r_array, + memory_region_init_io(&r_array->mem, OBJECT(r_array), ops, r_array, device_prefix, memory_size); return r_array; @@ -312,31 +315,23 @@ RegisterInfoArray *register_init_block64(DeviceState *owner, data, ops, debug_enabled, memory_size, 64); } -void register_finalize_block(RegisterInfoArray *r_array) +static void register_array_finalize(Object *obj) { - object_unparent(OBJECT(&r_array->mem)); - g_free(r_array->r); - g_free(r_array); -} + RegisterInfoArray *r_array = REGISTER_ARRAY(obj); -static void register_class_init(ObjectClass *oc, const void *data) -{ - DeviceClass *dc = DEVICE_CLASS(oc); - - /* Reason: needs to be wired up to work */ - dc->user_creatable = false; + g_free(r_array->r); } -static const TypeInfo register_info = { - .name = TYPE_REGISTER, - .parent = TYPE_DEVICE, - .class_init = register_class_init, - .instance_size = sizeof(RegisterInfo), +static const TypeInfo register_array_info = { + .name = TYPE_REGISTER_ARRAY, + .parent = TYPE_OBJECT, + .instance_size = sizeof(RegisterInfoArray), + .instance_finalize = register_array_finalize, }; static void register_register_types(void) { - type_register_static(®ister_info); + type_register_static(®ister_array_info); } type_init(register_register_types) diff --git a/hw/core/sysbus-fdt.c b/hw/core/sysbus-fdt.c index c339a27875cbe..59f1d17de112c 100644 --- a/hw/core/sysbus-fdt.c +++ b/hw/core/sysbus-fdt.c @@ -31,11 +31,8 @@ #include "qemu/error-report.h" #include "system/device_tree.h" #include "system/tpm.h" +#include "hw/arm/smmuv3.h" #include "hw/platform-bus.h" -#include "hw/vfio/vfio-platform.h" -#include 
"hw/vfio/vfio-calxeda-xgmac.h" -#include "hw/vfio/vfio-amd-xgbe.h" -#include "hw/vfio/vfio-region.h" #include "hw/display/ramfb.h" #include "hw/uefi/var-service-api.h" #include "hw/arm/fdt.h" @@ -66,380 +63,6 @@ typedef struct HostProperty { bool optional; } HostProperty; -#ifdef CONFIG_LINUX - -/** - * copy_properties_from_host - * - * copies properties listed in an array from host device tree to - * guest device tree. If a non optional property is not found, the - * function asserts. An optional property is ignored if not found - * in the host device tree. - * @props: array of HostProperty to copy - * @nb_props: number of properties in the array - * @host_dt: host device tree blob - * @guest_dt: guest device tree blob - * @node_path: host dt node path where the property is supposed to be - found - * @nodename: guest node name the properties should be added to - */ -static void copy_properties_from_host(HostProperty *props, int nb_props, - void *host_fdt, void *guest_fdt, - char *node_path, char *nodename) -{ - int i, prop_len; - const void *r; - Error *err = NULL; - - for (i = 0; i < nb_props; i++) { - r = qemu_fdt_getprop(host_fdt, node_path, - props[i].name, - &prop_len, - &err); - if (r) { - qemu_fdt_setprop(guest_fdt, nodename, - props[i].name, r, prop_len); - } else { - if (props[i].optional && prop_len == -FDT_ERR_NOTFOUND) { - /* optional property does not exist */ - error_free(err); - } else { - error_report_err(err); - } - if (!props[i].optional) { - /* mandatory property not found: bail out */ - exit(1); - } - err = NULL; - } - } -} - -/* clock properties whose values are copied/pasted from host */ -static HostProperty clock_copied_properties[] = { - {"compatible", false}, - {"#clock-cells", false}, - {"clock-frequency", true}, - {"clock-output-names", true}, -}; - -/** - * fdt_build_clock_node - * - * Build a guest clock node, used as a dependency from a passthrough'ed - * device. Most information are retrieved from the host clock node. 
- * Also check the host clock is a fixed one. - * - * @host_fdt: host device tree blob from which info are retrieved - * @guest_fdt: guest device tree blob where the clock node is added - * @host_phandle: phandle of the clock in host device tree - * @guest_phandle: phandle to assign to the guest node - */ -static void fdt_build_clock_node(void *host_fdt, void *guest_fdt, - uint32_t host_phandle, - uint32_t guest_phandle) -{ - char *node_path = NULL; - char *nodename; - const void *r; - int ret, node_offset, prop_len, path_len = 16; - - node_offset = fdt_node_offset_by_phandle(host_fdt, host_phandle); - if (node_offset <= 0) { - error_report("not able to locate clock handle %d in host device tree", - host_phandle); - exit(1); - } - node_path = g_malloc(path_len); - while ((ret = fdt_get_path(host_fdt, node_offset, node_path, path_len)) - == -FDT_ERR_NOSPACE) { - path_len += 16; - node_path = g_realloc(node_path, path_len); - } - if (ret < 0) { - error_report("not able to retrieve node path for clock handle %d", - host_phandle); - exit(1); - } - - r = qemu_fdt_getprop(host_fdt, node_path, "compatible", &prop_len, - &error_fatal); - if (strcmp(r, "fixed-clock")) { - error_report("clock handle %d is not a fixed clock", host_phandle); - exit(1); - } - - nodename = strrchr(node_path, '/'); - qemu_fdt_add_subnode(guest_fdt, nodename); - - copy_properties_from_host(clock_copied_properties, - ARRAY_SIZE(clock_copied_properties), - host_fdt, guest_fdt, - node_path, nodename); - - qemu_fdt_setprop_cell(guest_fdt, nodename, "phandle", guest_phandle); - - g_free(node_path); -} - -/** - * sysfs_to_dt_name: convert the name found in sysfs into the node name - * for instance e0900000.xgmac is converted into xgmac@e0900000 - * @sysfs_name: directory name in sysfs - * - * returns the device tree name upon success or NULL in case the sysfs name - * does not match the expected format - */ -static char *sysfs_to_dt_name(const char *sysfs_name) -{ - gchar **substrings = 
g_strsplit(sysfs_name, ".", 2); - char *dt_name = NULL; - - if (!substrings || !substrings[0] || !substrings[1]) { - goto out; - } - dt_name = g_strdup_printf("%s@%s", substrings[1], substrings[0]); -out: - g_strfreev(substrings); - return dt_name; -} - -/* Device Specific Code */ - -/** - * add_calxeda_midway_xgmac_fdt_node - * - * Generates a simple node with following properties: - * compatible string, regs, interrupts, dma-coherent - */ -static int add_calxeda_midway_xgmac_fdt_node(SysBusDevice *sbdev, void *opaque) -{ - PlatformBusFDTData *data = opaque; - PlatformBusDevice *pbus = data->pbus; - void *fdt = data->fdt; - const char *parent_node = data->pbus_node_name; - int compat_str_len, i; - char *nodename; - uint32_t *irq_attr, *reg_attr; - uint64_t mmio_base, irq_number; - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); - VFIODevice *vbasedev = &vdev->vbasedev; - - mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); - nodename = g_strdup_printf("%s/%s@%" PRIx64, parent_node, - vbasedev->name, mmio_base); - qemu_fdt_add_subnode(fdt, nodename); - - compat_str_len = strlen(vdev->compat) + 1; - qemu_fdt_setprop(fdt, nodename, "compatible", - vdev->compat, compat_str_len); - - qemu_fdt_setprop(fdt, nodename, "dma-coherent", "", 0); - - reg_attr = g_new(uint32_t, vbasedev->num_regions * 2); - for (i = 0; i < vbasedev->num_regions; i++) { - mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, i); - reg_attr[2 * i] = cpu_to_be32(mmio_base); - reg_attr[2 * i + 1] = cpu_to_be32( - memory_region_size(vdev->regions[i]->mem)); - } - qemu_fdt_setprop(fdt, nodename, "reg", reg_attr, - vbasedev->num_regions * 2 * sizeof(uint32_t)); - - irq_attr = g_new(uint32_t, vbasedev->num_irqs * 3); - for (i = 0; i < vbasedev->num_irqs; i++) { - irq_number = platform_bus_get_irqn(pbus, sbdev , i) - + data->irq_start; - irq_attr[3 * i] = cpu_to_be32(GIC_FDT_IRQ_TYPE_SPI); - irq_attr[3 * i + 1] = cpu_to_be32(irq_number); - irq_attr[3 * i + 2] = 
cpu_to_be32(GIC_FDT_IRQ_FLAGS_LEVEL_HI); - } - qemu_fdt_setprop(fdt, nodename, "interrupts", - irq_attr, vbasedev->num_irqs * 3 * sizeof(uint32_t)); - g_free(irq_attr); - g_free(reg_attr); - g_free(nodename); - return 0; -} - -/* AMD xgbe properties whose values are copied/pasted from host */ -static HostProperty amd_xgbe_copied_properties[] = { - {"compatible", false}, - {"dma-coherent", true}, - {"amd,per-channel-interrupt", true}, - {"phy-mode", false}, - {"mac-address", true}, - {"amd,speed-set", false}, - {"amd,serdes-blwc", true}, - {"amd,serdes-cdr-rate", true}, - {"amd,serdes-pq-skew", true}, - {"amd,serdes-tx-amp", true}, - {"amd,serdes-dfe-tap-config", true}, - {"amd,serdes-dfe-tap-enable", true}, - {"clock-names", false}, -}; - -/** - * add_amd_xgbe_fdt_node - * - * Generates the combined xgbe/phy node following kernel >=4.2 - * binding documentation: - * Documentation/devicetree/bindings/net/amd-xgbe.txt: - * Also 2 clock nodes are created (dma and ptp) - * - * Asserts in case of error - */ -static int add_amd_xgbe_fdt_node(SysBusDevice *sbdev, void *opaque) -{ - PlatformBusFDTData *data = opaque; - PlatformBusDevice *pbus = data->pbus; - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); - VFIODevice *vbasedev = &vdev->vbasedev; - VFIOINTp *intp; - const char *parent_node = data->pbus_node_name; - char **node_path, *nodename, *dt_name; - void *guest_fdt = data->fdt, *host_fdt; - const void *r; - int i, prop_len; - uint32_t *irq_attr, *reg_attr; - const uint32_t *host_clock_phandles; - uint64_t mmio_base, irq_number; - uint32_t guest_clock_phandles[2]; - - host_fdt = load_device_tree_from_sysfs(); - - dt_name = sysfs_to_dt_name(vbasedev->name); - if (!dt_name) { - error_report("%s incorrect sysfs device name %s", - __func__, vbasedev->name); - exit(1); - } - node_path = qemu_fdt_node_path(host_fdt, dt_name, vdev->compat, - &error_fatal); - if (!node_path || !node_path[0]) { - error_report("%s unable to retrieve node path for %s/%s", - __func__, 
dt_name, vdev->compat); - exit(1); - } - - if (node_path[1]) { - error_report("%s more than one node matching %s/%s!", - __func__, dt_name, vdev->compat); - exit(1); - } - - g_free(dt_name); - - if (vbasedev->num_regions != 5) { - error_report("%s Does the host dt node combine XGBE/PHY?", __func__); - exit(1); - } - - /* generate nodes for DMA_CLK and PTP_CLK */ - r = qemu_fdt_getprop(host_fdt, node_path[0], "clocks", - &prop_len, &error_fatal); - if (prop_len != 8) { - error_report("%s clocks property should contain 2 handles", __func__); - exit(1); - } - host_clock_phandles = r; - guest_clock_phandles[0] = qemu_fdt_alloc_phandle(guest_fdt); - guest_clock_phandles[1] = qemu_fdt_alloc_phandle(guest_fdt); - - /** - * clock handles fetched from host dt are in be32 layout whereas - * rest of the code uses cpu layout. Also guest clock handles are - * in cpu layout. - */ - fdt_build_clock_node(host_fdt, guest_fdt, - be32_to_cpu(host_clock_phandles[0]), - guest_clock_phandles[0]); - - fdt_build_clock_node(host_fdt, guest_fdt, - be32_to_cpu(host_clock_phandles[1]), - guest_clock_phandles[1]); - - /* combined XGBE/PHY node */ - mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); - nodename = g_strdup_printf("%s/%s@%" PRIx64, parent_node, - vbasedev->name, mmio_base); - qemu_fdt_add_subnode(guest_fdt, nodename); - - copy_properties_from_host(amd_xgbe_copied_properties, - ARRAY_SIZE(amd_xgbe_copied_properties), - host_fdt, guest_fdt, - node_path[0], nodename); - - qemu_fdt_setprop_cells(guest_fdt, nodename, "clocks", - guest_clock_phandles[0], - guest_clock_phandles[1]); - - reg_attr = g_new(uint32_t, vbasedev->num_regions * 2); - for (i = 0; i < vbasedev->num_regions; i++) { - mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, i); - reg_attr[2 * i] = cpu_to_be32(mmio_base); - reg_attr[2 * i + 1] = cpu_to_be32( - memory_region_size(vdev->regions[i]->mem)); - } - qemu_fdt_setprop(guest_fdt, nodename, "reg", reg_attr, - vbasedev->num_regions * 2 * sizeof(uint32_t)); - - 
irq_attr = g_new(uint32_t, vbasedev->num_irqs * 3); - for (i = 0; i < vbasedev->num_irqs; i++) { - irq_number = platform_bus_get_irqn(pbus, sbdev , i) - + data->irq_start; - irq_attr[3 * i] = cpu_to_be32(GIC_FDT_IRQ_TYPE_SPI); - irq_attr[3 * i + 1] = cpu_to_be32(irq_number); - /* - * General device interrupt and PCS auto-negotiation interrupts are - * level-sensitive while the 4 per-channel interrupts are edge - * sensitive - */ - QLIST_FOREACH(intp, &vdev->intp_list, next) { - if (intp->pin == i) { - break; - } - } - if (intp->flags & VFIO_IRQ_INFO_AUTOMASKED) { - irq_attr[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_LEVEL_HI); - } else { - irq_attr[3 * i + 2] = cpu_to_be32(GIC_FDT_IRQ_FLAGS_EDGE_LO_HI); - } - } - qemu_fdt_setprop(guest_fdt, nodename, "interrupts", - irq_attr, vbasedev->num_irqs * 3 * sizeof(uint32_t)); - - g_free(host_fdt); - g_strfreev(node_path); - g_free(irq_attr); - g_free(reg_attr); - g_free(nodename); - return 0; -} - -/* DT compatible matching */ -static bool vfio_platform_match(SysBusDevice *sbdev, - const BindingEntry *entry) -{ - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); - const char *compat; - unsigned int n; - - for (n = vdev->num_compat, compat = vdev->compat; n > 0; - n--, compat += strlen(compat) + 1) { - if (!strcmp(entry->compat, compat)) { - return true; - } - } - - return false; -} - -#define VFIO_PLATFORM_BINDING(compat, add_fn) \ - {TYPE_VFIO_PLATFORM, (compat), (add_fn), vfio_platform_match} - -#endif /* CONFIG_LINUX */ - #ifdef CONFIG_TPM /* * add_tpm_tis_fdt_node: Create a DT node for TPM TIS @@ -510,14 +133,11 @@ static bool type_match(SysBusDevice *sbdev, const BindingEntry *entry) /* list of supported dynamic sysbus bindings */ static const BindingEntry bindings[] = { -#ifdef CONFIG_LINUX - TYPE_BINDING(TYPE_VFIO_CALXEDA_XGMAC, add_calxeda_midway_xgmac_fdt_node), - TYPE_BINDING(TYPE_VFIO_AMD_XGBE, add_amd_xgbe_fdt_node), - VFIO_PLATFORM_BINDING("amd,xgbe-seattle-v1a", add_amd_xgbe_fdt_node), -#endif #ifdef 
CONFIG_TPM TYPE_BINDING(TYPE_TPM_TIS_SYSBUS, add_tpm_tis_fdt_node), #endif + /* No generic DT support for smmuv3 dev. Support added for arm virt only */ + TYPE_BINDING(TYPE_ARM_SMMUV3, no_fdt_node), TYPE_BINDING(TYPE_RAMFB_DEVICE, no_fdt_node), TYPE_BINDING(TYPE_UEFI_VARS_SYSBUS, add_uefi_vars_node), TYPE_BINDING("", NULL), /* last element */ diff --git a/hw/cxl/cxl-host.c b/hw/cxl/cxl-host.c index 5c2ce25a19cbc..0d891c651dfe8 100644 --- a/hw/cxl/cxl-host.c +++ b/hw/cxl/cxl-host.c @@ -72,6 +72,7 @@ static void cxl_fixed_memory_window_config(CXLFixedMemoryWindowOptions *object, static int cxl_fmws_link(Object *obj, void *opaque) { + Error **errp = opaque; struct CXLFixedWindow *fw; int i; @@ -87,9 +88,9 @@ static int cxl_fmws_link(Object *obj, void *opaque) o = object_resolve_path_type(fw->targets[i], TYPE_PXB_CXL_DEV, &ambig); if (!o) { - error_setg(&error_fatal, "Could not resolve CXLFM target %s", + error_setg(errp, "Could not resolve CXLFM target %s", fw->targets[i]); - return 1; + return -1; } fw->target_hbs[i] = PXB_CXL_DEV(o); } @@ -99,7 +100,7 @@ static int cxl_fmws_link(Object *obj, void *opaque) void cxl_fmws_link_targets(Error **errp) { /* Order doesn't matter for this, so no need to build list */ - object_child_foreach_recursive(object_get_root(), cxl_fmws_link, NULL); + object_child_foreach_recursive(object_get_root(), cxl_fmws_link, errp); } static bool cxl_hdm_find_target(uint32_t *cache_mem, hwaddr addr, diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c index 820e67ac8bb49..1bb2ee45a0157 100644 --- a/hw/display/bcm2835_fb.c +++ b/hw/display/bcm2835_fb.c @@ -27,6 +27,7 @@ #include "hw/display/bcm2835_fb.h" #include "hw/hw.h" #include "hw/irq.h" +#include "ui/console.h" #include "framebuffer.h" #include "ui/pixel_ops.h" #include "hw/misc/bcm2835_mbox_defs.h" diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c index 4485aa335bbce..b4296e8a33ec2 100644 --- a/hw/display/framebuffer.c +++ b/hw/display/framebuffer.c @@ -95,9 +95,9 
@@ void framebuffer_update_display( } first = -1; - addr += i * src_width; - src += i * src_width; - dest += i * dest_row_pitch; + addr += (uint64_t)i * src_width; + src += (uint64_t)i * src_width; + dest += (uint64_t)i * dest_row_pitch; snap = memory_region_snapshot_and_clear_dirty(mem, addr, src_width * rows, DIRTY_MEMORY_VGA); diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c index eda6d3de37c3a..c6a9ac1da104f 100644 --- a/hw/display/qxl-render.c +++ b/hw/display/qxl-render.c @@ -222,6 +222,7 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, uint32_t max_chunks = 32; size_t offset = 0; size_t bytes; + QXLPHYSICAL next_chunk_phys = 0; for (;;) { bytes = MIN(size - offset, chunk->data_size); @@ -230,7 +231,15 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, if (offset == size) { return; } - chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id, + next_chunk_phys = chunk->next_chunk; + /* fist time, only get the next chunk's data size */ + chunk = qxl_phys2virt(qxl, next_chunk_phys, group_id, + sizeof(QXLDataChunk)); + if (!chunk) { + return; + } + /* second time, check data size and get data */ + chunk = qxl_phys2virt(qxl, next_chunk_phys, group_id, sizeof(QXLDataChunk) + chunk->data_size); if (!chunk) { return; diff --git a/hw/display/ramfb-standalone.c b/hw/display/ramfb-standalone.c index 08f2d5db4eca4..72b2071aed013 100644 --- a/hw/display/ramfb-standalone.c +++ b/hw/display/ramfb-standalone.c @@ -17,6 +17,7 @@ struct RAMFBStandaloneState { QemuConsole *con; RAMFBState *state; bool migrate; + bool use_legacy_x86_rom; }; static void display_update_wrapper(void *dev) @@ -39,7 +40,7 @@ static void ramfb_realizefn(DeviceState *dev, Error **errp) RAMFBStandaloneState *ramfb = RAMFB(dev); ramfb->con = graphic_console_init(dev, 0, &wrapper_ops, dev); - ramfb->state = ramfb_setup(errp); + ramfb->state = ramfb_setup(ramfb->use_legacy_x86_rom, errp); } static bool migrate_needed(void *opaque) @@ -62,6 
+63,8 @@ static const VMStateDescription ramfb_dev_vmstate = { static const Property ramfb_properties[] = { DEFINE_PROP_BOOL("x-migrate", RAMFBStandaloneState, migrate, true), + DEFINE_PROP_BOOL("use-legacy-x86-rom", RAMFBStandaloneState, + use_legacy_x86_rom, false), }; static void ramfb_class_initfn(ObjectClass *klass, const void *data) diff --git a/hw/display/ramfb-stubs.c b/hw/display/ramfb-stubs.c index cf64733b10cda..b83551357bb3f 100644 --- a/hw/display/ramfb-stubs.c +++ b/hw/display/ramfb-stubs.c @@ -8,7 +8,7 @@ void ramfb_display_update(QemuConsole *con, RAMFBState *s) { } -RAMFBState *ramfb_setup(Error **errp) +RAMFBState *ramfb_setup(bool romfile, Error **errp) { error_setg(errp, "ramfb support not available"); return NULL; diff --git a/hw/display/ramfb.c b/hw/display/ramfb.c index 8c0f907673da4..9a17d97d076fb 100644 --- a/hw/display/ramfb.c +++ b/hw/display/ramfb.c @@ -135,7 +135,7 @@ const VMStateDescription ramfb_vmstate = { } }; -RAMFBState *ramfb_setup(Error **errp) +RAMFBState *ramfb_setup(bool romfile, Error **errp) { FWCfgState *fw_cfg = fw_cfg_find(); RAMFBState *s; @@ -147,7 +147,9 @@ RAMFBState *ramfb_setup(Error **errp) s = g_new0(RAMFBState, 1); - rom_add_vga("vgabios-ramfb.bin"); + if (romfile) { + rom_add_vga("vgabios-ramfb.bin"); + } fw_cfg_add_file_callback(fw_cfg, "etc/ramfb", NULL, ramfb_fw_cfg_write, s, &s->cfg, sizeof(s->cfg), false); diff --git a/hw/display/sm501.c b/hw/display/sm501.c index 6d2f18684c366..bc091b3c9fb1e 100644 --- a/hw/display/sm501.c +++ b/hw/display/sm501.c @@ -26,6 +26,7 @@ #include "qemu/osdep.h" #include "qemu/units.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "qemu/log.h" #include "qemu/module.h" #include "hw/usb/hcd-ohci.h" diff --git a/hw/display/trace-events b/hw/display/trace-events index 52786e6e1840b..e323a82cff24b 100644 --- a/hw/display/trace-events +++ b/hw/display/trace-events @@ -38,6 +38,8 @@ virtio_gpu_cmd_set_scanout_blob(uint32_t id, uint32_t res, uint32_t w, uint32_t 
virtio_gpu_cmd_res_create_2d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h) "res 0x%x, fmt 0x%x, w %d, h %d" virtio_gpu_cmd_res_create_3d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h, uint32_t d) "res 0x%x, fmt 0x%x, w %d, h %d, d %d" virtio_gpu_cmd_res_create_blob(uint32_t res, uint64_t size) "res 0x%x, size %" PRId64 +virtio_gpu_cmd_res_map_blob(uint32_t res, void *vmr, void *mr) "res 0x%x, vmr %p, mr %p" +virtio_gpu_cmd_res_unmap_blob(uint32_t res, void *mr, bool finish_unmapping) "res 0x%x, mr %p, finish_unmapping %d" virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x" virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x" virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x" diff --git a/hw/display/virtio-gpu-virgl.c b/hw/display/virtio-gpu-virgl.c index 94ddc01f91c6b..07f6355ad62e1 100644 --- a/hw/display/virtio-gpu-virgl.c +++ b/hw/display/virtio-gpu-virgl.c @@ -134,6 +134,8 @@ virtio_gpu_virgl_map_resource_blob(VirtIOGPU *g, res->mr = mr; + trace_virtio_gpu_cmd_res_map_blob(res->base.resource_id, vmr, mr); + return 0; } @@ -153,6 +155,8 @@ virtio_gpu_virgl_unmap_resource_blob(VirtIOGPU *g, vmr = to_hostmem_region(res->mr); + trace_virtio_gpu_cmd_res_unmap_blob(res->base.resource_id, mr, vmr->finish_unmapping); + /* * Perform async unmapping in 3 steps: * diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 0a1a625b0ea6c..3a555125be60a 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -242,6 +242,7 @@ static uint32_t calc_image_hostmem(pixman_format_code_t pformat, static void virtio_gpu_resource_create_2d(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { + Error *err = NULL; pixman_format_code_t pformat; struct virtio_gpu_simple_resource *res; struct virtio_gpu_resource_create_2d c2d; @@ -293,7 +294,8 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, c2d.width, c2d.height, c2d.height ? 
res->hostmem / c2d.height : 0, - &error_warn)) { + &err)) { + warn_report_err(err); goto end; } } @@ -1246,7 +1248,8 @@ static int virtio_gpu_save(QEMUFile *f, void *opaque, size_t size, } qemu_put_be32(f, 0); /* end of list */ - return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL); + return vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL, + &error_fatal); } static bool virtio_gpu_load_restore_mapping(VirtIOGPU *g, @@ -1282,6 +1285,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, const VMStateField *field) { VirtIOGPU *g = opaque; + Error *err = NULL; struct virtio_gpu_simple_resource *res; uint32_t resource_id, pformat; int i; @@ -1317,7 +1321,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, res->width, res->height, res->height ? res->hostmem / res->height : 0, - &error_warn)) { + &err)) { + warn_report_err(err); g_free(res); return -EINVAL; } @@ -1343,7 +1348,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, } /* load & apply scanout state */ - vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); + vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1, &error_fatal); return 0; } diff --git a/hw/display/xenfb.c b/hw/display/xenfb.c index 22822fecea305..164fd0b248571 100644 --- a/hw/display/xenfb.c +++ b/hw/display/xenfb.c @@ -283,8 +283,7 @@ static void xenfb_mouse_event(DeviceState *dev, QemuConsole *src, scale = surface_height(surface) - 1; break; default: - scale = 0x8000; - break; + g_assert_not_reached(); } xenfb->axis[move->axis] = move->value * scale / 0x7fff; } diff --git a/hw/display/xlnx_dp.c b/hw/display/xlnx_dp.c index 7c980ee6423d0..ef73e1815fc18 100644 --- a/hw/display/xlnx_dp.c +++ b/hw/display/xlnx_dp.c @@ -1267,14 +1267,18 @@ static void xlnx_dp_init(Object *obj) s->aux_bus = aux_bus_init(DEVICE(obj), "aux"); /* - * Initialize DPCD and EDID.. + * Initialize DPCD and EDID. 
Once we have added the objects as + * child properties of this device, we can drop the reference we + * hold to them, leaving the child-property as the only reference. */ s->dpcd = DPCD(qdev_new("dpcd")); object_property_add_child(OBJECT(s), "dpcd", OBJECT(s->dpcd)); + object_unref(s->dpcd); s->edid = I2CDDC(qdev_new("i2c-ddc")); i2c_slave_set_address(I2C_SLAVE(s->edid), 0x50); object_property_add_child(OBJECT(s), "edid", OBJECT(s->edid)); + object_unref(s->edid); fifo8_create(&s->rx_fifo, 16); fifo8_create(&s->tx_fifo, 16); @@ -1311,8 +1315,8 @@ static void xlnx_dp_realize(DeviceState *dev, Error **errp) qdev_realize(DEVICE(s->dpcd), BUS(s->aux_bus), &error_fatal); aux_map_slave(AUX_SLAVE(s->dpcd), 0x0000); - qdev_realize_and_unref(DEVICE(s->edid), BUS(aux_get_i2c_bus(s->aux_bus)), - &error_fatal); + qdev_realize(DEVICE(s->edid), BUS(aux_get_i2c_bus(s->aux_bus)), + &error_fatal); s->console = graphic_console_init(dev, 0, &xlnx_dp_gfx_ops, s); surface = qemu_console_surface(s->console); diff --git a/hw/gpio/aspeed_gpio.c b/hw/gpio/aspeed_gpio.c index 609a556908f94..2d78bf9515c40 100644 --- a/hw/gpio/aspeed_gpio.c +++ b/hw/gpio/aspeed_gpio.c @@ -1308,6 +1308,57 @@ static void aspeed_gpio_2700_write(void *opaque, hwaddr offset, } /* Setup functions */ +static void aspeed_gpio_set_set(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + uint32_t set_val = 0; + AspeedGPIOState *s = ASPEED_GPIO(obj); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + int set_idx = 0; + + if (!visit_type_uint32(v, name, &set_val, errp)) { + return; + } + + if (sscanf(name, "gpio-set[%d]", &set_idx) != 1) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + + if (set_idx >= agc->nr_gpio_sets || set_idx < 0) { + error_setg(errp, "%s: invalid set_idx %s", __func__, name); + return; + } + + aspeed_gpio_update(s, &s->sets[set_idx], set_val, + ~s->sets[set_idx].direction); +} + +static void aspeed_gpio_get_set(Object *obj, Visitor *v, + 
const char *name, void *opaque, + Error **errp) +{ + uint32_t set_val = 0; + AspeedGPIOState *s = ASPEED_GPIO(obj); + AspeedGPIOClass *agc = ASPEED_GPIO_GET_CLASS(s); + int set_idx = 0; + + if (sscanf(name, "gpio-set[%d]", &set_idx) != 1) { + error_setg(errp, "%s: error reading %s", __func__, name); + return; + } + + if (set_idx >= agc->nr_gpio_sets || set_idx < 0) { + error_setg(errp, "%s: invalid set_idx %s", __func__, name); + return; + } + + set_val = s->sets[set_idx].data_value; + visit_type_uint32(v, name, &set_val, errp); +} + +/****************** Setup functions ******************/ static const GPIOSetProperties ast2400_set_props[ASPEED_GPIO_MAX_NR_SETS] = { [0] = {0xffffffff, 0xffffffff, {"A", "B", "C", "D"} }, [1] = {0xffffffff, 0xffffffff, {"E", "F", "G", "H"} }, @@ -1435,6 +1486,12 @@ static void aspeed_gpio_init(Object *obj) g_free(name); } } + + for (int i = 0; i < agc->nr_gpio_sets; i++) { + char *name = g_strdup_printf("gpio-set[%d]", i); + object_property_add(obj, name, "uint32", aspeed_gpio_get_set, + aspeed_gpio_set_set, NULL, NULL); + } } static const VMStateDescription vmstate_gpio_regs = { diff --git a/hw/gpio/pca9554.c b/hw/gpio/pca9554.c index de3f883aee933..eac0d23be34a5 100644 --- a/hw/gpio/pca9554.c +++ b/hw/gpio/pca9554.c @@ -174,7 +174,7 @@ static void pca9554_set_pin(Object *obj, Visitor *v, const char *name, PCA9554State *s = PCA9554(obj); int pin, rc, val; uint8_t state, mask; - char *state_str; + g_autofree char *state_str = NULL; if (!visit_type_str(v, name, &state_str, errp)) { return; diff --git a/hw/gpio/zaurus.c b/hw/gpio/zaurus.c index b8d27f5973839..590ffde89d129 100644 --- a/hw/gpio/zaurus.c +++ b/hw/gpio/zaurus.c @@ -18,7 +18,6 @@ #include "qemu/osdep.h" #include "hw/irq.h" -#include "hw/arm/sharpsl.h" #include "hw/sysbus.h" #include "migration/vmstate.h" #include "qemu/module.h" @@ -265,44 +264,3 @@ static void scoop_register_types(void) } type_init(scoop_register_types) - -/* Write the bootloader parameters memory area. 
*/ - -#define MAGIC_CHG(a, b, c, d) ((d << 24) | (c << 16) | (b << 8) | a) - -static struct QEMU_PACKED sl_param_info { - uint32_t comadj_keyword; - int32_t comadj; - - uint32_t uuid_keyword; - char uuid[16]; - - uint32_t touch_keyword; - int32_t touch_xp; - int32_t touch_yp; - int32_t touch_xd; - int32_t touch_yd; - - uint32_t adadj_keyword; - int32_t adadj; - - uint32_t phad_keyword; - int32_t phadadj; -} zaurus_bootparam = { - .comadj_keyword = MAGIC_CHG('C', 'M', 'A', 'D'), - .comadj = 125, - .uuid_keyword = MAGIC_CHG('U', 'U', 'I', 'D'), - .uuid = { -1 }, - .touch_keyword = MAGIC_CHG('T', 'U', 'C', 'H'), - .touch_xp = -1, - .adadj_keyword = MAGIC_CHG('B', 'V', 'A', 'D'), - .adadj = -1, - .phad_keyword = MAGIC_CHG('P', 'H', 'A', 'D'), - .phadadj = 0x01, -}; - -void sl_bootparam_write(hwaddr ptr) -{ - cpu_physical_memory_write(ptr, &zaurus_bootparam, - sizeof(struct sl_param_info)); -} diff --git a/hw/hppa/machine.c b/hw/hppa/machine.c index dacedc5409c6c..cddca69b93868 100644 --- a/hw/hppa/machine.c +++ b/hw/hppa/machine.c @@ -36,6 +36,13 @@ #include "net/net.h" #include "qemu/log.h" +#define TYPE_HPPA_COMMON_MACHINE MACHINE_TYPE_NAME("hppa-common") +OBJECT_DECLARE_SIMPLE_TYPE(HppaMachineState, HPPA_COMMON_MACHINE) + +struct HppaMachineState { + MachineState parent_obj; +}; + #define MIN_SEABIOS_HPPA_VERSION 12 /* require at least this fw version */ #define HPA_POWER_BUTTON (FIRMWARE_END - 0x10) @@ -345,16 +352,11 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, TranslateFn *translate) { const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; - const char *initrd_filename = machine->initrd_filename; - const char *firmware = machine->firmware; MachineClass *mc = MACHINE_GET_CLASS(machine); DeviceState *dev; PCIDevice *pci_dev; - char *firmware_filename; - uint64_t firmware_low, firmware_high; long size; - uint64_t kernel_entry = 0, kernel_low, kernel_high; + uint64_t 
kernel_entry = 0; MemoryRegion *addr_space = get_system_memory(); MemoryRegion *rom_region; SysBusDevice *s; @@ -424,6 +426,10 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, firmware on 64-bit machines by default if not specified on command line. */ if (!qtest_enabled()) { + const char *firmware = machine->firmware; + uint64_t firmware_low, firmware_high; + char *firmware_filename; + if (!firmware) { firmware = lasi_dev ? "hppa-firmware.img" : "hppa-firmware64.img"; } @@ -460,6 +466,10 @@ static void machine_HP_common_init_tail(MachineState *machine, PCIBus *pci_bus, /* Load kernel */ if (kernel_filename) { + const char *kernel_cmdline = machine->kernel_cmdline; + const char *initrd_filename = machine->initrd_filename; + uint64_t kernel_low, kernel_high; + size = load_elf(kernel_filename, NULL, linux_kernel_virt_to_phys, NULL, &kernel_entry, &kernel_low, &kernel_high, NULL, ELFDATA2MSB, EM_PARISC, 0, 0); @@ -683,6 +693,22 @@ static void hppa_nmi(NMIState *n, int cpu_index, Error **errp) } } +static void hppa_machine_common_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + NMIClass *nc = NMI_CLASS(oc); + + mc->reset = hppa_machine_reset; + mc->block_default_type = IF_SCSI; + mc->default_cpus = 1; + mc->max_cpus = HPPA_MAX_CPUS; + mc->default_boot_order = "cd"; + mc->default_ram_id = "ram"; + mc->default_nic = "tulip"; + + nc->nmi_monitor_handler = hppa_nmi; +} + static void HP_B160L_machine_init_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { @@ -690,35 +716,15 @@ static void HP_B160L_machine_init_class_init(ObjectClass *oc, const void *data) NULL }; MachineClass *mc = MACHINE_CLASS(oc); - NMIClass *nc = NMI_CLASS(oc); mc->desc = "HP B160L workstation"; mc->default_cpu_type = TYPE_HPPA_CPU; mc->valid_cpu_types = valid_cpu_types; mc->init = machine_HP_B160L_init; - mc->reset = hppa_machine_reset; - mc->block_default_type = IF_SCSI; - 
mc->max_cpus = HPPA_MAX_CPUS; - mc->default_cpus = 1; mc->is_default = true; mc->default_ram_size = 512 * MiB; - mc->default_boot_order = "cd"; - mc->default_ram_id = "ram"; - mc->default_nic = "tulip"; - - nc->nmi_monitor_handler = hppa_nmi; } -static const TypeInfo HP_B160L_machine_init_typeinfo = { - .name = MACHINE_TYPE_NAME("B160L"), - .parent = TYPE_MACHINE, - .class_init = HP_B160L_machine_init_class_init, - .interfaces = (const InterfaceInfo[]) { - { TYPE_NMI }, - { } - }, -}; - static void HP_C3700_machine_init_class_init(ObjectClass *oc, const void *data) { static const char * const valid_cpu_types[] = { @@ -726,39 +732,35 @@ static void HP_C3700_machine_init_class_init(ObjectClass *oc, const void *data) NULL }; MachineClass *mc = MACHINE_CLASS(oc); - NMIClass *nc = NMI_CLASS(oc); mc->desc = "HP C3700 workstation"; mc->default_cpu_type = TYPE_HPPA64_CPU; mc->valid_cpu_types = valid_cpu_types; mc->init = machine_HP_C3700_init; - mc->reset = hppa_machine_reset; - mc->block_default_type = IF_SCSI; mc->max_cpus = HPPA_MAX_CPUS; - mc->default_cpus = 1; - mc->is_default = false; mc->default_ram_size = 1024 * MiB; - mc->default_boot_order = "cd"; - mc->default_ram_id = "ram"; - mc->default_nic = "tulip"; - - nc->nmi_monitor_handler = hppa_nmi; } -static const TypeInfo HP_C3700_machine_init_typeinfo = { - .name = MACHINE_TYPE_NAME("C3700"), - .parent = TYPE_MACHINE, - .class_init = HP_C3700_machine_init_class_init, - .interfaces = (const InterfaceInfo[]) { - { TYPE_NMI }, - { } +static const TypeInfo hppa_machine_types[] = { + { + .name = TYPE_HPPA_COMMON_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(HppaMachineState), + .class_init = hppa_machine_common_class_init, + .abstract = true, + .interfaces = (const InterfaceInfo[]) { + { TYPE_NMI }, + { } + }, + }, { + .name = MACHINE_TYPE_NAME("B160L"), + .parent = TYPE_HPPA_COMMON_MACHINE, + .class_init = HP_B160L_machine_init_class_init, + }, { + .name = MACHINE_TYPE_NAME("C3700"), + .parent = 
TYPE_HPPA_COMMON_MACHINE, + .class_init = HP_C3700_machine_init_class_init, }, }; -static void hppa_machine_init_register_types(void) -{ - type_register_static(&HP_B160L_machine_init_typeinfo); - type_register_static(&HP_C3700_machine_init_typeinfo); -} - -type_init(hppa_machine_init_register_types) +DEFINE_TYPES(hppa_machine_types) diff --git a/hw/hyperv/hv-balloon-our_range_memslots.c b/hw/hyperv/hv-balloon-our_range_memslots.c index 1505a395cf7da..1fc95e1648021 100644 --- a/hw/hyperv/hv-balloon-our_range_memslots.c +++ b/hw/hyperv/hv-balloon-our_range_memslots.c @@ -8,6 +8,7 @@ */ #include "qemu/osdep.h" +#include "system/ramblock.h" #include "hv-balloon-internal.h" #include "hv-balloon-our_range_memslots.h" #include "trace.h" diff --git a/hw/hyperv/hv-balloon.c b/hw/hyperv/hv-balloon.c index 6dbcb2d9a29dc..2d6d7db4ee0e5 100644 --- a/hw/hyperv/hv-balloon.c +++ b/hw/hyperv/hv-balloon.c @@ -1475,16 +1475,6 @@ static void hv_balloon_ensure_mr(HvBalloon *balloon) balloon->mr->align = memory_region_get_alignment(hostmem_mr); } -static void hv_balloon_free_mr(HvBalloon *balloon) -{ - if (!balloon->mr) { - return; - } - - object_unparent(OBJECT(balloon->mr)); - g_clear_pointer(&balloon->mr, g_free); -} - static void hv_balloon_vmdev_realize(VMBusDevice *vdev, Error **errp) { ERRP_GUARD(); @@ -1580,7 +1570,7 @@ static void hv_balloon_vmdev_reset(VMBusDevice *vdev) */ static void hv_balloon_unrealize_finalize_common(HvBalloon *balloon) { - hv_balloon_free_mr(balloon); + g_clear_pointer(&balloon->mr, g_free); balloon->addr = 0; balloon->memslot_count = 0; diff --git a/hw/hyperv/syndbg.c b/hw/hyperv/syndbg.c index ac7e15f6f1d5e..bcdfdf6af75d3 100644 --- a/hw/hyperv/syndbg.c +++ b/hw/hyperv/syndbg.c @@ -338,7 +338,9 @@ static void hv_syndbg_realize(DeviceState *dev, Error **errp) return; } - qemu_socket_set_nonblock(syndbg->socket); + if (!qemu_set_blocking(syndbg->socket, false, errp)) { + return; + } syndbg->servaddr.sin_port = htons(syndbg->host_port); 
syndbg->servaddr.sin_family = AF_INET; diff --git a/hw/i2c/smbus_eeprom.c b/hw/i2c/smbus_eeprom.c index 0a1088fbb0ab3..26e211b31ad58 100644 --- a/hw/i2c/smbus_eeprom.c +++ b/hw/i2c/smbus_eeprom.c @@ -288,6 +288,7 @@ uint8_t *spd_data_generate(enum sdram_type type, ram_addr_t ram_size) spd[33] = 8; /* addr/cmd hold time */ spd[34] = 20; /* data input setup time */ spd[35] = 8; /* data input hold time */ + spd[36] = (type == DDR2 ? 13 << 2 : 0); /* min. write recovery time */ /* checksum */ for (i = 0; i < 63; i++) { diff --git a/hw/i386/Kconfig b/hw/i386/Kconfig index 14d23e27b580b..6a0ab54bea4ab 100644 --- a/hw/i386/Kconfig +++ b/hw/i386/Kconfig @@ -4,7 +4,7 @@ config X86_FW_OVMF config SEV bool select X86_FW_OVMF - depends on KVM + depends on KVM && X86_64 config SGX bool @@ -96,9 +96,6 @@ config ISAPC select ISA_BUS select PC select IDE_ISA - # FIXME: it is in the same file as i440fx, and does not compile - # if separated - depends on I440FX config Q35 bool @@ -131,6 +128,7 @@ config MICROVM select I8259 select MC146818RTC select VIRTIO_MMIO + select ACPI_PCI select ACPI_HW_REDUCED select PCI_EXPRESS_GENERIC_BRIDGE select USB_XHCI_SYSBUS diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index 423c4959fe809..9446a9f862ca4 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -1863,7 +1863,11 @@ build_amd_iommu(GArray *table_data, BIOSLinker *linker, const char *oem_id, /* IOMMU info */ build_append_int_noprefix(table_data, 0, 2); /* IOMMU Attributes */ - build_append_int_noprefix(table_data, 0, 4); + if (!s->iommu.dma_translation) { + build_append_int_noprefix(table_data, (1UL << 0) /* HATDis */, 4); + } else { + build_append_int_noprefix(table_data, 0, 4); + } /* EFR Register Image */ build_append_int_noprefix(table_data, amdvi_extended_feature_register(s), diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 5a24c17548d45..378e0cb55eab6 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -33,6 +33,7 @@ #include 
"hw/i386/apic-msidef.h" #include "hw/qdev-properties.h" #include "kvm/kvm_i386.h" +#include "qemu/iova-tree.h" /* used AMD-Vi MMIO registers */ const char *amdvi_mmio_low[] = { @@ -66,6 +67,15 @@ struct AMDVIAddressSpace { MemoryRegion iommu_nodma; /* Alias of shared nodma memory region */ MemoryRegion iommu_ir; /* Device's interrupt remapping region */ AddressSpace as; /* device's corresponding address space */ + + /* DMA address translation support */ + IOMMUNotifierFlag notifier_flags; + /* entry in list of Address spaces with registered notifiers */ + QLIST_ENTRY(AMDVIAddressSpace) next; + /* Record DMA translation ranges */ + IOVATree *iova_tree; + /* DMA address translation active */ + bool addr_translation; }; /* AMDVI cache entry */ @@ -77,12 +87,29 @@ typedef struct AMDVIIOTLBEntry { uint64_t page_mask; /* physical page size */ } AMDVIIOTLBEntry; +/* + * These 'fault' reasons have an overloaded meaning since they are not only + * intended for describing reasons that generate an IO_PAGE_FAULT as per the AMD + * IOMMU specification, but are also used to signal internal errors in the + * emulation code. 
+ */ +typedef enum AMDVIFaultReason { + AMDVI_FR_DTE_RTR_ERR = 1, /* Failure to retrieve DTE */ + AMDVI_FR_DTE_V, /* DTE[V] = 0 */ + AMDVI_FR_DTE_TV, /* DTE[TV] = 0 */ + AMDVI_FR_PT_ROOT_INV, /* Page Table Root ptr invalid */ + AMDVI_FR_PT_ENTRY_INV, /* Failure to read PTE from guest memory */ +} AMDVIFaultReason; + uint64_t amdvi_extended_feature_register(AMDVIState *s) { uint64_t feature = AMDVI_DEFAULT_EXT_FEATURES; if (s->xtsup) { feature |= AMDVI_FEATURE_XT; } + if (!s->iommu.dma_translation) { + feature |= AMDVI_HATS_MODE_RESERVED; + } return feature; } @@ -123,8 +150,13 @@ static void amdvi_writew(AMDVIState *s, hwaddr addr, uint16_t val) uint16_t romask = lduw_le_p(&s->romask[addr]); uint16_t w1cmask = lduw_le_p(&s->w1cmask[addr]); uint16_t oldval = lduw_le_p(&s->mmior[addr]); + + uint16_t oldval_preserved = oldval & (romask | w1cmask); + uint16_t newval_write = val & ~romask; + uint16_t newval_w1c_set = val & w1cmask; + stw_le_p(&s->mmior[addr], - ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask)); + (oldval_preserved | newval_write) & ~newval_w1c_set); } static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val) @@ -132,8 +164,13 @@ static void amdvi_writel(AMDVIState *s, hwaddr addr, uint32_t val) uint32_t romask = ldl_le_p(&s->romask[addr]); uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); uint32_t oldval = ldl_le_p(&s->mmior[addr]); + + uint32_t oldval_preserved = oldval & (romask | w1cmask); + uint32_t newval_write = val & ~romask; + uint32_t newval_w1c_set = val & w1cmask; + stl_le_p(&s->mmior[addr], - ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask)); + (oldval_preserved | newval_write) & ~newval_w1c_set); } static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val) @@ -141,14 +178,19 @@ static void amdvi_writeq(AMDVIState *s, hwaddr addr, uint64_t val) uint64_t romask = ldq_le_p(&s->romask[addr]); uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); uint64_t oldval = ldq_le_p(&s->mmior[addr]); + + uint64_t oldval_preserved 
= oldval & (romask | w1cmask); + uint64_t newval_write = val & ~romask; + uint64_t newval_w1c_set = val & w1cmask; + stq_le_p(&s->mmior[addr], - ((oldval & romask) | (val & ~romask)) & ~(val & w1cmask)); + (oldval_preserved | newval_write) & ~newval_w1c_set); } -/* OR a 64-bit register with a 64-bit value */ +/* AND a 64-bit register with a 64-bit value */ static bool amdvi_test_mask(AMDVIState *s, hwaddr addr, uint64_t val) { - return amdvi_readq(s, addr) | val; + return amdvi_readq(s, addr) & val; } /* OR a 64-bit register with a 64-bit value storing result in the register */ @@ -177,19 +219,31 @@ static void amdvi_generate_msi_interrupt(AMDVIState *s) } } +static uint32_t get_next_eventlog_entry(AMDVIState *s) +{ + uint32_t evtlog_size = s->evtlog_len * AMDVI_EVENT_LEN; + return (s->evtlog_tail + AMDVI_EVENT_LEN) % evtlog_size; +} + static void amdvi_log_event(AMDVIState *s, uint64_t *evt) { + uint32_t evtlog_tail_next; + /* event logging not enabled */ if (!s->evtlog_enabled || amdvi_test_mask(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF)) { return; } + evtlog_tail_next = get_next_eventlog_entry(s); + /* event log buffer full */ - if (s->evtlog_tail >= s->evtlog_len) { - amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF); - /* generate interrupt */ - amdvi_generate_msi_interrupt(s); + if (evtlog_tail_next == s->evtlog_head) { + /* generate overflow interrupt */ + if (s->evtlog_intr) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_EVT_OVF); + amdvi_generate_msi_interrupt(s); + } return; } @@ -198,9 +252,13 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt) trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail); } - s->evtlog_tail += AMDVI_EVENT_LEN; - amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT); - amdvi_generate_msi_interrupt(s); + s->evtlog_tail = evtlog_tail_next; + amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail); + + if (s->evtlog_intr) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, 
AMDVI_MMIO_STATUS_EVENT_INT); + amdvi_generate_msi_interrupt(s); + } } static void amdvi_setevent_bits(uint64_t *buffer, uint64_t value, int start, @@ -355,56 +413,732 @@ static void amdvi_iotlb_remove_page(AMDVIState *s, hwaddr addr, g_hash_table_remove(s->iotlb, &key); } -static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid, - uint64_t gpa, IOMMUTLBEntry to_cache, - uint16_t domid) +static void amdvi_update_iotlb(AMDVIState *s, uint16_t devid, + uint64_t gpa, IOMMUTLBEntry to_cache, + uint16_t domid) +{ + /* don't cache erroneous translations */ + if (to_cache.perm != IOMMU_NONE) { + AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1); + uint64_t *key = g_new(uint64_t, 1); + uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K; + + trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid), gpa, to_cache.translated_addr); + + if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) { + amdvi_iotlb_reset(s); + } + + entry->domid = domid; + entry->perms = to_cache.perm; + entry->translated_addr = to_cache.translated_addr; + entry->page_mask = to_cache.addr_mask; + *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT); + g_hash_table_replace(s->iotlb, key, entry); + } +} + +static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd) +{ + /* pad the last 3 bits */ + hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3; + uint64_t data = cpu_to_le64(cmd[1]); + + if (extract64(cmd[0], 52, 8)) { + amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), + s->cmdbuf + s->cmdbuf_head); + } + if (extract64(cmd[0], 0, 1)) { + if (dma_memory_write(&address_space_memory, addr, &data, + AMDVI_COMPLETION_DATA_SIZE, + MEMTXATTRS_UNSPECIFIED)) { + trace_amdvi_completion_wait_fail(addr); + } + } + /* set completion interrupt */ + if (extract64(cmd[0], 1, 1)) { + amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT); + /* generate interrupt */ + amdvi_generate_msi_interrupt(s); + } + trace_amdvi_completion_wait(addr, data); +} + +static 
inline uint64_t amdvi_get_perms(uint64_t entry) +{ + return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >> + AMDVI_DEV_PERM_SHIFT; +} + +/* validate that reserved bits are honoured */ +static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid, + uint64_t *dte) +{ + + uint64_t root; + + if ((dte[0] & AMDVI_DTE_QUAD0_RESERVED) || + (dte[1] & AMDVI_DTE_QUAD1_RESERVED) || + (dte[2] & AMDVI_DTE_QUAD2_RESERVED) || + (dte[3] & AMDVI_DTE_QUAD3_RESERVED)) { + amdvi_log_illegaldevtab_error(s, devid, + s->devtab + + devid * AMDVI_DEVTAB_ENTRY_SIZE, 0); + return false; + } + + /* + * 1 = Host Address Translation is not supported. Value in MMIO Offset + * 0030h[HATS] is not meaningful. A non-zero host page table root pointer + * in the DTE would result in an ILLEGAL_DEV_TABLE_ENTRY event. + */ + root = (dte[0] & AMDVI_DEV_PT_ROOT_MASK) >> 12; + if (root && !s->iommu.dma_translation) { + amdvi_log_illegaldevtab_error(s, devid, + s->devtab + + devid * AMDVI_DEVTAB_ENTRY_SIZE, 0); + return false; + } + + return true; +} + +/* get a device table entry given the devid */ +static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry) +{ + uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE; + + if (dma_memory_read(&address_space_memory, s->devtab + offset, entry, + AMDVI_DEVTAB_ENTRY_SIZE, MEMTXATTRS_UNSPECIFIED)) { + trace_amdvi_dte_get_fail(s->devtab, offset); + /* log error accessing dte */ + amdvi_log_devtab_error(s, devid, s->devtab + offset, 0); + return false; + } + + *entry = le64_to_cpu(*entry); + if (!amdvi_validate_dte(s, devid, entry)) { + trace_amdvi_invalid_dte(entry[0]); + return false; + } + + return true; +} + +/* get pte translation mode */ +static inline uint8_t get_pte_translation_mode(uint64_t pte) +{ + return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK; +} + +static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr, + uint16_t devid) +{ + uint64_t pte; + + if (dma_memory_read(&address_space_memory, pte_addr, + &pte, 
sizeof(pte), MEMTXATTRS_UNSPECIFIED)) { + trace_amdvi_get_pte_hwerror(pte_addr); + amdvi_log_pagetab_error(s, devid, pte_addr, 0); + pte = (uint64_t)-1; + return pte; + } + + pte = le64_to_cpu(pte); + return pte; +} + +static int amdvi_as_to_dte(AMDVIAddressSpace *as, uint64_t *dte) +{ + uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn); + AMDVIState *s = as->iommu_state; + + if (!amdvi_get_dte(s, devid, dte)) { + /* Unable to retrieve DTE for devid */ + return -AMDVI_FR_DTE_RTR_ERR; + } + + if (!(dte[0] & AMDVI_DEV_VALID)) { + /* DTE[V] not set, address is passed untranslated for devid */ + return -AMDVI_FR_DTE_V; + } + + if (!(dte[0] & AMDVI_DEV_TRANSLATION_VALID)) { + /* DTE[TV] not set, host page table not valid for devid */ + return -AMDVI_FR_DTE_TV; + } + return 0; +} + +/* + * For a PTE encoding a large page, return the page size it encodes as described + * by the AMD IOMMU Specification Table 14: Example Page Size Encodings. + * No need to adjust the value of the PTE to point to the first PTE in the large + * page since the encoding guarantees all "base" PTEs in the large page are the + * same. + */ +static uint64_t large_pte_page_size(uint64_t pte) +{ + assert(PTE_NEXT_LEVEL(pte) == 7); + + /* Determine size of the large/contiguous page encoded in the PTE */ + return PTE_LARGE_PAGE_SIZE(pte); +} + +/* + * Helper function to fetch a PTE using AMD v1 pgtable format. + * On successful page walk, returns 0 and pte parameter points to a valid PTE. + * On failure, returns: + * -AMDVI_FR_PT_ROOT_INV: A page walk is not possible due to conditions like DTE + * with invalid permissions, Page Table Root can not be read from DTE, or a + * larger IOVA than supported by page table level encoded in DTE[Mode]. + * -AMDVI_FR_PT_ENTRY_INV: A PTE could not be read from guest memory during a + * page table walk. This means that the DTE has valid data, but one of the + * lower level entries in the Page Table could not be read. 
+ */ +static uint64_t fetch_pte(AMDVIAddressSpace *as, hwaddr address, uint64_t dte, + uint64_t *pte, hwaddr *page_size) +{ + IOMMUAccessFlags perms = amdvi_get_perms(dte); + + uint8_t level, mode; + uint64_t pte_addr; + + *pte = dte; + *page_size = 0; + + if (perms == IOMMU_NONE) { + return -AMDVI_FR_PT_ROOT_INV; + } + + /* + * The Linux kernel driver initializes the default mode to 3, corresponding + * to a 39-bit GPA space, where each entry in the pagetable translates to a + * 1GB (2^30) page size. + */ + level = mode = get_pte_translation_mode(dte); + assert(mode > 0 && mode < 7); + + /* + * If IOVA is larger than the max supported by the current pgtable level, + * there is nothing to do. + */ + if (address > PT_LEVEL_MAX_ADDR(mode - 1)) { + /* IOVA too large for the current DTE */ + return -AMDVI_FR_PT_ROOT_INV; + } + + do { + level -= 1; + + /* Update the page_size */ + *page_size = PTE_LEVEL_PAGE_SIZE(level); + + /* Permission bits are ANDed at every level, including the DTE */ + perms &= amdvi_get_perms(*pte); + if (perms == IOMMU_NONE) { + return 0; + } + + /* Not Present */ + if (!IOMMU_PTE_PRESENT(*pte)) { + return 0; + } + + /* Large or Leaf PTE found */ + if (PTE_NEXT_LEVEL(*pte) == 7 || PTE_NEXT_LEVEL(*pte) == 0) { + /* Leaf PTE found */ + break; + } + + /* + * Index the pgtable using the IOVA bits corresponding to current level + * and walk down to the lower level. + */ + pte_addr = NEXT_PTE_ADDR(*pte, level, address); + *pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn); + + if (*pte == (uint64_t)-1) { + /* + * A returned PTE of -1 indicates a failure to read the page table + * entry from guest memory. + */ + if (level == mode - 1) { + /* Failure to retrieve the Page Table from Root Pointer */ + *page_size = 0; + return -AMDVI_FR_PT_ROOT_INV; + } else { + /* Failure to read PTE. 
Page walk skips a page_size chunk */ + return -AMDVI_FR_PT_ENTRY_INV; + } + } + } while (level > 0); + + assert(PTE_NEXT_LEVEL(*pte) == 0 || PTE_NEXT_LEVEL(*pte) == 7 || + level == 0); + /* + * Page walk ends when Next Level field on PTE shows that either a leaf PTE + * or a series of large PTEs have been reached. In the latter case, even if + * the range starts in the middle of a contiguous page, the returned PTE + * must be the first PTE of the series. + */ + if (PTE_NEXT_LEVEL(*pte) == 7) { + /* Update page_size with the large PTE page size */ + *page_size = large_pte_page_size(*pte); + } + + return 0; +} + +/* + * Invoke notifiers registered for the address space. Update record of mapped + * ranges in IOVA Tree. + */ +static void amdvi_notify_iommu(AMDVIAddressSpace *as, IOMMUTLBEvent *event) +{ + IOMMUTLBEntry *entry = &event->entry; + + DMAMap target = { + .iova = entry->iova, + .size = entry->addr_mask, + .translated_addr = entry->translated_addr, + .perm = entry->perm, + }; + + /* + * Search the IOVA Tree for an existing translation for the target, and skip + * the notification if the mapping is already recorded. + * When the guest uses large pages, comparing against the record makes it + * possible to determine the size of the original MAP and adjust the UNMAP + * request to match it. This avoids failed checks against the mappings kept + * by the VFIO kernel driver. + */ + const DMAMap *mapped = iova_tree_find(as->iova_tree, &target); + + if (event->type == IOMMU_NOTIFIER_UNMAP) { + if (!mapped) { + /* No record exists of this mapping, nothing to do */ + return; + } + /* + * Adjust the size based on the original record. This is essential to + * determine when large/contiguous pages are used, since the guest has + * already cleared the PTE (erasing the pagesize encoded on it) before + * issuing the invalidation command. 
+ */ + if (mapped->size != target.size) { + assert(mapped->size > target.size); + target.size = mapped->size; + /* Adjust event to invoke notifier with correct range */ + entry->addr_mask = mapped->size; + } + iova_tree_remove(as->iova_tree, target); + } else { /* IOMMU_NOTIFIER_MAP */ + if (mapped) { + /* + * If a mapping is present and matches the request, skip the + * notification. + */ + if (!memcmp(mapped, &target, sizeof(DMAMap))) { + return; + } else { + /* + * This should never happen unless a buggy guest OS omits or + * sends incorrect invalidation(s). Report an error in the event + * it does happen. + */ + error_report("Found conflicting translation. This could be due " + "to an incorrect or missing invalidation command"); + } + } + /* Record the new mapping */ + iova_tree_insert(as->iova_tree, &target); + } + + /* Invoke the notifiers registered for this address space */ + memory_region_notify_iommu(&as->iommu, 0, *event); +} + +/* + * Walk the guest page table for an IOVA and range and signal the registered + * notifiers to sync the shadow page tables in the host. + * Must be called with a valid DTE for DMA remapping i.e. V=1,TV=1 + */ +static void amdvi_sync_shadow_page_table_range(AMDVIAddressSpace *as, + uint64_t *dte, hwaddr addr, + uint64_t size, bool send_unmap) +{ + IOMMUTLBEvent event; + + hwaddr page_mask, pagesize; + hwaddr iova = addr; + hwaddr end = iova + size - 1; + + uint64_t pte; + int ret; + + while (iova < end) { + + ret = fetch_pte(as, iova, dte[0], &pte, &pagesize); + + if (ret == -AMDVI_FR_PT_ROOT_INV) { + /* + * Invalid conditions such as the IOVA being larger than supported + * by current page table mode as configured in the DTE, or a failure + * to fetch the Page Table from the Page Table Root Pointer in DTE. 
+ */ + assert(pagesize == 0); + return; + } + /* PTE has been validated for major errors and pagesize is set */ + assert(pagesize); + page_mask = ~(pagesize - 1); + + if (ret == -AMDVI_FR_PT_ENTRY_INV) { + /* + * Failure to read PTE from memory, the pagesize matches the current + * level. Unable to determine the region type, so a safe strategy is + * to skip the range and continue the page walk. + */ + goto next; + } + + event.entry.target_as = &address_space_memory; + event.entry.iova = iova & page_mask; + /* translated_addr is irrelevant for the unmap case */ + event.entry.translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & + page_mask; + event.entry.addr_mask = ~page_mask; + event.entry.perm = amdvi_get_perms(pte); + + /* + * In cases where the leaf PTE is not found, or it has invalid + * permissions, an UNMAP type notification is sent, but only if the + * caller requested it. + */ + if (!IOMMU_PTE_PRESENT(pte) || (event.entry.perm == IOMMU_NONE)) { + if (!send_unmap) { + goto next; + } + event.type = IOMMU_NOTIFIER_UNMAP; + } else { + event.type = IOMMU_NOTIFIER_MAP; + } + + /* + * The following call might need to adjust event.entry.size in cases + * where the guest unmapped a series of large pages. + */ + amdvi_notify_iommu(as, &event); + /* + * In the special scenario where the guest is unmapping a large page, + * addr_mask has been adjusted before sending the notification. Update + * pagesize accordingly in order to correctly compute the next IOVA. + */ + pagesize = event.entry.addr_mask + 1; + +next: + iova &= ~(pagesize - 1); + + /* Check for 64-bit overflow and terminate walk in such cases */ + if ((iova + pagesize) < iova) { + break; + } else { + iova += pagesize; + } + } +} + +/* + * Unmap entire range that the notifier registered for i.e. the full AS. 
+ * + * This is seemingly technically equivalent to directly calling + * memory_region_unmap_iommu_notifier_range(), but it allows to check for + * notifier boundaries and issue notifications with ranges within those bounds. + */ +static void amdvi_address_space_unmap(AMDVIAddressSpace *as, IOMMUNotifier *n) +{ + + hwaddr start = n->start; + hwaddr end = n->end; + hwaddr remain; + DMAMap map; + + assert(start <= end); + remain = end - start + 1; + + /* + * Divide the notifier range into chunks that are aligned and do not exceed + * the notifier boundaries. + */ + while (remain >= AMDVI_PAGE_SIZE) { + + IOMMUTLBEvent event; + + uint64_t mask = dma_aligned_pow2_mask(start, end, 64); + + event.type = IOMMU_NOTIFIER_UNMAP; + + IOMMUTLBEntry entry = { + .target_as = &address_space_memory, + .iova = start, + .translated_addr = 0, /* irrelevant for unmap case */ + .addr_mask = mask, + .perm = IOMMU_NONE, + }; + event.entry = entry; + + /* Call notifier registered for updates on this address space */ + memory_region_notify_iommu_one(n, &event); + + start += mask + 1; + remain -= mask + 1; + } + + assert(!remain); + + map.iova = n->start; + map.size = n->end - n->start; + + iova_tree_remove(as->iova_tree, map); +} + +/* + * For all the address spaces with notifiers registered, unmap the entire range + * the notifier registered for i.e. clear all the address spaces managed by the + * IOMMU. + */ +static void amdvi_address_space_unmap_all(AMDVIState *s) +{ + AMDVIAddressSpace *as; + IOMMUNotifier *n; + + QLIST_FOREACH(as, &s->amdvi_as_with_notifiers, next) { + IOMMU_NOTIFIER_FOREACH(n, &as->iommu) { + amdvi_address_space_unmap(as, n); + } + } +} + +/* + * For every translation present in the IOMMU, construct IOMMUTLBEntry data + * and pass it as parameter to notifier callback. 
+ */ +static void amdvi_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) +{ + AMDVIAddressSpace *as = container_of(iommu_mr, AMDVIAddressSpace, iommu); + uint64_t dte[4] = { 0 }; + + if (!(n->notifier_flags & IOMMU_NOTIFIER_MAP)) { + return; + } + + if (amdvi_as_to_dte(as, dte)) { + return; + } + + /* Dropping all mappings for the address space. Also clears the IOVA tree */ + amdvi_address_space_unmap(as, n); + + amdvi_sync_shadow_page_table_range(as, &dte[0], 0, UINT64_MAX, false); +} + +static void amdvi_address_space_sync(AMDVIAddressSpace *as) +{ + IOMMUNotifier *n; + uint64_t dte[4] = { 0 }; + + /* If only UNMAP notifiers are registered, drop all existing mappings */ + if (!(as->notifier_flags & IOMMU_NOTIFIER_MAP)) { + IOMMU_NOTIFIER_FOREACH(n, &as->iommu) { + /* + * Directly calling memory_region_unmap_iommu_notifier_range() does + * not guarantee that the addr_mask eventually passed as parameter + * to the notifier is valid. Use amdvi_address_space_unmap() which + * ensures the notifier range is divided into properly aligned + * regions, and issues notifications for each one. + */ + amdvi_address_space_unmap(as, n); + } + return; + } + + if (amdvi_as_to_dte(as, dte)) { + return; + } + + amdvi_sync_shadow_page_table_range(as, &dte[0], 0, UINT64_MAX, true); +} + +/* + * This differs from the replay() method in that it issues both MAP and UNMAP + * notifications since it is called after global invalidation events in order to + * re-sync all address spaces. + */ +static void amdvi_iommu_address_space_sync_all(AMDVIState *s) +{ + AMDVIAddressSpace *as; + + QLIST_FOREACH(as, &s->amdvi_as_with_notifiers, next) { + amdvi_address_space_sync(as); + } +} + +/* + * Toggle between address translation and passthrough modes by enabling the + * corresponding memory regions. 
+ */ +static void amdvi_switch_address_space(AMDVIAddressSpace *amdvi_as) +{ + AMDVIState *s = amdvi_as->iommu_state; + + if (s->dma_remap && amdvi_as->addr_translation) { + /* Enabling DMA region */ + memory_region_set_enabled(&amdvi_as->iommu_nodma, false); + memory_region_set_enabled(MEMORY_REGION(&amdvi_as->iommu), true); + } else { + /* Disabling DMA region, using passthrough */ + memory_region_set_enabled(MEMORY_REGION(&amdvi_as->iommu), false); + memory_region_set_enabled(&amdvi_as->iommu_nodma, true); + } +} + +/* + * For all existing address spaces managed by the IOMMU, enable/disable the + * corresponding memory regions to reset the address translation mode and + * use passthrough by default. + */ +static void amdvi_reset_address_translation_all(AMDVIState *s) +{ + AMDVIAddressSpace **iommu_as; + + for (int bus_num = 0; bus_num < PCI_BUS_MAX; bus_num++) { + + /* Nothing to do if there are no devices on the current bus */ + if (!s->address_spaces[bus_num]) { + continue; + } + iommu_as = s->address_spaces[bus_num]; + + for (int devfn = 0; devfn < PCI_DEVFN_MAX; devfn++) { + + if (!iommu_as[devfn]) { + continue; + } + /* Use passthrough as default mode after reset */ + iommu_as[devfn]->addr_translation = false; + amdvi_switch_address_space(iommu_as[devfn]); + } + } +} + +static void enable_dma_mode(AMDVIAddressSpace *as, bool inval_current) { - /* don't cache erroneous translations */ - if (to_cache.perm != IOMMU_NONE) { - AMDVIIOTLBEntry *entry = g_new(AMDVIIOTLBEntry, 1); - uint64_t *key = g_new(uint64_t, 1); - uint64_t gfn = gpa >> AMDVI_PAGE_SHIFT_4K; + /* + * When enabling DMA mode for the purpose of isolating guest devices on + * a failure to retrieve or invalid DTE, all existing mappings must be + * dropped. 
+ */ + if (inval_current) { + IOMMUNotifier *n; + IOMMU_NOTIFIER_FOREACH(n, &as->iommu) { + amdvi_address_space_unmap(as, n); + } + } - trace_amdvi_cache_update(domid, PCI_BUS_NUM(devid), PCI_SLOT(devid), - PCI_FUNC(devid), gpa, to_cache.translated_addr); + if (as->addr_translation) { + return; + } - if (g_hash_table_size(s->iotlb) >= AMDVI_IOTLB_MAX_SIZE) { - amdvi_iotlb_reset(s); - } + /* Installing DTE enabling translation, activate region */ + as->addr_translation = true; + amdvi_switch_address_space(as); + /* Sync shadow page tables */ + amdvi_address_space_sync(as); +} - entry->domid = domid; - entry->perms = to_cache.perm; - entry->translated_addr = to_cache.translated_addr; - entry->page_mask = to_cache.addr_mask; - *key = gfn | ((uint64_t)(devid) << AMDVI_DEVID_SHIFT); - g_hash_table_replace(s->iotlb, key, entry); +/* + * If paging was previously in use in the address space + * - invalidate all existing mappings + * - switch to no_dma memory region + */ +static void enable_nodma_mode(AMDVIAddressSpace *as) +{ + IOMMUNotifier *n; + + if (!as->addr_translation) { + /* passthrough is already active, nothing to do */ + return; } + + as->addr_translation = false; + IOMMU_NOTIFIER_FOREACH(n, &as->iommu) { + /* Drop all mappings for the address space */ + amdvi_address_space_unmap(as, n); + } + amdvi_switch_address_space(as); } -static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd) +/* + * A guest driver must issue the INVALIDATE_DEVTAB_ENTRY command to the IOMMU + * after changing a Device Table entry. We can use this fact to detect when a + * Device Table entry is created for a device attached to a paging domain and + * enable the corresponding IOMMU memory region to allow for DMA translation if + * appropriate. 
+ */ +static void amdvi_update_addr_translation_mode(AMDVIState *s, uint16_t devid) { - /* pad the last 3 bits */ - hwaddr addr = cpu_to_le64(extract64(cmd[0], 3, 49)) << 3; - uint64_t data = cpu_to_le64(cmd[1]); + uint8_t bus_num, devfn, dte_mode; + AMDVIAddressSpace *as; + uint64_t dte[4] = { 0 }; + int ret; - if (extract64(cmd[0], 52, 8)) { - amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), - s->cmdbuf + s->cmdbuf_head); + /* + * Convert the devid encoded in the command to a bus and devfn in + * order to retrieve the corresponding address space. + */ + bus_num = PCI_BUS_NUM(devid); + devfn = devid & 0xff; + + /* + * The main buffer of size (AMDVIAddressSpace *) * (PCI_BUS_MAX) has already + * been allocated within AMDVIState, but must be careful to not access + * unallocated devfn. + */ + if (!s->address_spaces[bus_num] || !s->address_spaces[bus_num][devfn]) { + return; } - if (extract64(cmd[0], 0, 1)) { - if (dma_memory_write(&address_space_memory, addr, &data, - AMDVI_COMPLETION_DATA_SIZE, - MEMTXATTRS_UNSPECIFIED)) { - trace_amdvi_completion_wait_fail(addr); - } + as = s->address_spaces[bus_num][devfn]; + + ret = amdvi_as_to_dte(as, dte); + + if (!ret) { + dte_mode = (dte[0] >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK; } - /* set completion interrupt */ - if (extract64(cmd[0], 1, 1)) { - amdvi_assign_orq(s, AMDVI_MMIO_STATUS, AMDVI_MMIO_STATUS_COMP_INT); - /* generate interrupt */ - amdvi_generate_msi_interrupt(s); + + switch (ret) { + case 0: + /* DTE was successfully retrieved */ + if (!dte_mode) { + enable_nodma_mode(as); /* DTE[V]=1 && DTE[Mode]=0 => passthrough */ + } else { + enable_dma_mode(as, false); /* Enable DMA translation */ + } + break; + case -AMDVI_FR_DTE_V: + /* DTE[V]=0, address is passed untranslated */ + enable_nodma_mode(as); + break; + case -AMDVI_FR_DTE_RTR_ERR: + case -AMDVI_FR_DTE_TV: + /* + * Enforce isolation by using DMA in rare scenarios where the DTE cannot + * be retrieved or DTE[TV]=0. Existing mappings are dropped. 
+ */ + enable_dma_mode(as, true); + break; + } - trace_amdvi_completion_wait(addr, data); } /* log error without aborting since linux seems to be using reserved bits */ @@ -412,13 +1146,23 @@ static void amdvi_inval_devtab_entry(AMDVIState *s, uint64_t *cmd) { uint16_t devid = cpu_to_le16((uint16_t)extract64(cmd[0], 0, 16)); + trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid), + PCI_FUNC(devid)); + /* This command should invalidate internal caches of which there isn't */ if (extract64(cmd[0], 16, 44) || cmd[1]) { amdvi_log_illegalcom_error(s, extract64(cmd[0], 60, 4), s->cmdbuf + s->cmdbuf_head); + return; + } + + /* + * When DMA remapping capability is enabled, check if updated DTE is setup + * for paging or not, and configure the corresponding memory regions. + */ + if (s->dma_remap) { + amdvi_update_addr_translation_mode(s, devid); } - trace_amdvi_devtab_inval(PCI_BUS_NUM(devid), PCI_SLOT(devid), - PCI_FUNC(devid)); } static void amdvi_complete_ppr(AMDVIState *s, uint64_t *cmd) @@ -449,6 +1193,13 @@ static void amdvi_inval_all(AMDVIState *s, uint64_t *cmd) amdvi_intremap_inval_notify_all(s, true, 0, 0); amdvi_iotlb_reset(s); + + /* + * Fully replay the address space i.e. send both UNMAP and MAP events in + * order to synchronize guest and host IO page tables. + */ + amdvi_iommu_address_space_sync_all(s); + trace_amdvi_all_inval(); } @@ -460,10 +1211,109 @@ static gboolean amdvi_iotlb_remove_by_domid(gpointer key, gpointer value, return entry->domid == domid; } +/* + * Helper to decode the size of the range to invalidate encoded in the + * INVALIDATE_IOMMU_PAGES Command format. + * The size of the region to invalidate depends on the S bit and address. + * S bit value: + * 0 : Invalidation size is 4 Kbytes. + * 1 : Invalidation size is determined by first zero bit in the address + * starting from Address[12]. 
+ * + * In the AMD IOMMU Linux driver, an invalidation command with address + * ((1 << 63) - 1) is sent when intending to clear the entire cache. + * However, Table 14: Example Page Size Encodings shows that an address of + * ((1ULL << 51) - 1) encodes the entire cache, so effectively any address with + * first zero at bit 51 or larger is a request to invalidate the entire address + * space. + */ +static uint64_t amdvi_decode_invalidation_size(hwaddr addr, uint16_t flags) +{ + uint64_t size = AMDVI_PAGE_SIZE; + uint8_t fzbit = 0; + + if (flags & AMDVI_CMD_INVAL_IOMMU_PAGES_S) { + fzbit = cto64(addr | 0xFFF); + + if (fzbit >= 51) { + size = AMDVI_INV_ALL_PAGES; + } else { + size = 1ULL << (fzbit + 1); + } + } + return size; +} + +/* + * Synchronize the guest page tables with the shadow page tables kept in the + * host for the specified range. + * The invalidation command issued by the guest and intercepted by the VMM + * does not specify a device, but a domain, since all devices in the same domain + * share the same page tables. However, vIOMMU emulation creates separate + * address spaces per device, so it is necessary to traverse the list of all + * address spaces (i.e. devices) that have notifiers registered in order to + * propagate the changes to the host page tables. + * We cannot return early from this function once a matching domain has been + * identified and its page tables synced (based on the fact that all devices in + * the same domain share the page tables). The reason is that different devices + * (i.e. address spaces) could have different notifiers registered, and by + * skipping address spaces that appear later on the amdvi_as_with_notifiers list + * their notifiers (which could differ from the ones registered for the first + * device/address space) would not be invoked. 
+ */ +static void amdvi_sync_domain(AMDVIState *s, uint16_t domid, uint64_t addr, + uint16_t flags) +{ + AMDVIAddressSpace *as; + + uint64_t size = amdvi_decode_invalidation_size(addr, flags); + + if (size == AMDVI_INV_ALL_PAGES) { + addr = 0; /* Set start address to 0 and invalidate entire AS */ + } else { + addr &= ~(size - 1); + } + + /* + * Call notifiers that have registered for each address space matching the + * domain ID, in order to sync the guest pagetable state with the host. + */ + QLIST_FOREACH(as, &s->amdvi_as_with_notifiers, next) { + + uint64_t dte[4] = { 0 }; + + /* + * Retrieve the Device Table entry for the devid corresponding to the + * current address space, and verify the DomainID matches i.e. the page + * tables to be synced belong to devices in the domain. + */ + if (amdvi_as_to_dte(as, dte)) { + continue; + } + + /* Only need to sync the Page Tables for a matching domain */ + if (domid != (dte[1] & AMDVI_DEV_DOMID_ID_MASK)) { + continue; + } + + /* + * We have determined that there is a valid Device Table Entry for a + * device matching the DomainID in the INV_IOMMU_PAGES command issued by + * the guest. Walk the guest page table to sync shadow page table. 
+ */ + if (as->notifier_flags & IOMMU_NOTIFIER_MAP) { + /* Sync guest IOMMU mappings with host */ + amdvi_sync_shadow_page_table_range(as, &dte[0], addr, size, true); + } + } +} + /* we don't have devid - we can't remove pages by address */ static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd) { uint16_t domid = cpu_to_le16((uint16_t)extract64(cmd[0], 32, 16)); + uint64_t addr = cpu_to_le64(extract64(cmd[1], 12, 52)) << 12; + uint16_t flags = cpu_to_le16((uint16_t)extract64(cmd[1], 0, 3)); if (extract64(cmd[0], 20, 12) || extract64(cmd[0], 48, 12) || extract64(cmd[1], 3, 9)) { @@ -473,6 +1323,8 @@ static void amdvi_inval_pages(AMDVIState *s, uint64_t *cmd) g_hash_table_foreach_remove(s->iotlb, amdvi_iotlb_remove_by_domid, &domid); + + amdvi_sync_domain(s, domid, addr, flags); trace_amdvi_pages_inval(domid); } @@ -592,18 +1444,31 @@ static void amdvi_cmdbuf_run(AMDVIState *s) } } -static void amdvi_mmio_trace(hwaddr addr, unsigned size) +static inline uint8_t amdvi_mmio_get_index(hwaddr addr) { uint8_t index = (addr & ~0x2000) / 8; if ((addr & 0x2000)) { /* high table */ index = index >= AMDVI_MMIO_REGS_HIGH ? AMDVI_MMIO_REGS_HIGH : index; - trace_amdvi_mmio_read(amdvi_mmio_high[index], addr, size, addr & ~0x07); } else { index = index >= AMDVI_MMIO_REGS_LOW ? 
AMDVI_MMIO_REGS_LOW : index; - trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07); } + + return index; +} + +static void amdvi_mmio_trace_read(hwaddr addr, unsigned size) +{ + uint8_t index = amdvi_mmio_get_index(addr); + trace_amdvi_mmio_read(amdvi_mmio_low[index], addr, size, addr & ~0x07); +} + +static void amdvi_mmio_trace_write(hwaddr addr, unsigned size, uint64_t val) +{ + uint8_t index = amdvi_mmio_get_index(addr); + trace_amdvi_mmio_write(amdvi_mmio_low[index], addr, size, val, + addr & ~0x07); } static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size) @@ -623,7 +1488,7 @@ static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size) } else if (size == 8) { val = amdvi_readq(s, addr); } - amdvi_mmio_trace(addr, size); + amdvi_mmio_trace_read(addr, size); return val; } @@ -633,7 +1498,6 @@ static void amdvi_handle_control_write(AMDVIState *s) unsigned long control = amdvi_readq(s, AMDVI_MMIO_CONTROL); s->enabled = !!(control & AMDVI_MMIO_CONTROL_AMDVIEN); - s->ats_enabled = !!(control & AMDVI_MMIO_CONTROL_HTTUNEN); s->evtlog_enabled = s->enabled && !!(control & AMDVI_MMIO_CONTROL_EVENTLOGEN); @@ -704,9 +1568,19 @@ static inline void amdvi_handle_excllim_write(AMDVIState *s) static inline void amdvi_handle_evtbase_write(AMDVIState *s) { uint64_t val = amdvi_readq(s, AMDVI_MMIO_EVENT_BASE); + + if (amdvi_readq(s, AMDVI_MMIO_STATUS) & AMDVI_MMIO_STATUS_EVENT_INT) + /* Do not reset if eventlog interrupt bit is set */ + return; + s->evtlog = val & AMDVI_MMIO_EVTLOG_BASE_MASK; s->evtlog_len = 1UL << (amdvi_readq(s, AMDVI_MMIO_EVTLOG_SIZE_BYTE) & AMDVI_MMIO_EVTLOG_SIZE_MASK); + + /* clear tail and head pointer to 0 when event base is updated */ + s->evtlog_tail = s->evtlog_head = 0; + amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_HEAD, s->evtlog_head); + amdvi_writeq_raw(s, AMDVI_MMIO_EVENT_TAIL, s->evtlog_tail); } static inline void amdvi_handle_evttail_write(AMDVIState *s) @@ -770,7 +1644,7 @@ static void 
amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val, return; } - amdvi_mmio_trace(addr, size); + amdvi_mmio_trace_write(addr, size, val); switch (addr & ~0x07) { case AMDVI_MMIO_CONTROL: amdvi_mmio_reg_write(s, size, val, addr); @@ -835,153 +1709,74 @@ static void amdvi_mmio_write(void *opaque, hwaddr addr, uint64_t val, amdvi_mmio_reg_write(s, size, val, addr); amdvi_handle_pprtail_write(s); break; + case AMDVI_MMIO_STATUS: + amdvi_mmio_reg_write(s, size, val, addr); + break; } } -static inline uint64_t amdvi_get_perms(uint64_t entry) -{ - return (entry & (AMDVI_DEV_PERM_READ | AMDVI_DEV_PERM_WRITE)) >> - AMDVI_DEV_PERM_SHIFT; -} - -/* validate that reserved bits are honoured */ -static bool amdvi_validate_dte(AMDVIState *s, uint16_t devid, - uint64_t *dte) -{ - if ((dte[0] & AMDVI_DTE_QUAD0_RESERVED) || - (dte[1] & AMDVI_DTE_QUAD1_RESERVED) || - (dte[2] & AMDVI_DTE_QUAD2_RESERVED) || - (dte[3] & AMDVI_DTE_QUAD3_RESERVED)) { - amdvi_log_illegaldevtab_error(s, devid, - s->devtab + - devid * AMDVI_DEVTAB_ENTRY_SIZE, 0); - return false; - } - - return true; -} - -/* get a device table entry given the devid */ -static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry) +static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte, + IOMMUTLBEntry *ret, unsigned perms, + hwaddr addr) { - uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE; + hwaddr page_mask, pagesize = 0; + uint8_t mode; + uint64_t pte; + int fetch_ret; - if (dma_memory_read(&address_space_memory, s->devtab + offset, entry, - AMDVI_DEVTAB_ENTRY_SIZE, MEMTXATTRS_UNSPECIFIED)) { - trace_amdvi_dte_get_fail(s->devtab, offset); - /* log error accessing dte */ - amdvi_log_devtab_error(s, devid, s->devtab + offset, 0); - return false; + /* make sure the DTE has TV = 1 */ + if (!(dte[0] & AMDVI_DEV_TRANSLATION_VALID)) { + /* + * A DTE with V=1, TV=0 does not have a valid Page Table Root Pointer. 
+ * An IOMMU processing a request that requires a table walk terminates + * the walk when it encounters this condition. Do the same and return + * instead of assuming that the address is forwarded without translation + * i.e. the passthrough case, as it is done for the case where DTE[V]=0. + */ + return; } - *entry = le64_to_cpu(*entry); - if (!amdvi_validate_dte(s, devid, entry)) { - trace_amdvi_invalid_dte(entry[0]); - return false; + mode = get_pte_translation_mode(dte[0]); + if (mode >= 7) { + trace_amdvi_mode_invalid(mode, addr); + return; } - - return true; -} - -/* get pte translation mode */ -static inline uint8_t get_pte_translation_mode(uint64_t pte) -{ - return (pte >> AMDVI_DEV_MODE_RSHIFT) & AMDVI_DEV_MODE_MASK; -} - -static inline uint64_t pte_override_page_mask(uint64_t pte) -{ - uint8_t page_mask = 13; - uint64_t addr = (pte & AMDVI_DEV_PT_ROOT_MASK) >> 12; - /* find the first zero bit */ - while (addr & 1) { - page_mask++; - addr = addr >> 1; + if (mode == 0) { + goto no_remap; } - return ~((1ULL << page_mask) - 1); -} - -static inline uint64_t pte_get_page_mask(uint64_t oldlevel) -{ - return ~((1UL << ((oldlevel * 9) + 3)) - 1); -} + /* Attempt to fetch the PTE to determine if a valid mapping exists */ + fetch_ret = fetch_pte(as, addr, dte[0], &pte, &pagesize); -static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr, - uint16_t devid) -{ - uint64_t pte; + /* + * If walking the page table results in an error of any type, returns an + * empty PTE i.e. no mapping, or the permissions do not match, return since + * there is no translation available. 
+ */ + if (fetch_ret < 0 || !IOMMU_PTE_PRESENT(pte) || + perms != (perms & amdvi_get_perms(pte))) { - if (dma_memory_read(&address_space_memory, pte_addr, - &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) { - trace_amdvi_get_pte_hwerror(pte_addr); - amdvi_log_pagetab_error(s, devid, pte_addr, 0); - pte = 0; - return pte; + amdvi_page_fault(as->iommu_state, as->devfn, addr, perms); + trace_amdvi_page_fault(addr); + return; } - pte = le64_to_cpu(pte); - return pte; -} - -static void amdvi_page_walk(AMDVIAddressSpace *as, uint64_t *dte, - IOMMUTLBEntry *ret, unsigned perms, - hwaddr addr) -{ - unsigned level, present, pte_perms, oldlevel; - uint64_t pte = dte[0], pte_addr, page_mask; - - /* make sure the DTE has TV = 1 */ - if (pte & AMDVI_DEV_TRANSLATION_VALID) { - level = get_pte_translation_mode(pte); - if (level >= 7) { - trace_amdvi_mode_invalid(level, addr); - return; - } - if (level == 0) { - goto no_remap; - } - - /* we are at the leaf page table or page table encodes a huge page */ - do { - pte_perms = amdvi_get_perms(pte); - present = pte & 1; - if (!present || perms != (perms & pte_perms)) { - amdvi_page_fault(as->iommu_state, as->devfn, addr, perms); - trace_amdvi_page_fault(addr); - return; - } - - /* go to the next lower level */ - pte_addr = pte & AMDVI_DEV_PT_ROOT_MASK; - /* add offset and load pte */ - pte_addr += ((addr >> (3 + 9 * level)) & 0x1FF) << 3; - pte = amdvi_get_pte_entry(as->iommu_state, pte_addr, as->devfn); - if (!pte) { - return; - } - oldlevel = level; - level = get_pte_translation_mode(pte); - } while (level > 0 && level < 7); + /* A valid PTE and page size has been retrieved */ + assert(pagesize); + page_mask = ~(pagesize - 1); - if (level == 0x7) { - page_mask = pte_override_page_mask(pte); - } else { - page_mask = pte_get_page_mask(oldlevel); - } + /* get access permissions from pte */ + ret->iova = addr & page_mask; + ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask; + ret->addr_mask = ~page_mask; + ret->perm = 
amdvi_get_perms(pte); + return; - /* get access permissions from pte */ - ret->iova = addr & page_mask; - ret->translated_addr = (pte & AMDVI_DEV_PT_ROOT_MASK) & page_mask; - ret->addr_mask = ~page_mask; - ret->perm = amdvi_get_perms(pte); - return; - } no_remap: ret->iova = addr & AMDVI_PAGE_MASK_4K; ret->translated_addr = addr & AMDVI_PAGE_MASK_4K; ret->addr_mask = ~AMDVI_PAGE_MASK_4K; - ret->perm = amdvi_get_perms(pte); + ret->perm = amdvi_get_perms(dte[0]); } static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr, @@ -991,6 +1786,7 @@ static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr, uint16_t devid = PCI_BUILD_BDF(as->bus_num, as->devfn); AMDVIIOTLBEntry *iotlb_entry = amdvi_iotlb_lookup(s, addr, devid); uint64_t entry[4]; + int dte_ret; if (iotlb_entry) { trace_amdvi_iotlb_hit(PCI_BUS_NUM(devid), PCI_SLOT(devid), @@ -1002,13 +1798,14 @@ static void amdvi_do_translate(AMDVIAddressSpace *as, hwaddr addr, return; } - if (!amdvi_get_dte(s, devid, entry)) { - return; - } + dte_ret = amdvi_as_to_dte(as, entry); - /* devices with V = 0 are not translated */ - if (!(entry[0] & AMDVI_DEV_VALID)) { - goto out; + if (dte_ret < 0) { + if (dte_ret == -AMDVI_FR_DTE_V) { + /* DTE[V]=0, address is passed untranslated */ + goto out; + } + return; } amdvi_page_walk(as, entry, ret, @@ -1444,6 +2241,9 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) iommu_as[devfn]->bus_num = (uint8_t)bus_num; iommu_as[devfn]->devfn = (uint8_t)devfn; iommu_as[devfn]->iommu_state = s; + iommu_as[devfn]->notifier_flags = IOMMU_NOTIFIER_NONE; + iommu_as[devfn]->iova_tree = iova_tree_new(); + iommu_as[devfn]->addr_translation = false; amdvi_dev_as = iommu_as[devfn]; @@ -1486,8 +2286,7 @@ static AddressSpace *amdvi_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) AMDVI_INT_ADDR_FIRST, &amdvi_dev_as->iommu_ir, 1); - memory_region_set_enabled(&amdvi_dev_as->iommu_nodma, false); - 
memory_region_set_enabled(MEMORY_REGION(&amdvi_dev_as->iommu), true); + amdvi_switch_address_space(amdvi_dev_as); } return &iommu_as[devfn]->as; } @@ -1517,14 +2316,35 @@ static int amdvi_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, Error **errp) { AMDVIAddressSpace *as = container_of(iommu, AMDVIAddressSpace, iommu); + AMDVIState *s = as->iommu_state; + + /* + * Accurate synchronization of the vIOMMU page tables required to support + * MAP notifiers is provided by the dma-remap feature. In addition, this + * also requires that the vIOMMU presents the NpCache capability, so a guest + * driver issues invalidations for both map() and unmap() operations. The + * capability is already set by default as part of AMDVI_CAPAB_FEATURES and + * written to the configuration in amdvi_pci_realize(). + */ + if (!s->dma_remap && (new & IOMMU_NOTIFIER_MAP)) { + error_setg_errno(errp, ENOTSUP, + "device %02x.%02x.%x requires dma-remap=1", + as->bus_num, PCI_SLOT(as->devfn), PCI_FUNC(as->devfn)); + return -ENOTSUP; + } + + /* + * Update notifier flags for address space and the list of address spaces + * with registered notifiers. 
+ */ + as->notifier_flags = new; - if (new & IOMMU_NOTIFIER_MAP) { - error_setg(errp, - "device %02x.%02x.%x requires iommu notifier which is not " - "currently supported", as->bus_num, PCI_SLOT(as->devfn), - PCI_FUNC(as->devfn)); - return -EINVAL; + if (old == IOMMU_NOTIFIER_NONE) { + QLIST_INSERT_HEAD(&s->amdvi_as_with_notifiers, as, next); + } else if (new == IOMMU_NOTIFIER_NONE) { + QLIST_REMOVE(as, next); } + return 0; } @@ -1542,7 +2362,6 @@ static void amdvi_init(AMDVIState *s) s->excl_allow = false; s->mmio_enabled = false; s->enabled = false; - s->ats_enabled = false; s->cmdbuf_enabled = false; /* reset MMIO */ @@ -1602,6 +2421,10 @@ static void amdvi_sysbus_reset(DeviceState *dev) msi_reset(&s->pci->dev); amdvi_init(s); + + /* Discard all mappings on device reset */ + amdvi_address_space_unmap_all(s); + amdvi_reset_address_translation_all(s); } static const VMStateDescription vmstate_amdvi_sysbus_migratable = { @@ -1613,7 +2436,8 @@ static const VMStateDescription vmstate_amdvi_sysbus_migratable = { /* Updated in amdvi_handle_control_write() */ VMSTATE_BOOL(enabled, AMDVIState), VMSTATE_BOOL(ga_enabled, AMDVIState), - VMSTATE_BOOL(ats_enabled, AMDVIState), + /* bool ats_enabled is obsolete */ + VMSTATE_UNUSED(1), /* was ats_enabled */ VMSTATE_BOOL(cmdbuf_enabled, AMDVIState), VMSTATE_BOOL(completion_wait_intr, AMDVIState), VMSTATE_BOOL(evtlog_enabled, AMDVIState), @@ -1686,9 +2510,6 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) s->iotlb = g_hash_table_new_full(amdvi_uint64_hash, amdvi_uint64_equal, g_free, g_free); - /* Pseudo address space under root PCI bus. 
*/ - x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); - /* set up MMIO */ memory_region_init_io(&s->mr_mmio, OBJECT(s), &mmio_mem_ops, s, "amdvi-mmio", AMDVI_MMIO_SIZE); @@ -1711,6 +2532,9 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) memory_region_add_subregion_overlap(&s->mr_sys, AMDVI_INT_ADDR_FIRST, &s->mr_ir, 1); + /* Pseudo address space under root PCI bus. */ + x86ms->ioapic_as = amdvi_host_dma_iommu(bus, s, AMDVI_IOAPIC_SB_DEVID); + if (kvm_enabled() && x86ms->apic_id_limit > 255 && !s->xtsup) { error_report("AMD IOMMU with x2APIC configuration requires xtsup=on"); exit(EXIT_FAILURE); @@ -1731,6 +2555,7 @@ static void amdvi_sysbus_realize(DeviceState *dev, Error **errp) static const Property amdvi_properties[] = { DEFINE_PROP_BOOL("xtsup", AMDVIState, xtsup, false), DEFINE_PROP_STRING("pci-id", AMDVIState, pci_id), + DEFINE_PROP_BOOL("dma-remap", AMDVIState, dma_remap, false), }; static const VMStateDescription vmstate_amdvi_sysbus = { @@ -1792,6 +2617,7 @@ static void amdvi_iommu_memory_region_class_init(ObjectClass *klass, imrc->translate = amdvi_translate; imrc->notify_flag_changed = amdvi_iommu_notify_flag_changed; + imrc->replay = amdvi_iommu_replay; } static const TypeInfo amdvi_iommu_memory_region_info = { diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index 8b42913ed8dab..daf82fc85f961 100644 --- a/hw/i386/amd_iommu.h +++ b/hw/i386/amd_iommu.h @@ -111,6 +111,7 @@ #define AMDVI_MMIO_STATUS_CMDBUF_RUN (1 << 4) #define AMDVI_MMIO_STATUS_EVT_RUN (1 << 3) #define AMDVI_MMIO_STATUS_COMP_INT (1 << 2) +#define AMDVI_MMIO_STATUS_EVENT_INT (1 << 1) #define AMDVI_MMIO_STATUS_EVT_OVF (1 << 0) #define AMDVI_CMDBUF_ID_BYTE 0x07 @@ -125,6 +126,10 @@ #define AMDVI_CMD_COMPLETE_PPR_REQUEST 0x07 #define AMDVI_CMD_INVAL_AMDVI_ALL 0x08 + +#define AMDVI_CMD_INVAL_IOMMU_PAGES_S (1ULL << 0) +#define AMDVI_INV_ALL_PAGES (1ULL << 52) + #define AMDVI_DEVTAB_ENTRY_SIZE 32 /* Device table entry bits 0:63 */ @@ -172,6 +177,47 
@@ /* AMDVI paging mode */ #define AMDVI_GATS_MODE (2ULL << 12) #define AMDVI_HATS_MODE (2ULL << 10) +#define AMDVI_HATS_MODE_RESERVED (3ULL << 10) + +/* Page Table format */ + +#define AMDVI_PTE_PR (1ULL << 0) +#define AMDVI_PTE_NEXT_LEVEL_MASK GENMASK64(11, 9) + +#define IOMMU_PTE_PRESENT(pte) ((pte) & AMDVI_PTE_PR) + +/* Using level=0 for leaf PTE at 4K page size */ +#define PT_LEVEL_SHIFT(level) (12 + ((level) * 9)) + +/* Return IOVA bit group used to index the Page Table at specific level */ +#define PT_LEVEL_INDEX(level, iova) (((iova) >> PT_LEVEL_SHIFT(level)) & \ + GENMASK64(8, 0)) + +/* Return the max address for a specified level i.e. max_oaddr */ +#define PT_LEVEL_MAX_ADDR(x) (((x) < 5) ? \ + ((1ULL << PT_LEVEL_SHIFT((x + 1))) - 1) : \ + (~(0ULL))) + +/* Extract the NextLevel field from PTE/PDE */ +#define PTE_NEXT_LEVEL(pte) (((pte) & AMDVI_PTE_NEXT_LEVEL_MASK) >> 9) + +/* Take page table level and return default pagetable size for level */ +#define PTE_LEVEL_PAGE_SIZE(level) (1ULL << (PT_LEVEL_SHIFT(level))) + +/* + * Return address of lower level page table encoded in PTE and specified by + * current level and corresponding IOVA bit group at such level. + */ +#define NEXT_PTE_ADDR(pte, level, iova) (((pte) & AMDVI_DEV_PT_ROOT_MASK) + \ + (PT_LEVEL_INDEX(level, iova) * 8)) + +/* + * Take a PTE value with mode=0x07 and return the page size it encodes. 
+ */ +#define PTE_LARGE_PAGE_SIZE(pte) (1ULL << (1 + cto64(((pte) | 0xfffULL)))) + +/* Return number of PTEs to use for a given page size (expected power of 2) */ +#define PAGE_SIZE_PTE_COUNT(pgsz) (1ULL << ((ctz64(pgsz) - 12) % 9)) /* IOTLB */ #define AMDVI_IOTLB_MAX_SIZE 1024 @@ -322,7 +368,6 @@ struct AMDVIState { uint64_t mmio_addr; bool enabled; /* IOMMU enabled */ - bool ats_enabled; /* address translation enabled */ bool cmdbuf_enabled; /* command buffer enabled */ bool evtlog_enabled; /* event log enabled */ bool excl_enabled; @@ -365,12 +410,18 @@ struct AMDVIState { /* for each served device */ AMDVIAddressSpace **address_spaces[PCI_BUS_MAX]; + /* list of address spaces with registered notifiers */ + QLIST_HEAD(, AMDVIAddressSpace) amdvi_as_with_notifiers; + /* IOTLB */ GHashTable *iotlb; /* Interrupt remapping */ bool ga_enabled; bool xtsup; + + /* DMA address translation */ + bool dma_remap; }; uint64_t amdvi_extended_feature_register(AMDVIState *s); diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index fe9a5f2872799..6a168d5107725 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -45,6 +45,8 @@ ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK) #define VTD_CE_GET_PASID_DIR_TABLE(ce) \ ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK) +#define VTD_CE_GET_PRE(ce) \ + ((ce)->val[0] & VTD_SM_CONTEXT_ENTRY_PRE) /* pe operations */ #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) @@ -85,13 +87,6 @@ struct vtd_iotlb_key { static void vtd_address_space_refresh_all(IntelIOMMUState *s); static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); -static void vtd_panic_require_caching_mode(void) -{ - error_report("We need to set caching-mode=on for intel-iommu to enable " - "device assignment with IOMMU protection."); - exit(1); -} - static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val, uint64_t wmask, uint64_t w1cmask) { @@ -1838,6 +1833,7 @@ static const bool 
vtd_qualified_faults[] = { [VTD_FR_FS_NON_CANONICAL] = true, [VTD_FR_FS_PAGING_ENTRY_US] = true, [VTD_FR_SM_WRITE] = true, + [VTD_FR_SM_PRE_ABS] = true, [VTD_FR_SM_INTERRUPT_ADDR] = true, [VTD_FR_FS_BIT_UPDATE_FAILED] = true, [VTD_FR_MAX] = false, @@ -2701,7 +2697,7 @@ static void vtd_handle_gcmd_write(IntelIOMMUState *s) uint32_t changed = status ^ val; trace_vtd_reg_write_gcmd(status, val); - if ((changed & VTD_GCMD_TE) && s->dma_translation) { + if ((changed & VTD_GCMD_TE) && x86_iommu->dma_translation) { /* Translation enable/disable */ vtd_handle_gcmd_te(s, val & VTD_GCMD_TE); } @@ -2828,6 +2824,7 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI, VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + bool ret = true; if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, __func__, "wait")) { @@ -2839,8 +2836,6 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) uint32_t status_data = (uint32_t)(inv_desc->lo >> VTD_INV_DESC_WAIT_DATA_SHIFT); - assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); - /* FIXME: need to be masked with HAW? 
*/ dma_addr_t status_addr = inv_desc->hi; trace_vtd_inv_desc_wait_sw(status_addr, status_data); @@ -2849,18 +2844,28 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) &status_data, sizeof(status_data), MEMTXATTRS_UNSPECIFIED)) { trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); - return false; + ret = false; } - } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { + } + + if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { /* Interrupt flag */ vtd_generate_completion_event(s); - } else { + } + + /* + * SW=0, IF=0, FN=1 is also a valid descriptor (VT-d 7.10) + * Nothing to do as we process the descriptors in order + */ + + if (!(inv_desc->lo & (VTD_INV_DESC_WAIT_IF | VTD_INV_DESC_WAIT_SW | + VTD_INV_DESC_WAIT_FN))) { error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 " (unknown type)", __func__, inv_desc->hi, inv_desc->lo); return false; } - return true; + return ret; } static bool vtd_process_context_cache_desc(IntelIOMMUState *s, @@ -3143,6 +3148,59 @@ static bool vtd_process_device_piotlb_desc(IntelIOMMUState *s, return true; } +static bool vtd_process_page_group_response_desc(IntelIOMMUState *s, + VTDInvDesc *inv_desc) +{ + VTDAddressSpace *vtd_dev_as; + bool pasid_present; + uint8_t response_code; + uint16_t rid; + uint32_t pasid; + uint16_t prgi; + IOMMUPRIResponse response; + + if ((inv_desc->lo & VTD_INV_DESC_PGRESP_RSVD_LO) || + (inv_desc->hi & VTD_INV_DESC_PGRESP_RSVD_HI)) { + error_report_once("%s: invalid page group response desc: hi=%"PRIx64 + ", lo=%"PRIx64" (reserved nonzero)", __func__, + inv_desc->hi, inv_desc->lo); + return false; + } + + pasid_present = VTD_INV_DESC_PGRESP_PP(inv_desc->lo); + response_code = VTD_INV_DESC_PGRESP_RC(inv_desc->lo); + rid = VTD_INV_DESC_PGRESP_RID(inv_desc->lo); + pasid = VTD_INV_DESC_PGRESP_PASID(inv_desc->lo); + prgi = VTD_INV_DESC_PGRESP_PRGI(inv_desc->hi); + + if (!pasid_present) { + error_report_once("Page group response without PASID is" + "not supported yet"); + 
return false; + } + + vtd_dev_as = vtd_get_as_by_sid_and_pasid(s, rid, pasid); + if (!vtd_dev_as) { + return true; + } + + response.prgi = prgi; + + if (response_code == 0x0u) { + response.response_code = IOMMU_PRI_RESP_SUCCESS; + } else if (response_code == 0x1u) { + response.response_code = IOMMU_PRI_RESP_INVALID_REQUEST; + } else { + response.response_code = IOMMU_PRI_RESP_FAILURE; + } + + if (vtd_dev_as->pri_notifier) { + vtd_dev_as->pri_notifier->notify(vtd_dev_as->pri_notifier, &response); + } + + return true; +} + static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { @@ -3243,6 +3301,13 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) } break; + case VTD_INV_DESC_PGRESP: + trace_vtd_inv_desc("page group response", inv_desc.hi, inv_desc.lo); + if (!vtd_process_page_group_response_desc(s, &inv_desc)) { + return false; + } + break; + /* * TODO: the entity of below two cases will be implemented in future series. * To make guest (which integrates scalable mode support patch set in @@ -3377,6 +3442,27 @@ static void vtd_handle_iectl_write(IntelIOMMUState *s) } } +static void vtd_handle_prs_write(IntelIOMMUState *s) +{ + uint32_t prs = vtd_get_long_raw(s, DMAR_PRS_REG); + if (!(prs & VTD_PR_STATUS_PPR) && !(prs & VTD_PR_STATUS_PRO)) { + vtd_set_clear_mask_long(s, DMAR_PECTL_REG, VTD_PR_PECTL_IP, 0); + } +} + +static void vtd_handle_pectl_write(IntelIOMMUState *s) +{ + uint32_t pectl = vtd_get_long_raw(s, DMAR_PECTL_REG); + if ((pectl & VTD_PR_PECTL_IP) && !(pectl & VTD_PR_PECTL_IM)) { + /* + * If IP field was 1 when software clears the IM field, + * the interrupt is generated along with clearing the IP field. 
+ */ + vtd_set_clear_mask_long(s, DMAR_PECTL_REG, VTD_PR_PECTL_IP, 0); + vtd_generate_interrupt(s, DMAR_PEADDR_REG, DMAR_PEDATA_REG); + } +} + static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size) { IntelIOMMUState *s = opaque; @@ -3419,6 +3505,11 @@ static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size) val = s->iq >> 32; break; + case DMAR_PEUADDR_REG: + assert(size == 4); + val = vtd_get_long_raw(s, DMAR_PEUADDR_REG); + break; + default: if (size == 4) { val = vtd_get_long(s, addr); @@ -3482,6 +3573,11 @@ static void vtd_mem_write(void *opaque, hwaddr addr, vtd_handle_iotlb_write(s); break; + case DMAR_PEUADDR_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + break; + /* Invalidate Address Register, 64-bit */ case DMAR_IVA_REG: if (size == 4) { @@ -3662,6 +3758,18 @@ static void vtd_mem_write(void *opaque, hwaddr addr, vtd_set_long(s, addr, val); break; + case DMAR_PRS_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_prs_write(s); + break; + + case DMAR_PECTL_REG: + assert(size == 4); + vtd_set_long(s, addr, val); + vtd_handle_pectl_write(s); + break; + default: if (size == 4) { vtd_set_long(s, addr, val); @@ -3832,7 +3940,6 @@ static const Property vtd_properties[] = { DEFINE_PROP_BOOL("snoop-control", IntelIOMMUState, snoop_control, false), DEFINE_PROP_BOOL("x-pasid-mode", IntelIOMMUState, pasid, false), DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), - DEFINE_PROP_BOOL("dma-translation", IntelIOMMUState, dma_translation, true), DEFINE_PROP_BOOL("stale-tm", IntelIOMMUState, stale_tm, false), DEFINE_PROP_BOOL("fs1gp", IntelIOMMUState, fs1gp, true), }; @@ -4375,6 +4482,12 @@ static bool vtd_dev_set_iommu_device(PCIBus *bus, void *opaque, int devfn, assert(hiod); + if (!s->caching_mode) { + error_setg(errp, "Device assignment is not allowed without enabling " + "caching-mode=on for Intel IOMMU."); + return false; + } + vtd_iommu_lock(s); if (g_hash_table_lookup(s->vtd_host_iommu_dev, 
&key)) { @@ -4546,11 +4659,11 @@ static void vtd_cap_init(IntelIOMMUState *s) s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | - VTD_CAP_MGAW(s->aw_bits); + VTD_CAP_ESRTPS | VTD_CAP_MGAW(s->aw_bits); if (s->dma_drain) { s->cap |= VTD_CAP_DRAIN; } - if (s->dma_translation) { + if (x86_iommu->dma_translation) { if (s->aw_bits >= VTD_HOST_AW_39BIT) { s->cap |= VTD_CAP_SAGAW_39bit; } @@ -4713,6 +4826,18 @@ static void vtd_init(IntelIOMMUState *s) * Interrupt remapping registers. */ vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0); + + /* Page request registers */ + if (s->ecap & VTD_ECAP_PRS) { + vtd_define_quad(s, DMAR_PQH_REG, 0, 0x7ffe0ULL, 0); + vtd_define_quad(s, DMAR_PQT_REG, 0, 0x7ffe0ULL, 0); + vtd_define_quad(s, DMAR_PQA_REG, 0, 0xfffffffffffff007ULL, 0); + vtd_define_long(s, DMAR_PRS_REG, 0, 0, 0x3UL); + vtd_define_long(s, DMAR_PECTL_REG, 0, 0x80000000UL, 0); + vtd_define_long(s, DMAR_PEDATA_REG, 0, 0xffffUL, 0); + vtd_define_long(s, DMAR_PEADDR_REG, 0, 0xfffffffcUL, 0); + vtd_define_long(s, DMAR_PEUADDR_REG, 0, 0xffffffffUL, 0); + } } /* Should not reset address_spaces when reset because devices will still use @@ -4800,6 +4925,194 @@ static ssize_t vtd_ats_request_translation(PCIBus *bus, void *opaque, return res_index; } +/* 11.4.11.3 : The number of entries in the page request queue is 2^(PQS + 7) */ +static inline uint64_t vtd_prq_size(IntelIOMMUState *s) +{ + return 1ULL << ((vtd_get_quad(s, DMAR_PQA_REG) & VTD_PQA_SIZE) + 7); +} + +/** + * Return true if the bit is accessible and correctly set, false otherwise + */ +static bool vtd_check_pre_bit(VTDAddressSpace *vtd_as, hwaddr addr, + uint16_t sid, bool is_write) +{ + int ret; + IntelIOMMUState *s = vtd_as->iommu_state; + uint8_t bus_n = pci_bus_num(vtd_as->bus); + VTDContextEntry ce; + bool is_fpd_set = false; + + ret = vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce); + + if (ret) { + goto error_report; + } + + if (!VTD_CE_GET_PRE(&ce)) 
{ + ret = -VTD_FR_SM_PRE_ABS; + goto error_get_fpd_and_report; + } + + return true; + +error_get_fpd_and_report: + /* Try to get fpd (may not work but we are already on an error path) */ + is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; + vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set, vtd_as->pasid); +error_report: + vtd_report_fault(s, -ret, is_fpd_set, sid, addr, is_write, + vtd_as->pasid != PCI_NO_PASID, vtd_as->pasid); + return false; +} + +/* Logic described in section 7.5 */ +static void vtd_generate_page_request_event(IntelIOMMUState *s, + uint32_t old_pr_status) +{ + uint32_t current_pectl = vtd_get_long(s, DMAR_PECTL_REG); + /* + * Hardware evaluates PPR and PRO fields in the Page Request Status Register + * and if any of them is set, Page Request Event is not generated + */ + if (old_pr_status & (VTD_PR_STATUS_PRO | VTD_PR_STATUS_PPR)) { + return; + } + + vtd_set_clear_mask_long(s, DMAR_PECTL_REG, 0, VTD_PR_PECTL_IP); + if (!(current_pectl & VTD_PR_PECTL_IM)) { + vtd_set_clear_mask_long(s, DMAR_PECTL_REG, VTD_PR_PECTL_IP, 0); + vtd_generate_interrupt(s, DMAR_PEADDR_REG, DMAR_PEDATA_REG); + } +} + +/* When calling this function, we known that we are in scalable mode */ +static int vtd_pri_perform_implicit_invalidation(VTDAddressSpace *vtd_as, + hwaddr addr) +{ + IntelIOMMUState *s = vtd_as->iommu_state; + VTDContextEntry ce; + VTDPASIDEntry pe; + uint16_t pgtt; + uint16_t domain_id; + int ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), + vtd_as->devfn, &ce); + if (ret) { + return -EINVAL; + } + ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe, vtd_as->pasid); + if (ret) { + return -EINVAL; + } + pgtt = VTD_PE_GET_TYPE(&pe); + domain_id = VTD_SM_PASID_ENTRY_DID(pe.val[1]); + ret = 0; + switch (pgtt) { + case VTD_SM_PASID_ENTRY_FLT: + vtd_piotlb_page_invalidate(s, domain_id, vtd_as->pasid, addr, 0); + break; + /* Room for other pgtt values */ + default: + error_report_once("Translation type not supported yet : %d", pgtt); + ret = -EINVAL; + break; + } + + 
return ret; +} + +/* Page Request Descriptor : 7.4.1.1 */ +static int vtd_pri_request_page(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, bool priv_req, bool exec_req, + hwaddr addr, bool lpig, uint16_t prgi, + bool is_read, bool is_write) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + vtd_as = vtd_find_add_as(s, bus, devfn, pasid); + + uint64_t queue_addr_reg = vtd_get_quad(s, DMAR_PQA_REG); + uint64_t queue_tail_offset_reg = vtd_get_quad(s, DMAR_PQT_REG); + uint64_t new_queue_tail_offset = ( + (queue_tail_offset_reg + VTD_PQA_ENTRY_SIZE) % + (vtd_prq_size(s) * VTD_PQA_ENTRY_SIZE)); + uint64_t queue_head_offset_reg = vtd_get_quad(s, DMAR_PQH_REG); + hwaddr queue_tail = (queue_addr_reg & VTD_PQA_ADDR) + queue_tail_offset_reg; + uint32_t old_pr_status = vtd_get_long(s, DMAR_PRS_REG); + uint16_t sid = PCI_BUILD_BDF(pci_bus_num(vtd_as->bus), vtd_as->devfn); + VTDPRDesc desc; + + if (!(s->ecap & VTD_ECAP_PRS)) { + return -EPERM; + } + + /* + * No need to check if scalable mode is enabled as we already known that + * VTD_ECAP_PRS is set (see vtd_decide_config) + */ + + /* We do not support PRI without PASID */ + if (vtd_as->pasid == PCI_NO_PASID) { + return -EPERM; + } + if (exec_req && !is_read) { + return -EINVAL; + } + + /* Check PRE bit in the scalable mode context entry */ + if (!vtd_check_pre_bit(vtd_as, addr, sid, is_write)) { + return -EPERM; + } + + if (old_pr_status & VTD_PR_STATUS_PRO) { + /* + * No action is taken by hardware to report a fault + * or generate an event + */ + return -ENOSPC; + } + + /* Check for overflow */ + if (new_queue_tail_offset == queue_head_offset_reg) { + vtd_set_clear_mask_long(s, DMAR_PRS_REG, 0, VTD_PR_STATUS_PRO); + vtd_generate_page_request_event(s, old_pr_status); + return -ENOSPC; + } + + if (vtd_pri_perform_implicit_invalidation(vtd_as, addr)) { + return -EINVAL; + } + + desc.lo = VTD_PRD_TYPE | VTD_PRD_PP(true) | VTD_PRD_RID(sid) | + VTD_PRD_PASID(vtd_as->pasid) | VTD_PRD_PMR(priv_req); + desc.hi = 
VTD_PRD_RDR(is_read) | VTD_PRD_WRR(is_write) | + VTD_PRD_LPIG(lpig) | VTD_PRD_PRGI(prgi) | VTD_PRD_ADDR(addr); + + desc.lo = cpu_to_le64(desc.lo); + desc.hi = cpu_to_le64(desc.hi); + if (dma_memory_write(&address_space_memory, queue_tail, &desc, sizeof(desc), + MEMTXATTRS_UNSPECIFIED)) { + error_report_once("IO error, the PQ tail cannot be updated"); + return -EIO; + } + + /* increment the tail register and set the pending request bit */ + vtd_set_quad(s, DMAR_PQT_REG, new_queue_tail_offset); + /* + * read status again so that the kernel does not miss a request. + * in some cases, we can trigger an unecessary interrupt but this strategy + * drastically improves performance as we don't need to take a lock. + */ + old_pr_status = vtd_get_long(s, DMAR_PRS_REG); + if (!(old_pr_status & VTD_PR_STATUS_PPR)) { + vtd_set_clear_mask_long(s, DMAR_PRS_REG, 0, VTD_PR_STATUS_PPR); + vtd_generate_page_request_event(s, old_pr_status); + } + + return 0; +} + static void vtd_init_iotlb_notifier(PCIBus *bus, void *opaque, int devfn, IOMMUNotifier *n, IOMMUNotify fn, void *user_opaque) @@ -4841,6 +5154,26 @@ static void vtd_unregister_iotlb_notifier(PCIBus *bus, void *opaque, memory_region_unregister_iommu_notifier(MEMORY_REGION(&vtd_as->iommu), n); } +static void vtd_pri_register_notifier(PCIBus *bus, void *opaque, int devfn, + uint32_t pasid, IOMMUPRINotifier *notifier) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + vtd_as = vtd_find_add_as(s, bus, devfn, pasid); + vtd_as->pri_notifier = notifier; +} + +static void vtd_pri_unregister_notifier(PCIBus *bus, void *opaque, + int devfn, uint32_t pasid) +{ + IntelIOMMUState *s = opaque; + VTDAddressSpace *vtd_as; + + vtd_as = vtd_find_add_as(s, bus, devfn, pasid); + vtd_as->pri_notifier = NULL; +} + static PCIIOMMUOps vtd_iommu_ops = { .get_address_space = vtd_host_dma_iommu, .set_iommu_device = vtd_dev_set_iommu_device, @@ -4850,6 +5183,9 @@ static PCIIOMMUOps vtd_iommu_ops = { .register_iotlb_notifier = 
vtd_register_iotlb_notifier, .unregister_iotlb_notifier = vtd_unregister_iotlb_notifier, .ats_request_translation = vtd_ats_request_translation, + .pri_register_notifier = vtd_pri_register_notifier, + .pri_unregister_notifier = vtd_pri_unregister_notifier, + .pri_request_page = vtd_pri_request_page, }; static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) @@ -4907,32 +5243,6 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) return true; } -static int vtd_machine_done_notify_one(Object *child, void *unused) -{ - IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default()); - - /* - * We hard-coded here because vfio-pci is the only special case - * here. Let's be more elegant in the future when we can, but so - * far there seems to be no better way. - */ - if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) { - vtd_panic_require_caching_mode(); - } - - return 0; -} - -static void vtd_machine_done_hook(Notifier *notifier, void *unused) -{ - object_child_foreach_recursive(object_get_root(), - vtd_machine_done_notify_one, NULL); -} - -static Notifier vtd_machine_done_notify = { - .notify = vtd_machine_done_hook, -}; - static void vtd_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); @@ -4987,7 +5297,6 @@ static void vtd_realize(DeviceState *dev, Error **errp) pci_setup_iommu(bus, &vtd_iommu_ops, dev); /* Pseudo address space under root PCI bus. 
*/ x86ms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); - qemu_add_machine_init_done_notifier(&vtd_machine_done_notify); } static void vtd_class_init(ObjectClass *klass, const void *data) diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index 360e937989dc6..0f6a1237e4cf8 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -190,6 +190,7 @@ #define VTD_ECAP_EIM (1ULL << 4) #define VTD_ECAP_PT (1ULL << 6) #define VTD_ECAP_SC (1ULL << 7) +#define VTD_ECAP_PRS (1ULL << 29) #define VTD_ECAP_MHMV (15ULL << 20) #define VTD_ECAP_SRS (1ULL << 31) #define VTD_ECAP_PSS (7ULL << 35) /* limit: MemTxAttrs::pid */ @@ -214,6 +215,7 @@ #define VTD_CAP_DRAIN_WRITE (1ULL << 54) #define VTD_CAP_DRAIN_READ (1ULL << 55) #define VTD_CAP_FS1GP (1ULL << 56) +#define VTD_CAP_ESRTPS (1ULL << 63) #define VTD_CAP_DRAIN (VTD_CAP_DRAIN_READ | VTD_CAP_DRAIN_WRITE) #define VTD_CAP_CM (1ULL << 7) #define VTD_PASID_ID_SHIFT 20 @@ -314,6 +316,8 @@ typedef enum VTDFaultReason { * request while disabled */ VTD_FR_IR_SID_ERR = 0x26, /* Invalid Source-ID */ + VTD_FR_SM_PRE_ABS = 0x47, /* SCT.8 : PRE bit in a present SM CE is 0 */ + /* PASID directory entry access failure */ VTD_FR_PASID_DIR_ACCESS_ERR = 0x50, /* The Present(P) field of pasid directory entry is 0 */ @@ -376,6 +380,18 @@ union VTDInvDesc { }; typedef union VTDInvDesc VTDInvDesc; +/* Page Request Descriptor */ +union VTDPRDesc { + struct { + uint64_t lo; + uint64_t hi; + }; + struct { + uint64_t val[4]; + }; +}; +typedef union VTDPRDesc VTDPRDesc; + /* Masks for struct VTDInvDesc */ #define VTD_INV_DESC_ALL_ONE -1ULL #define VTD_INV_DESC_TYPE(val) ((((val) >> 5) & 0x70ULL) | \ @@ -389,6 +405,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_PIOTLB 0x6 /* PASID-IOTLB Invalidate Desc */ #define VTD_INV_DESC_PC 0x7 /* PASID-cache Invalidate Desc */ #define VTD_INV_DESC_DEV_PIOTLB 0x8 /* PASID-based-DIOTLB inv_desc*/ +#define VTD_INV_DESC_PGRESP 0x9 /* 
Page Group Response Desc */ #define VTD_INV_DESC_NONE 0 /* Not an Invalidate Descriptor */ /* Masks for Invalidation Wait Descriptor*/ @@ -440,6 +457,15 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL0 0xfff000000000f000ULL #define VTD_INV_DESC_PASID_DEVICE_IOTLB_RSVD_VAL1 0x7feULL +/* Mask for Page Group Response Descriptor */ +#define VTD_INV_DESC_PGRESP_RSVD_HI 0xfffffffffffff003ULL +#define VTD_INV_DESC_PGRESP_RSVD_LO 0xfff00000000001e0ULL +#define VTD_INV_DESC_PGRESP_PP(val) (((val) >> 4) & 0x1ULL) +#define VTD_INV_DESC_PGRESP_RC(val) (((val) >> 12) & 0xfULL) +#define VTD_INV_DESC_PGRESP_RID(val) (((val) >> 16) & 0xffffULL) +#define VTD_INV_DESC_PGRESP_PASID(val) (((val) >> 32) & 0xfffffULL) +#define VTD_INV_DESC_PGRESP_PRGI(val) (((val) >> 3) & 0x1ffULL) + /* Rsvd field masks for spte */ #define VTD_SPTE_SNP 0x800ULL @@ -491,6 +517,31 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_PIOTLB_RSVD_VAL0 0xfff000000000f1c0ULL #define VTD_INV_DESC_PIOTLB_RSVD_VAL1 0xf80ULL +/* Page Request Descriptor */ +/* For the low 64-bit of 128-bit */ +#define VTD_PRD_TYPE (1ULL) +#define VTD_PRD_PP(val) (((val) & 1ULL) << 8) +#define VTD_PRD_RID(val) (((val) & 0xffffULL) << 16) +#define VTD_PRD_PASID(val) (((val) & 0xfffffULL) << 32) +#define VTD_PRD_EXR(val) (((val) & 1ULL) << 52) +#define VTD_PRD_PMR(val) (((val) & 1ULL) << 53) +/* For the high 64-bit of 128-bit */ +#define VTD_PRD_RDR(val) ((val) & 1ULL) +#define VTD_PRD_WRR(val) (((val) & 1ULL) << 1) +#define VTD_PRD_LPIG(val) (((val) & 1ULL) << 2) +#define VTD_PRD_PRGI(val) (((val) & 0x1ffULL) << 3) +#define VTD_PRD_ADDR(val) ((val) & 0xfffffffffffff000ULL) + +/* Page Request Queue constants */ +#define VTD_PQA_ENTRY_SIZE 32 /* Size of an entry in bytes */ +/* Page Request Queue masks */ +#define VTD_PQA_ADDR 0xfffffffffffff000ULL /* PR queue address */ +#define VTD_PQA_SIZE 0x7ULL /* PR queue size */ +#define VTD_PR_STATUS_PPR 1UL /* Pending page request */ +#define 
VTD_PR_STATUS_PRO 2UL /* Page request overflow */ +#define VTD_PR_PECTL_IP 0x40000000UL /* PR control interrup pending */ +#define VTD_PR_PECTL_IM 0x80000000UL /* PR control interrup mask */ + /* Information about page-selective IOTLB invalidate */ struct VTDIOTLBPageInvInfo { uint16_t domain_id; @@ -550,6 +601,7 @@ typedef struct VTDRootEntry VTDRootEntry; #define VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK 0xfffff #define VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(aw) (0x1e0ULL | ~VTD_HAW_MASK(aw)) #define VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 0xffffffffffe00000ULL +#define VTD_SM_CONTEXT_ENTRY_PRE 0x10ULL /* PASID Table Related Definitions */ #define VTD_PASID_DIR_BASE_ADDR_MASK (~0xfffULL) diff --git a/hw/i386/isapc.c b/hw/i386/isapc.c new file mode 100644 index 0000000000000..44f4a44672439 --- /dev/null +++ b/hw/i386/isapc.c @@ -0,0 +1,189 @@ +/* + * QEMU PC System Emulator + * + * Copyright (c) 2003-2004 Fabrice Bellard + * + * SPDX-License-Identifier: MIT + */ + +#include "qemu/osdep.h" + +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "hw/char/parallel-isa.h" +#include "hw/dma/i8257.h" +#include "hw/i386/pc.h" +#include "hw/ide/isa.h" +#include "hw/ide/ide-bus.h" +#include "system/kvm.h" +#include "hw/i386/kvm/clock.h" +#include "hw/xen/xen-x86.h" +#include "system/xen.h" +#include "hw/rtc/mc146818rtc.h" +#include "target/i386/cpu.h" + +static const int ide_iobase[MAX_IDE_BUS] = { 0x1f0, 0x170 }; +static const int ide_iobase2[MAX_IDE_BUS] = { 0x3f6, 0x376 }; +static const int ide_irq[MAX_IDE_BUS] = { 14, 15 }; + + +static void pc_init_isa(MachineState *machine) +{ + PCMachineState *pcms = PC_MACHINE(machine); + PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms); + X86MachineState *x86ms = X86_MACHINE(machine); + MemoryRegion *system_memory = get_system_memory(); + MemoryRegion *system_io = get_system_io(); + ISABus *isa_bus; + uint32_t irq; + GSIState *gsi_state; + MemoryRegion *ram_memory; + DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; + int i; + + /* + * There is a 
small chance that someone unintentionally passes "-cpu max" + * for the isapc machine, which will provide a much more modern 32-bit + * CPU than would be expected for an ISA-era PC. If the "max" cpu type has + * been specified, choose the "best" 32-bit cpu possible which we consider + * be the pentium3 (deliberately choosing an Intel CPU given that the + * default 486 CPU for the isapc machine is also an Intel CPU). + */ + if (!strcmp(machine->cpu_type, X86_CPU_TYPE_NAME("max"))) { + machine->cpu_type = X86_CPU_TYPE_NAME("pentium3"); + warn_report("-cpu max is invalid for isapc machine, using pentium3"); + } + + /* + * Similarly if someone unintentionally passes "-cpu host" for the isapc + * machine then display a warning and also switch to the "best" 32-bit + * cpu possible which we consider to be the pentium3. This is because any + * host CPU will already be modern than this, but it also ensures any + * newer CPU flags/features are filtered out for older guests. + */ + if (!strcmp(machine->cpu_type, X86_CPU_TYPE_NAME("host"))) { + machine->cpu_type = X86_CPU_TYPE_NAME("pentium3"); + warn_report("-cpu host is invalid for isapc machine, using pentium3"); + } + + if (machine->ram_size > 3.5 * GiB) { + error_report("Too much memory for this machine: %" PRId64 " MiB, " + "maximum 3584 MiB", machine->ram_size / MiB); + exit(1); + } + + /* + * There is no RAM split for the isapc machine + */ + if (xen_enabled()) { + xen_hvm_init_pc(pcms, &ram_memory); + } else { + ram_memory = machine->ram; + + pcms->max_ram_below_4g = 3.5 * GiB; + x86ms->above_4g_mem_size = 0; + x86ms->below_4g_mem_size = machine->ram_size; + } + + x86_cpus_init(x86ms, pcmc->default_cpu_version); + + if (kvm_enabled()) { + kvmclock_create(pcmc->kvmclock_create_always); + } + + /* allocate ram and load rom/bios */ + if (!xen_enabled()) { + pc_memory_init(pcms, system_memory, system_memory, 0); + } else { + assert(machine->ram_size == x86ms->below_4g_mem_size + + x86ms->above_4g_mem_size); + + if 
(machine->kernel_filename != NULL) { + /* For xen HVM direct kernel boot, load linux here */ + xen_load_linux(pcms); + } + } + + gsi_state = pc_gsi_create(&x86ms->gsi, false); + + isa_bus = isa_bus_new(NULL, system_memory, system_io, + &error_abort); + isa_bus_register_input_irqs(isa_bus, x86ms->gsi); + + x86ms->rtc = isa_new(TYPE_MC146818_RTC); + qdev_prop_set_int32(DEVICE(x86ms->rtc), "base_year", 2000); + isa_realize_and_unref(x86ms->rtc, isa_bus, &error_fatal); + irq = object_property_get_uint(OBJECT(x86ms->rtc), "irq", + &error_fatal); + isa_connect_gpio_out(ISA_DEVICE(x86ms->rtc), 0, irq); + + i8257_dma_init(OBJECT(machine), isa_bus, 0); + pcms->hpet_enabled = false; + + if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { + pc_i8259_create(isa_bus, gsi_state->i8259_irq); + } + + if (tcg_enabled()) { + x86_register_ferr_irq(x86ms->gsi[13]); + } + + pc_vga_init(isa_bus, NULL); + + /* init basic PC hardware */ + pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, + !MACHINE_CLASS(pcmc)->no_floppy, 0x4); + + pc_nic_init(pcmc, isa_bus, NULL); + + ide_drive_get(hd, ARRAY_SIZE(hd)); + for (i = 0; i < MAX_IDE_BUS; i++) { + ISADevice *dev; + char busname[] = "ide.0"; + dev = isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], + ide_irq[i], + hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); + /* + * The ide bus name is ide.0 for the first bus and ide.1 for the + * second one. 
+ */ + busname[4] = '0' + i; + pcms->idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); + } +} + +static void isapc_machine_options(MachineClass *m) +{ + static const char * const valid_cpu_types[] = { + X86_CPU_TYPE_NAME("486"), + X86_CPU_TYPE_NAME("athlon"), + X86_CPU_TYPE_NAME("kvm32"), + X86_CPU_TYPE_NAME("pentium"), + X86_CPU_TYPE_NAME("pentium2"), + X86_CPU_TYPE_NAME("pentium3"), + X86_CPU_TYPE_NAME("qemu32"), + X86_CPU_TYPE_NAME("max"), + X86_CPU_TYPE_NAME("host"), + NULL + }; + PCMachineClass *pcmc = PC_MACHINE_CLASS(m); + + m->desc = "ISA-only PC"; + m->max_cpus = 1; + m->option_rom_has_mr = true; + m->rom_file_has_mr = false; + pcmc->pci_enabled = false; + pcmc->has_acpi_build = false; + pcmc->smbios_defaults = false; + pcmc->gigabyte_align = false; + pcmc->smbios_legacy_mode = true; + pcmc->has_reserved_memory = false; + m->default_nic = "ne2k_isa"; + m->default_cpu_type = X86_CPU_TYPE_NAME("486"); + m->valid_cpu_types = valid_cpu_types; + m->no_floppy = !module_object_class_by_name(TYPE_ISA_FDC); + m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); +} + +DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa, + isapc_machine_options); diff --git a/hw/i386/kvm/apic.c b/hw/i386/kvm/apic.c index 1be9bfe36e911..82355f04631a8 100644 --- a/hw/i386/kvm/apic.c +++ b/hw/i386/kvm/apic.c @@ -60,9 +60,8 @@ static void kvm_put_apic_state(APICCommonState *s, struct kvm_lapic_state *kapic kvm_apic_set_reg(kapic, 0x3e, s->divide_conf); } -void kvm_get_apic_state(DeviceState *dev, struct kvm_lapic_state *kapic) +void kvm_get_apic_state(APICCommonState *s, struct kvm_lapic_state *kapic) { - APICCommonState *s = APIC_COMMON(dev); int i, v; if (kvm_has_x2apic_api() && s->apicbase & MSR_IA32_APICBASE_EXTD) { diff --git a/hw/i386/meson.build b/hw/i386/meson.build index 7896f348cff80..436b3ce52d648 100644 --- a/hw/i386/meson.build +++ b/hw/i386/meson.build @@ -14,6 +14,7 @@ i386_ss.add(when: 'CONFIG_X86_IOMMU', if_true: files('x86-iommu.c'), i386_ss.add(when: 
'CONFIG_AMD_IOMMU', if_true: files('amd_iommu.c'), if_false: files('amd_iommu-stub.c')) i386_ss.add(when: 'CONFIG_I440FX', if_true: files('pc_piix.c')) +i386_ss.add(when: 'CONFIG_ISAPC', if_true: files('isapc.c')) i386_ss.add(when: 'CONFIG_MICROVM', if_true: files('x86-common.c', 'microvm.c', 'acpi-microvm.c', 'microvm-dt.c')) i386_ss.add(when: 'CONFIG_NITRO_ENCLAVE', if_true: files('nitro_enclave.c')) i386_ss.add(when: 'CONFIG_Q35', if_true: files('pc_q35.c')) diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index e0daf0d4fc301..94d22a232aca1 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -49,6 +49,7 @@ #include "hw/acpi/generic_event_device.h" #include "hw/pci-host/gpex.h" #include "hw/usb/xhci.h" +#include "hw/vfio/types.h" #include "elf.h" #include "kvm/kvm_i386.h" @@ -633,6 +634,8 @@ GlobalProperty microvm_properties[] = { * so reserving io space is not going to work. Turn it off. */ { "pcie-root-port", "io-reserve", "0" }, + { TYPE_RAMFB_DEVICE, "use-legacy-x86-rom", "true" }, + { TYPE_VFIO_PCI_NOHOTPLUG, "use-legacy-x86-rom", "true" }, }; static void microvm_class_init(ObjectClass *oc, const void *data) diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 2f58e73d33473..4d6bcbb846a0d 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -81,9 +81,14 @@ { "qemu64-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, },\ { "athlon-" TYPE_X86_CPU, "model-id", "QEMU Virtual CPU version " v, }, +GlobalProperty pc_compat_10_1[] = {}; +const size_t pc_compat_10_1_len = G_N_ELEMENTS(pc_compat_10_1); + GlobalProperty pc_compat_10_0[] = { { TYPE_X86_CPU, "x-consistent-cache", "false" }, { TYPE_X86_CPU, "x-vendor-cpuid-only-v2", "false" }, + { TYPE_X86_CPU, "x-arch-cap-always-on", "true" }, + { TYPE_X86_CPU, "x-pdcm-on-even-without-pmu", "true" }, }; const size_t pc_compat_10_0_len = G_N_ELEMENTS(pc_compat_10_0); @@ -834,6 +839,7 @@ void pc_memory_init(PCMachineState *pcms, hwaddr maxphysaddr, maxusedaddr; hwaddr cxl_base, cxl_resv_end = 0; X86CPU *cpu = 
X86_CPU(first_cpu); + uint64_t res_mem_end; assert(machine->ram_size == x86ms->below_4g_mem_size + x86ms->above_4g_mem_size); @@ -975,16 +981,17 @@ void pc_memory_init(PCMachineState *pcms, rom_set_fw(fw_cfg); - if (machine->device_memory) { - uint64_t *val = g_malloc(sizeof(*val)); - uint64_t res_mem_end; + if (pcms->cxl_devices_state.is_enabled) { + res_mem_end = cxl_resv_end; + } else if (machine->device_memory) { + res_mem_end = machine->device_memory->base + + memory_region_size(&machine->device_memory->mr); + } else { + res_mem_end = 0; + } - if (pcms->cxl_devices_state.is_enabled) { - res_mem_end = cxl_resv_end; - } else { - res_mem_end = machine->device_memory->base - + memory_region_size(&machine->device_memory->mr); - } + if (res_mem_end) { + uint64_t *val = g_malloc(sizeof(*val)); *val = cpu_to_le64(ROUND_UP(res_mem_end, 1 * GiB)); fw_cfg_add_file(fw_cfg, "etc/reserved-memory-end", val, sizeof(*val)); } @@ -1717,25 +1724,6 @@ static void pc_machine_wakeup(MachineState *machine) cpu_synchronize_all_post_reset(); } -static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp) -{ - X86IOMMUState *iommu = x86_iommu_get_default(); - IntelIOMMUState *intel_iommu; - - if (iommu && - object_dynamic_cast((Object *)iommu, TYPE_INTEL_IOMMU_DEVICE) && - object_dynamic_cast((Object *)dev, "vfio-pci")) { - intel_iommu = INTEL_IOMMU_DEVICE(iommu); - if (!intel_iommu->caching_mode) { - error_setg(errp, "Device assignment is not allowed without " - "enabling caching-mode=on for Intel IOMMU."); - return false; - } - } - - return true; -} - static void pc_machine_class_init(ObjectClass *oc, const void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -1755,7 +1743,6 @@ static void pc_machine_class_init(ObjectClass *oc, const void *data) x86mc->apic_xrupt_override = true; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = pc_get_hotplug_handler; - mc->hotplug_allowed = pc_hotplug_allowed; mc->auto_enable_numa_with_memhp = true; 
mc->auto_enable_numa_with_memdev = true; mc->has_hotpluggable_cpus = true; diff --git a/hw/i386/pc_piix.c b/hw/i386/pc_piix.c index a3285fbc64509..7b3611e973cd0 100644 --- a/hw/i386/pc_piix.c +++ b/hw/i386/pc_piix.c @@ -27,20 +27,16 @@ #include "qemu/units.h" #include "hw/char/parallel-isa.h" -#include "hw/dma/i8257.h" -#include "hw/loader.h" #include "hw/i386/x86.h" #include "hw/i386/pc.h" #include "hw/i386/apic.h" #include "hw/pci-host/i440fx.h" -#include "hw/rtc/mc146818rtc.h" #include "hw/southbridge/piix.h" #include "hw/display/ramfb.h" #include "hw/pci/pci.h" #include "hw/pci/pci_ids.h" #include "hw/usb.h" #include "net/net.h" -#include "hw/ide/isa.h" #include "hw/ide/pci.h" #include "hw/irq.h" #include "system/kvm.h" @@ -49,6 +45,7 @@ #include "hw/i2c/smbus_eeprom.h" #include "system/memory.h" #include "hw/acpi/acpi.h" +#include "hw/vfio/types.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "system/xen.h" @@ -71,11 +68,12 @@ #define XEN_IOAPIC_NUM_PIRQS 128ULL -#ifdef CONFIG_IDE_ISA -static const int ide_iobase[MAX_IDE_BUS] = { 0x1f0, 0x170 }; -static const int ide_iobase2[MAX_IDE_BUS] = { 0x3f6, 0x376 }; -static const int ide_irq[MAX_IDE_BUS] = { 14, 15 }; -#endif +static GlobalProperty pc_piix_compat_defaults[] = { + { TYPE_RAMFB_DEVICE, "use-legacy-x86-rom", "true" }, + { TYPE_VFIO_PCI_NOHOTPLUG, "use-legacy-x86-rom", "true" }, +}; +static const size_t pc_piix_compat_defaults_len = + G_N_ELEMENTS(pc_piix_compat_defaults); /* * Return the global irq number corresponding to a given device irq @@ -108,16 +106,20 @@ static void pc_init1(MachineState *machine, const char *pci_type) X86MachineState *x86ms = X86_MACHINE(machine); MemoryRegion *system_memory = get_system_memory(); MemoryRegion *system_io = get_system_io(); - Object *phb = NULL; + Object *phb; ISABus *isa_bus; Object *piix4_pm = NULL; qemu_irq smi_irq; GSIState *gsi_state; MemoryRegion *ram_memory; MemoryRegion *pci_memory = NULL; - MemoryRegion *rom_memory = system_memory; 
ram_addr_t lowmem; uint64_t hole64_size = 0; + PCIDevice *pci_dev; + DeviceState *dev; + size_t i; + + assert(pcmc->pci_enabled); /* * Calculate ram split, for memory below and above 4G. It's a bit @@ -188,42 +190,39 @@ static void pc_init1(MachineState *machine, const char *pci_type) kvmclock_create(pcmc->kvmclock_create_always); } - if (pcmc->pci_enabled) { - pci_memory = g_new(MemoryRegion, 1); - memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); - rom_memory = pci_memory; - - phb = OBJECT(qdev_new(TYPE_I440FX_PCI_HOST_BRIDGE)); - object_property_add_child(OBJECT(machine), "i440fx", phb); - object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM, - OBJECT(ram_memory), &error_fatal); - object_property_set_link(phb, PCI_HOST_PROP_PCI_MEM, - OBJECT(pci_memory), &error_fatal); - object_property_set_link(phb, PCI_HOST_PROP_SYSTEM_MEM, - OBJECT(system_memory), &error_fatal); - object_property_set_link(phb, PCI_HOST_PROP_IO_MEM, - OBJECT(system_io), &error_fatal); - object_property_set_uint(phb, PCI_HOST_BELOW_4G_MEM_SIZE, - x86ms->below_4g_mem_size, &error_fatal); - object_property_set_uint(phb, PCI_HOST_ABOVE_4G_MEM_SIZE, - x86ms->above_4g_mem_size, &error_fatal); - object_property_set_str(phb, I440FX_HOST_PROP_PCI_TYPE, pci_type, - &error_fatal); - sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal); - - pcms->pcibus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pci.0")); - pci_bus_map_irqs(pcms->pcibus, - xen_enabled() ? 
xen_pci_slot_get_pirq - : pc_pci_slot_get_pirq); - - hole64_size = object_property_get_uint(phb, - PCI_HOST_PROP_PCI_HOLE64_SIZE, - &error_abort); - } + pci_memory = g_new(MemoryRegion, 1); + memory_region_init(pci_memory, NULL, "pci", UINT64_MAX); + + phb = OBJECT(qdev_new(TYPE_I440FX_PCI_HOST_BRIDGE)); + object_property_add_child(OBJECT(machine), "i440fx", phb); + object_property_set_link(phb, PCI_HOST_PROP_RAM_MEM, + OBJECT(ram_memory), &error_fatal); + object_property_set_link(phb, PCI_HOST_PROP_PCI_MEM, + OBJECT(pci_memory), &error_fatal); + object_property_set_link(phb, PCI_HOST_PROP_SYSTEM_MEM, + OBJECT(system_memory), &error_fatal); + object_property_set_link(phb, PCI_HOST_PROP_IO_MEM, + OBJECT(system_io), &error_fatal); + object_property_set_uint(phb, PCI_HOST_BELOW_4G_MEM_SIZE, + x86ms->below_4g_mem_size, &error_fatal); + object_property_set_uint(phb, PCI_HOST_ABOVE_4G_MEM_SIZE, + x86ms->above_4g_mem_size, &error_fatal); + object_property_set_str(phb, I440FX_HOST_PROP_PCI_TYPE, pci_type, + &error_fatal); + sysbus_realize_and_unref(SYS_BUS_DEVICE(phb), &error_fatal); + + pcms->pcibus = PCI_BUS(qdev_get_child_bus(DEVICE(phb), "pci.0")); + pci_bus_map_irqs(pcms->pcibus, + xen_enabled() ? 
xen_pci_slot_get_pirq + : pc_pci_slot_get_pirq); + + hole64_size = object_property_get_uint(phb, + PCI_HOST_PROP_PCI_HOLE64_SIZE, + &error_abort); /* allocate ram and load rom/bios */ if (!xen_enabled()) { - pc_memory_init(pcms, system_memory, rom_memory, hole64_size); + pc_memory_init(pcms, system_memory, pci_memory, hole64_size); } else { assert(machine->ram_size == x86ms->below_4g_mem_size + x86ms->above_4g_mem_size); @@ -235,86 +234,63 @@ static void pc_init1(MachineState *machine, const char *pci_type) } } - gsi_state = pc_gsi_create(&x86ms->gsi, pcmc->pci_enabled); - - if (pcmc->pci_enabled) { - PCIDevice *pci_dev; - DeviceState *dev; - size_t i; - - pci_dev = pci_new_multifunction(-1, pcms->south_bridge); - object_property_set_bool(OBJECT(pci_dev), "has-usb", - machine_usb(machine), &error_abort); - object_property_set_bool(OBJECT(pci_dev), "has-acpi", - x86_machine_is_acpi_enabled(x86ms), - &error_abort); - object_property_set_bool(OBJECT(pci_dev), "has-pic", false, - &error_abort); - object_property_set_bool(OBJECT(pci_dev), "has-pit", false, - &error_abort); - qdev_prop_set_uint32(DEVICE(pci_dev), "smb_io_base", 0xb100); - object_property_set_bool(OBJECT(pci_dev), "smm-enabled", - x86_machine_is_smm_enabled(x86ms), - &error_abort); - dev = DEVICE(pci_dev); - for (i = 0; i < ISA_NUM_IRQS; i++) { - qdev_connect_gpio_out_named(dev, "isa-irqs", i, x86ms->gsi[i]); - } - pci_realize_and_unref(pci_dev, pcms->pcibus, &error_fatal); - - if (xen_enabled()) { - pci_device_set_intx_routing_notifier( - pci_dev, piix_intx_routing_notifier_xen); - - /* - * Xen supports additional interrupt routes from the PCI devices to - * the IOAPIC: the four pins of each PCI device on the bus are also - * connected to the IOAPIC directly. - * These additional routes can be discovered through ACPI. 
- */ - pci_bus_irqs(pcms->pcibus, xen_intx_set_irq, pci_dev, - XEN_IOAPIC_NUM_PIRQS); - } - - isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci_dev), "isa.0")); - x86ms->rtc = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev), - "rtc")); - piix4_pm = object_resolve_path_component(OBJECT(pci_dev), "pm"); - dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ide")); - pci_ide_create_devs(PCI_DEVICE(dev)); - pcms->idebus[0] = qdev_get_child_bus(dev, "ide.0"); - pcms->idebus[1] = qdev_get_child_bus(dev, "ide.1"); - } else { - uint32_t irq; + gsi_state = pc_gsi_create(&x86ms->gsi, true); + + pci_dev = pci_new_multifunction(-1, pcms->south_bridge); + object_property_set_bool(OBJECT(pci_dev), "has-usb", + machine_usb(machine), &error_abort); + object_property_set_bool(OBJECT(pci_dev), "has-acpi", + x86_machine_is_acpi_enabled(x86ms), + &error_abort); + object_property_set_bool(OBJECT(pci_dev), "has-pic", false, + &error_abort); + object_property_set_bool(OBJECT(pci_dev), "has-pit", false, + &error_abort); + qdev_prop_set_uint32(DEVICE(pci_dev), "smb_io_base", 0xb100); + object_property_set_bool(OBJECT(pci_dev), "smm-enabled", + x86_machine_is_smm_enabled(x86ms), + &error_abort); + dev = DEVICE(pci_dev); + for (i = 0; i < ISA_NUM_IRQS; i++) { + qdev_connect_gpio_out_named(dev, "isa-irqs", i, x86ms->gsi[i]); + } + pci_realize_and_unref(pci_dev, pcms->pcibus, &error_fatal); - isa_bus = isa_bus_new(NULL, system_memory, system_io, - &error_abort); - isa_bus_register_input_irqs(isa_bus, x86ms->gsi); + if (xen_enabled()) { + pci_device_set_intx_routing_notifier( + pci_dev, piix_intx_routing_notifier_xen); + + /* + * Xen supports additional interrupt routes from the PCI devices to + * the IOAPIC: the four pins of each PCI device on the bus are also + * connected to the IOAPIC directly. + * These additional routes can be discovered through ACPI. 
+ */ + pci_bus_irqs(pcms->pcibus, xen_intx_set_irq, pci_dev, + XEN_IOAPIC_NUM_PIRQS); + } - x86ms->rtc = isa_new(TYPE_MC146818_RTC); - qdev_prop_set_int32(DEVICE(x86ms->rtc), "base_year", 2000); - isa_realize_and_unref(x86ms->rtc, isa_bus, &error_fatal); - irq = object_property_get_uint(OBJECT(x86ms->rtc), "irq", - &error_fatal); - isa_connect_gpio_out(ISA_DEVICE(x86ms->rtc), 0, irq); + isa_bus = ISA_BUS(qdev_get_child_bus(DEVICE(pci_dev), "isa.0")); + x86ms->rtc = ISA_DEVICE(object_resolve_path_component(OBJECT(pci_dev), + "rtc")); + piix4_pm = object_resolve_path_component(OBJECT(pci_dev), "pm"); + dev = DEVICE(object_resolve_path_component(OBJECT(pci_dev), "ide")); + pci_ide_create_devs(PCI_DEVICE(dev)); + pcms->idebus[0] = qdev_get_child_bus(dev, "ide.0"); + pcms->idebus[1] = qdev_get_child_bus(dev, "ide.1"); - i8257_dma_init(OBJECT(machine), isa_bus, 0); - pcms->hpet_enabled = false; - } if (x86ms->pic == ON_OFF_AUTO_ON || x86ms->pic == ON_OFF_AUTO_AUTO) { pc_i8259_create(isa_bus, gsi_state->i8259_irq); } - if (phb) { - ioapic_init_gsi(gsi_state, phb); - } + ioapic_init_gsi(gsi_state, phb); if (tcg_enabled()) { x86_register_ferr_irq(x86ms->gsi[13]); } - pc_vga_init(isa_bus, pcmc->pci_enabled ? pcms->pcibus : NULL); + pc_vga_init(isa_bus, pcms->pcibus); /* init basic PC hardware */ pc_basic_device_init(pcms, isa_bus, x86ms->gsi, x86ms->rtc, @@ -322,28 +298,6 @@ static void pc_init1(MachineState *machine, const char *pci_type) pc_nic_init(pcmc, isa_bus, pcms->pcibus); -#ifdef CONFIG_IDE_ISA - if (!pcmc->pci_enabled) { - DriveInfo *hd[MAX_IDE_BUS * MAX_IDE_DEVS]; - int i; - - ide_drive_get(hd, ARRAY_SIZE(hd)); - for (i = 0; i < MAX_IDE_BUS; i++) { - ISADevice *dev; - char busname[] = "ide.0"; - dev = isa_ide_init(isa_bus, ide_iobase[i], ide_iobase2[i], - ide_irq[i], - hd[MAX_IDE_DEVS * i], hd[MAX_IDE_DEVS * i + 1]); - /* - * The ide bus name is ide.0 for the first bus and ide.1 for the - * second one. 
- */ - busname[4] = '0' + i; - pcms->idebus[i] = qdev_get_child_bus(DEVICE(dev), busname); - } - } -#endif - if (piix4_pm) { smi_irq = qemu_allocate_irq(pc_acpi_smi_interrupt, first_cpu, 0); @@ -425,22 +379,7 @@ static void pc_set_south_bridge(Object *obj, int value, Error **errp) pcms->south_bridge = PCSouthBridgeOption_lookup.array[value]; } -#ifdef CONFIG_ISAPC -static void pc_init_isa(MachineState *machine) -{ - pc_init1(machine, NULL); -} -#endif - #ifdef CONFIG_XEN -static void pc_xen_hvm_init_pci(MachineState *machine) -{ - const char *pci_type = xen_igd_gfx_pt_enabled() ? - TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE : TYPE_I440FX_PCI_DEVICE; - - pc_init1(machine, pci_type); -} - static void pc_xen_hvm_init(MachineState *machine) { PCMachineState *pcms = PC_MACHINE(machine); @@ -450,7 +389,10 @@ static void pc_xen_hvm_init(MachineState *machine) exit(1); } - pc_xen_hvm_init_pci(machine); + pc_init1(machine, xen_igd_gfx_pt_enabled() + ? TYPE_IGD_PASSTHROUGH_I440FX_PCI_DEVICE + : TYPE_I440FX_PCI_DEVICE); + xen_igd_reserve_slot(pcms->pcibus); pci_create_simple(pcms->pcibus, -1, "xen-platform"); } @@ -492,14 +434,26 @@ static void pc_i440fx_machine_options(MachineClass *m) pc_set_south_bridge); object_class_property_set_description(oc, "x-south-bridge", "Use a different south bridge than PIIX3"); + compat_props_add(m->compat_props, + pc_piix_compat_defaults, pc_piix_compat_defaults_len); } -static void pc_i440fx_machine_10_1_options(MachineClass *m) +static void pc_i440fx_machine_10_2_options(MachineClass *m) { pc_i440fx_machine_options(m); } -DEFINE_I440FX_MACHINE_AS_LATEST(10, 1); +DEFINE_I440FX_MACHINE_AS_LATEST(10, 2); + +static void pc_i440fx_machine_10_1_options(MachineClass *m) +{ + pc_i440fx_machine_10_2_options(m); + m->smbios_memory_device_size = 2047 * TiB; + compat_props_add(m->compat_props, hw_compat_10_1, hw_compat_10_1_len); + compat_props_add(m->compat_props, pc_compat_10_1, pc_compat_10_1_len); +} + +DEFINE_I440FX_MACHINE(10, 1); static void 
pc_i440fx_machine_10_0_options(MachineClass *m) { @@ -793,30 +747,6 @@ static void pc_i440fx_machine_2_6_options(MachineClass *m) DEFINE_I440FX_MACHINE(2, 6); -#ifdef CONFIG_ISAPC -static void isapc_machine_options(MachineClass *m) -{ - PCMachineClass *pcmc = PC_MACHINE_CLASS(m); - m->desc = "ISA-only PC"; - m->max_cpus = 1; - m->option_rom_has_mr = true; - m->rom_file_has_mr = false; - pcmc->pci_enabled = false; - pcmc->has_acpi_build = false; - pcmc->smbios_defaults = false; - pcmc->gigabyte_align = false; - pcmc->smbios_legacy_mode = true; - pcmc->has_reserved_memory = false; - m->default_nic = "ne2k_isa"; - m->default_cpu_type = X86_CPU_TYPE_NAME("486"); - m->no_floppy = !module_object_class_by_name(TYPE_ISA_FDC); - m->no_parallel = !module_object_class_by_name(TYPE_ISA_PARALLEL); -} - -DEFINE_PC_MACHINE(isapc, "isapc", pc_init_isa, - isapc_machine_options); -#endif - #ifdef CONFIG_XEN static void xenfv_machine_4_2_options(MachineClass *m) { diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index cf871cfdad863..6015e639d7bc0 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -45,6 +45,7 @@ #include "hw/i386/pc.h" #include "hw/i386/amd_iommu.h" #include "hw/i386/intel_iommu.h" +#include "hw/vfio/types.h" #include "hw/virtio/virtio-iommu.h" #include "hw/display/ramfb.h" #include "hw/ide/pci.h" @@ -67,6 +68,8 @@ static GlobalProperty pc_q35_compat_defaults[] = { { TYPE_VIRTIO_IOMMU_PCI, "aw-bits", "39" }, + { TYPE_RAMFB_DEVICE, "use-legacy-x86-rom", "true" }, + { TYPE_VFIO_PCI_NOHOTPLUG, "use-legacy-x86-rom", "true" }, }; static const size_t pc_q35_compat_defaults_len = G_N_ELEMENTS(pc_q35_compat_defaults); @@ -371,12 +374,22 @@ static void pc_q35_machine_options(MachineClass *m) pc_q35_compat_defaults, pc_q35_compat_defaults_len); } -static void pc_q35_machine_10_1_options(MachineClass *m) +static void pc_q35_machine_10_2_options(MachineClass *m) { pc_q35_machine_options(m); } -DEFINE_Q35_MACHINE_AS_LATEST(10, 1); +DEFINE_Q35_MACHINE_AS_LATEST(10, 2); + 
+static void pc_q35_machine_10_1_options(MachineClass *m) +{ + pc_q35_machine_10_2_options(m); + m->smbios_memory_device_size = 2047 * TiB; + compat_props_add(m->compat_props, hw_compat_10_1, hw_compat_10_1_len); + compat_props_add(m->compat_props, pc_compat_10_1, pc_compat_10_1_len); +} + +DEFINE_Q35_MACHINE(10, 1); static void pc_q35_machine_10_0_options(MachineClass *m) { diff --git a/hw/i386/vapic.c b/hw/i386/vapic.c index 0c1c92c479349..f1089f0a7c826 100644 --- a/hw/i386/vapic.c +++ b/hw/i386/vapic.c @@ -490,7 +490,7 @@ void vapic_report_tpr_access(DeviceState *dev, CPUState *cs, target_ulong ip, } typedef struct VAPICEnableTPRReporting { - DeviceState *apic; + APICCommonState *apic; bool enable; } VAPICEnableTPRReporting; diff --git a/hw/i386/x86-common.c b/hw/i386/x86-common.c index b1b5f11e73964..7512be64d67b9 100644 --- a/hw/i386/x86-common.c +++ b/hw/i386/x86-common.c @@ -952,7 +952,7 @@ void x86_load_linux(X86MachineState *x86ms, * kernel on the other side of the fw_cfg interface matches the hash of the * file the user passed in. 
*/ - if (!sev_enabled() && protocol > 0) { + if (!MACHINE(x86ms)->cgs && protocol > 0) { memcpy(setup, header, MIN(sizeof(header), setup_size)); } diff --git a/hw/i386/x86-cpu.c b/hw/i386/x86-cpu.c index c876e6709e038..1a86a853d5ffc 100644 --- a/hw/i386/x86-cpu.c +++ b/hw/i386/x86-cpu.c @@ -86,7 +86,7 @@ int cpu_get_pic_interrupt(CPUX86State *env) return intno; } -DeviceState *cpu_get_current_apic(void) +APICCommonState *cpu_get_current_apic(void) { if (current_cpu) { X86CPU *cpu = X86_CPU(current_cpu); diff --git a/hw/i386/x86-iommu.c b/hw/i386/x86-iommu.c index d34a6849f4ae9..c127a44bb4bc8 100644 --- a/hw/i386/x86-iommu.c +++ b/hw/i386/x86-iommu.c @@ -130,6 +130,7 @@ static const Property x86_iommu_properties[] = { intr_supported, ON_OFF_AUTO_AUTO), DEFINE_PROP_BOOL("device-iotlb", X86IOMMUState, dt_supported, false), DEFINE_PROP_BOOL("pt", X86IOMMUState, pt_supported, true), + DEFINE_PROP_BOOL("dma-translation", X86IOMMUState, dma_translation, true), }; static void x86_iommu_class_init(ObjectClass *klass, const void *data) diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index 1303c21cb70c5..14bc66fb7fad6 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -1417,7 +1417,7 @@ static void ahci_pio_transfer(const IDEDMA *dma) } /* Update number of transferred bytes, destroy sglist */ - dma_buf_commit(s, size); + ide_dma_buf_commit(s, size); out: /* declare that we processed everything */ @@ -1482,8 +1482,8 @@ static int32_t ahci_dma_prepare_buf(const IDEDMA *dma, int32_t limit) /** * Updates the command header with a bytes-read value. - * Called via dma_buf_commit, for both DMA and PIO paths. - * sglist destruction is handled within dma_buf_commit. + * Called via ide_dma_buf_commit, for both DMA and PIO paths. + * sglist destruction is handled within ide_dma_buf_commit. 
*/ static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes) { @@ -1511,7 +1511,7 @@ static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write) } /* free sglist, update byte count */ - dma_buf_commit(s, l); + ide_dma_buf_commit(s, l); s->io_buffer_index += l; trace_ahci_dma_rw_buf(ad->hba, ad->port_no, l); diff --git a/hw/ide/core.c b/hw/ide/core.c index b14983ec54faf..8c380abf7c162 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -827,7 +827,7 @@ static void ide_sector_read(IDEState *s) ide_sector_read_cb, s); } -void dma_buf_commit(IDEState *s, uint32_t tx_bytes) +void ide_dma_buf_commit(IDEState *s, uint32_t tx_bytes) { if (s->bus->dma->ops->commit_buf) { s->bus->dma->ops->commit_buf(s->bus->dma, tx_bytes); @@ -848,7 +848,7 @@ void ide_set_inactive(IDEState *s, bool more) void ide_dma_error(IDEState *s) { - dma_buf_commit(s, 0); + ide_dma_buf_commit(s, 0); ide_abort_command(s); ide_set_inactive(s, false); ide_bus_set_irq(s->bus); @@ -893,7 +893,7 @@ static void ide_dma_cb(void *opaque, int ret) if (ret < 0) { if (ide_handle_rw_error(s, -ret, ide_dma_cmd_to_retry(s->dma_cmd))) { s->bus->dma->aiocb = NULL; - dma_buf_commit(s, 0); + ide_dma_buf_commit(s, 0); return; } } @@ -912,7 +912,7 @@ static void ide_dma_cb(void *opaque, int ret) sector_num = ide_get_sector(s); if (n > 0) { assert(n * 512 == s->sg.size); - dma_buf_commit(s, s->sg.size); + ide_dma_buf_commit(s, s->sg.size); sector_num += n; ide_set_sector(s, sector_num); s->nsector -= n; @@ -944,7 +944,7 @@ static void ide_dma_cb(void *opaque, int ret) * Reset the Active bit and don't raise the interrupt. 
*/ s->status = READY_STAT | SEEK_STAT; - dma_buf_commit(s, 0); + ide_dma_buf_commit(s, 0); goto eot; } diff --git a/hw/ide/ich.c b/hw/ide/ich.c index 4cade0d12199c..b00987f08d4cd 100644 --- a/hw/ide/ich.c +++ b/hw/ide/ich.c @@ -114,7 +114,8 @@ static void pci_ich9_ahci_init(Object *obj) { AHCIPCIState *d = ICH9_AHCI(obj); - qemu_init_irq(&d->irq, pci_ich9_ahci_update_irq, d, 0); + qemu_init_irq_child(obj, "update-irq", &d->irq, + pci_ich9_ahci_update_irq, d, 0); ahci_init(&d->ahci, DEVICE(obj)); d->ahci.irq = &d->irq; } diff --git a/hw/ide/ide-internal.h b/hw/ide/ide-internal.h index 0d64805da20c0..281d07c9d58f0 100644 --- a/hw/ide/ide-internal.h +++ b/hw/ide/ide-internal.h @@ -398,7 +398,7 @@ int64_t ide_get_sector(IDEState *s); void ide_set_sector(IDEState *s, int64_t sector_num); void ide_start_dma(IDEState *s, BlockCompletionFunc *cb); -void dma_buf_commit(IDEState *s, uint32_t tx_bytes); +void ide_dma_buf_commit(IDEState *s, uint32_t tx_bytes); void ide_dma_error(IDEState *s); void ide_abort_command(IDEState *s); diff --git a/hw/input/hid.c b/hw/input/hid.c index 76bedc18443bf..de24cd0ef0452 100644 --- a/hw/input/hid.c +++ b/hw/input/hid.c @@ -478,6 +478,7 @@ int hid_keyboard_write(HIDState *hs, uint8_t *buf, int len) ledstate |= QEMU_CAPS_LOCK_LED; } kbd_put_ledstate(ledstate); + return 1; } return 0; } diff --git a/hw/input/virtio-input-host.c b/hw/input/virtio-input-host.c index bbfee9d3b9f23..9f62532559d6a 100644 --- a/hw/input/virtio-input-host.c +++ b/hw/input/virtio-input-host.c @@ -114,8 +114,7 @@ static void virtio_input_host_realize(DeviceState *dev, Error **errp) error_setg_file_open(errp, errno, vih->evdev); return; } - if (!g_unix_set_fd_nonblocking(vih->fd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(vih->fd, false, errp)) { goto err_close; } diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig index 7547528f2c27b..9f456d7e4312a 100644 --- a/hw/intc/Kconfig +++ b/hw/intc/Kconfig @@ -109,3 
+109,6 @@ config LOONGARCH_PCH_MSI config LOONGARCH_EXTIOI bool + +config LOONGARCH_DINTC + bool diff --git a/hw/intc/apic.c b/hw/intc/apic.c index bcb103560c726..aad253af15822 100644 --- a/hw/intc/apic.c +++ b/hw/intc/apic.c @@ -27,6 +27,7 @@ #include "hw/pci/msi.h" #include "qemu/host-utils.h" #include "system/kvm.h" +#include "system/mshv.h" #include "trace.h" #include "hw/i386/apic-msidef.h" #include "qapi/error.h" @@ -180,10 +181,8 @@ static void apic_local_deliver(APICCommonState *s, int vector) } } -void apic_deliver_pic_intr(DeviceState *dev, int level) +void apic_deliver_pic_intr(APICCommonState *s, int level) { - APICCommonState *s = APIC(dev); - if (level) { apic_local_deliver(s, APIC_LVT_LINT0); } else { @@ -300,10 +299,8 @@ static void apic_deliver_irq(uint32_t dest, uint8_t dest_mode, apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode); } -bool is_x2apic_mode(DeviceState *dev) +bool is_x2apic_mode(APICCommonState *s) { - APICCommonState *s = APIC(dev); - return s->apicbase & MSR_IA32_APICBASE_EXTD; } @@ -387,15 +384,12 @@ static void apic_set_tpr(APICCommonState *s, uint8_t val) } } -int apic_get_highest_priority_irr(DeviceState *dev) +int apic_get_highest_priority_irr(APICCommonState *s) { - APICCommonState *s; - - if (!dev) { + if (!s) { /* no interrupts */ return -1; } - s = APIC_COMMON(dev); return get_highest_priority_int(s->irr); } @@ -457,22 +451,19 @@ static int apic_irq_pending(APICCommonState *s) static void apic_update_irq(APICCommonState *s) { CPUState *cpu; - DeviceState *dev = (DeviceState *)s; cpu = CPU(s->cpu); if (!qemu_cpu_is_self(cpu)) { cpu_interrupt(cpu, CPU_INTERRUPT_POLL); } else if (apic_irq_pending(s) > 0) { cpu_interrupt(cpu, CPU_INTERRUPT_HARD); - } else if (!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { + } else if (!apic_accept_pic_intr(s) || !pic_get_output(isa_pic)) { cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); } } -void apic_poll_irq(DeviceState *dev) +void 
apic_poll_irq(APICCommonState *s) { - APICCommonState *s = APIC(dev); - apic_sync_vapic(s, SYNC_FROM_VAPIC); apic_update_irq(s); } @@ -515,7 +506,7 @@ static void apic_eoi(APICCommonState *s) static bool apic_match_dest(APICCommonState *apic, uint32_t dest) { - if (is_x2apic_mode(&apic->parent_obj)) { + if (is_x2apic_mode(apic)) { return apic->initial_apic_id == dest; } else { return apic->id == (uint8_t)dest; @@ -549,7 +540,7 @@ static void apic_get_broadcast_bitmask(uint32_t *deliver_bitmask, for (i = 0; i < max_apics; i++) { apic_iter = local_apics[i]; if (apic_iter) { - bool apic_in_x2apic = is_x2apic_mode(&apic_iter->parent_obj); + bool apic_in_x2apic = is_x2apic_mode(apic_iter); if (is_x2apic_broadcast && apic_in_x2apic) { apic_set_bit(deliver_bitmask, i); @@ -641,29 +632,24 @@ static void apic_startup(APICCommonState *s, int vector_num) cpu_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); } -void apic_sipi(DeviceState *dev) +void apic_sipi(APICCommonState *s) { - APICCommonState *s = APIC(dev); - - cpu_reset_interrupt(CPU(s->cpu), CPU_INTERRUPT_SIPI); - if (!s->wait_for_sipi) return; cpu_x86_load_seg_cache_sipi(s->cpu, s->sipi_vector); s->wait_for_sipi = 0; } -static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode, +static void apic_deliver(APICCommonState *s, uint32_t dest, uint8_t dest_mode, uint8_t delivery_mode, uint8_t vector_num, uint8_t trigger_mode, uint8_t dest_shorthand) { - APICCommonState *s = APIC(dev); APICCommonState *apic_iter; uint32_t deliver_bitmask_size = max_apic_words * sizeof(uint32_t); g_autofree uint32_t *deliver_bitmask = g_new(uint32_t, max_apic_words); uint32_t current_apic_id; - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { current_apic_id = s->initial_apic_id; } else { current_apic_id = s->id; @@ -710,18 +696,15 @@ static void apic_deliver(DeviceState *dev, uint32_t dest, uint8_t dest_mode, static bool apic_check_pic(APICCommonState *s) { - DeviceState *dev = (DeviceState *)s; - - if 
(!apic_accept_pic_intr(dev) || !pic_get_output(isa_pic)) { + if (!apic_accept_pic_intr(s) || !pic_get_output(isa_pic)) { return false; } - apic_deliver_pic_intr(dev, 1); + apic_deliver_pic_intr(s, 1); return true; } -int apic_get_interrupt(DeviceState *dev) +int apic_get_interrupt(APICCommonState *s) { - APICCommonState *s = APIC(dev); int intno; /* if the APIC is installed or enabled, we let the 8259 handle the @@ -753,9 +736,8 @@ int apic_get_interrupt(DeviceState *dev) return intno; } -int apic_accept_pic_intr(DeviceState *dev) +int apic_accept_pic_intr(APICCommonState *s) { - APICCommonState *s = APIC(dev); uint32_t lvt0; if (!s) @@ -787,22 +769,14 @@ static void apic_timer(void *opaque) apic_timer_update(s, s->next_time); } -static int apic_register_read(int index, uint64_t *value) +static int apic_register_read(APICCommonState *s, int index, uint64_t *value) { - DeviceState *dev; - APICCommonState *s; uint32_t val; int ret = 0; - dev = cpu_get_current_apic(); - if (!dev) { - return -1; - } - s = APIC(dev); - switch(index) { case 0x02: /* id */ - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { val = s->initial_apic_id; } else { val = s->id << 24; @@ -829,14 +803,14 @@ static int apic_register_read(int index, uint64_t *value) val = 0; break; case 0x0d: - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { val = s->extended_log_dest; } else { val = s->log_dest << 24; } break; case 0x0e: - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { val = 0; ret = -1; } else { @@ -888,6 +862,7 @@ static int apic_register_read(int index, uint64_t *value) static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) { + APICCommonState *s = cpu_get_current_apic(); uint64_t val; int index; @@ -895,26 +870,27 @@ static uint64_t apic_mem_read(void *opaque, hwaddr addr, unsigned size) return 0; } + if (!s) { + return -1; + } + index = (addr >> 4) & 0xff; - apic_register_read(index, &val); + apic_register_read(s, index, &val); return val; } -int 
apic_msr_read(int index, uint64_t *val) +int apic_msr_read(APICCommonState *s, int index, uint64_t *val) { - DeviceState *dev; - - dev = cpu_get_current_apic(); - if (!dev) { + if (!s) { return -1; } - if (!is_x2apic_mode(dev)) { + if (!is_x2apic_mode(s)) { return -1; } - return apic_register_read(index, val); + return apic_register_read(s, index, val); } static void apic_send_msi(MSIMessage *msi) @@ -932,25 +908,23 @@ static void apic_send_msi(MSIMessage *msi) uint8_t trigger_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; uint8_t delivery = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7; /* XXX: Ignore redirection hint. */ +#ifdef CONFIG_MSHV + if (mshv_enabled()) { + mshv_request_interrupt(mshv_state, delivery, vector, dest, + dest_mode, trigger_mode); + return; + } +#endif apic_deliver_irq(dest, dest_mode, delivery, vector, trigger_mode); } -static int apic_register_write(int index, uint64_t val) +static int apic_register_write(APICCommonState *s, int index, uint64_t val) { - DeviceState *dev; - APICCommonState *s; - - dev = cpu_get_current_apic(); - if (!dev) { - return -1; - } - s = APIC(dev); - trace_apic_register_write(index, val); switch(index) { case 0x02: - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { return -1; } @@ -973,14 +947,14 @@ static int apic_register_write(int index, uint64_t val) apic_eoi(s); break; case 0x0d: - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { return -1; } s->log_dest = val >> 24; break; case 0x0e: - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { return -1; } @@ -999,20 +973,20 @@ static int apic_register_write(int index, uint64_t val) uint32_t dest; s->icr[0] = val; - if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { s->icr[1] = val >> 32; dest = s->icr[1]; } else { dest = (s->icr[1] >> 24) & 0xff; } - apic_deliver(dev, dest, (s->icr[0] >> 11) & 1, + apic_deliver(s, dest, (s->icr[0] >> 11) & 1, (s->icr[0] >> 8) & 7, (s->icr[0] & 0xff), (s->icr[0] >> 15) & 1, (s->icr[0] >> 18) & 3); break; } case 0x31: - 
if (is_x2apic_mode(dev)) { + if (is_x2apic_mode(s)) { return -1; } @@ -1047,7 +1021,7 @@ static int apic_register_write(int index, uint64_t val) case 0x3f: { int vector = val & 0xff; - if (!is_x2apic_mode(dev)) { + if (!is_x2apic_mode(s)) { return -1; } @@ -1057,7 +1031,7 @@ static int apic_register_write(int index, uint64_t val) * - Trigger mode: 0 (Edge) * - Delivery mode: 0 (Fixed) */ - apic_deliver(dev, 0, 0, APIC_DM_FIXED, vector, 0, 1); + apic_deliver(s, 0, 0, APIC_DM_FIXED, vector, 0, 1); break; } @@ -1072,6 +1046,7 @@ static int apic_register_write(int index, uint64_t val) static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { + APICCommonState *s = cpu_get_current_apic(); int index = (addr >> 4) & 0xff; if (size < 4) { @@ -1091,23 +1066,24 @@ static void apic_mem_write(void *opaque, hwaddr addr, uint64_t val, return; } - apic_register_write(index, val); + if (!s) { + return; + } + + apic_register_write(s, index, val); } -int apic_msr_write(int index, uint64_t val) +int apic_msr_write(APICCommonState *s, int index, uint64_t val) { - DeviceState *dev; - - dev = cpu_get_current_apic(); - if (!dev) { + if (!s) { return -1; } - if (!is_x2apic_mode(dev)) { + if (!is_x2apic_mode(s)) { return -1; } - return apic_register_write(index, val); + return apic_register_write(s, index, val); } static void apic_pre_save(APICCommonState *s) diff --git a/hw/intc/apic_common.c b/hw/intc/apic_common.c index 37a7a7019d33c..ec9e978b0b40f 100644 --- a/hw/intc/apic_common.c +++ b/hw/intc/apic_common.c @@ -35,12 +35,11 @@ bool apic_report_tpr_access; -int cpu_set_apic_base(DeviceState *dev, uint64_t val) +int cpu_set_apic_base(APICCommonState *s, uint64_t val) { trace_cpu_set_apic_base(val); - if (dev) { - APICCommonState *s = APIC_COMMON(dev); + if (s) { APICCommonClass *info = APIC_COMMON_GET_CLASS(s); /* Reset possibly modified xAPIC ID */ s->id = s->initial_apic_id; @@ -50,10 +49,9 @@ int cpu_set_apic_base(DeviceState *dev, uint64_t val) return 0; 
} -uint64_t cpu_get_apic_base(DeviceState *dev) +uint64_t cpu_get_apic_base(APICCommonState *s) { - if (dev) { - APICCommonState *s = APIC_COMMON(dev); + if (s) { trace_cpu_get_apic_base((uint64_t)s->apicbase); return s->apicbase; } else { @@ -62,52 +60,43 @@ uint64_t cpu_get_apic_base(DeviceState *dev) } } -bool cpu_is_apic_enabled(DeviceState *dev) +bool cpu_is_apic_enabled(APICCommonState *s) { - APICCommonState *s; - - if (!dev) { + if (!s) { return false; } - s = APIC_COMMON(dev); - return s->apicbase & MSR_IA32_APICBASE_ENABLE; } -void cpu_set_apic_tpr(DeviceState *dev, uint8_t val) +void cpu_set_apic_tpr(APICCommonState *s, uint8_t val) { - APICCommonState *s; APICCommonClass *info; - if (!dev) { + if (!s) { return; } - s = APIC_COMMON(dev); info = APIC_COMMON_GET_CLASS(s); info->set_tpr(s, val); } -uint8_t cpu_get_apic_tpr(DeviceState *dev) +uint8_t cpu_get_apic_tpr(APICCommonState *s) { - APICCommonState *s; APICCommonClass *info; - if (!dev) { + if (!s) { return 0; } - s = APIC_COMMON(dev); info = APIC_COMMON_GET_CLASS(s); return info->get_tpr(s); } -void apic_enable_tpr_access_reporting(DeviceState *dev, bool enable) +void apic_enable_tpr_access_reporting(APICCommonState *s, bool enable) { - APICCommonState *s = APIC_COMMON(dev); APICCommonClass *info = APIC_COMMON_GET_CLASS(s); apic_report_tpr_access = enable; @@ -116,26 +105,22 @@ void apic_enable_tpr_access_reporting(DeviceState *dev, bool enable) } } -void apic_enable_vapic(DeviceState *dev, hwaddr paddr) +void apic_enable_vapic(APICCommonState *s, hwaddr paddr) { - APICCommonState *s = APIC_COMMON(dev); APICCommonClass *info = APIC_COMMON_GET_CLASS(s); s->vapic_paddr = paddr; info->vapic_base_update(s); } -void apic_handle_tpr_access_report(DeviceState *dev, target_ulong ip, +void apic_handle_tpr_access_report(APICCommonState *s, target_ulong ip, TPRAccess access) { - APICCommonState *s = APIC_COMMON(dev); - vapic_report_tpr_access(s->vapic, CPU(s->cpu), ip, access); } -void 
apic_deliver_nmi(DeviceState *dev) +void apic_deliver_nmi(APICCommonState *s) { - APICCommonState *s = APIC_COMMON(dev); APICCommonClass *info = APIC_COMMON_GET_CLASS(s); info->external_nmi(s); @@ -193,16 +178,14 @@ uint32_t apic_get_current_count(APICCommonState *s) return val; } -void apic_init_reset(DeviceState *dev) +void apic_init_reset(APICCommonState *s) { - APICCommonState *s; APICCommonClass *info; int i; - if (!dev) { + if (!s) { return; } - s = APIC_COMMON(dev); s->tpr = 0; s->spurious_vec = 0xff; s->log_dest = 0; @@ -233,13 +216,12 @@ void apic_init_reset(DeviceState *dev) } } -void apic_designate_bsp(DeviceState *dev, bool bsp) +void apic_designate_bsp(APICCommonState *s, bool bsp) { - if (dev == NULL) { + if (s == NULL) { return; } - APICCommonState *s = APIC_COMMON(dev); if (bsp) { s->apicbase |= MSR_IA32_APICBASE_BSP; } else { @@ -262,7 +244,7 @@ static void apic_reset_common(DeviceState *dev) s->vapic_paddr = 0; info->vapic_base_update(s); - apic_init_reset(dev); + apic_init_reset(s); } static const VMStateDescription vmstate_apic_common; @@ -379,6 +361,7 @@ static const VMStateDescription vmstate_apic_common = { .pre_load = apic_pre_load, .pre_save = apic_dispatch_pre_save, .post_load = apic_dispatch_post_load, + .priority = MIG_PRI_APIC, .fields = (const VMStateField[]) { VMSTATE_UINT32(apicbase, APICCommonState), VMSTATE_UINT8(id, APICCommonState), diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index e438d8c042d67..2d0df6da86c23 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -436,7 +436,7 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) s->cpu = g_new0(GICv3CPUState, s->num_cpu); for (i = 0; i < s->num_cpu; i++) { - CPUState *cpu = qemu_get_cpu(i); + CPUState *cpu = qemu_get_cpu(s->first_cpu_idx + i); uint64_t cpu_affid; s->cpu[i].cpu = cpu; @@ -622,6 +622,7 @@ static const Property arm_gicv3_common_properties[] = { redist_region_count, qdev_prop_uint32, uint32_t), 
DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION, MemoryRegion *), + DEFINE_PROP_UINT32("first-cpu-index", GICv3State, first_cpu_idx, 0), }; static void arm_gicv3_common_class_init(ObjectClass *klass, const void *data) diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 4b4cf09157053..2e6c1f778a9cf 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -3024,7 +3024,7 @@ void gicv3_init_cpuif(GICv3State *s) int i; for (i = 0; i < s->num_cpu; i++) { - ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); + ARMCPU *cpu = ARM_CPU(qemu_get_cpu(s->first_cpu_idx + i)); GICv3CPUState *cs = &s->cpu[i]; /* @@ -3037,15 +3037,7 @@ void gicv3_init_cpuif(GICv3State *s) * cpu->gic_pribits */ - /* Note that we can't just use the GICv3CPUState as an opaque pointer - * in define_arm_cp_regs_with_opaque(), because when we're called back - * it might be with code translated by CPU 0 but run by CPU 1, in - * which case we'd get the wrong value. - * So instead we define the regs with no ri->opaque info, and - * get back to the GICv3CPUState from the CPUARMState. - * - * These CP regs callbacks can be called from either TCG or HVF code. - */ + /* These CP regs callbacks can be called from either TCG or HVF. */ define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); /* diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 8ed88e742994b..9829e2146daba 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -31,6 +31,7 @@ #include "gicv3_internal.h" #include "vgic_common.h" #include "migration/blocker.h" +#include "migration/misc.h" #include "qom/object.h" #include "target/arm/cpregs.h" @@ -295,7 +296,7 @@ static void kvm_dist_putbmp(GICv3State *s, uint32_t offset, * the 1 bits. 
*/ if (clroffset != 0) { - reg = 0; + reg = ~0; kvm_gicd_access(s, clroffset, ®, true); clroffset += 4; } @@ -387,8 +388,6 @@ static void kvm_arm_gicv3_put(GICv3State *s) reg = c->level; kvm_gic_line_level_access(s, 0, ncpu, ®, true); - reg = ~0; - kvm_gicr_access(s, GICR_ICPENDR0, ncpu, ®, true); reg = c->gicr_ipendr0; kvm_gicr_access(s, GICR_ISPENDR0, ncpu, ®, true); @@ -445,7 +444,7 @@ static void kvm_arm_gicv3_put(GICv3State *s) kvm_gic_put_line_level_bmp(s, s->level); /* s->pending bitmap -> GICD_ISPENDRn */ - kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending); + kvm_dist_putbmp(s, GICD_ISPENDR, 0, s->pending); /* s->active bitmap -> GICD_ISACTIVERn */ kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active); @@ -778,6 +777,17 @@ static void vm_change_state_handler(void *opaque, bool running, } } +static int kvm_arm_gicv3_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) +{ + if (e->type == MIG_EVENT_PRECOPY_DONE) { + GICv3State *s = container_of(notifier, GICv3State, cpr_notifier); + return kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES, + NULL, true, errp); + } + return 0; +} static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) { @@ -811,6 +821,12 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) return; } + if (s->first_cpu_idx != 0) { + error_setg(errp, "Non-zero first-cpu-idx is unsupported with the " + "in-kernel GIC"); + return; + } + gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL); for (i = 0; i < s->num_cpu; i++) { @@ -919,6 +935,9 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES)) { qemu_add_vm_change_state_handler(vm_change_state_handler, s); + migration_add_notifier_mode(&s->cpr_notifier, + kvm_arm_gicv3_notifier, + MIG_MODE_CPR_TRANSFER); } } diff --git a/hw/intc/ioapic.c b/hw/intc/ioapic.c index 
133bef852d1e0..e431d00311768 100644 --- a/hw/intc/ioapic.c +++ b/hw/intc/ioapic.c @@ -30,12 +30,18 @@ #include "hw/intc/ioapic_internal.h" #include "hw/pci/msi.h" #include "hw/qdev-properties.h" +#include "system/accel-irq.h" #include "system/kvm.h" #include "system/system.h" #include "hw/i386/apic-msidef.h" #include "hw/i386/x86-iommu.h" #include "trace.h" + +#if defined(CONFIG_KVM) || defined(CONFIG_MSHV) +#define ACCEL_GSI_IRQFD_POSSIBLE +#endif + #define APIC_DELIVERY_MODE_SHIFT 8 #define APIC_POLARITY_SHIFT 14 #define APIC_TRIG_MODE_SHIFT 15 @@ -191,10 +197,10 @@ static void ioapic_set_irq(void *opaque, int vector, int level) static void ioapic_update_kvm_routes(IOAPICCommonState *s) { -#ifdef CONFIG_KVM +#ifdef ACCEL_GSI_IRQFD_POSSIBLE int i; - if (kvm_irqchip_is_split()) { + if (accel_irqchip_is_split()) { for (i = 0; i < IOAPIC_NUM_PINS; i++) { MSIMessage msg; struct ioapic_entry_info info; @@ -202,15 +208,15 @@ static void ioapic_update_kvm_routes(IOAPICCommonState *s) if (!info.masked) { msg.address = info.addr; msg.data = info.data; - kvm_irqchip_update_msi_route(kvm_state, i, msg, NULL); + accel_irqchip_update_msi_route(i, msg, NULL); } } - kvm_irqchip_commit_routes(kvm_state); + accel_irqchip_commit_routes(); } #endif } -#ifdef CONFIG_KVM +#ifdef ACCEL_KERNEL_GSI_IRQFD_POSSIBLE static void ioapic_iec_notifier(void *private, bool global, uint32_t index, uint32_t mask) { @@ -428,11 +434,11 @@ static const MemoryRegionOps ioapic_io_ops = { static void ioapic_machine_done_notify(Notifier *notifier, void *data) { -#ifdef CONFIG_KVM +#ifdef ACCEL_KERNEL_GSI_IRQFD_POSSIBLE IOAPICCommonState *s = container_of(notifier, IOAPICCommonState, machine_done); - if (kvm_irqchip_is_split()) { + if (accel_irqchip_is_split()) { X86IOMMUState *iommu = x86_iommu_get_default(); if (iommu) { /* Register this IOAPIC with IOMMU IEC notifier, so that diff --git a/hw/intc/loongarch_dintc.c b/hw/intc/loongarch_dintc.c new file mode 100644 index 0000000000000..32bdd171c588f --- 
/dev/null +++ b/hw/intc/loongarch_dintc.c @@ -0,0 +1,213 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QEMU LoongArch direct interrupt controller. + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "hw/irq.h" +#include "hw/intc/loongarch_pch_msi.h" +#include "hw/intc/loongarch_pch_pic.h" +#include "hw/intc/loongarch_dintc.h" +#include "hw/pci/msi.h" +#include "hw/misc/unimp.h" +#include "migration/vmstate.h" +#include "trace.h" +#include "hw/qdev-properties.h" +#include "target/loongarch/cpu.h" +#include "qemu/error-report.h" +#include "system/hw_accel.h" + +/* msg addr field */ +FIELD(MSG_ADDR, IRQ_NUM, 4, 8) +FIELD(MSG_ADDR, CPU_NUM, 12, 8) +FIELD(MSG_ADDR, FIX, 28, 12) + +static uint64_t loongarch_dintc_mem_read(void *opaque, + hwaddr addr, unsigned size) +{ + return 0; +} + +static void do_set_vcpu_dintc_irq(CPUState *cs, run_on_cpu_data data) +{ + int irq = data.host_int; + CPULoongArchState *env; + + env = &LOONGARCH_CPU(cs)->env; + cpu_synchronize_state(cs); + set_bit(irq, (unsigned long *)&env->CSR_MSGIS); +} + +static void loongarch_dintc_mem_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + int irq_num, cpu_num = 0; + LoongArchDINTCState *s = LOONGARCH_DINTC(opaque); + uint64_t msg_addr = addr + VIRT_DINTC_BASE; + CPUState *cs; + + cpu_num = FIELD_EX64(msg_addr, MSG_ADDR, CPU_NUM); + cs = cpu_by_arch_id(cpu_num); + irq_num = FIELD_EX64(msg_addr, MSG_ADDR, IRQ_NUM); + + async_run_on_cpu(cs, do_set_vcpu_dintc_irq, + RUN_ON_CPU_HOST_INT(irq_num)); + qemu_set_irq(s->cpu[cpu_num].parent_irq, 1); +} + +static const MemoryRegionOps loongarch_dintc_ops = { + .read = loongarch_dintc_mem_read, + .write = loongarch_dintc_mem_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void loongarch_dintc_realize(DeviceState *dev, Error **errp) +{ + LoongArchDINTCState *s = LOONGARCH_DINTC(dev); + LoongArchDINTCClass *lac = LOONGARCH_DINTC_GET_CLASS(dev); 
+ MachineState *machine = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(machine); + const CPUArchIdList *id_list; + int i; + + Error *local_err = NULL; + lac->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + assert(mc->possible_cpu_arch_ids); + id_list = mc->possible_cpu_arch_ids(machine); + s->num_cpu = id_list->len; + s->cpu = g_new(DINTCCore, s->num_cpu); + if (s->cpu == NULL) { + error_setg(errp, "Memory allocation for DINTCCore fail"); + return; + } + + for (i = 0; i < s->num_cpu; i++) { + s->cpu[i].arch_id = id_list->cpus[i].arch_id; + s->cpu[i].cpu = CPU(id_list->cpus[i].cpu); + qdev_init_gpio_out(dev, &s->cpu[i].parent_irq, 1); + } + + return; +} + +static void loongarch_dintc_unrealize(DeviceState *dev) +{ + LoongArchDINTCState *s = LOONGARCH_DINTC(dev); + g_free(s->cpu); +} + +static void loongarch_dintc_init(Object *obj) +{ + LoongArchDINTCState *s = LOONGARCH_DINTC(obj); + SysBusDevice *shd = SYS_BUS_DEVICE(obj); + memory_region_init_io(&s->dintc_mmio, OBJECT(s), &loongarch_dintc_ops, + s, TYPE_LOONGARCH_DINTC, VIRT_DINTC_SIZE); + sysbus_init_mmio(shd, &s->dintc_mmio); + msi_nonbroken = true; + return; +} + +static DINTCCore *loongarch_dintc_get_cpu(LoongArchDINTCState *s, + DeviceState *dev) +{ + CPUClass *k = CPU_GET_CLASS(dev); + uint64_t arch_id = k->get_arch_id(CPU(dev)); + int i; + + for (i = 0; i < s->num_cpu; i++) { + if (s->cpu[i].arch_id == arch_id) { + return &s->cpu[i]; + } + } + + return NULL; +} + +static void loongarch_dintc_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchDINTCState *s = LOONGARCH_DINTC(hotplug_dev); + Object *obj = OBJECT(dev); + DINTCCore *core; + int index; + + if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) { + warn_report("LoongArch DINTC: Invalid %s device type", + object_get_typename(obj)); + return; + } + core = loongarch_dintc_get_cpu(s, dev); + if (!core) { + return; + } + + core->cpu = 
CPU(dev); + index = core - s->cpu; + + /* connect dintc msg irq to cpu irq */ + qdev_connect_gpio_out(DEVICE(s), index, qdev_get_gpio_in(dev, INT_DMSI)); + return; +} + +static void loongarch_dintc_cpu_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongArchDINTCState *s = LOONGARCH_DINTC(hotplug_dev); + Object *obj = OBJECT(dev); + DINTCCore *core; + + if (!object_dynamic_cast(obj, TYPE_LOONGARCH_CPU)) { + warn_report("LoongArch DINTC: Invalid %s device type", + object_get_typename(obj)); + return; + } + + core = loongarch_dintc_get_cpu(s, dev); + + if (!core) { + return; + } + + core->cpu = NULL; +} + +static void loongarch_dintc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + LoongArchDINTCClass *lac = LOONGARCH_DINTC_CLASS(klass); + + dc->unrealize = loongarch_dintc_unrealize; + device_class_set_parent_realize(dc, loongarch_dintc_realize, + &lac->parent_realize); + hc->plug = loongarch_dintc_cpu_plug; + hc->unplug = loongarch_dintc_cpu_unplug; +} + +static const TypeInfo loongarch_dintc_info = { + .name = TYPE_LOONGARCH_DINTC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(LoongArchDINTCState), + .instance_init = loongarch_dintc_init, + .class_size = sizeof(LoongArchDINTCClass), + .class_init = loongarch_dintc_class_init, + .interfaces = (const InterfaceInfo[]) { + { TYPE_HOTPLUG_HANDLER }, + { } + }, +}; + +static void loongarch_dintc_register_types(void) +{ + type_register_static(&loongarch_dintc_info); +} + +type_init(loongarch_dintc_register_types) diff --git a/hw/intc/loongarch_ipi_kvm.c b/hw/intc/loongarch_ipi_kvm.c index 4cb3acc921f8b..dd4c367abf9d0 100644 --- a/hw/intc/loongarch_ipi_kvm.c +++ b/hw/intc/loongarch_ipi_kvm.c @@ -23,36 +23,41 @@ static void kvm_ipi_access_regs(void *opaque, bool write) LoongarchIPIState *lis = LOONGARCH_IPI(opaque); IPICore *core; uint64_t attr; - int cpu, fd = lis->dev_fd; + int 
i, cpu_index, fd = lis->dev_fd; if (fd == 0) { return; } - for (cpu = 0; cpu < ipi->num_cpu; cpu++) { - core = &ipi->cpu[cpu]; - attr = (cpu << 16) | CORE_STATUS_OFF; + for (i = 0; i < ipi->num_cpu; i++) { + core = &ipi->cpu[i]; + if (core->cpu == NULL) { + continue; + } + cpu_index = i; + + attr = (cpu_index << 16) | CORE_STATUS_OFF; kvm_ipi_access_reg(fd, attr, &core->status, write); - attr = (cpu << 16) | CORE_EN_OFF; + attr = (cpu_index << 16) | CORE_EN_OFF; kvm_ipi_access_reg(fd, attr, &core->en, write); - attr = (cpu << 16) | CORE_SET_OFF; + attr = (cpu_index << 16) | CORE_SET_OFF; kvm_ipi_access_reg(fd, attr, &core->set, write); - attr = (cpu << 16) | CORE_CLEAR_OFF; + attr = (cpu_index << 16) | CORE_CLEAR_OFF; kvm_ipi_access_reg(fd, attr, &core->clear, write); - attr = (cpu << 16) | CORE_BUF_20; + attr = (cpu_index << 16) | CORE_BUF_20; kvm_ipi_access_reg(fd, attr, &core->buf[0], write); - attr = (cpu << 16) | CORE_BUF_28; + attr = (cpu_index << 16) | CORE_BUF_28; kvm_ipi_access_reg(fd, attr, &core->buf[2], write); - attr = (cpu << 16) | CORE_BUF_30; + attr = (cpu_index << 16) | CORE_BUF_30; kvm_ipi_access_reg(fd, attr, &core->buf[4], write); - attr = (cpu << 16) | CORE_BUF_38; + attr = (cpu_index << 16) | CORE_BUF_38; kvm_ipi_access_reg(fd, attr, &core->buf[6], write); } } diff --git a/hw/intc/loongarch_pch_pic.c b/hw/intc/loongarch_pch_pic.c index c4b242dbf4169..32f01aabf0ed5 100644 --- a/hw/intc/loongarch_pch_pic.c +++ b/hw/intc/loongarch_pch_pic.c @@ -110,10 +110,10 @@ static uint64_t pch_pic_read(void *opaque, hwaddr addr, uint64_t field_mask) val = s->int_polarity; break; case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END: - val = *(uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC); + val = ldq_le_p(&s->htmsi_vector[addr - PCH_PIC_HTMSI_VEC]); break; case PCH_PIC_ROUTE_ENTRY ... 
PCH_PIC_ROUTE_ENTRY_END: - val = *(uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY); + val = ldq_le_p(&s->route_entry[addr - PCH_PIC_ROUTE_ENTRY]); break; default: qemu_log_mask(LOG_GUEST_ERROR, @@ -129,7 +129,8 @@ static void pch_pic_write(void *opaque, hwaddr addr, uint64_t value, { LoongArchPICCommonState *s = LOONGARCH_PIC_COMMON(opaque); uint32_t offset; - uint64_t old, mask, data, *ptemp; + uint64_t old, mask, data; + void *ptemp; offset = addr & 7; addr -= offset; @@ -168,12 +169,12 @@ static void pch_pic_write(void *opaque, hwaddr addr, uint64_t value, s->int_polarity = (s->int_polarity & ~mask) | data; break; case PCH_PIC_HTMSI_VEC ... PCH_PIC_HTMSI_VEC_END: - ptemp = (uint64_t *)(s->htmsi_vector + addr - PCH_PIC_HTMSI_VEC); - *ptemp = (*ptemp & ~mask) | data; + ptemp = &s->htmsi_vector[addr - PCH_PIC_HTMSI_VEC]; + stq_le_p(ptemp, (ldq_le_p(ptemp) & ~mask) | data); break; case PCH_PIC_ROUTE_ENTRY ... PCH_PIC_ROUTE_ENTRY_END: - ptemp = (uint64_t *)(s->route_entry + addr - PCH_PIC_ROUTE_ENTRY); - *ptemp = (*ptemp & ~mask) | data; + ptemp = (uint64_t *)&s->route_entry[addr - PCH_PIC_ROUTE_ENTRY]; + stq_le_p(ptemp, (ldq_le_p(ptemp) & ~mask) | data); break; default: qemu_log_mask(LOG_GUEST_ERROR, diff --git a/hw/intc/loongarch_pic_kvm.c b/hw/intc/loongarch_pic_kvm.c index dd504ec6a6fac..6cfddf45206a3 100644 --- a/hw/intc/loongarch_pic_kvm.c +++ b/hw/intc/loongarch_pic_kvm.c @@ -10,7 +10,6 @@ #include "hw/boards.h" #include "hw/intc/loongarch_pch_pic.h" #include "hw/loongarch/virt.h" -#include "hw/pci-host/ls7a.h" #include "system/kvm.h" static void kvm_pch_pic_access_reg(int fd, uint64_t addr, void *val, bool write) diff --git a/hw/intc/meson.build b/hw/intc/meson.build index 3137521a4ad19..faae20b93d333 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -38,11 +38,11 @@ if config_all_devices.has_key('CONFIG_APIC') or \ endif specific_ss.add(when: 'CONFIG_APIC', if_true: files('apic.c', 'apic_common.c')) -specific_ss.add(when: 
'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif_common.c')) -specific_ss.add(when: 'CONFIG_ARM_GICV3', if_true: files('arm_gicv3_cpuif.c')) +arm_common_ss.add(when: 'CONFIG_ARM_GIC', if_true: files('arm_gicv3_cpuif_common.c')) +arm_common_ss.add(when: 'CONFIG_ARM_GICV3', if_true: files('arm_gicv3_cpuif.c')) specific_ss.add(when: 'CONFIG_ARM_GIC_KVM', if_true: files('arm_gic_kvm.c')) specific_ss.add(when: ['CONFIG_ARM_GIC_KVM', 'TARGET_AARCH64'], if_true: files('arm_gicv3_kvm.c', 'arm_gicv3_its_kvm.c')) -specific_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c')) +arm_common_ss.add(when: 'CONFIG_ARM_V7M', if_true: files('armv7m_nvic.c')) specific_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_irqmp.c')) specific_ss.add(when: 'CONFIG_IOAPIC', if_true: files('ioapic.c')) specific_ss.add(when: 'CONFIG_LOONGSON_LIOINTC', if_true: files('loongson_liointc.c')) @@ -80,3 +80,4 @@ specific_ss.add(when: 'CONFIG_LOONGARCH_PCH_MSI', if_true: files('loongarch_pch_ specific_ss.add(when: 'CONFIG_LOONGARCH_EXTIOI', if_true: files('loongarch_extioi.c', 'loongarch_extioi_common.c')) specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_LOONGARCH_EXTIOI'], if_true: files('loongarch_extioi_kvm.c')) +specific_ss.add(when: 'CONFIG_LOONGARCH_DINTC', if_true: files('loongarch_dintc.c')) diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index 935c0e4742f5f..c2ca40b8be87c 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -470,14 +470,13 @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) return xive->regs[reg >> 3] & PPC_BIT(bit); } -static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { PnvXive *xive = 
PNV_XIVE(xptr); PnvChip *chip = xive->chip; - int count = 0; int i, j; for (i = 0; i < chip->nr_cores; i++) { @@ -510,17 +509,18 @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " "thread context NVT %x/%x\n", nvt_blk, nvt_idx); - return -1; + match->count++; + continue; } match->ring = ring; match->tctx = tctx; - count++; + match->count++; } } } - return count; + return !!match->count; } static uint32_t pnv_xive_presenter_get_config(XivePresenter *xptr) diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c index ec8b0c68f1a49..0663baab544ca 100644 --- a/hw/intc/pnv_xive2.c +++ b/hw/intc/pnv_xive2.c @@ -101,24 +101,35 @@ static uint32_t pnv_xive2_block_id(PnvXive2 *xive) } /* - * Remote access to controllers. HW uses MMIOs. For now, a simple scan - * of the chips is good enough. - * - * TODO: Block scope support + * Remote access to INT controllers. HW uses MMIOs(?). For now, a simple + * scan of all the chips INT controller is good enough. 
*/ -static PnvXive2 *pnv_xive2_get_remote(uint8_t blk) +static PnvXive2 *pnv_xive2_get_remote(uint32_t vsd_type, hwaddr fwd_addr) { PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); int i; for (i = 0; i < pnv->num_chips; i++) { - Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); - PnvXive2 *xive = &chip10->xive; + PnvChipClass *k = PNV_CHIP_GET_CLASS(pnv->chips[i]); + PnvXive2 *xive = PNV_XIVE2(k->intc_get(pnv->chips[i])); - if (pnv_xive2_block_id(xive) == blk) { + /* + * Is this the XIVE matching the forwarded VSD address is for this + * VSD type + */ + if ((vsd_type == VST_ESB && fwd_addr == xive->esb_base) || + (vsd_type == VST_END && fwd_addr == xive->end_base) || + ((vsd_type == VST_NVP || + vsd_type == VST_NVG) && fwd_addr == xive->nvpg_base) || + (vsd_type == VST_NVC && fwd_addr == xive->nvc_base)) { return xive; } } + + qemu_log_mask(LOG_GUEST_ERROR, + "XIVE: >>>>> %s vsd_type %u fwd_addr 0x%"HWADDR_PRIx + " NOT FOUND\n", + __func__, vsd_type, fwd_addr); return NULL; } @@ -251,8 +262,7 @@ static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk, /* Remote VST access */ if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { - xive = pnv_xive2_get_remote(blk); - + xive = pnv_xive2_get_remote(type, (vsd & VSD_ADDRESS_MASK)); return xive ? 
pnv_xive2_vst_addr(xive, type, blk, idx) : 0; } @@ -595,20 +605,28 @@ static uint32_t pnv_xive2_get_config(Xive2Router *xrtr) { PnvXive2 *xive = PNV_XIVE2(xrtr); uint32_t cfg = 0; + uint64_t reg = xive->cq_regs[CQ_XIVE_CFG >> 3]; - if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) { + if (reg & CQ_XIVE_CFG_GEN1_TIMA_OS) { cfg |= XIVE2_GEN1_TIMA_OS; } - if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { + if (reg & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) { cfg |= XIVE2_VP_SAVE_RESTORE; } - if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, - xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) { + if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE, reg) == + CQ_XIVE_CFG_THREADID_8BITS) { cfg |= XIVE2_THREADID_8BITS; } + if (reg & CQ_XIVE_CFG_EN_VP_GRP_PRIORITY) { + cfg |= XIVE2_EN_VP_GRP_PRIORITY; + } + + cfg = SETFIELD(XIVE2_VP_INT_PRIO, cfg, + GETFIELD(CQ_XIVE_CFG_VP_INT_PRIO, reg)); + return cfg; } @@ -622,24 +640,28 @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit); } -static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { PnvXive2 *xive = PNV_XIVE2(xptr); PnvChip *chip = xive->chip; - int count = 0; int i, j; bool gen1_tima_os = xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; + static int next_start_core; + static int next_start_thread; + int start_core = next_start_core; + int start_thread = next_start_thread; for (i = 0; i < chip->nr_cores; i++) { - PnvCore *pc = chip->cores[i]; + PnvCore *pc = chip->cores[(i + start_core) % chip->nr_cores]; CPUCore *cc = CPU_CORE(pc); for (j = 0; j < cc->nr_threads; j++) { - PowerPCCPU 
*cpu = pc->threads[j]; + /* Start search for match with different thread each call */ + PowerPCCPU *cpu = pc->threads[(j + start_thread) % cc->nr_threads]; XiveTCTX *tctx; int ring; @@ -669,7 +691,8 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, "thread context NVT %x/%x\n", nvt_blk, nvt_idx); /* Should set a FIR if we ever model it */ - return -1; + match->count++; + continue; } /* * For a group notification, we need to know if the @@ -684,14 +707,23 @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, if (!match->tctx) { match->ring = ring; match->tctx = tctx; + + next_start_thread = j + start_thread + 1; + if (next_start_thread >= cc->nr_threads) { + next_start_thread = 0; + next_start_core = i + start_core + 1; + if (next_start_core >= chip->nr_cores) { + next_start_core = 0; + } + } } - count++; + match->count++; } } } } - return count; + return !!match->count; } static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) @@ -1173,7 +1205,8 @@ static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset, case CQ_FIRMASK_OR: /* FIR error reporting */ break; default: - xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset); + xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1304,7 +1337,6 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, case VC_ENDC_WATCH2_SPEC: case VC_ENDC_WATCH3_SPEC: watch_engine = (offset - VC_ENDC_WATCH0_SPEC) >> 6; - xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT); pnv_xive2_endc_cache_watch_release(xive, watch_engine); val = xive->vc_regs[reg]; break; @@ -1315,10 +1347,11 @@ static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset, case VC_ENDC_WATCH3_DATA0: /* * Load DATA registers from cache with data requested by the - * SPEC register + * SPEC register. Clear gen_flipped bit in word 1. 
*/ watch_engine = (offset - VC_ENDC_WATCH0_DATA0) >> 6; pnv_xive2_end_cache_load(xive, watch_engine); + xive->vc_regs[reg] &= ~(uint64_t)END2_W1_GEN_FLIPPED; val = xive->vc_regs[reg]; break; @@ -1386,7 +1419,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* * ESB cache updates (not modeled) */ - /* case VC_ESBC_FLUSH_CTRL: */ + case VC_ESBC_FLUSH_CTRL: + if (val & VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_ESBC_FLUSH_POLL: xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID; /* ESB update */ @@ -1402,7 +1442,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, /* * EAS cache updates (not modeled) */ - /* case VC_EASC_FLUSH_CTRL: */ + case VC_EASC_FLUSH_CTRL: + if (val & VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_EASC_FLUSH_POLL: xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID; /* EAS update */ @@ -1441,7 +1488,14 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, break; - /* case VC_ENDC_FLUSH_CTRL: */ + case VC_ENDC_FLUSH_CTRL: + if (val & VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case VC_ENDC_FLUSH_POLL: xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID; break; @@ -1470,7 +1524,8 @@ static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "VC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1661,7 +1716,14 @@ static void pnv_xive2_ic_pc_write(void 
*opaque, hwaddr offset, pnv_xive2_nxc_update(xive, watch_engine); break; - /* case PC_NXC_FLUSH_CTRL: */ + case PC_NXC_FLUSH_CTRL: + if (val & PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE) { + xive2_error(xive, "VC: unsupported write @0x%"HWADDR_PRIx + " value 0x%"PRIx64" bit[2] poll_want_cache_disable", + offset, val); + return; + } + break; case PC_NXC_FLUSH_POLL: xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID; break; @@ -1678,7 +1740,8 @@ static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "PC: invalid write @0x%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } @@ -1765,7 +1828,8 @@ static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset, xive->tctxt_regs[reg] = val; break; default: - xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "TCTXT: invalid write @0x%"HWADDR_PRIx + " data 0x%"PRIx64, offset, val); return; } } @@ -1836,7 +1900,8 @@ static void pnv_xive2_xscom_write(void *opaque, hwaddr offset, pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size); break; default: - xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx + " value 0x%"PRIx64, offset, val); } } @@ -1904,7 +1969,8 @@ static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset, break; default: - xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx + " value 0x%"PRIx64, offset, val); } } @@ -1946,7 +2012,8 @@ static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset, { PnvXive2 *xive = PNV_XIVE2(opaque); - xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); } static const MemoryRegionOps pnv_xive2_ic_lsi_ops = { @@ -2049,7 +2116,8 @@ static void 
pnv_xive2_ic_sync_write(void *opaque, hwaddr offset, inject_type = PNV_XIVE2_QUEUE_NXC_ST_RMT_CI; break; default: - xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset); + xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx" value 0x%"PRIx64, + offset, val); return; } diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h index e8b87b3d2c135..d53300f709b04 100644 --- a/hw/intc/pnv_xive2_regs.h +++ b/hw/intc/pnv_xive2_regs.h @@ -66,6 +66,7 @@ #define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */ #define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */ #define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28) +#define CQ_XIVE_CFG_EN_VP_GRP_PRIORITY PPC_BIT(32) /* 0 if bit[25]=1 */ #define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) /* 0 if bit[25]=1 */ #define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */ diff --git a/hw/intc/riscv_aclint.c b/hw/intc/riscv_aclint.c index 4623cfa029365..9f4c36e965e2a 100644 --- a/hw/intc/riscv_aclint.c +++ b/hw/intc/riscv_aclint.c @@ -323,12 +323,15 @@ static void riscv_aclint_mtimer_reset_enter(Object *obj, ResetType type) static const VMStateDescription vmstate_riscv_mtimer = { .name = "riscv_mtimer", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 3, + .minimum_version_id = 3, .fields = (const VMStateField[]) { + VMSTATE_UINT64(time_delta, RISCVAclintMTimerState), VMSTATE_VARRAY_UINT32(timecmp, RISCVAclintMTimerState, num_harts, 0, vmstate_info_uint64, uint64_t), + VMSTATE_TIMER_PTR_VARRAY(timers, RISCVAclintMTimerState, + num_harts), VMSTATE_END_OF_LIST() } }; diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c index 4fa5f7597b08a..a1d9fa50855f0 100644 --- a/hw/intc/riscv_aplic.c +++ b/hw/intc/riscv_aplic.c @@ -628,7 +628,7 @@ static void riscv_aplic_request(void *opaque, int irq, int level) static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) { - uint32_t irq, word, idc; + uint32_t irq, word, idc, sm; RISCVAPLICState *aplic 
= opaque; /* Reads must be 4 byte words */ @@ -696,6 +696,10 @@ static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) } else if ((APLIC_TARGET_BASE <= addr) && (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + return 0; + } return aplic->target[irq]; } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { diff --git a/hw/intc/s390_flic.c b/hw/intc/s390_flic.c index 8f4c9fd52e864..1eed5125d17b6 100644 --- a/hw/intc/s390_flic.c +++ b/hw/intc/s390_flic.c @@ -190,7 +190,7 @@ static void qemu_s390_flic_notify(uint32_t type) CPU_FOREACH(cs) { S390CPU *cpu = S390_CPU(cs); - cs->interrupt_request |= CPU_INTERRUPT_HARD; + cpu_set_interrupt(cs, CPU_INTERRUPT_HARD); /* ignore CPUs that are not sleeping */ if (s390_cpu_get_state(cpu) != S390_CPU_STATE_OPERATING && diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 440edb97d8d39..e393f5dcdccfe 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -428,14 +428,13 @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, g_assert_not_reached(); } -static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, - uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, + uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { CPUState *cs; - int count = 0; CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -463,16 +462,17 @@ static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, if (match->tctx) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a thread " "context NVT %x/%x\n", nvt_blk, nvt_idx); - return -1; + 
match->count++; + continue; } match->ring = ring; match->tctx = tctx; - count++; + match->count++; } } - return count; + return !!match->count; } static uint32_t spapr_xive_presenter_get_config(XivePresenter *xptr) diff --git a/hw/intc/trace-events b/hw/intc/trace-events index 334aa6a97bad0..018c609ca5eb9 100644 --- a/hw/intc/trace-events +++ b/hw/intc/trace-events @@ -274,11 +274,13 @@ kvm_xive_cpu_connect(uint32_t id) "connect CPU%d to KVM device" kvm_xive_source_reset(uint32_t srcno) "IRQ 0x%x" # xive.c -xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" -xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" -xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IBP=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" +xive_tctx_accept(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x ACK" +xive_tctx_notify(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x CPPR=0x%02x NSR=0x%02x raise !" 
+xive_tctx_set_cppr(uint32_t index, uint8_t ring, uint8_t ipb, uint8_t pipr, uint8_t cppr, uint8_t nsr) "target=%d ring=0x%x IPB=0x%02x PIPR=0x%02x new CPPR=0x%02x NSR=0x%02x" xive_source_esb_read(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 xive_source_esb_write(uint64_t addr, uint32_t srcno, uint64_t value) "@0x%"PRIx64" IRQ 0x%x val=0x%"PRIx64 +xive_source_notify(uint32_t srcno) "Processing notification for queued IRQ 0x%x" +xive_source_blocked(uint32_t srcno) "No action needed for IRQ 0x%x currently" xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "END 0x%02x/0x%04x -> enqueue 0x%08x" xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 @@ -289,6 +291,10 @@ xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x # xive2.c xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d" xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d" +xive_redistribute(uint32_t index, uint8_t ring, uint8_t end_blk, uint32_t end_idx) "Redistribute from target=%d ring=0x%x NVP 0x%x/0x%x" +xive_end_enqueue(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "Queue event for END 0x%x/0x%x data=0x%x" +xive_escalate_end(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t esc_data) "Escalate from END 0x%x/0x%x to END 0x%x/0x%x data=0x%x" +xive_escalate_esb(uint8_t end_blk, uint32_t end_idx, uint32_t lisn) "Escalate from END 0x%x/0x%x to LISN=0x%x" # pnv_xive.c pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" 
val=0x%"PRIx64 diff --git a/hw/intc/xics.c b/hw/intc/xics.c index d9a199e88341b..200710eb6ca9c 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -335,6 +335,8 @@ static void icp_realize(DeviceState *dev, Error **errp) return; } } + + vmstate_register(NULL, icp->cs->cpu_index, &vmstate_icp_server, icp); } static void icp_unrealize(DeviceState *dev) diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 27b473e4d7625..e0ffcf89ebff6 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -25,6 +25,58 @@ /* * XIVE Thread Interrupt Management context */ +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t cur_ring; + + for (cur_ring = ring; cur_ring <= TM_QW3_HV_PHYS; + cur_ring += XIVE_TM_RING_SIZE) { + if (!(tctx->regs[cur_ring + TM_WORD2] & 0x80)) { + return false; + } + } + return true; +} + +bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr) +{ + switch (ring) { + case TM_QW1_OS: + return !!(nsr & TM_QW1_NSR_EO); + case TM_QW2_HV_POOL: + case TM_QW3_HV_PHYS: + return !!(nsr & TM_QW3_NSR_HE); + default: + g_assert_not_reached(); + } +} + +bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr) +{ + if ((nsr & TM_NSR_GRP_LVL) > 0) { + g_assert(xive_nsr_indicates_exception(ring, nsr)); + return true; + } + return false; +} + +uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr) +{ + /* NSR determines if pool/phys ring is for phys or pool interrupt */ + if ((ring == TM_QW3_HV_PHYS) || (ring == TM_QW2_HV_POOL)) { + uint8_t he = (nsr & TM_QW3_NSR_HE) >> 6; + + if (he == TM_QW3_NSR_HE_PHYS) { + return TM_QW3_HV_PHYS; + } else if (he == TM_QW3_NSR_HE_POOL) { + return TM_QW2_HV_POOL; + } else { + /* Don't support LSI mode */ + g_assert_not_reached(); + } + } + return ring; +} static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) { @@ -41,74 +93,83 @@ static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring) } } -static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) +/* + * interrupt is accepted on the presentation 
ring, for PHYS ring the NSR + * directs it to the PHYS or POOL rings. + */ +uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t sig_ring) { - uint8_t *regs = &tctx->regs[ring]; - uint8_t nsr = regs[TM_NSR]; + uint8_t *sig_regs = &tctx->regs[sig_ring]; + uint8_t nsr = sig_regs[TM_NSR]; - qemu_irq_lower(xive_tctx_output(tctx, ring)); + g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS); - if (regs[TM_NSR] != 0) { - uint8_t cppr = regs[TM_PIPR]; - uint8_t alt_ring; - uint8_t *alt_regs; + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); - /* POOL interrupt uses IPB in QW2, POOL ring */ - if ((ring == TM_QW3_HV_PHYS) && (nsr & (TM_QW3_NSR_HE_POOL << 6))) { - alt_ring = TM_QW2_HV_POOL; - } else { - alt_ring = ring; - } - alt_regs = &tctx->regs[alt_ring]; + if (xive_nsr_indicates_exception(sig_ring, nsr)) { + uint8_t cppr = sig_regs[TM_PIPR]; + uint8_t ring; + uint8_t *regs; + + ring = xive_nsr_exception_ring(sig_ring, nsr); + regs = &tctx->regs[ring]; - regs[TM_CPPR] = cppr; + sig_regs[TM_CPPR] = cppr; /* * If the interrupt was for a specific VP, reset the pending * buffer bit, otherwise clear the logical server indicator */ - if (regs[TM_NSR] & TM_NSR_GRP_LVL) { - regs[TM_NSR] &= ~TM_NSR_GRP_LVL; - } else { - alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); + if (!xive_nsr_indicates_group_exception(sig_ring, nsr)) { + regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); } - /* Drop the exception bit and any group/crowd */ - regs[TM_NSR] = 0; + /* Clear the exception from NSR */ + sig_regs[TM_NSR] = 0; + qemu_irq_lower(xive_tctx_output(tctx, sig_ring)); - trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, - alt_regs[TM_IPB], regs[TM_PIPR], - regs[TM_CPPR], regs[TM_NSR]); + trace_xive_tctx_accept(tctx->cs->cpu_index, ring, + regs[TM_IPB], sig_regs[TM_PIPR], + sig_regs[TM_CPPR], sig_regs[TM_NSR]); } - return ((uint64_t)nsr << 8) | regs[TM_CPPR]; + return 
((uint64_t)nsr << 8) | sig_regs[TM_CPPR]; } -void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level) +/* Change PIPR and calculate NSR and irq based on PIPR, CPPR, group */ +void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t pipr, + uint8_t group_level) { - /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ - uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; - uint8_t *alt_regs = &tctx->regs[alt_ring]; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; - if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { + g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR])); + + sig_regs[TM_PIPR] = pipr; + + if (pipr < sig_regs[TM_CPPR]) { switch (ring) { case TM_QW1_OS: - regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); + sig_regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); break; case TM_QW2_HV_POOL: - alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); + sig_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); break; case TM_QW3_HV_PHYS: - regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); + sig_regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); break; default: g_assert_not_reached(); } trace_xive_tctx_notify(tctx->cs->cpu_index, ring, - regs[TM_IPB], alt_regs[TM_PIPR], - alt_regs[TM_CPPR], alt_regs[TM_NSR]); + regs[TM_IPB], pipr, + sig_regs[TM_CPPR], sig_regs[TM_NSR]); qemu_irq_raise(xive_tctx_output(tctx, ring)); + } else { + sig_regs[TM_NSR] = 0; + qemu_irq_lower(xive_tctx_output(tctx, ring)); } } @@ -124,25 +185,32 @@ void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring) static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) { - uint8_t *regs = &tctx->regs[ring]; + uint8_t *sig_regs = &tctx->regs[ring]; uint8_t pipr_min; uint8_t ring_min; + g_assert(ring == TM_QW1_OS || ring == TM_QW3_HV_PHYS); + + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + 
g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + /* XXX: should show pool IPB for PHYS ring */ trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - cppr, regs[TM_NSR]); + sig_regs[TM_IPB], sig_regs[TM_PIPR], + cppr, sig_regs[TM_NSR]); if (cppr > XIVE_PRIORITY_MAX) { cppr = 0xff; } - tctx->regs[ring + TM_CPPR] = cppr; + sig_regs[TM_CPPR] = cppr; /* * Recompute the PIPR based on local pending interrupts. The PHYS * ring must take the minimum of both the PHYS and POOL PIPR values. */ - pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); + pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]); ring_min = ring; /* PHYS updates also depend on POOL values */ @@ -151,7 +219,6 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) /* POOL values only matter if POOL ctx is valid */ if (pool_regs[TM_WORD2] & 0x80) { - uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); /* @@ -165,30 +232,39 @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } } - regs[TM_PIPR] = pipr_min; + /* CPPR has changed, this may present or preclude a pending exception */ + xive_tctx_pipr_set(tctx, ring_min, pipr_min, 0); +} + +static void xive_tctx_pipr_recompute_from_ipb(XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; - /* CPPR has changed, check if we need to raise a pending exception */ - xive_tctx_notify(tctx, ring_min, 0); + /* Does not support a presented group interrupt */ + g_assert(!xive_nsr_indicates_group_exception(ring, sig_regs[TM_NSR])); + + xive_tctx_pipr_set(tctx, ring, xive_ipb_to_pipr(regs[TM_IPB]), 0); } -void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, - uint8_t group_level) - { - /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ - uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? 
TM_QW3_HV_PHYS : ring; - uint8_t *alt_regs = &tctx->regs[alt_ring]; +void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; + uint8_t pipr = xive_priority_to_pipr(priority); if (group_level == 0) { - /* VP-specific */ regs[TM_IPB] |= xive_priority_to_ipb(priority); - alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); - } else { - /* VP-group */ - alt_regs[TM_PIPR] = xive_priority_to_pipr(priority); + if (pipr >= sig_regs[TM_PIPR]) { + /* VP interrupts can come here with lower priority than PIPR */ + return; + } } - xive_tctx_notify(tctx, ring, group_level); - } + g_assert(pipr <= xive_ipb_to_pipr(regs[TM_IPB])); + g_assert(pipr < sig_regs[TM_PIPR]); + xive_tctx_pipr_set(tctx, ring, pipr, group_level); +} /* * XIVE Thread Interrupt Management Area (TIMA) @@ -206,25 +282,78 @@ static uint64_t xive_tm_ack_hv_reg(XivePresenter *xptr, XiveTCTX *tctx, return xive_tctx_accept(tctx, TM_QW3_HV_PHYS); } +static void xive_pool_cam_decode(uint32_t cam, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vp) +{ + if (nvt_blk) { + *nvt_blk = xive_nvt_blk(cam); + } + if (nvt_idx) { + *nvt_idx = xive_nvt_idx(cam); + } + if (vp) { + *vp = !!(cam & TM_QW2W2_VP); + } +} + +static uint32_t xive_tctx_get_pool_cam(XiveTCTX *tctx, uint8_t *nvt_blk, + uint32_t *nvt_idx, bool *vp) +{ + uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); + uint32_t cam = be32_to_cpu(qw2w2); + + xive_pool_cam_decode(cam, nvt_blk, nvt_idx, vp); + return qw2w2; +} + +static void xive_tctx_set_pool_cam(XiveTCTX *tctx, uint32_t qw2w2) +{ + memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); +} + static uint64_t xive_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint32_t qw2w2_prev = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]); uint32_t qw2w2; + uint32_t qw2w2_new; + uint8_t nvt_blk; + uint32_t nvt_idx; + bool vp; + + 
qw2w2 = xive_tctx_get_pool_cam(tctx, &nvt_blk, &nvt_idx, &vp); + + if (!vp) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid POOL NVT %x/%x !?\n", + nvt_blk, nvt_idx); + } + + /* Invalidate CAM line */ + qw2w2_new = xive_set_field32(TM_QW2W2_VP, qw2w2, 0); + xive_tctx_set_pool_cam(tctx, qw2w2_new); + + xive_tctx_reset_signal(tctx, TM_QW1_OS); + xive_tctx_reset_signal(tctx, TM_QW2_HV_POOL); + /* Re-check phys for interrupts if pool was disabled */ + xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW3_HV_PHYS); - qw2w2 = xive_set_field32(TM_QW2W2_VP, qw2w2_prev, 0); - memcpy(&tctx->regs[TM_QW2_HV_POOL + TM_WORD2], &qw2w2, 4); return qw2w2; } static uint64_t xive_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { - uint8_t qw3b8_prev = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; - uint8_t qw3b8; + uint8_t qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + uint8_t qw3b8_new; + + qw3b8 = tctx->regs[TM_QW3_HV_PHYS + TM_WORD2]; + if (!(qw3b8 & TM_QW3B8_VT)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid PHYS thread!?\n"); + } + qw3b8_new = qw3b8 & ~TM_QW3B8_VT; + tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8_new; - qw3b8 = qw3b8_prev & ~TM_QW3B8_VT; - tctx->regs[TM_QW3_HV_PHYS + TM_WORD2] = qw3b8; + xive_tctx_reset_signal(tctx, TM_QW1_OS); + xive_tctx_reset_signal(tctx, TM_QW3_HV_PHYS); return qw3b8; } @@ -255,14 +384,14 @@ static uint64_t xive_tm_vt_poll(XivePresenter *xptr, XiveTCTX *tctx, static const uint8_t xive_tm_hw_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ - 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ 0, 0, 3, 3, 0, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 3, 3, 3, 0, /* QW-3 PHYS */ }; static const uint8_t xive_tm_hv_view[] = { 3, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-0 User */ - 3, 3, 3, 3, 3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 0, /* QW-1 OS */ + 3, 3, 3, 3, 
3, 3, 0, 2, 3, 3, 3, 3, 0, 0, 0, 3, /* QW-1 OS */ 0, 0, 3, 3, 0, 3, 3, 0, 0, 3, 3, 3, 0, 0, 0, 0, /* QW-2 POOL */ 3, 3, 3, 3, 0, 3, 0, 2, 3, 0, 0, 3, 0, 0, 0, 0, /* QW-3 PHYS */ }; @@ -326,7 +455,7 @@ static void xive_tm_raw_write(XiveTCTX *tctx, hwaddr offset, uint64_t value, */ if (size < 4 || !mask || ring_offset == TM_QW0_USER) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA @%" - HWADDR_PRIx"\n", offset); + HWADDR_PRIx" size %d\n", offset, size); return; } @@ -357,7 +486,7 @@ static uint64_t xive_tm_raw_read(XiveTCTX *tctx, hwaddr offset, unsigned size) */ if (size < 4 || !mask || ring_offset == TM_QW0_USER) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access at TIMA @%" - HWADDR_PRIx"\n", offset); + HWADDR_PRIx" size %d\n", offset, size); return -1; } @@ -403,6 +532,12 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, xive_tctx_set_lgs(tctx, TM_QW1_OS, value & 0xff); } +static void xive_tm_set_pool_lgs(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive_tctx_set_lgs(tctx, TM_QW2_HV_POOL, value & 0xff); +} + /* * Adjust the PIPR to allow a CPU to process event queues of other * priorities during one physical interrupt cycle. @@ -410,7 +545,12 @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { - xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0); + uint8_t ring = TM_QW1_OS; + uint8_t *regs = &tctx->regs[ring]; + + /* XXX: how should this work exactly? 
*/ + regs[TM_IPB] |= xive_priority_to_ipb(value & 0xff); + xive_tctx_pipr_recompute_from_ipb(tctx, ring); } static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, @@ -454,7 +594,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, qw1w2 = xive_tctx_get_os_cam(tctx, &nvt_blk, &nvt_idx, &vo); if (!vo) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVT %x/%x !?\n", + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pull invalid OS NVT %x/%x !?\n", nvt_blk, nvt_idx); } @@ -466,7 +606,7 @@ static uint64_t xive_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, return qw1w2; } -static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, +static void xive_tctx_restore_nvp(XiveRouter *xrtr, XiveTCTX *tctx, uint8_t nvt_blk, uint32_t nvt_idx) { XiveNVT nvt; @@ -492,16 +632,6 @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, uint8_t *regs = &tctx->regs[TM_QW1_OS]; regs[TM_IPB] |= ipb; } - - /* - * Always call xive_tctx_pipr_update(). Even if there were no - * escalation triggered, there could be a pending interrupt which - * was saved when the context was pulled and that we need to take - * into account by recalculating the PIPR (which is not - * saved/restored). - * It will also raise the External interrupt signal if needed. - */ - xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */ } /* @@ -523,7 +653,17 @@ static void xive_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, /* Check the interrupt pending bits */ if (vo) { - xive_tctx_need_resend(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + xive_tctx_restore_nvp(XIVE_ROUTER(xptr), tctx, nvt_blk, nvt_idx); + + /* + * Always call xive_tctx_recompute_from_ipb(). Even if there were no + * escalation triggered, there could be a pending interrupt which + * was saved when the context was pulled and that we need to take + * into account by recalculating the PIPR (which is not + * saved/restored). + * It will also raise the External interrupt signal if needed. 
+ */ + xive_tctx_pipr_recompute_from_ipb(tctx, TM_QW1_OS); /* fxb */ } } @@ -542,6 +682,8 @@ typedef struct XiveTmOp { uint8_t page_offset; uint32_t op_offset; unsigned size; + bool hw_ok; + bool sw_ok; void (*write_handler)(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); @@ -554,34 +696,34 @@ static const XiveTmOp xive_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive_tm_push_os_ctx, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, - xive_tm_vt_poll }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true, + xive_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true, + xive_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true, + xive_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true, + xive_tm_vt_push, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true, + NULL, xive_tm_vt_poll }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, - xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, - NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, - xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, - xive_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, - xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, - xive_tm_pull_phys_ctx }, + { XIVE_TM_OS_PAGE, 
TM_SPC_ACK_OS_REG, 2, true, false, + NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false, + xive_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false, + NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false, + NULL, xive_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false, + NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false, + NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false, + NULL, xive_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false, + NULL, xive_tm_pull_phys_ctx }, }; static const XiveTmOp xive2_tm_operations[] = { @@ -589,50 +731,58 @@ static const XiveTmOp xive2_tm_operations[] = { * MMIOs below 2K : raw values and special operations without side * effects */ - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, xive2_tm_push_os_ctx, - NULL }, - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, - NULL }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, NULL, - xive_tm_vt_poll }, - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, xive2_tm_set_hv_target, - NULL }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, true, true, + xive2_tm_set_os_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, true, true, + xive2_tm_push_os_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 8, true, true, + xive2_tm_push_os_ctx, NULL }, + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, true, true, + xive_tm_set_os_lgs, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_WORD2, 4, true, true, + xive2_tm_push_pool_ctx, NULL }, + { XIVE_TM_HV_PAGE, 
TM_QW2_HV_POOL + TM_WORD2, 8, true, true, + xive2_tm_push_pool_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW2_HV_POOL + TM_LGS, 1, true, true, + xive_tm_set_pool_lgs, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, true, true, + xive2_tm_set_hv_cppr, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, false, true, + xive2_tm_push_phys_ctx, NULL }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, true, true, + NULL, xive_tm_vt_poll }, + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_T, 1, true, true, + xive2_tm_set_hv_target, NULL }, /* MMIOs above 2K : special operations with side effects */ - { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, NULL, - xive_tm_ack_os_reg }, - { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, xive_tm_set_os_pending, - NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, NULL, - xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, NULL, - xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, NULL, - xive2_tm_pull_os_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, NULL, - xive_tm_ack_hv_reg }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, NULL, - xive_tm_pull_pool_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, xive2_tm_pull_os_ctx_ol, - NULL }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, NULL, - xive_tm_pull_phys_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, NULL, - xive_tm_pull_phys_ctx }, - { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, xive2_tm_pull_phys_ctx_ol, - NULL }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_REG, 2, true, false, + NULL, xive_tm_ack_os_reg }, + { XIVE_TM_OS_PAGE, TM_SPC_SET_OS_PENDING, 1, true, false, + xive2_tm_set_os_pending, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 4, true, false, + NULL, xive2_tm_pull_os_ctx }, + { 
XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX, 8, true, false, + NULL, xive2_tm_pull_os_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_ACK_HV_REG, 2, true, false, + NULL, xive_tm_ack_hv_reg }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 4, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_POOL_CTX, 8, true, false, + NULL, xive2_tm_pull_pool_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_OS_CTX_OL, 1, true, false, + xive2_tm_pull_os_ctx_ol, NULL }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_G2, 4, true, false, + NULL, xive2_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX, 1, true, false, + NULL, xive2_tm_pull_phys_ctx }, + { XIVE_TM_HV_PAGE, TM_SPC_PULL_PHYS_CTX_OL, 1, true, false, + xive2_tm_pull_phys_ctx_ol, NULL }, + { XIVE_TM_OS_PAGE, TM_SPC_ACK_OS_EL, 1, true, false, + xive2_tm_ack_os_el, NULL }, }; static const XiveTmOp *xive_tm_find_op(XivePresenter *xptr, hwaddr offset, @@ -674,21 +824,31 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size) { const XiveTmOp *xto; + uint8_t ring = offset & TM_RING_OFFSET; + bool is_valid = xive_ring_valid(tctx, ring); + bool hw_owned = is_valid; trace_xive_tctx_tm_write(tctx->cs->cpu_index, offset, size, value); - /* - * TODO: check V bit in Q[0-3]W2 - */ - /* * First, check for special operations in the 2K region */ + xto = xive_tm_find_op(tctx->xptr, offset, size, true); + if (xto) { + if (hw_owned && !xto->hw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + if (!hw_owned && !xto->sw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to SW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + } + if (offset & TM_SPECIAL_OP) { - xto = xive_tm_find_op(tctx->xptr, offset, size, true); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid write access at TIMA " - 
"@%"HWADDR_PRIx"\n", offset); + "@%"HWADDR_PRIx" size %d\n", offset, size); } else { xto->write_handler(xptr, tctx, offset, value, size); } @@ -698,7 +858,6 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Then, for special operations in the region below 2K. */ - xto = xive_tm_find_op(tctx->xptr, offset, size, true); if (xto) { xto->write_handler(xptr, tctx, offset, value, size); return; @@ -707,6 +866,11 @@ void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * Finish with raw access to the register values */ + if (hw_owned) { + /* Store context operations are dangerous when context is valid */ + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined write to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } xive_tm_raw_write(tctx, offset, value, size); } @@ -714,20 +878,30 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size) { const XiveTmOp *xto; + uint8_t ring = offset & TM_RING_OFFSET; + bool is_valid = xive_ring_valid(tctx, ring); + bool hw_owned = is_valid; uint64_t ret; - /* - * TODO: check V bit in Q[0-3]W2 - */ + xto = xive_tm_find_op(tctx->xptr, offset, size, false); + if (xto) { + if (hw_owned && !xto->hw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to HW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + if (!hw_owned && !xto->sw_ok) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: undefined read to SW TIMA " + "@%"HWADDR_PRIx" size %d\n", offset, size); + } + } /* * First, check for special operations in the 2K region */ if (offset & TM_SPECIAL_OP) { - xto = xive_tm_find_op(tctx->xptr, offset, size, false); if (!xto) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid read access to TIMA" - "@%"HWADDR_PRIx"\n", offset); + "@%"HWADDR_PRIx" size %d\n", offset, size); return -1; } ret = xto->read_handler(xptr, tctx, offset, size); @@ -737,7 +911,6 @@ uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, /* * 
Then, for special operations in the region below 2K. */ - xto = xive_tm_find_op(tctx->xptr, offset, size, false); if (xto) { ret = xto->read_handler(xptr, tctx, offset, size); goto out; @@ -1191,6 +1364,7 @@ static uint64_t xive_source_esb_read(void *opaque, hwaddr addr, unsigned size) /* Forward the source event notification for routing */ if (ret) { + trace_xive_source_notify(srcno); xive_source_notify(xsrc, srcno); } break; @@ -1286,6 +1460,8 @@ static void xive_source_esb_write(void *opaque, hwaddr addr, /* Forward the source event notification for routing */ if (notify) { xive_source_notify(xsrc, srcno); + } else { + trace_xive_source_blocked(srcno); } } @@ -1672,8 +1848,8 @@ uint32_t xive_get_vpgroup_size(uint32_t nvp_index) return 1U << (first_zero + 1); } -static uint8_t xive_get_group_level(bool crowd, bool ignore, - uint32_t nvp_blk, uint32_t nvp_index) +uint8_t xive_get_group_level(bool crowd, bool ignore, + uint32_t nvp_blk, uint32_t nvp_index) { int first_zero; uint8_t level; @@ -1791,15 +1967,14 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, * This is our simple Xive Presenter Engine model. It is merged in the * Router as it does not require an extra object. */ -bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, +bool xive_presenter_match(XiveFabric *xfb, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, bool *precluded) + uint32_t logic_serv, XiveTCTXMatch *match) { XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); - XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false }; - uint8_t group_level; - int count; + + memset(match, 0, sizeof(*match)); /* * Ask the machine to scan the interrupt controllers for a match. @@ -1824,22 +1999,8 @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, * a new command to the presenters (the equivalent of the "assign" * power bus command in the documented full notify sequence. 
*/ - count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, - priority, logic_serv, &match); - if (count < 0) { - return false; - } - - /* handle CPU exception delivery */ - if (count) { - group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx); - trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); - xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); - } else { - *precluded = match.precluded; - } - - return !!count; + return xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, + priority, logic_serv, match); } /* @@ -1876,7 +2037,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) uint8_t nvt_blk; uint32_t nvt_idx; XiveNVT nvt; - bool found, precluded; + XiveTCTXMatch match; uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); @@ -1956,16 +2117,16 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) return; } - found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, - false /* crowd */, - xive_get_field32(END_W7_F0_IGNORE, end.w7), - priority, - xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), - &precluded); - /* we don't support VP-group notification on P9, so precluded is not used */ /* TODO: Auto EOI. 
*/ - - if (found) { + /* we don't support VP-group notification on P9, so precluded is not used */ + if (xive_presenter_match(xrtr->xfb, format, nvt_blk, nvt_idx, + false /* crowd */, + xive_get_field32(END_W7_F0_IGNORE, end.w7), + priority, + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), + &match)) { + trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, 0); + xive_tctx_pipr_present(match.tctx, match.ring, priority, 0); return; } diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c index a08cf906d0e65..fbb3b7975e50b 100644 --- a/hw/intc/xive2.c +++ b/hw/intc/xive2.c @@ -19,6 +19,13 @@ #include "hw/ppc/xive2_regs.h" #include "trace.h" +static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, + uint32_t end_idx, uint32_t end_data, + bool redistribute); + +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, + uint8_t *nvp_blk, uint32_t *nvp_idx); + uint32_t xive2_router_get_config(Xive2Router *xrtr) { Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr); @@ -88,6 +95,35 @@ static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, } } +static uint32_t xive2_nvgc_get_idx(uint32_t nvp_idx, uint8_t group) +{ + uint32_t nvgc_idx; + + if (group > 0) { + nvgc_idx = (nvp_idx & (0xffffffffULL << group)) | + ((1 << (group - 1)) - 1); + } else { + nvgc_idx = nvp_idx; + } + + return nvgc_idx; +} + +static uint8_t xive2_nvgc_get_blk(uint8_t nvp_blk, uint8_t crowd) +{ + uint8_t nvgc_blk; + + if (crowd > 0) { + crowd = (crowd == 3) ? 
4 : crowd; + nvgc_blk = (nvp_blk & (0xffffffffULL << crowd)) | + ((1 << (crowd - 1)) - 1); + } else { + nvgc_blk = nvp_blk; + } + + return nvgc_blk; +} + uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, bool crowd, uint8_t blk, uint32_t idx, @@ -188,12 +224,27 @@ void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w)); } +#define XIVE2_QSIZE_CHUNK_CL 128 +#define XIVE2_QSIZE_CHUNK_4k 4096 +/* Calculate max number of queue entries for an END */ +static uint32_t xive2_end_get_qentries(Xive2End *end) +{ + uint32_t w3 = end->w3; + uint32_t qsize = xive_get_field32(END2_W3_QSIZE, w3); + if (xive_get_field32(END2_W3_CL, w3)) { + g_assert(qsize <= 4); + return (XIVE2_QSIZE_CHUNK_CL << qsize) / sizeof(uint32_t); + } else { + g_assert(qsize <= 12); + return (XIVE2_QSIZE_CHUNK_4k << qsize) / sizeof(uint32_t); + } +} + void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width, GString *buf) { uint64_t qaddr_base = xive2_end_qaddr(end); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); int i; /* @@ -223,8 +274,7 @@ void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, GString *buf) uint64_t qaddr_base = xive2_end_qaddr(end); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); uint32_t nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6); uint32_t nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6); @@ -341,13 +391,12 @@ void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, GString *buf) static void xive2_end_enqueue(Xive2End *end, uint32_t data) { uint64_t qaddr_base = 
xive2_end_qaddr(end); - uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3); uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1); uint64_t qaddr = qaddr_base + (qindex << 2); uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); - uint32_t qentries = 1 << (qsize + 10); + uint32_t qentries = xive2_end_get_qentries(end); if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { @@ -361,8 +410,8 @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) qgen ^= 1; end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen); - /* TODO(PowerNV): reset GF bit on a cache watch operation */ - end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen); + /* Set gen flipped to 1, it gets reset on a cache watch operation */ + end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, 1); } end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); } @@ -492,12 +541,13 @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, */ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx, - uint8_t ring) + uint8_t ring, + uint8_t nvp_blk, uint32_t nvp_idx) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; Xive2Nvp nvp; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); uint8_t *regs = &tctx->regs[ring]; if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { @@ -533,7 +583,14 @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, } nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); - nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); + + if ((nvp.w0 & NVP2_W0_P) || ring != TM_QW2_HV_POOL) { + /* + * Non-pool contexts always save CPPR (ignore p bit). XXX: Clarify + * whether that is the correct behaviour. 
+ */ + nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, sig_regs[TM_CPPR]); + } if (nvp.w0 & NVP2_W0_L) { /* * Typically not used. If LSMFB is restored with 0, it will @@ -555,6 +612,7 @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1); } +/* POOL cam is the same as OS cam encoding */ static void xive2_cam_decode(uint32_t cam, uint8_t *nvp_blk, uint32_t *nvp_idx, bool *valid, bool *hw) { @@ -584,6 +642,67 @@ static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask)); } +static void xive2_redistribute(Xive2Router *xrtr, XiveTCTX *tctx, uint8_t ring) +{ + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t nsr = sig_regs[TM_NSR]; + uint8_t pipr = sig_regs[TM_PIPR]; + uint8_t crowd = NVx_CROWD_LVL(nsr); + uint8_t group = NVx_GROUP_LVL(nsr); + uint8_t nvgc_blk, end_blk, nvp_blk; + uint32_t nvgc_idx, end_idx, nvp_idx; + Xive2Nvgc nvgc; + uint8_t prio_limit; + uint32_t cfg; + + /* redistribution is only for group/crowd interrupts */ + if (!xive_nsr_indicates_group_exception(ring, nsr)) { + return; + } + + /* Don't check return code since ring is expected to be invalidated */ + xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx); + + trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx); + + trace_xive_redistribute(tctx->cs->cpu_index, ring, nvp_blk, nvp_idx); + /* convert crowd/group to blk/idx */ + nvgc_idx = xive2_nvgc_get_idx(nvp_idx, group); + nvgc_blk = xive2_nvgc_get_blk(nvp_blk, crowd); + + /* Use blk/idx to retrieve the NVGC */ + if (xive2_router_get_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, &nvgc)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", + crowd ? 
"NVC" : "NVG", nvgc_blk, nvgc_idx); + return; + } + + /* retrieve the END blk/idx from the NVGC */ + end_blk = xive_get_field32(NVGC2_W1_END_BLK, nvgc.w1); + end_idx = xive_get_field32(NVGC2_W1_END_IDX, nvgc.w1); + + /* determine number of priorities being used */ + cfg = xive2_router_get_config(xrtr); + if (cfg & XIVE2_EN_VP_GRP_PRIORITY) { + prio_limit = 1 << GETFIELD(NVGC2_W1_PSIZE, nvgc.w1); + } else { + prio_limit = 1 << GETFIELD(XIVE2_VP_INT_PRIO, cfg); + } + + /* add priority offset to end index */ + end_idx += pipr % prio_limit; + + /* trigger the group END */ + xive2_router_end_notify(xrtr, end_blk, end_idx, 0, true); + + /* clear interrupt indication for the context */ + sig_regs[TM_NSR] = 0; + sig_regs[TM_PIPR] = sig_regs[TM_CPPR]; + xive_tctx_reset_signal(tctx, ring); +} + +static void xive2_tctx_process_pending(XiveTCTX *tctx, uint8_t sig_ring); + static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, unsigned size, uint8_t ring) { @@ -595,10 +714,11 @@ static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, uint8_t cur_ring; bool valid; bool do_save; + uint8_t nsr; xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &valid, &do_save); - if (!valid) { + if (xive2_tctx_get_nvp_indexes(tctx, ring, &nvp_blk, &nvp_idx)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n", nvp_blk, nvp_idx); } @@ -608,21 +728,53 @@ static uint64_t xive2_tm_pull_ctx(XivePresenter *xptr, XiveTCTX *tctx, cur_ring += XIVE_TM_RING_SIZE) { uint32_t ringw2 = xive_tctx_word2(&tctx->regs[cur_ring]); uint32_t ringw2_new = xive_set_field32(TM2_QW1W2_VO, ringw2, 0); + bool is_valid = !!(xive_get_field32(TM2_QW1W2_VO, ringw2)); + uint8_t *sig_regs; + memcpy(&tctx->regs[cur_ring + TM_WORD2], &ringw2_new, 4); + + /* Skip the rest for USER or invalid contexts */ + if ((cur_ring == TM_QW0_USER) || !is_valid) { + continue; + } + + /* Active group/crowd interrupts need to be redistributed */ + sig_regs = xive_tctx_signal_regs(tctx, 
ring); + nsr = sig_regs[TM_NSR]; + if (xive_nsr_indicates_group_exception(cur_ring, nsr)) { + /* Ensure ring matches NSR (for HV NSR POOL vs PHYS rings) */ + if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) { + xive2_redistribute(xrtr, tctx, cur_ring); + } + } + + /* + * Lower external interrupt line of requested ring and below except for + * USER, which doesn't exist. + */ + if (xive_nsr_indicates_exception(cur_ring, nsr)) { + if (cur_ring == xive_nsr_exception_ring(cur_ring, nsr)) { + xive_tctx_reset_signal(tctx, cur_ring); + } + } } - if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { - xive2_tctx_save_ctx(xrtr, tctx, nvp_blk, nvp_idx, ring); + if (ring == TM_QW2_HV_POOL) { + /* Re-check phys for interrupts if pool was disabled */ + nsr = tctx->regs[TM_QW3_HV_PHYS + TM_NSR]; + if (xive_nsr_indicates_exception(TM_QW3_HV_PHYS, nsr)) { + /* Ring must be PHYS because POOL would have been redistributed */ + g_assert(xive_nsr_exception_ring(TM_QW3_HV_PHYS, nsr) == + TM_QW3_HV_PHYS); + } else { + xive2_tctx_process_pending(tctx, TM_QW3_HV_PHYS); + } } - /* - * Lower external interrupt line of requested ring and below except for - * USER, which doesn't exist. 
- */ - for (cur_ring = TM_QW1_OS; cur_ring <= ring; - cur_ring += XIVE_TM_RING_SIZE) { - xive_tctx_reset_signal(tctx, cur_ring); + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) { + xive2_tctx_save_ctx(xrtr, tctx, ring, nvp_blk, nvp_idx); } + return target_ringw2; } @@ -632,6 +784,18 @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW1_OS); } +uint64_t xive2_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW2_HV_POOL); +} + +uint64_t xive2_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size) +{ + return xive2_tm_pull_ctx(xptr, tctx, offset, size, TM_QW3_HV_PHYS); +} + #define REPORT_LINE_GEN1_SIZE 16 static void xive2_tm_report_line_gen1(XiveTCTX *tctx, uint8_t *data, @@ -741,12 +905,15 @@ void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, xive2_tm_pull_ctx_ol(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); } -static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, - uint8_t nvp_blk, uint32_t nvp_idx, - Xive2Nvp *nvp) +static uint8_t xive2_tctx_restore_ctx(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, + uint8_t nvp_blk, uint32_t nvp_idx, + Xive2Nvp *nvp) { CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env; uint32_t pir = env->spr_cb[SPR_PIR].default_value; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t *regs = &tctx->regs[ring]; uint8_t cppr; if (!xive2_nvp_is_hw(nvp)) { @@ -759,10 +926,10 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0); xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2); - tctx->regs[TM_QW1_OS + TM_CPPR] = cppr; - tctx->regs[TM_QW1_OS + TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); - tctx->regs[TM_QW1_OS + TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); - 
tctx->regs[TM_QW1_OS + TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); + sig_regs[TM_CPPR] = cppr; + regs[TM_LSMFB] = xive_get_field32(NVP2_W2_LSMFB, nvp->w2); + regs[TM_LGS] = xive_get_field32(NVP2_W2_LGS, nvp->w2); + regs[TM_T] = xive_get_field32(NVP2_W2_T, nvp->w2); nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1); nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1); @@ -771,9 +938,18 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, /* * Checkout privilege: 0:OS, 1:Pool, 2:Hard * - * TODO: we only support OS push/pull + * TODO: we don't support hard push/pull */ - nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + switch (ring) { + case TM_QW1_OS: + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0); + break; + case TM_QW2_HV_POOL: + nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 1); + break; + default: + g_assert_not_reached(); + } xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1); @@ -781,18 +957,14 @@ static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx, return cppr; } -static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, +/* Restore TIMA VP context from NVP backlog */ +static void xive2_tctx_restore_nvp(Xive2Router *xrtr, XiveTCTX *tctx, + uint8_t ring, uint8_t nvp_blk, uint32_t nvp_idx, bool do_restore) { - XivePresenter *xptr = XIVE_PRESENTER(xrtr); + uint8_t *regs = &tctx->regs[ring]; uint8_t ipb; - uint8_t backlog_level; - uint8_t group_level; - uint8_t first_group; - uint8_t backlog_prio; - uint8_t group_prio; - uint8_t *regs = &tctx->regs[TM_QW1_OS]; Xive2Nvp nvp; /* @@ -812,9 +984,8 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, } /* Automatically restore thread context registers */ - if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && - do_restore) { - xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp); + if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_restore) { + 
xive2_tctx_restore_ctx(xrtr, tctx, ring, nvp_blk, nvp_idx, &nvp); } ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); @@ -822,143 +993,230 @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); } + /* IPB bits in the backlog are merged with the TIMA IPB bits */ regs[TM_IPB] |= ipb; - backlog_prio = xive_ipb_to_pipr(ipb); - backlog_level = 0; - - first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); - if (first_group && regs[TM_LSMFB] < backlog_prio) { - group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx, - first_group, &group_level); - regs[TM_LSMFB] = group_prio; - if (regs[TM_LGS] && group_prio < backlog_prio) { - /* VP can take a group interrupt */ - xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx, - group_prio, group_level); - backlog_prio = group_prio; - backlog_level = group_level; - } - } - - /* - * Compute the PIPR based on the restored state. - * It will raise the External interrupt signal if needed. 
- */ - xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); } /* - * Updating the OS CAM line can trigger a resend of interrupt + * Updating the ring CAM line can trigger a resend of interrupt */ -void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, - hwaddr offset, uint64_t value, unsigned size) +static void xive2_tm_push_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size, + uint8_t ring) { uint32_t cam; - uint32_t qw1w2; - uint64_t qw1dw1; + uint32_t w2; + uint64_t dw1; uint8_t nvp_blk; uint32_t nvp_idx; - bool vo; + bool v; bool do_restore; + if (xive_ring_valid(tctx, ring)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Attempt to push VP to enabled" + " ring 0x%02x\n", ring); + return; + } + /* First update the thead context */ switch (size) { + case 1: + tctx->regs[ring + TM_WORD2] = value & 0xff; + cam = xive2_tctx_hw_cam_line(xptr, tctx); + cam |= ((value & 0xc0) << 24); /* V and H bits */ + break; case 4: cam = value; - qw1w2 = cpu_to_be32(cam); - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4); + w2 = cpu_to_be32(cam); + memcpy(&tctx->regs[ring + TM_WORD2], &w2, 4); break; case 8: cam = value >> 32; - qw1dw1 = cpu_to_be64(value); - memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1dw1, 8); + dw1 = cpu_to_be64(value); + memcpy(&tctx->regs[ring + TM_WORD2], &dw1, 8); break; default: g_assert_not_reached(); } - xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore); + xive2_cam_decode(cam, &nvp_blk, &nvp_idx, &v, &do_restore); /* Check the interrupt pending bits */ - if (vo) { - xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx, - do_restore); + if (v) { + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t cur_ring; + + xive2_tctx_restore_nvp(xrtr, tctx, ring, + nvp_blk, nvp_idx, do_restore); + + for (cur_ring = TM_QW1_OS; cur_ring <= ring; + cur_ring += XIVE_TM_RING_SIZE) { + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, cur_ring); + uint8_t nsr = sig_regs[TM_NSR]; + + if 
(!xive_ring_valid(tctx, cur_ring)) { + continue; + } + + if (cur_ring == TM_QW2_HV_POOL) { + if (xive_nsr_indicates_exception(cur_ring, nsr)) { + g_assert(xive_nsr_exception_ring(cur_ring, nsr) == + TM_QW3_HV_PHYS); + xive2_redistribute(xrtr, tctx, + xive_nsr_exception_ring(ring, nsr)); + } + xive2_tctx_process_pending(tctx, TM_QW3_HV_PHYS); + break; + } + xive2_tctx_process_pending(tctx, cur_ring); + } } } +void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW1_OS); +} + +void xive2_tm_push_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW2_HV_POOL); +} + +void xive2_tm_push_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tm_push_ctx(xptr, tctx, offset, value, size, TM_QW3_HV_PHYS); +} + +/* returns -1 if ring is invalid, but still populates block and index */ static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, - uint32_t *nvp_blk, uint32_t *nvp_idx) + uint8_t *nvp_blk, uint32_t *nvp_idx) { - uint32_t w2, cam; + uint32_t w2; + uint32_t cam = 0; + int rc = 0; w2 = xive_tctx_word2(&tctx->regs[ring]); switch (ring) { case TM_QW1_OS: if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { - return -1; + rc = -1; } cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); break; case TM_QW2_HV_POOL: if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { - return -1; + rc = -1; } cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); break; case TM_QW3_HV_PHYS: if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { - return -1; + rc = -1; } cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); break; default: - return -1; + rc = -1; } *nvp_blk = xive2_nvp_blk(cam); *nvp_idx = xive2_nvp_idx(cam); - return 0; + return rc; } -static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) +static void xive2_tctx_accept_el(XivePresenter 
*xptr, XiveTCTX *tctx, + uint8_t ring, uint8_t cl_ring) { - uint8_t *regs = &tctx->regs[ring]; - Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); - uint8_t old_cppr, backlog_prio, first_group, group_level = 0; - uint8_t pipr_min, lsmfb_min, ring_min; - bool group_enabled; - uint32_t nvp_blk, nvp_idx; + uint64_t rd; + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint32_t nvp_idx, xive2_cfg; + uint8_t nvp_blk; Xive2Nvp nvp; - int rc; + uint64_t phys_addr; + uint8_t OGen = 0; - trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, - regs[TM_IPB], regs[TM_PIPR], - cppr, regs[TM_NSR]); + xive2_tctx_get_nvp_indexes(tctx, cl_ring, &nvp_blk, &nvp_idx); - if (cppr > XIVE_PRIORITY_MAX) { - cppr = 0xff; + if (xive2_router_get_nvp(xrtr, (uint8_t)nvp_blk, nvp_idx, &nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", + nvp_blk, nvp_idx); + return; + } + + if (!xive2_nvp_is_valid(&nvp)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", + nvp_blk, nvp_idx); + return; } - old_cppr = regs[TM_CPPR]; - regs[TM_CPPR] = cppr; + + rd = xive_tctx_accept(tctx, ring); + + if (ring == TM_QW1_OS) { + OGen = tctx->regs[ring + TM_OGEN]; + } + xive2_cfg = xive2_router_get_config(xrtr); + phys_addr = xive2_nvp_reporting_addr(&nvp); + uint8_t report_data[REPORT_LINE_GEN1_SIZE]; + memset(report_data, 0xff, sizeof(report_data)); + if ((OGen == 1) || (xive2_cfg & XIVE2_GEN1_TIMA_OS)) { + report_data[8] = (rd >> 8) & 0xff; + report_data[9] = rd & 0xff; + } else { + report_data[0] = (rd >> 8) & 0xff; + report_data[1] = rd & 0xff; + } + cpu_physical_memory_write(phys_addr, report_data, REPORT_LINE_GEN1_SIZE); +} + +void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + xive2_tctx_accept_el(xptr, tctx, TM_QW1_OS, TM_QW1_OS); +} + +/* Re-calculate and present pending interrupts */ +static void xive2_tctx_process_pending(XiveTCTX *tctx, uint8_t sig_ring) +{ + uint8_t *sig_regs = &tctx->regs[sig_ring]; + Xive2Router *xrtr = 
XIVE2_ROUTER(tctx->xptr); + uint8_t backlog_prio; + uint8_t first_group; + uint8_t group_level; + uint8_t pipr_min; + uint8_t lsmfb_min; + uint8_t ring_min; + uint8_t cppr = sig_regs[TM_CPPR]; + bool group_enabled; + Xive2Nvp nvp; + int rc; + + g_assert(sig_ring == TM_QW3_HV_PHYS || sig_ring == TM_QW1_OS); + g_assert(sig_regs[TM_WORD2] & 0x80); + g_assert(!xive_nsr_indicates_group_exception(sig_ring, sig_regs[TM_NSR])); /* * Recompute the PIPR based on local pending interrupts. It will * be adjusted below if needed in case of pending group interrupts. */ - pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); - group_enabled = !!regs[TM_LGS]; - lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; - ring_min = ring; +again: + pipr_min = xive_ipb_to_pipr(sig_regs[TM_IPB]); + group_enabled = !!sig_regs[TM_LGS]; + lsmfb_min = group_enabled ? sig_regs[TM_LSMFB] : 0xff; + ring_min = sig_ring; + group_level = 0; /* PHYS updates also depend on POOL values */ - if (ring == TM_QW3_HV_PHYS) { - uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; + if (sig_ring == TM_QW3_HV_PHYS) { + uint8_t *pool_regs = &tctx->regs[TM_QW2_HV_POOL]; /* POOL values only matter if POOL ctx is valid */ - if (pregs[TM_WORD2] & 0x80) { - - uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); - uint8_t pool_lsmfb = pregs[TM_LSMFB]; + if (pool_regs[TM_WORD2] & 0x80) { + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); + uint8_t pool_lsmfb = pool_regs[TM_LSMFB]; /* * Determine highest priority interrupt and @@ -972,7 +1230,7 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } /* Values needed for group priority calculation */ - if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { + if (pool_regs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { group_enabled = true; lsmfb_min = pool_lsmfb; if (lsmfb_min < pipr_min) { @@ -981,32 +1239,26 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) } } } - regs[TM_PIPR] = pipr_min; - - rc = xive2_tctx_get_nvp_indexes(tctx, 
ring_min, &nvp_blk, &nvp_idx); - if (rc) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); - return; - } - - if (cppr < old_cppr) { - /* - * FIXME: check if there's a group interrupt being presented - * and if the new cppr prevents it. If so, then the group - * interrupt needs to be re-added to the backlog and - * re-triggered (see re-trigger END info in the NVGC - * structure) - */ - } if (group_enabled && lsmfb_min < cppr && - lsmfb_min < regs[TM_PIPR]) { + lsmfb_min < pipr_min) { + + uint8_t nvp_blk; + uint32_t nvp_idx; + /* * Thread has seen a group interrupt with a higher priority * than the new cppr or pending local interrupt. Check the * backlog */ + rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); + if (rc) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid " + "context\n"); + return; + } + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", nvp_blk, nvp_idx); @@ -1030,14 +1282,85 @@ static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) nvp_blk, nvp_idx, first_group, &group_level); tctx->regs[ring_min + TM_LSMFB] = backlog_prio; - if (backlog_prio != 0xFF) { - xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, - backlog_prio, group_level); - regs[TM_PIPR] = backlog_prio; + if (backlog_prio != lsmfb_min) { + /* + * If the group backlog scan finds a less favored or no interrupt, + * then re-do the processing which may turn up a more favored + * interrupt from IPB or the other pool. Backlog should not + * find a priority < LSMFB. 
+ */ + g_assert(backlog_prio >= lsmfb_min); + goto again; + } + + xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, + backlog_prio, group_level); + pipr_min = backlog_prio; + } + + if (pipr_min > cppr) { + pipr_min = cppr; + } + xive_tctx_pipr_set(tctx, ring_min, pipr_min, group_level); +} + +/* NOTE: CPPR only exists for TM_QW1_OS and TM_QW3_HV_PHYS */ +static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t sig_ring, uint8_t cppr) +{ + uint8_t *sig_regs = &tctx->regs[sig_ring]; + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); + uint8_t old_cppr; + uint8_t nsr = sig_regs[TM_NSR]; + + g_assert(sig_ring == TM_QW1_OS || sig_ring == TM_QW3_HV_PHYS); + + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + /* XXX: should show pool IPB for PHYS ring */ + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, sig_ring, + sig_regs[TM_IPB], sig_regs[TM_PIPR], + cppr, nsr); + + if (cppr > XIVE_PRIORITY_MAX) { + cppr = 0xff; + } + + old_cppr = sig_regs[TM_CPPR]; + sig_regs[TM_CPPR] = cppr; + + /* Handle increased CPPR priority (lower value) */ + if (cppr < old_cppr) { + if (cppr <= sig_regs[TM_PIPR]) { + /* CPPR lowered below PIPR, must un-present interrupt */ + if (xive_nsr_indicates_exception(sig_ring, nsr)) { + if (xive_nsr_indicates_group_exception(sig_ring, nsr)) { + /* redistribute precluded active grp interrupt */ + xive2_redistribute(xrtr, tctx, + xive_nsr_exception_ring(sig_ring, nsr)); + return; + } + } + + /* interrupt is VP directed, pending in IPB */ + xive_tctx_pipr_set(tctx, sig_ring, cppr, 0); + return; + } else { + /* CPPR was lowered, but still above PIPR. No action needed. 
*/ + return; } } - /* CPPR has changed, check if we need to raise a pending exception */ - xive_tctx_notify(tctx, ring_min, group_level); + + /* CPPR didn't change, nothing needs to be done */ + if (cppr == old_cppr) { + return; + } + + /* CPPR priority decreased (higher value) */ + if (!xive_nsr_indicates_exception(sig_ring, nsr)) { + xive2_tctx_process_pending(tctx, sig_ring); + } } void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, @@ -1052,6 +1375,34 @@ void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); } +/* + * Adjust the IPB to allow a CPU to process event queues of other + * priorities during one physical interrupt cycle. + */ +void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xptr); + uint8_t ring = TM_QW1_OS; + uint8_t *regs = &tctx->regs[ring]; + uint8_t priority = value & 0xff; + + /* + * XXX: should this simply set a bit in IPB and wait for it to be picked + * up next cycle, or is it supposed to present it now? We implement the + * latter here. + */ + regs[TM_IPB] |= xive_priority_to_ipb(priority); + if (xive_ipb_to_pipr(regs[TM_IPB]) >= regs[TM_PIPR]) { + return; + } + if (xive_nsr_indicates_group_exception(ring, regs[TM_NSR])) { + xive2_redistribute(xrtr, tctx, ring); + } + + xive_tctx_pipr_present(tctx, ring, priority, 0); +} + static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) { uint8_t *regs = &tctx->regs[ring]; @@ -1259,9 +1610,7 @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) { - /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ - uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? 
TM_QW3_HV_PHYS : ring; - uint8_t *alt_regs = &tctx->regs[alt_ring]; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); /* * The xive2_presenter_tctx_match() above tells if there's a match @@ -1269,7 +1618,7 @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) * priority to know if the thread can take the interrupt now or if * it is precluded. */ - if (priority < alt_regs[TM_CPPR]) { + if (priority < sig_regs[TM_PIPR]) { return false; } return true; @@ -1322,12 +1671,14 @@ static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk, * message has the same parameters than in the function below. */ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, - uint32_t end_idx, uint32_t end_data) + uint32_t end_idx, uint32_t end_data, + bool redistribute) { Xive2End end; uint8_t priority; uint8_t format; - bool found, precluded; + XiveTCTXMatch match; + bool crowd, cam_ignore; uint8_t nvx_blk; uint32_t nvx_idx; @@ -1350,7 +1701,8 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, return; } - if (xive2_end_is_enqueue(&end)) { + if (!redistribute && xive2_end_is_enqueue(&end)) { + trace_xive_end_enqueue(end_blk, end_idx, end_data); xive2_end_enqueue(&end, end_data); /* Enqueuing event data modifies the EQ toggle and index */ xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1); @@ -1396,16 +1748,28 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, */ nvx_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6); nvx_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6); - - found = xive_presenter_notify(xrtr->xfb, format, nvx_blk, nvx_idx, - xive2_end_is_crowd(&end), xive2_end_is_ignore(&end), - priority, - xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), - &precluded); + crowd = xive2_end_is_crowd(&end); + cam_ignore = xive2_end_is_ignore(&end); /* TODO: Auto EOI. 
*/ + if (xive_presenter_match(xrtr->xfb, format, nvx_blk, nvx_idx, + crowd, cam_ignore, priority, + xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), + &match)) { + XiveTCTX *tctx = match.tctx; + uint8_t ring = match.ring; + uint8_t *sig_regs = xive_tctx_signal_regs(tctx, ring); + uint8_t nsr = sig_regs[TM_NSR]; + uint8_t group_level; + + if (priority < sig_regs[TM_PIPR] && + xive_nsr_indicates_group_exception(ring, nsr)) { + xive2_redistribute(xrtr, tctx, xive_nsr_exception_ring(ring, nsr)); + } - if (found) { + group_level = xive_get_group_level(crowd, cam_ignore, nvx_blk, nvx_idx); + trace_xive_presenter_notify(nvx_blk, nvx_idx, ring, group_level); + xive_tctx_pipr_present(tctx, ring, priority, group_level); return; } @@ -1423,7 +1787,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, return; } - if (!xive2_end_is_ignore(&end)) { + if (!cam_ignore) { uint8_t ipb; Xive2Nvp nvp; @@ -1452,9 +1816,6 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, } else { Xive2Nvgc nvgc; uint32_t backlog; - bool crowd; - - crowd = xive2_end_is_crowd(&end); /* * For groups and crowds, the per-priority backlog @@ -1486,9 +1847,7 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, if (backlog == 1) { XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); xfc->broadcast(xrtr->xfb, nvx_blk, nvx_idx, - xive2_end_is_crowd(&end), - xive2_end_is_ignore(&end), - priority); + crowd, cam_ignore, priority); if (!xive2_end_is_precluded_escalation(&end)) { /* @@ -1522,18 +1881,41 @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, } } - /* - * The END trigger becomes an Escalation trigger - */ - xive2_router_end_notify(xrtr, - xive_get_field32(END2_W4_END_BLOCK, end.w4), - xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), - xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); + if (xive2_end_is_escalate_end(&end)) { + /* + * Perform END Adaptive escalation processing + * The END trigger becomes an 
Escalation trigger + */ + uint8_t esc_blk = xive_get_field32(END2_W4_END_BLOCK, end.w4); + uint32_t esc_idx = xive_get_field32(END2_W4_ESC_END_INDEX, end.w4); + uint32_t esc_data = xive_get_field32(END2_W5_ESC_END_DATA, end.w5); + trace_xive_escalate_end(end_blk, end_idx, esc_blk, esc_idx, esc_data); + xive2_router_end_notify(xrtr, esc_blk, esc_idx, esc_data, false); + } /* end END adaptive escalation */ + + else { + uint32_t lisn; /* Logical Interrupt Source Number */ + + /* + * Perform ESB escalation processing + * E[N] == 1 --> N + * Req[Block] <- E[ESB_Block] + * Req[Index] <- E[ESB_Index] + * Req[Offset] <- 0x000 + * Execute Req command + */ + lisn = XIVE_EAS(xive_get_field32(END2_W4_END_BLOCK, end.w4), + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4)); + + trace_xive_escalate_esb(end_blk, end_idx, lisn); + xive2_notify(xrtr, lisn, true /* pq_checked */); + } + + return; } -void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +void xive2_notify(Xive2Router *xrtr , uint32_t lisn, bool pq_checked) { - Xive2Router *xrtr = XIVE2_ROUTER(xn); uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); uint32_t eas_idx = XIVE_EAS_INDEX(lisn); Xive2Eas eas; @@ -1576,13 +1958,31 @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) return; } + /* TODO: add support for EAS resume */ + if (xive2_eas_is_resume(&eas)) { + qemu_log_mask(LOG_UNIMP, + "XIVE: EAS resume processing unimplemented - LISN %x\n", + lisn); + return; + } + /* * The event trigger becomes an END trigger */ xive2_router_end_notify(xrtr, - xive_get_field64(EAS2_END_BLOCK, eas.w), - xive_get_field64(EAS2_END_INDEX, eas.w), - xive_get_field64(EAS2_END_DATA, eas.w)); + xive_get_field64(EAS2_END_BLOCK, eas.w), + xive_get_field64(EAS2_END_INDEX, eas.w), + xive_get_field64(EAS2_END_DATA, eas.w), + false); + return; +} + +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) +{ + Xive2Router *xrtr = XIVE2_ROUTER(xn); + + xive2_notify(xrtr, lisn, pq_checked); + 
return; } static const Property xive2_router_properties[] = { diff --git a/hw/isa/isa-superio.c b/hw/isa/isa-superio.c index 2853485977cf4..941b0f91d7522 100644 --- a/hw/isa/isa-superio.c +++ b/hw/isa/isa-superio.c @@ -15,6 +15,7 @@ #include "qemu/module.h" #include "qapi/error.h" #include "system/blockdev.h" +#include "system/system.h" #include "chardev/char.h" #include "hw/char/parallel.h" #include "hw/block/fdc.h" diff --git a/hw/isa/lpc_ich9.c b/hw/isa/lpc_ich9.c index 304dffac3227f..c9cb8f7779952 100644 --- a/hw/isa/lpc_ich9.c +++ b/hw/isa/lpc_ich9.c @@ -132,6 +132,11 @@ static void ich9_cc_init(ICH9LPCState *lpc) static void ich9_cc_reset(ICH9LPCState *lpc) { uint8_t *c = lpc->chip_config; + uint32_t gcs = ICH9_CC_GCS_DEFAULT; + + if (lpc->pin_strap.spkr_hi) { + gcs |= ICH9_CC_GCS_NO_REBOOT; + } memset(lpc->chip_config, 0, sizeof(lpc->chip_config)); @@ -142,7 +147,7 @@ static void ich9_cc_reset(ICH9LPCState *lpc) pci_set_long(c + ICH9_CC_D27IR, ICH9_CC_DIR_DEFAULT); pci_set_long(c + ICH9_CC_D26IR, ICH9_CC_DIR_DEFAULT); pci_set_long(c + ICH9_CC_D25IR, ICH9_CC_DIR_DEFAULT); - pci_set_long(c + ICH9_CC_GCS, ICH9_CC_GCS_DEFAULT); + pci_set_long(c + ICH9_CC_GCS, gcs); ich9_cc_update(lpc); } diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig index bb2838b7b53a2..8024ddf1f34c9 100644 --- a/hw/loongarch/Kconfig +++ b/hw/loongarch/Kconfig @@ -15,6 +15,7 @@ config LOONGARCH_VIRT select LOONGARCH_PCH_PIC select LOONGARCH_PCH_MSI select LOONGARCH_EXTIOI + select LOONGARCH_DINTC select LS7A_RTC select SMBIOS select ACPI_CPU_HOTPLUG diff --git a/hw/loongarch/boot.c b/hw/loongarch/boot.c index 14d6c52d4e4d5..3dd48cb8aab01 100644 --- a/hw/loongarch/boot.c +++ b/hw/loongarch/boot.c @@ -35,13 +35,19 @@ struct loongarch_linux_hdr { uint32_t pe_header_offset; } QEMU_PACKED; -static const unsigned int slave_boot_code[] = { +static const unsigned int aux_boot_code[] = { /* Configure reset ebase. */ 0x0400302c, /* csrwr $t0, LOONGARCH_CSR_EENTRY */ /* Disable interrupt. 
*/ 0x0380100c, /* ori $t0, $zero,0x4 */ 0x04000180, /* csrxchg $zero, $t0, LOONGARCH_CSR_CRMD */ + 0x03400000, /* nop */ + + 0x0400800c, /* csrrd $t0, LOONGARCH_CSR_CPUNUM */ + 0x034ffd8c, /* andi $t0, $t0, 0x3ff */ + 0x0015000d, /* move $t1, $zero */ + 0x5800718d, /* beq $t0, $t1, 112 */ /* Clear mailbox. */ 0x1400002d, /* lu12i.w $t1, 1(0x1) */ @@ -81,6 +87,26 @@ static const unsigned int slave_boot_code[] = { 0x06480dac, /* iocsrrd.d $t0, $t1 */ 0x00150181, /* move $ra, $t0 */ 0x4c000020, /* jirl $zero, $ra,0 */ + /* BSP Core */ + 0x03400000, /* nop */ + 0x1800000d, /* pcaddi $t1, 0 */ + 0x28c0a1a4, /* ld.d $a0, $t1, 40 */ + 0x1800000d, /* pcaddi $t1, 0 */ + 0x28c0a1a5, /* ld.d $a1, $t1, 40 */ + 0x1800000d, /* pcaddi $t1, 0 */ + 0x28c0a1a6, /* ld.d $a2, $t1, 40 */ + 0x1800000d, /* pcaddi $t1, 0 */ + 0x28c0a1ac, /* ld.d $t0, $t1, 40 */ + 0x00150181, /* move $ra, $t0 */ + 0x4c000020, /* jirl $zero, $ra,0 */ + 0x00000000, /* .dword 0 A0 */ + 0x00000000, + 0x00000000, /* .dword 0 A1 */ + 0x00000000, + 0x00000000, /* .dword 0 A2 */ + 0x00000000, + 0x00000000, /* .dword 0 PC */ + 0x00000000, }; static inline void *guidcpy(void *dst, const void *src) @@ -280,7 +306,7 @@ static ram_addr_t alloc_initrd_memory(struct loongarch_boot_info *info, static int64_t load_kernel_info(struct loongarch_boot_info *info) { uint64_t kernel_entry, kernel_low, kernel_high, initrd_offset = 0; - ssize_t kernel_size, initrd_size; + ssize_t kernel_size; kernel_size = load_elf(info->kernel_filename, NULL, cpu_loongarch_virt_to_phys, NULL, @@ -302,7 +328,8 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info) } if (info->initrd_filename) { - initrd_size = get_image_size(info->initrd_filename); + ssize_t initrd_size = get_image_size(info->initrd_filename); + if (initrd_size > 0) { initrd_offset = ROUND_UP(kernel_high + 4 * kernel_size, 64 * KiB); initrd_offset = alloc_initrd_memory(info, initrd_offset, @@ -311,7 +338,7 @@ static int64_t load_kernel_info(struct loongarch_boot_info 
*info) initrd_offset, initrd_size); } - if (initrd_size == (target_ulong)-1) { + if (initrd_size == -1) { error_report("could not load initial ram disk '%s'", info->initrd_filename); exit(1); @@ -324,22 +351,6 @@ static int64_t load_kernel_info(struct loongarch_boot_info *info) return kernel_entry; } -static void reset_load_elf(void *opaque) -{ - LoongArchCPU *cpu = opaque; - CPULoongArchState *env = &cpu->env; - - cpu_reset(CPU(cpu)); - if (env->load_elf) { - if (cpu == LOONGARCH_CPU(first_cpu)) { - env->gpr[4] = env->boot_info->a0; - env->gpr[5] = env->boot_info->a1; - env->gpr[6] = env->boot_info->a2; - } - cpu_set_pc(CPU(cpu), env->elf_address); - } -} - static void fw_cfg_add_kernel_info(struct loongarch_boot_info *info, FWCfgState *fw_cfg) { @@ -389,8 +400,7 @@ static void loongarch_direct_kernel_boot(MachineState *ms, { void *p, *bp; int64_t kernel_addr = VIRT_FLASH0_BASE; - LoongArchCPU *lacpu; - CPUState *cs; + uint64_t *data; if (info->kernel_filename) { kernel_addr = load_kernel_info(info); @@ -408,20 +418,14 @@ static void loongarch_direct_kernel_boot(MachineState *ms, /* Load slave boot code at pflash0 . 
*/ void *boot_code = g_malloc0(VIRT_FLASH0_SIZE); - memcpy(boot_code, &slave_boot_code, sizeof(slave_boot_code)); + memcpy(boot_code, &aux_boot_code, sizeof(aux_boot_code)); + data = (uint64_t *)(boot_code + sizeof(aux_boot_code)); + *(data - 4) = cpu_to_le64(info->a0); + *(data - 3) = cpu_to_le64(info->a1); + *(data - 2) = cpu_to_le64(info->a2); + *(data - 1) = cpu_to_le64(kernel_addr); rom_add_blob_fixed("boot_code", boot_code, VIRT_FLASH0_SIZE, VIRT_FLASH0_BASE); - CPU_FOREACH(cs) { - lacpu = LOONGARCH_CPU(cs); - lacpu->env.load_elf = true; - if (cs == first_cpu) { - lacpu->env.elf_address = kernel_addr; - } else { - lacpu->env.elf_address = VIRT_FLASH0_BASE; - } - lacpu->env.boot_info = info; - } - g_free(boot_code); g_free(bp); } @@ -429,12 +433,6 @@ static void loongarch_direct_kernel_boot(MachineState *ms, void loongarch_load_kernel(MachineState *ms, struct loongarch_boot_info *info) { LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(ms); - int i; - - /* register reset function */ - for (i = 0; i < ms->smp.cpus; i++) { - qemu_register_reset(reset_load_elf, LOONGARCH_CPU(qemu_get_cpu(i))); - } info->kernel_filename = ms->kernel_filename; info->kernel_cmdline = ms->kernel_cmdline; diff --git a/hw/loongarch/virt-acpi-build.c b/hw/loongarch/virt-acpi-build.c index 8c2228a77225c..3694c9827f045 100644 --- a/hw/loongarch/virt-acpi-build.c +++ b/hw/loongarch/virt-acpi-build.c @@ -21,7 +21,6 @@ #include "system/reset.h" /* Supported chipsets: */ -#include "hw/pci-host/ls7a.h" #include "hw/loongarch/virt.h" #include "hw/acpi/utils.h" diff --git a/hw/loongarch/virt-fdt-build.c b/hw/loongarch/virt-fdt-build.c index 728ce466996f8..1f0ba01f711e0 100644 --- a/hw/loongarch/virt-fdt-build.c +++ b/hw/loongarch/virt-fdt-build.c @@ -12,7 +12,6 @@ #include "hw/loader.h" #include "hw/loongarch/virt.h" #include "hw/pci-host/gpex.h" -#include "hw/pci-host/ls7a.h" #include "system/device_tree.h" #include "system/reset.h" #include "target/loongarch/cpu.h" diff --git 
a/hw/loongarch/virt.c b/hw/loongarch/virt.c index b15ada20787ba..49434ad1828b0 100644 --- a/hw/loongarch/virt.c +++ b/hw/loongarch/virt.c @@ -28,7 +28,7 @@ #include "hw/intc/loongarch_extioi.h" #include "hw/intc/loongarch_pch_pic.h" #include "hw/intc/loongarch_pch_msi.h" -#include "hw/pci-host/ls7a.h" +#include "hw/intc/loongarch_dintc.h" #include "hw/pci-host/gpex.h" #include "hw/misc/unimp.h" #include "hw/loongarch/fw_cfg.h" @@ -46,6 +46,31 @@ #include "hw/block/flash.h" #include "hw/virtio/virtio-iommu.h" #include "qemu/error-report.h" +#include "kvm/kvm_loongarch.h" + +static void virt_get_dmsi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); + OnOffAuto dmsi = lvms->dmsi; + + visit_type_OnOffAuto(v, name, &dmsi, errp); + +} +static void virt_set_dmsi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(obj); + + visit_type_OnOffAuto(v, name, &lvms->dmsi, errp); + + if (lvms->dmsi == ON_OFF_AUTO_OFF) { + lvms->misc_feature &= ~BIT(IOCSRF_DMSI); + lvms->misc_status &= ~BIT_ULL(IOCSRM_DMSI_EN); + } else if (lvms->dmsi == ON_OFF_AUTO_ON) { + lvms->misc_feature = BIT(IOCSRF_DMSI); + } +} static void virt_get_veiointc(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) @@ -355,13 +380,17 @@ static void virt_cpu_irq_init(LoongArchVirtMachineState *lvms) &error_abort); hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), DEVICE(cs), &error_abort); + if (lvms->dintc) { + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->dintc), DEVICE(cs), + &error_abort); + } } } static void virt_irq_init(LoongArchVirtMachineState *lvms) { DeviceState *pch_pic, *pch_msi; - DeviceState *ipi, *extioi; + DeviceState *ipi, *extioi, *dintc; SysBusDevice *d; int i, start, num; @@ -407,6 +436,33 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms) * +--------+ +---------+ +---------+ * | UARTs | | 
Devices | | Devices | * +--------+ +---------+ +---------+ + * + * + * Advanced Extended IRQ model + * + * +-----+ +---------------------------------+ +-------+ + * | IPI | --> | CPUINTC | <-- | Timer | + * +-----+ +---------------------------------+ +-------+ + * ^ ^ ^ + * | | | + * +-------------+ +----------+ +---------+ +-------+ + * | EIOINTC | | DINTC | | LIOINTC | <-- | UARTs | + * +-------------+ +----------+ +---------+ +-------+ + * ^ ^ ^ + * | | | + * +---------+ +---------+ | + * | PCH-PIC | | PCH-MSI | | + * +---------+ +---------+ | + * ^ ^ ^ | + * | | | | + * +---------+ +---------+ +---------+ + * | Devices | | PCH-LPC | | Devices | + * +---------+ +---------+ +---------+ + * ^ + * | + * +---------+ + * | Devices | + * +---------+ */ /* Create IPI device */ @@ -414,6 +470,14 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms) lvms->ipi = ipi; sysbus_realize_and_unref(SYS_BUS_DEVICE(ipi), &error_fatal); + /* Create DINTC device*/ + if (virt_has_dmsi(lvms)) { + dintc = qdev_new(TYPE_LOONGARCH_DINTC); + lvms->dintc = dintc; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dintc), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dintc), 0, VIRT_DINTC_BASE); + } + /* Create EXTIOI device */ extioi = qdev_new(TYPE_LOONGARCH_EXTIOI); lvms->extioi = extioi; @@ -456,7 +520,7 @@ static void virt_irq_init(LoongArchVirtMachineState *lvms) } /* PCH_PIC memory region */ - memory_region_add_subregion(get_system_memory(), VIRT_IOAPIC_REG_BASE, + memory_region_add_subregion(get_system_memory(), VIRT_PCH_REG_BASE, sysbus_mmio_get_region(SYS_BUS_DEVICE(pch_pic), 0)); /* Connect pch_pic irqs to extioi */ @@ -539,6 +603,10 @@ static MemTxResult virt_iocsr_misc_write(void *opaque, hwaddr addr, return MEMTX_OK; } + if (virt_has_dmsi(lvms) && val & BIT_ULL(IOCSRM_DMSI_EN)) { + lvms->misc_status |= BIT_ULL(IOCSRM_DMSI_EN); + } + features = address_space_ldl(&lvms->as_iocsr, EXTIOI_VIRT_BASE + EXTIOI_VIRT_CONFIG, attrs, NULL); @@ -574,6 +642,9 @@ static MemTxResult 
virt_iocsr_misc_read(void *opaque, hwaddr addr, break; case FEATURE_REG: ret = BIT(IOCSRF_MSI) | BIT(IOCSRF_EXTIOI) | BIT(IOCSRF_CSRIPI); + if (virt_has_dmsi(lvms)) { + ret |= BIT(IOCSRF_DMSI); + } if (kvm_enabled()) { ret |= BIT(IOCSRF_VM); } @@ -603,6 +674,10 @@ static MemTxResult virt_iocsr_misc_read(void *opaque, hwaddr addr, if (features & BIT(EXTIOI_ENABLE_INT_ENCODE)) { ret |= BIT_ULL(IOCSRM_EXTIOI_INT_ENCODE); } + if (virt_has_dmsi(lvms) && + (lvms->misc_status & BIT_ULL(IOCSRM_DMSI_EN))) { + ret |= BIT_ULL(IOCSRM_DMSI_EN); + } break; default: g_assert_not_reached(); @@ -682,6 +757,25 @@ static void fw_cfg_add_memory(MachineState *ms) } } +static void virt_check_dmsi(MachineState *machine) +{ + LoongArchCPU *cpu; + LoongArchVirtMachineState *lvms = LOONGARCH_VIRT_MACHINE(machine); + + cpu = LOONGARCH_CPU(first_cpu); + if (lvms->dmsi == ON_OFF_AUTO_AUTO) { + if (cpu->msgint != ON_OFF_AUTO_OFF) { + lvms->misc_feature = BIT(IOCSRF_DMSI); + } + } + + if (lvms->dmsi == ON_OFF_AUTO_ON && cpu->msgint == ON_OFF_AUTO_OFF) { + error_report("Fail to enable dmsi , cpu msgint is off " + "pleass add cpu feature mesgint=on."); + exit(EXIT_FAILURE); + } +} + static void virt_init(MachineState *machine) { const char *cpu_model = machine->cpu_type; @@ -716,6 +810,7 @@ static void virt_init(MachineState *machine) } qdev_realize_and_unref(DEVICE(cpuobj), NULL, &error_fatal); } + virt_check_dmsi(machine); fw_cfg_add_memory(machine); /* Node0 memory */ @@ -846,6 +941,8 @@ static void virt_initfn(Object *obj) if (tcg_enabled()) { lvms->veiointc = ON_OFF_AUTO_OFF; } + + lvms->dmsi = ON_OFF_AUTO_AUTO; lvms->acpi = ON_OFF_AUTO_AUTO; lvms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); lvms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); @@ -1009,10 +1106,14 @@ static void virt_cpu_unplug(HotplugHandler *hotplug_dev, /* Notify ipi and extioi irqchip to remove interrupt routing to CPU */ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->ipi), dev, &error_abort); 
hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort); + if (lvms->dintc) { + hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->dintc), dev, &error_abort); + } /* Notify acpi ged CPU removed */ hotplug_handler_unplug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &error_abort); + qemu_unregister_resettable(OBJECT(dev)); cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id); cpu_slot->cpu = NULL; } @@ -1032,11 +1133,16 @@ static void virt_cpu_plug(HotplugHandler *hotplug_dev, hotplug_handler_plug(HOTPLUG_HANDLER(lvms->extioi), dev, &error_abort); } + if (lvms->dintc) { + hotplug_handler_plug(HOTPLUG_HANDLER(lvms->dintc), dev, &error_abort); + } + if (lvms->acpi_ged) { hotplug_handler_plug(HOTPLUG_HANDLER(lvms->acpi_ged), dev, &error_abort); } + qemu_register_resettable(OBJECT(dev)); cpu_slot = virt_find_cpu_slot(MACHINE(lvms), cpu->phy_id); cpu_slot->cpu = CPU(dev); } @@ -1238,6 +1344,10 @@ static void virt_class_init(ObjectClass *oc, const void *data) NULL, NULL); object_class_property_set_description(oc, "v-eiointc", "Enable Virt Extend I/O Interrupt Controller."); + object_class_property_add(oc, "dmsi", "OnOffAuto", + virt_get_dmsi, virt_set_dmsi, NULL, NULL); + object_class_property_set_description(oc, "dmsi", + "Enable direct Message-interrupts Controller."); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_RAMFB_DEVICE); machine_class_allow_dynamic_sysbus_dev(mc, TYPE_UEFI_VARS_SYSBUS); #ifdef CONFIG_TPM diff --git a/hw/m68k/virt.c b/hw/m68k/virt.c index 875fd00ef8d28..98cfe43c73ae2 100644 --- a/hw/m68k/virt.c +++ b/hw/m68k/virt.c @@ -367,10 +367,17 @@ type_init(virt_machine_register_types) #define DEFINE_VIRT_MACHINE(major, minor) \ DEFINE_VIRT_MACHINE_IMPL(false, major, minor) +static void virt_machine_10_2_options(MachineClass *mc) +{ +} +DEFINE_VIRT_MACHINE_AS_LATEST(10, 2) + static void virt_machine_10_1_options(MachineClass *mc) { + virt_machine_10_2_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_1, hw_compat_10_1_len); } 
-DEFINE_VIRT_MACHINE_AS_LATEST(10, 1) +DEFINE_VIRT_MACHINE(10, 1) static void virt_machine_10_0_options(MachineClass *mc) { diff --git a/hw/meson.build b/hw/meson.build index 791ce21ab42af..1022bdb8069a1 100644 --- a/hw/meson.build +++ b/hw/meson.build @@ -1,3 +1,26 @@ +# Enter target code first to reuse variables associated +subdir('alpha') +subdir('arm') +subdir('avr') +subdir('hppa') +subdir('xenpv') # i386 uses it +subdir('i386') +subdir('loongarch') +subdir('m68k') +subdir('microblaze') +subdir('mips') +subdir('openrisc') +subdir('ppc') +subdir('remote') +subdir('riscv') +subdir('rx') +subdir('s390x') +subdir('sh4') +subdir('sparc') +subdir('sparc64') +subdir('tricore') +subdir('xtensa') + subdir('9pfs') subdir('acpi') subdir('adc') @@ -44,26 +67,4 @@ subdir('virtio') subdir('vmapple') subdir('watchdog') subdir('xen') -subdir('xenpv') subdir('fsi') - -subdir('alpha') -subdir('arm') -subdir('avr') -subdir('hppa') -subdir('i386') -subdir('loongarch') -subdir('m68k') -subdir('microblaze') -subdir('mips') -subdir('openrisc') -subdir('ppc') -subdir('remote') -subdir('riscv') -subdir('rx') -subdir('s390x') -subdir('sh4') -subdir('sparc') -subdir('sparc64') -subdir('tricore') -subdir('xtensa') diff --git a/hw/mips/Kconfig b/hw/mips/Kconfig index f84fffcd3238d..b59cb2f1114e0 100644 --- a/hw/mips/Kconfig +++ b/hw/mips/Kconfig @@ -13,13 +13,6 @@ config MALTA select SERIAL_MM select SMBUS_EEPROM -config MIPSSIM - bool - default y - depends on MIPS - select SERIAL_MM - select MIPSNET - config JAZZ bool default y diff --git a/hw/mips/loongson3_virt.c b/hw/mips/loongson3_virt.c index de6fbcc0cb4f5..672083dec9864 100644 --- a/hw/mips/loongson3_virt.c +++ b/hw/mips/loongson3_virt.c @@ -49,6 +49,7 @@ #include "system/qtest.h" #include "system/reset.h" #include "system/runstate.h" +#include "system/system.h" #include "qemu/error-report.h" #define PM_CNTL_MODE 0x10 diff --git a/hw/mips/malta.c b/hw/mips/malta.c index cbdbb21056807..02da629b5afca 100644 --- a/hw/mips/malta.c +++ 
b/hw/mips/malta.c @@ -52,6 +52,7 @@ #include "system/qtest.h" #include "system/reset.h" #include "system/runstate.h" +#include "system/system.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "system/kvm.h" @@ -1190,7 +1191,7 @@ void mips_malta_init(MachineState *machine) * In little endian mode the 32bit words in the bios are swapped, * a neat trick which allows bi-endian firmware. */ - if (!TARGET_BIG_ENDIAN) { + if (!TARGET_BIG_ENDIAN && bios_size > 0) { uint32_t *end, *addr; const size_t swapsize = MIN(bios_size, 0x3e0000); addr = rom_ptr(FLASH_ADDRESS, swapsize); diff --git a/hw/mips/meson.build b/hw/mips/meson.build index 31dbd2bf4d91f..390f0fd7f9d16 100644 --- a/hw/mips/meson.build +++ b/hw/mips/meson.build @@ -8,7 +8,6 @@ mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c')) if 'CONFIG_TCG' in config_all_accel mips_ss.add(when: 'CONFIG_JAZZ', if_true: files('jazz.c')) -mips_ss.add(when: 'CONFIG_MIPSSIM', if_true: files('mipssim.c')) mips_ss.add(when: 'CONFIG_FULOONG', if_true: files('fuloong2e.c')) mips_ss.add(when: 'CONFIG_MIPS_BOSTON', if_true: files('boston.c')) endif diff --git a/hw/mips/mipssim.c b/hw/mips/mipssim.c deleted file mode 100644 index e843307b9b6ac..0000000000000 --- a/hw/mips/mipssim.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * QEMU/mipssim emulation - * - * Emulates a very simple machine model similar to the one used by the - * proprietary MIPS emulator. 
- * - * Copyright (c) 2007 Thiemo Seufer - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/datadir.h" -#include "system/address-spaces.h" -#include "hw/clock.h" -#include "hw/mips/mips.h" -#include "hw/char/serial-mm.h" -#include "net/net.h" -#include "system/system.h" -#include "hw/boards.h" -#include "hw/loader.h" -#include "elf.h" -#include "hw/sysbus.h" -#include "hw/qdev-properties.h" -#include "qemu/error-report.h" -#include "system/qtest.h" -#include "system/reset.h" -#include "cpu.h" - -#define BIOS_SIZE (4 * MiB) - -static struct _loaderparams { - int ram_size; - const char *kernel_filename; - const char *kernel_cmdline; - const char *initrd_filename; -} loaderparams; - -typedef struct ResetData { - MIPSCPU *cpu; - uint64_t vector; -} ResetData; - -static uint64_t load_kernel(void) -{ - uint64_t entry, kernel_high, initrd_size; - long kernel_size; - ram_addr_t initrd_offset; - - kernel_size = load_elf(loaderparams.kernel_filename, NULL, - cpu_mips_kseg0_to_phys, NULL, - &entry, NULL, - &kernel_high, NULL, - TARGET_BIG_ENDIAN ? 
ELFDATA2MSB : ELFDATA2LSB, - EM_MIPS, 1, 0); - if (kernel_size < 0) { - error_report("could not load kernel '%s': %s", - loaderparams.kernel_filename, - load_elf_strerror(kernel_size)); - exit(1); - } - - /* load initrd */ - initrd_size = 0; - initrd_offset = 0; - if (loaderparams.initrd_filename) { - initrd_size = get_image_size(loaderparams.initrd_filename); - if (initrd_size > 0) { - initrd_offset = ROUND_UP(kernel_high, INITRD_PAGE_SIZE); - if (initrd_offset + initrd_size > loaderparams.ram_size) { - error_report("memory too small for initial ram disk '%s'", - loaderparams.initrd_filename); - exit(1); - } - initrd_size = load_image_targphys(loaderparams.initrd_filename, - initrd_offset, loaderparams.ram_size - initrd_offset); - } - if (initrd_size == (target_ulong) -1) { - error_report("could not load initial ram disk '%s'", - loaderparams.initrd_filename); - exit(1); - } - } - return entry; -} - -static void main_cpu_reset(void *opaque) -{ - ResetData *s = (ResetData *)opaque; - CPUMIPSState *env = &s->cpu->env; - - cpu_reset(CPU(s->cpu)); - env->active_tc.PC = s->vector & ~(target_ulong)1; - if (s->vector & 1) { - env->hflags |= MIPS_HFLAG_M16; - } -} - -static void mipsnet_init(int base, qemu_irq irq) -{ - DeviceState *dev; - SysBusDevice *s; - - dev = qemu_create_nic_device("mipsnet", true, NULL); - if (!dev) { - return; - } - - s = SYS_BUS_DEVICE(dev); - sysbus_realize_and_unref(s, &error_fatal); - sysbus_connect_irq(s, 0, irq); - memory_region_add_subregion(get_system_io(), - base, - sysbus_mmio_get_region(s, 0)); -} - -static void -mips_mipssim_init(MachineState *machine) -{ - const char *kernel_filename = machine->kernel_filename; - const char *kernel_cmdline = machine->kernel_cmdline; - const char *initrd_filename = machine->initrd_filename; - const char *bios_name = TARGET_BIG_ENDIAN ? 
"mips_bios.bin" - : "mipsel_bios.bin"; - char *filename; - MemoryRegion *address_space_mem = get_system_memory(); - MemoryRegion *isa = g_new(MemoryRegion, 1); - MemoryRegion *bios = g_new(MemoryRegion, 1); - Clock *cpuclk; - MIPSCPU *cpu; - CPUMIPSState *env; - ResetData *reset_info; - int bios_size; - - cpuclk = clock_new(OBJECT(machine), "cpu-refclk"); -#ifdef TARGET_MIPS64 - clock_set_hz(cpuclk, 6000000); /* 6 MHz */ -#else - clock_set_hz(cpuclk, 12000000); /* 12 MHz */ -#endif - - /* Init CPUs. */ - cpu = mips_cpu_create_with_clock(machine->cpu_type, cpuclk, - TARGET_BIG_ENDIAN); - env = &cpu->env; - - reset_info = g_new0(ResetData, 1); - reset_info->cpu = cpu; - reset_info->vector = env->active_tc.PC; - qemu_register_reset(main_cpu_reset, reset_info); - - /* Allocate RAM. */ - memory_region_init_rom(bios, NULL, "mips_mipssim.bios", BIOS_SIZE, - &error_fatal); - - memory_region_add_subregion(address_space_mem, 0, machine->ram); - - /* Map the BIOS / boot exception handler. */ - memory_region_add_subregion(address_space_mem, 0x1fc00000LL, bios); - /* Load a BIOS / boot exception handler image. */ - filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, - machine->firmware ?: bios_name); - if (filename) { - bios_size = load_image_targphys(filename, 0x1fc00000LL, BIOS_SIZE); - g_free(filename); - } else { - bios_size = -1; - } - if ((bios_size < 0 || bios_size > BIOS_SIZE) && - machine->firmware && !qtest_enabled()) { - /* Bail out if we have neither a kernel image nor boot vector code. */ - error_report("Could not load MIPS bios '%s'", machine->firmware); - exit(1); - } else { - /* We have a boot vector start address. */ - env->active_tc.PC = (target_long)(int32_t)0xbfc00000; - } - - if (kernel_filename) { - loaderparams.ram_size = machine->ram_size; - loaderparams.kernel_filename = kernel_filename; - loaderparams.kernel_cmdline = kernel_cmdline; - loaderparams.initrd_filename = initrd_filename; - reset_info->vector = load_kernel(); - } - - /* Init CPU internal devices. 
*/ - cpu_mips_irq_init_cpu(cpu); - cpu_mips_clock_init(cpu); - - /* - * Register 64 KB of ISA IO space at 0x1fd00000. But without interrupts - * (except for the hardcoded serial port interrupt) -device cannot work, - * so do not expose the ISA bus to the user. - */ - memory_region_init_alias(isa, NULL, "isa_mmio", - get_system_io(), 0, 0x00010000); - memory_region_add_subregion(get_system_memory(), 0x1fd00000, isa); - - /* - * A single 16450 sits at offset 0x3f8. It is attached to - * MIPS CPU INT2, which is interrupt 4. - */ - if (serial_hd(0)) { - DeviceState *dev = qdev_new(TYPE_SERIAL_MM); - - qdev_prop_set_chr(dev, "chardev", serial_hd(0)); - qdev_prop_set_uint8(dev, "regshift", 0); - qdev_prop_set_uint8(dev, "endianness", DEVICE_LITTLE_ENDIAN); - sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); - sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, env->irq[4]); - memory_region_add_subregion(get_system_io(), 0x3f8, - sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0)); - } - - /* MIPSnet uses the MIPS CPU INT0, which is interrupt 2. 
*/ - mipsnet_init(0x4200, env->irq[2]); -} - -static void mips_mipssim_machine_init(MachineClass *mc) -{ - mc->desc = "MIPS MIPSsim platform"; - mc->init = mips_mipssim_init; -#ifdef TARGET_MIPS64 - mc->default_cpu_type = MIPS_CPU_TYPE_NAME("5Kf"); -#else - mc->default_cpu_type = MIPS_CPU_TYPE_NAME("24Kf"); -#endif - mc->default_ram_id = "mips_mipssim.ram"; -} - -DEFINE_MACHINE("mipssim", mips_mipssim_machine_init) diff --git a/hw/misc/aspeed_sbc.c b/hw/misc/aspeed_sbc.c index a7d101ba71f40..2fc5db749d23e 100644 --- a/hw/misc/aspeed_sbc.c +++ b/hw/misc/aspeed_sbc.c @@ -15,9 +15,14 @@ #include "hw/misc/aspeed_sbc.h" #include "qapi/error.h" #include "migration/vmstate.h" +#include "trace.h" #define R_PROT (0x000 / 4) +#define R_CMD (0x004 / 4) +#define R_ADDR (0x010 / 4) #define R_STATUS (0x014 / 4) +#define R_CAMP1 (0x020 / 4) +#define R_CAMP2 (0x024 / 4) #define R_QSR (0x040 / 4) /* R_STATUS */ @@ -41,6 +46,20 @@ #define QSR_RSA_MASK (0x3 << 12) #define QSR_HASH_MASK (0x3 << 10) +#define OTP_MEMORY_SIZE 0x4000 +/* OTP command */ +#define SBC_OTP_CMD_READ 0x23b1e361 +#define SBC_OTP_CMD_WRITE 0x23b1e362 +#define SBC_OTP_CMD_PROG 0x23b1e364 + +#define OTP_DATA_DWORD_COUNT (0x800) +#define OTP_TOTAL_DWORD_COUNT (0x1000) + +/* Voltage mode */ +#define MODE_REGISTER (0x1000) +#define MODE_REGISTER_A (0x3000) +#define MODE_REGISTER_B (0x5000) + static uint64_t aspeed_sbc_read(void *opaque, hwaddr addr, unsigned int size) { AspeedSBCState *s = ASPEED_SBC(opaque); @@ -57,6 +76,142 @@ static uint64_t aspeed_sbc_read(void *opaque, hwaddr addr, unsigned int size) return s->regs[addr]; } +static bool aspeed_sbc_otp_read(AspeedSBCState *s, + uint32_t otp_addr) +{ + MemTxResult ret; + AspeedOTPState *otp = &s->otp; + uint32_t value, otp_offset; + bool is_data = false; + + if (otp_addr < OTP_DATA_DWORD_COUNT) { + is_data = true; + } else if (otp_addr >= OTP_TOTAL_DWORD_COUNT) { + qemu_log_mask(LOG_GUEST_ERROR, + "Invalid OTP addr 0x%x\n", + otp_addr); + return false; + } + + 
otp_offset = otp_addr << 2; + ret = address_space_read(&otp->as, otp_offset, MEMTXATTRS_UNSPECIFIED, + &value, sizeof(value)); + if (ret != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to read OTP memory, addr = %x\n", + otp_addr); + return false; + } + s->regs[R_CAMP1] = value; + trace_aspeed_sbc_otp_read(otp_addr, value); + + if (is_data) { + ret = address_space_read(&otp->as, otp_offset + 4, + MEMTXATTRS_UNSPECIFIED, + &value, sizeof(value)); + if (ret != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to read OTP memory, addr = %x\n", + otp_addr); + return false; + } + s->regs[R_CAMP2] = value; + trace_aspeed_sbc_otp_read(otp_addr + 1, value); + } + + return true; +} + +static bool mode_handler(uint32_t otp_addr) +{ + switch (otp_addr) { + case MODE_REGISTER: + case MODE_REGISTER_A: + case MODE_REGISTER_B: + /* HW behavior, do nothing here */ + return true; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "Unsupported address 0x%x\n", + otp_addr); + return false; + } +} + +static bool aspeed_sbc_otp_write(AspeedSBCState *s, + uint32_t otp_addr) +{ + if (otp_addr == 0) { + trace_aspeed_sbc_ignore_cmd(otp_addr); + return true; + } else { + if (mode_handler(otp_addr) == false) { + return false; + } + } + + return true; +} + +static bool aspeed_sbc_otp_prog(AspeedSBCState *s, + uint32_t otp_addr) +{ + MemTxResult ret; + AspeedOTPState *otp = &s->otp; + uint32_t value = s->regs[R_CAMP1]; + + ret = address_space_write(&otp->as, otp_addr, MEMTXATTRS_UNSPECIFIED, + &value, sizeof(value)); + if (ret != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to write OTP memory, addr = %x\n", + otp_addr); + return false; + } + + trace_aspeed_sbc_otp_prog(otp_addr, value); + + return true; +} + +static void aspeed_sbc_handle_command(void *opaque, uint32_t cmd) +{ + AspeedSBCState *s = ASPEED_SBC(opaque); + AspeedSBCClass *sc = ASPEED_SBC_GET_CLASS(opaque); + bool ret = false; + uint32_t otp_addr; + + if (!sc->has_otp) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: 
OTP memory is not supported\n", + __func__); + return; + } + + s->regs[R_STATUS] &= ~(OTP_MEM_IDLE | OTP_IDLE); + otp_addr = s->regs[R_ADDR]; + + switch (cmd) { + case SBC_OTP_CMD_READ: + ret = aspeed_sbc_otp_read(s, otp_addr); + break; + case SBC_OTP_CMD_WRITE: + ret = aspeed_sbc_otp_write(s, otp_addr); + break; + case SBC_OTP_CMD_PROG: + ret = aspeed_sbc_otp_prog(s, otp_addr); + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Unknown command 0x%x\n", + __func__, cmd); + break; + } + + trace_aspeed_sbc_handle_cmd(cmd, otp_addr, ret); + s->regs[R_STATUS] |= (OTP_MEM_IDLE | OTP_IDLE); +} + static void aspeed_sbc_write(void *opaque, hwaddr addr, uint64_t data, unsigned int size) { @@ -78,6 +233,9 @@ static void aspeed_sbc_write(void *opaque, hwaddr addr, uint64_t data, "%s: write to read only register 0x%" HWADDR_PRIx "\n", __func__, addr << 2); return; + case R_CMD: + aspeed_sbc_handle_command(opaque, data); + return; default: break; } @@ -115,10 +273,30 @@ static void aspeed_sbc_reset(DeviceState *dev) s->regs[R_QSR] = s->signing_settings; } +static void aspeed_sbc_instance_init(Object *obj) +{ + AspeedSBCClass *sc = ASPEED_SBC_GET_CLASS(obj); + AspeedSBCState *s = ASPEED_SBC(obj); + + if (sc->has_otp) { + object_initialize_child(OBJECT(s), "otp", &s->otp, + TYPE_ASPEED_OTP); + } +} + static void aspeed_sbc_realize(DeviceState *dev, Error **errp) { AspeedSBCState *s = ASPEED_SBC(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedSBCClass *sc = ASPEED_SBC_GET_CLASS(dev); + + if (sc->has_otp) { + object_property_set_int(OBJECT(&s->otp), "size", + OTP_MEMORY_SIZE, &error_abort); + if (!qdev_realize(DEVICE(&s->otp), NULL, errp)) { + return; + } + } memory_region_init_io(&s->iomem, OBJECT(s), &aspeed_sbc_ops, s, TYPE_ASPEED_SBC, 0x1000); @@ -155,6 +333,7 @@ static const TypeInfo aspeed_sbc_info = { .name = TYPE_ASPEED_SBC, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(AspeedSBCState), + .instance_init = aspeed_sbc_instance_init, .class_init = 
aspeed_sbc_class_init, .class_size = sizeof(AspeedSBCClass) }; @@ -162,8 +341,10 @@ static const TypeInfo aspeed_sbc_info = { static void aspeed_ast2600_sbc_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSBCClass *sc = ASPEED_SBC_CLASS(klass); dc->desc = "AST2600 Secure Boot Controller"; + sc->has_otp = true; } static const TypeInfo aspeed_ast2600_sbc_info = { @@ -172,9 +353,25 @@ static const TypeInfo aspeed_ast2600_sbc_info = { .class_init = aspeed_ast2600_sbc_class_init, }; +static void aspeed_ast10x0_sbc_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedSBCClass *sc = ASPEED_SBC_CLASS(klass); + + dc->desc = "AST10X0 Secure Boot Controller"; + sc->has_otp = true; +} + +static const TypeInfo aspeed_ast10x0_sbc_info = { + .name = TYPE_ASPEED_AST10X0_SBC, + .parent = TYPE_ASPEED_SBC, + .class_init = aspeed_ast10x0_sbc_class_init, +}; + static void aspeed_sbc_register_types(void) { type_register_static(&aspeed_ast2600_sbc_info); + type_register_static(&aspeed_ast10x0_sbc_info); type_register_static(&aspeed_sbc_info); } diff --git a/hw/misc/ivshmem-flat.c b/hw/misc/ivshmem-flat.c index fe4be6be178cf..27ee8c921830e 100644 --- a/hw/misc/ivshmem-flat.c +++ b/hw/misc/ivshmem-flat.c @@ -138,6 +138,8 @@ static void ivshmem_flat_remove_peer(IvshmemFTState *s, uint16_t peer_id) static void ivshmem_flat_add_vector(IvshmemFTState *s, IvshmemPeer *peer, int vector_fd) { + Error *err = NULL; + if (peer->vector_counter >= IVSHMEM_MAX_VECTOR_NUM) { trace_ivshmem_flat_add_vector_failure(peer->vector_counter, vector_fd, peer->id); @@ -154,7 +156,10 @@ static void ivshmem_flat_add_vector(IvshmemFTState *s, IvshmemPeer *peer, * peer. 
*/ peer->vector[peer->vector_counter].id = peer->vector_counter; - g_unix_set_fd_nonblocking(vector_fd, true, NULL); + if (!qemu_set_blocking(vector_fd, false, &err)) { + /* FIXME handle the error */ + warn_report_err(err); + } event_notifier_init_fd(&peer->vector[peer->vector_counter].event_notifier, vector_fd); diff --git a/hw/misc/ivshmem-pci.c b/hw/misc/ivshmem-pci.c index 5a10bca633d07..2748db9286e52 100644 --- a/hw/misc/ivshmem-pci.c +++ b/hw/misc/ivshmem-pci.c @@ -479,6 +479,11 @@ static void process_msg_shmem(IVShmemState *s, int fd, Error **errp) struct stat buf; size_t size; + if (fd < 0) { + error_setg(errp, "server didn't provide fd with shared memory message"); + return; + } + if (s->ivshmem_bar2) { error_setg(errp, "server sent unexpected shared memory message"); close(fd); @@ -535,7 +540,12 @@ static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); event_notifier_init_fd(&peer->eventfds[vector], fd); - g_unix_set_fd_nonblocking(fd, true, NULL); /* msix/irqfd poll non block */ + + /* msix/irqfd poll non block */ + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return; + } if (posn == s->vm_id) { setup_interrupt(s, vector, errp); @@ -553,7 +563,9 @@ static void process_msg(IVShmemState *s, int64_t msg, int fd, Error **errp) if (msg < -1 || msg > IVSHMEM_MAX_PEERS) { error_setg(errp, "server sent invalid message %" PRId64, msg); - close(fd); + if (fd >= 0) { + close(fd); + } return; } diff --git a/hw/misc/max78000_aes.c b/hw/misc/max78000_aes.c index 0bfb2f02b5b32..d883ddd2b614b 100644 --- a/hw/misc/max78000_aes.c +++ b/hw/misc/max78000_aes.c @@ -79,6 +79,12 @@ static void max78000_aes_do_crypto(Max78000AesState *s) keydata += 8; } + /* + * The MAX78000 AES engine stores an internal key, which it uses only + * for decryption. 
This results in the slighly odd looking pairs of + * set_encrypt and set_decrypt calls below; s->internal_key is + * being stored for later use in both cases. + */ AES_KEY key; if ((s->ctrl & TYPE) == 0) { AES_set_encrypt_key(keydata, keylen, &key); diff --git a/hw/misc/trace-events b/hw/misc/trace-events index e3f64c0ff6b0a..eeb9243898e9c 100644 --- a/hw/misc/trace-events +++ b/hw/misc/trace-events @@ -90,6 +90,12 @@ slavio_sysctrl_mem_readl(uint32_t ret) "Read system control 0x%08x" slavio_led_mem_writew(uint32_t val) "Write diagnostic LED 0x%04x" slavio_led_mem_readw(uint32_t ret) "Read diagnostic LED 0x%04x" +# aspeed_sbc.c +aspeed_sbc_ignore_cmd(uint32_t cmd) "Ignoring command 0x%" PRIx32 +aspeed_sbc_handle_cmd(uint32_t cmd, uint32_t addr, bool ret) "Handling command 0x%" PRIx32 " for OTP addr 0x%" PRIx32 " Result: %d" +aspeed_sbc_otp_read(uint32_t addr, uint32_t value) "OTP Memory read: addr 0x%" PRIx32 " value 0x%" PRIx32 +aspeed_sbc_otp_prog(uint32_t addr, uint32_t value) "OTP Memory write: addr 0x%" PRIx32 " value 0x%" PRIx32 + # aspeed_scu.c aspeed_scu_write(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 aspeed_scu_read(uint64_t offset, unsigned size, uint32_t data) "To 0x%" PRIx64 " of size %u: 0x%" PRIx32 diff --git a/hw/misc/xlnx-versal-cframe-reg.c b/hw/misc/xlnx-versal-cframe-reg.c index 1ce083e2409d2..95e167b9213bc 100644 --- a/hw/misc/xlnx-versal-cframe-reg.c +++ b/hw/misc/xlnx-versal-cframe-reg.c @@ -693,6 +693,14 @@ static void cframe_reg_init(Object *obj) fifo32_create(&s->new_f_data, FRAME_NUM_WORDS); } +static void cframe_reg_finalize(Object *obj) +{ + XlnxVersalCFrameReg *s = XLNX_VERSAL_CFRAME_REG(obj); + + fifo32_destroy(&s->new_f_data); + g_tree_destroy(s->cframes); +} + static const VMStateDescription vmstate_cframe = { .name = "cframe", .version_id = 1, @@ -833,6 +841,7 @@ static const TypeInfo cframe_reg_info = { .instance_size = sizeof(XlnxVersalCFrameReg), .class_init = cframe_reg_class_init, 
.instance_init = cframe_reg_init, + .instance_finalize = cframe_reg_finalize, .interfaces = (const InterfaceInfo[]) { { TYPE_XLNX_CFI_IF }, { } diff --git a/hw/misc/xlnx-versal-crl.c b/hw/misc/xlnx-versal-crl.c index 08ff2fcc24ff9..5987f32c7165d 100644 --- a/hw/misc/xlnx-versal-crl.c +++ b/hw/misc/xlnx-versal-crl.c @@ -1,16 +1,13 @@ /* * QEMU model of the Clock-Reset-LPD (CRL). * - * Copyright (c) 2022 Advanced Micro Devices, Inc. + * Copyright (c) 2022-2025 Advanced Micro Devices, Inc. * SPDX-License-Identifier: GPL-2.0-or-later * * Written by Edgar E. Iglesias */ #include "qemu/osdep.h" -#include "qapi/error.h" -#include "qemu/log.h" -#include "qemu/bitops.h" #include "migration/vmstate.h" #include "hw/qdev-properties.h" #include "hw/sysbus.h" @@ -58,90 +55,144 @@ static uint64_t crl_disable_prew(RegisterInfo *reg, uint64_t val64) return 0; } -static void crl_reset_dev(XlnxVersalCRL *s, DeviceState *dev, - bool rst_old, bool rst_new) +static DeviceState **versal_decode_periph_rst(XlnxVersalCRLBase *s, + hwaddr addr, size_t *count) { - device_cold_reset(dev); -} + size_t idx; + XlnxVersalCRL *xvc = XLNX_VERSAL_CRL(s); -static void crl_reset_cpu(XlnxVersalCRL *s, ARMCPU *armcpu, - bool rst_old, bool rst_new) -{ - if (rst_new) { - arm_set_cpu_off(arm_cpu_mp_affinity(armcpu)); - } else { - arm_set_cpu_on_and_reset(arm_cpu_mp_affinity(armcpu)); - } -} + *count = 1; -#define REGFIELD_RESET(type, s, reg, f, new_val, dev) { \ - bool old_f = ARRAY_FIELD_EX32((s)->regs, reg, f); \ - bool new_f = FIELD_EX32(new_val, reg, f); \ - \ - /* Detect edges. 
*/ \ - if (dev && old_f != new_f) { \ - crl_reset_ ## type(s, dev, old_f, new_f); \ - } \ -} + switch (addr) { + case A_RST_CPU_R5: + return xvc->cfg.rpu; -static uint64_t crl_rst_r5_prew(RegisterInfo *reg, uint64_t val64) -{ - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + case A_RST_ADMA: + /* A single register fans out to all DMA reset inputs */ + *count = ARRAY_SIZE(xvc->cfg.adma); + return xvc->cfg.adma; - REGFIELD_RESET(cpu, s, RST_CPU_R5, RESET_CPU0, val64, s->cfg.cpu_r5[0]); - REGFIELD_RESET(cpu, s, RST_CPU_R5, RESET_CPU1, val64, s->cfg.cpu_r5[1]); - return val64; -} + case A_RST_UART0 ... A_RST_UART1: + idx = (addr - A_RST_UART0) / sizeof(uint32_t); + return xvc->cfg.uart + idx; -static uint64_t crl_rst_adma_prew(RegisterInfo *reg, uint64_t val64) -{ - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); - int i; + case A_RST_GEM0 ... A_RST_GEM1: + idx = (addr - A_RST_GEM0) / sizeof(uint32_t); + return xvc->cfg.gem + idx; + + case A_RST_USB0: + return xvc->cfg.usb; - /* A single register fans out to all ADMA reset inputs. */ - for (i = 0; i < ARRAY_SIZE(s->cfg.adma); i++) { - REGFIELD_RESET(dev, s, RST_ADMA, RESET, val64, s->cfg.adma[i]); + default: + /* invalid or unimplemented */ + g_assert_not_reached(); } - return val64; } -static uint64_t crl_rst_uart0_prew(RegisterInfo *reg, uint64_t val64) +static DeviceState **versal2_decode_periph_rst(XlnxVersalCRLBase *s, + hwaddr addr, size_t *count) { - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + size_t idx; + XlnxVersal2CRL *xvc = XLNX_VERSAL2_CRL(s); - REGFIELD_RESET(dev, s, RST_UART0, RESET, val64, s->cfg.uart[0]); - return val64; -} + *count = 1; -static uint64_t crl_rst_uart1_prew(RegisterInfo *reg, uint64_t val64) -{ - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + switch (addr) { + case A_VERSAL2_RST_RPU_A ... 
A_VERSAL2_RST_RPU_E: + idx = (addr - A_VERSAL2_RST_RPU_A) / sizeof(uint32_t); + idx *= 2; /* two RPUs per RST_RPU_x registers */ + return xvc->cfg.rpu + idx; - REGFIELD_RESET(dev, s, RST_UART1, RESET, val64, s->cfg.uart[1]); - return val64; -} + case A_VERSAL2_RST_ADMA: + /* A single register fans out to all DMA reset inputs */ + *count = ARRAY_SIZE(xvc->cfg.adma); + return xvc->cfg.adma; -static uint64_t crl_rst_gem0_prew(RegisterInfo *reg, uint64_t val64) -{ - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + case A_VERSAL2_RST_SDMA: + *count = ARRAY_SIZE(xvc->cfg.sdma); + return xvc->cfg.sdma; - REGFIELD_RESET(dev, s, RST_GEM0, RESET, val64, s->cfg.gem[0]); - return val64; + case A_VERSAL2_RST_UART0 ... A_VERSAL2_RST_UART1: + idx = (addr - A_VERSAL2_RST_UART0) / sizeof(uint32_t); + return xvc->cfg.uart + idx; + + case A_VERSAL2_RST_GEM0 ... A_VERSAL2_RST_GEM1: + idx = (addr - A_VERSAL2_RST_GEM0) / sizeof(uint32_t); + return xvc->cfg.gem + idx; + + case A_VERSAL2_RST_USB0 ... A_VERSAL2_RST_USB1: + idx = (addr - A_VERSAL2_RST_USB0) / sizeof(uint32_t); + return xvc->cfg.usb + idx; + + case A_VERSAL2_RST_CAN0 ... 
A_VERSAL2_RST_CAN3: + idx = (addr - A_VERSAL2_RST_CAN0) / sizeof(uint32_t); + return xvc->cfg.can + idx; + + default: + /* invalid or unimplemented */ + return NULL; + } } -static uint64_t crl_rst_gem1_prew(RegisterInfo *reg, uint64_t val64) +static uint64_t crl_rst_cpu_prew(RegisterInfo *reg, uint64_t val64) { - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + XlnxVersalCRLBase *s = XLNX_VERSAL_CRL_BASE(reg->opaque); + XlnxVersalCRLBaseClass *xvcbc = XLNX_VERSAL_CRL_BASE_GET_CLASS(s); + DeviceState **dev; + size_t i, count; + + dev = xvcbc->decode_periph_rst(s, reg->access->addr, &count); + + for (i = 0; i < 2; i++) { + bool prev, new; + uint64_t aff; + + prev = extract32(s->regs[reg->access->addr / 4], i, 1); + new = extract32(val64, i, 1); + + if (prev == new) { + continue; + } + + aff = arm_cpu_mp_affinity(ARM_CPU(dev[i])); + + if (new) { + arm_set_cpu_off(aff); + } else { + arm_set_cpu_on_and_reset(aff); + } + } - REGFIELD_RESET(dev, s, RST_GEM1, RESET, val64, s->cfg.gem[1]); return val64; } -static uint64_t crl_rst_usb_prew(RegisterInfo *reg, uint64_t val64) +static uint64_t crl_rst_dev_prew(RegisterInfo *reg, uint64_t val64) { - XlnxVersalCRL *s = XLNX_VERSAL_CRL(reg->opaque); + XlnxVersalCRLBase *s = XLNX_VERSAL_CRL_BASE(reg->opaque); + XlnxVersalCRLBaseClass *xvcbc = XLNX_VERSAL_CRL_BASE_GET_CLASS(s); + DeviceState **dev; + bool prev, new; + size_t i, count; + + dev = xvcbc->decode_periph_rst(s, reg->access->addr, &count); + + if (dev == NULL) { + return val64; + } + + prev = s->regs[reg->access->addr / 4] & 0x1; + new = val64 & 0x1; + + if (prev == new) { + return val64; + } + + for (i = 0; i < count; i++) { + if (dev[i]) { + device_cold_reset(dev[i]); + } + } - REGFIELD_RESET(dev, s, RST_USB0, RESET, val64, s->cfg.usb); return val64; } @@ -247,27 +298,27 @@ static const RegisterAccessInfo crl_regs_info[] = { },{ .name = "RST_CPU_R5", .addr = A_RST_CPU_R5, .reset = 0x17, .rsvd = 0x8, - .pre_write = crl_rst_r5_prew, + .pre_write = crl_rst_cpu_prew, },{ 
.name = "RST_ADMA", .addr = A_RST_ADMA, .reset = 0x1, - .pre_write = crl_rst_adma_prew, + .pre_write = crl_rst_dev_prew, },{ .name = "RST_GEM0", .addr = A_RST_GEM0, .reset = 0x1, - .pre_write = crl_rst_gem0_prew, + .pre_write = crl_rst_dev_prew, },{ .name = "RST_GEM1", .addr = A_RST_GEM1, .reset = 0x1, - .pre_write = crl_rst_gem1_prew, + .pre_write = crl_rst_dev_prew, },{ .name = "RST_SPARE", .addr = A_RST_SPARE, .reset = 0x1, },{ .name = "RST_USB0", .addr = A_RST_USB0, .reset = 0x1, - .pre_write = crl_rst_usb_prew, + .pre_write = crl_rst_dev_prew, },{ .name = "RST_UART0", .addr = A_RST_UART0, .reset = 0x1, - .pre_write = crl_rst_uart0_prew, + .pre_write = crl_rst_dev_prew, },{ .name = "RST_UART1", .addr = A_RST_UART1, .reset = 0x1, - .pre_write = crl_rst_uart1_prew, + .pre_write = crl_rst_dev_prew, },{ .name = "RST_SPI0", .addr = A_RST_SPI0, .reset = 0x1, },{ .name = "RST_SPI1", .addr = A_RST_SPI1, @@ -301,7 +352,247 @@ static const RegisterAccessInfo crl_regs_info[] = { } }; -static void crl_reset_enter(Object *obj, ResetType type) +static const RegisterAccessInfo versal2_crl_regs_info[] = { + { .name = "ERR_CTRL", .addr = A_VERSAL2_ERR_CTRL, + .reset = 0x1, + },{ .name = "WPROT", .addr = A_VERSAL2_WPROT, + },{ .name = "RPLL_CTRL", .addr = A_VERSAL2_RPLL_CTRL, + .reset = 0x24809, + .rsvd = 0xf88c00f6, + },{ .name = "RPLL_CFG", .addr = A_VERSAL2_RPLL_CFG, + .reset = 0x7e5dcc6c, + .rsvd = 0x1801210, + },{ .name = "FLXPLL_CTRL", .addr = A_VERSAL2_FLXPLL_CTRL, + .reset = 0x24809, + .rsvd = 0xf88c00f6, + },{ .name = "FLXPLL_CFG", .addr = A_VERSAL2_FLXPLL_CFG, + .reset = 0x7e5dcc6c, + .rsvd = 0x1801210, + },{ .name = "PLL_STATUS", .addr = A_VERSAL2_PLL_STATUS, + .reset = 0xf, + .rsvd = 0xf0, + .ro = 0xf, + },{ .name = "RPLL_TO_XPD_CTRL", .addr = A_VERSAL2_RPLL_TO_XPD_CTRL, + .reset = 0x2000100, + .rsvd = 0xfdfc00ff, + },{ .name = "LPX_TOP_SWITCH_CTRL", .addr = A_VERSAL2_LPX_TOP_SWITCH_CTRL, + .reset = 0xe000300, + .rsvd = 0xf1fc00f8, + },{ .name = "LPX_LSBUS_CLK_CTRL", 
.addr = A_VERSAL2_LPX_LSBUS_CLK_CTRL, + .reset = 0x2000800, + .rsvd = 0xfdfc00f8, + },{ .name = "RPU_CLK_CTRL", .addr = A_VERSAL2_RPU_CLK_CTRL, + .reset = 0x3f00300, + .rsvd = 0xfc0c00f8, + },{ .name = "OCM_CLK_CTRL", .addr = A_VERSAL2_OCM_CLK_CTRL, + .reset = 0x1e00000, + .rsvd = 0xfe1fffff, + },{ .name = "IOU_SWITCH_CLK_CTRL", .addr = A_VERSAL2_IOU_SWITCH_CLK_CTRL, + .reset = 0x2000500, + .rsvd = 0xfdfc00f8, + },{ .name = "GEM0_REF_CTRL", .addr = A_VERSAL2_GEM0_REF_CTRL, + .reset = 0xe000a00, + .rsvd = 0xf1fc00f8, + },{ .name = "GEM1_REF_CTRL", .addr = A_VERSAL2_GEM1_REF_CTRL, + .reset = 0xe000a00, + .rsvd = 0xf1fc00f8, + },{ .name = "GEM_TSU_REF_CLK_CTRL", .addr = A_VERSAL2_GEM_TSU_REF_CLK_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "USB0_BUS_REF_CLK_CTRL", + .addr = A_VERSAL2_USB0_BUS_REF_CLK_CTRL, + .reset = 0x2001900, + .rsvd = 0xfdfc00f8, + },{ .name = "USB1_BUS_REF_CLK_CTRL", + .addr = A_VERSAL2_USB1_BUS_REF_CLK_CTRL, + .reset = 0x2001900, + .rsvd = 0xfdfc00f8, + },{ .name = "UART0_REF_CLK_CTRL", .addr = A_VERSAL2_UART0_REF_CLK_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "UART1_REF_CLK_CTRL", .addr = A_VERSAL2_UART1_REF_CLK_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "SPI0_REF_CLK_CTRL", .addr = A_VERSAL2_SPI0_REF_CLK_CTRL, + .reset = 0x600, + .rsvd = 0xfdfc00f8, + },{ .name = "SPI1_REF_CLK_CTRL", .addr = A_VERSAL2_SPI1_REF_CLK_CTRL, + .reset = 0x600, + .rsvd = 0xfdfc00f8, + },{ .name = "CAN0_REF_2X_CTRL", .addr = A_VERSAL2_CAN0_REF_2X_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "CAN1_REF_2X_CTRL", .addr = A_VERSAL2_CAN1_REF_2X_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "CAN2_REF_2X_CTRL", .addr = A_VERSAL2_CAN2_REF_2X_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "CAN3_REF_2X_CTRL", .addr = A_VERSAL2_CAN3_REF_2X_CTRL, + .reset = 0xc00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C0_REF_CTRL", .addr = A_VERSAL2_I3C0_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, 
+ },{ .name = "I3C1_REF_CTRL", .addr = A_VERSAL2_I3C1_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C2_REF_CTRL", .addr = A_VERSAL2_I3C2_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C3_REF_CTRL", .addr = A_VERSAL2_I3C3_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C4_REF_CTRL", .addr = A_VERSAL2_I3C4_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C5_REF_CTRL", .addr = A_VERSAL2_I3C5_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C6_REF_CTRL", .addr = A_VERSAL2_I3C6_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "I3C7_REF_CTRL", .addr = A_VERSAL2_I3C7_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "DBG_LPX_CTRL", .addr = A_VERSAL2_DBG_LPX_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "TIMESTAMP_REF_CTRL", .addr = A_VERSAL2_TIMESTAMP_REF_CTRL, + .reset = 0x2000c00, + .rsvd = 0xfdfc00f8, + },{ .name = "SAFETY_CHK", .addr = A_VERSAL2_SAFETY_CHK, + },{ .name = "ASU_CLK_CTRL", .addr = A_VERSAL2_ASU_CLK_CTRL, + .reset = 0x2000f04, + .rsvd = 0xfdfc00f8, + },{ .name = "DBG_TSTMP_CLK_CTRL", .addr = A_VERSAL2_DBG_TSTMP_CLK_CTRL, + .reset = 0x300, + .rsvd = 0xfdfc00f8, + },{ .name = "MMI_TOPSW_CLK_CTRL", .addr = A_VERSAL2_MMI_TOPSW_CLK_CTRL, + .reset = 0x2000300, + .rsvd = 0xfdfc00f8, + },{ .name = "WWDT_PLL_CLK_CTRL", .addr = A_VERSAL2_WWDT_PLL_CLK_CTRL, + .reset = 0xc00, + .rsvd = 0xfffc00f8, + },{ .name = "RCLK_CTRL", .addr = A_VERSAL2_RCLK_CTRL, + .rsvd = 0xc040, + },{ .name = "RST_RPU_A", .addr = A_VERSAL2_RST_RPU_A, + .reset = 0x10303, + .rsvd = 0xfffefcfc, + .pre_write = crl_rst_cpu_prew, + },{ .name = "RST_RPU_B", .addr = A_VERSAL2_RST_RPU_B, + .reset = 0x10303, + .rsvd = 0xfffefcfc, + .pre_write = crl_rst_cpu_prew, + },{ .name = "RST_RPU_C", .addr = A_VERSAL2_RST_RPU_C, + .reset = 0x10303, + .rsvd = 0xfffefcfc, + .pre_write = crl_rst_cpu_prew, + },{ .name = "RST_RPU_D", .addr = 
A_VERSAL2_RST_RPU_D, + .reset = 0x10303, + .rsvd = 0xfffefcfc, + .pre_write = crl_rst_cpu_prew, + },{ .name = "RST_RPU_E", .addr = A_VERSAL2_RST_RPU_E, + .reset = 0x10303, + .rsvd = 0xfffefcfc, + .pre_write = crl_rst_cpu_prew, + },{ .name = "RST_RPU_GD_0", .addr = A_VERSAL2_RST_RPU_GD_0, + .reset = 0x3, + },{ .name = "RST_RPU_GD_1", .addr = A_VERSAL2_RST_RPU_GD_1, + .reset = 0x3, + },{ .name = "RST_ASU_GD", .addr = A_VERSAL2_RST_ASU_GD, + .reset = 0x3, + },{ .name = "RST_ADMA", .addr = A_VERSAL2_RST_ADMA, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_SDMA", .addr = A_VERSAL2_RST_SDMA, + .pre_write = crl_rst_dev_prew, + .reset = 0x1, + },{ .name = "RST_GEM0", .addr = A_VERSAL2_RST_GEM0, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_GEM1", .addr = A_VERSAL2_RST_GEM1, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_USB0", .addr = A_VERSAL2_RST_USB0, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_USB1", .addr = A_VERSAL2_RST_USB1, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_UART0", .addr = A_VERSAL2_RST_UART0, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_UART1", .addr = A_VERSAL2_RST_UART1, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_SPI0", .addr = A_VERSAL2_RST_SPI0, + .reset = 0x1, + },{ .name = "RST_SPI1", .addr = A_VERSAL2_RST_SPI1, + .reset = 0x1, + },{ .name = "RST_CAN0", .addr = A_VERSAL2_RST_CAN0, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_CAN1", .addr = A_VERSAL2_RST_CAN1, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_CAN2", .addr = A_VERSAL2_RST_CAN2, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_CAN3", .addr = A_VERSAL2_RST_CAN3, + .reset = 0x1, + .pre_write = crl_rst_dev_prew, + },{ .name = "RST_I3C0", .addr = A_VERSAL2_RST_I3C0, + .reset = 0x1, + },{ .name = "RST_I3C1", .addr = A_VERSAL2_RST_I3C1, + .reset = 0x1, + },{ .name = 
"RST_I3C2", .addr = A_VERSAL2_RST_I3C2, + .reset = 0x1, + },{ .name = "RST_I3C3", .addr = A_VERSAL2_RST_I3C3, + .reset = 0x1, + },{ .name = "RST_I3C4", .addr = A_VERSAL2_RST_I3C4, + .reset = 0x1, + },{ .name = "RST_I3C5", .addr = A_VERSAL2_RST_I3C5, + .reset = 0x1, + },{ .name = "RST_I3C6", .addr = A_VERSAL2_RST_I3C6, + .reset = 0x1, + },{ .name = "RST_I3C7", .addr = A_VERSAL2_RST_I3C7, + .reset = 0x1, + },{ .name = "RST_DBG_LPX", .addr = A_VERSAL2_RST_DBG_LPX, + .reset = 0x3, + .rsvd = 0xfc, + },{ .name = "RST_GPIO", .addr = A_VERSAL2_RST_GPIO, + .reset = 0x1, + },{ .name = "RST_TTC", .addr = A_VERSAL2_RST_TTC, + .reset = 0xff, + },{ .name = "RST_TIMESTAMP", .addr = A_VERSAL2_RST_TIMESTAMP, + .reset = 0x1, + },{ .name = "RST_SWDT0", .addr = A_VERSAL2_RST_SWDT0, + .reset = 0x1, + },{ .name = "RST_SWDT1", .addr = A_VERSAL2_RST_SWDT1, + .reset = 0x1, + },{ .name = "RST_SWDT2", .addr = A_VERSAL2_RST_SWDT2, + .reset = 0x1, + },{ .name = "RST_SWDT3", .addr = A_VERSAL2_RST_SWDT3, + .reset = 0x1, + },{ .name = "RST_SWDT4", .addr = A_VERSAL2_RST_SWDT4, + .reset = 0x1, + },{ .name = "RST_IPI", .addr = A_VERSAL2_RST_IPI, + },{ .name = "RST_SYSMON", .addr = A_VERSAL2_RST_SYSMON, + },{ .name = "ASU_MB_RST_MODE", .addr = A_VERSAL2_ASU_MB_RST_MODE, + .reset = 0x1, + .rsvd = 0xf8, + },{ .name = "FPX_TOPSW_MUX_CTRL", .addr = A_VERSAL2_FPX_TOPSW_MUX_CTRL, + .reset = 0x1, + },{ .name = "RST_FPX", .addr = A_VERSAL2_RST_FPX, + .reset = 0x3, + },{ .name = "RST_MMI", .addr = A_VERSAL2_RST_MMI, + .reset = 0x1, + },{ .name = "RST_OCM", .addr = A_VERSAL2_RST_OCM, + } +}; + +static void versal_crl_reset_enter(Object *obj, ResetType type) { XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); unsigned int i; @@ -311,7 +602,17 @@ static void crl_reset_enter(Object *obj, ResetType type) } } -static void crl_reset_hold(Object *obj, ResetType type) +static void versal2_crl_reset_enter(Object *obj, ResetType type) +{ + XlnxVersal2CRL *s = XLNX_VERSAL2_CRL(obj); + size_t i; + + for (i = 0; i < 
VERSAL2_CRL_R_MAX; ++i) { + register_reset(&s->regs_info[i]); + } +} + +static void versal_crl_reset_hold(Object *obj, ResetType type) { XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); @@ -328,25 +629,27 @@ static const MemoryRegionOps crl_ops = { }, }; -static void crl_init(Object *obj) +static void versal_crl_init(Object *obj) { XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); + XlnxVersalCRLBase *xvcb = XLNX_VERSAL_CRL_BASE(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; int i; - s->reg_array = - register_init_block32(DEVICE(obj), crl_regs_info, - ARRAY_SIZE(crl_regs_info), - s->regs_info, s->regs, - &crl_ops, - XLNX_VERSAL_CRL_ERR_DEBUG, - CRL_R_MAX * 4); - sysbus_init_mmio(sbd, &s->reg_array->mem); + reg_array = register_init_block32(DEVICE(obj), crl_regs_info, + ARRAY_SIZE(crl_regs_info), + s->regs_info, s->regs, + &crl_ops, + XLNX_VERSAL_CRL_ERR_DEBUG, + CRL_R_MAX * 4); + xvcb->regs = s->regs; + sysbus_init_mmio(sbd, ®_array->mem); sysbus_init_irq(sbd, &s->irq); - for (i = 0; i < ARRAY_SIZE(s->cfg.cpu_r5); ++i) { - object_property_add_link(obj, "cpu_r5[*]", TYPE_ARM_CPU, - (Object **)&s->cfg.cpu_r5[i], + for (i = 0; i < ARRAY_SIZE(s->cfg.rpu); ++i) { + object_property_add_link(obj, "rpu[*]", TYPE_ARM_CPU, + (Object **)&s->cfg.rpu[i], qdev_prop_allow_set_link_before_realize, OBJ_PROP_LINK_STRONG); } @@ -372,19 +675,83 @@ static void crl_init(Object *obj) OBJ_PROP_LINK_STRONG); } - object_property_add_link(obj, "usb", TYPE_DEVICE, - (Object **)&s->cfg.gem[i], - qdev_prop_allow_set_link_before_realize, - OBJ_PROP_LINK_STRONG); + for (i = 0; i < ARRAY_SIZE(s->cfg.usb); ++i) { + object_property_add_link(obj, "usb[*]", TYPE_DEVICE, + (Object **)&s->cfg.usb[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } } -static void crl_finalize(Object *obj) +static void versal2_crl_init(Object *obj) { - XlnxVersalCRL *s = XLNX_VERSAL_CRL(obj); - register_finalize_block(s->reg_array); + XlnxVersal2CRL *s = XLNX_VERSAL2_CRL(obj); + 
XlnxVersalCRLBase *xvcb = XLNX_VERSAL_CRL_BASE(obj); + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; + size_t i; + + reg_array = register_init_block32(DEVICE(obj), versal2_crl_regs_info, + ARRAY_SIZE(versal2_crl_regs_info), + s->regs_info, s->regs, + &crl_ops, + XLNX_VERSAL_CRL_ERR_DEBUG, + VERSAL2_CRL_R_MAX * 4); + xvcb->regs = s->regs; + + sysbus_init_mmio(sbd, ®_array->mem); + + for (i = 0; i < ARRAY_SIZE(s->cfg.rpu); ++i) { + object_property_add_link(obj, "rpu[*]", TYPE_ARM_CPU, + (Object **)&s->cfg.rpu[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.adma); ++i) { + object_property_add_link(obj, "adma[*]", TYPE_DEVICE, + (Object **)&s->cfg.adma[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.sdma); ++i) { + object_property_add_link(obj, "sdma[*]", TYPE_DEVICE, + (Object **)&s->cfg.sdma[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.uart); ++i) { + object_property_add_link(obj, "uart[*]", TYPE_DEVICE, + (Object **)&s->cfg.uart[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.gem); ++i) { + object_property_add_link(obj, "gem[*]", TYPE_DEVICE, + (Object **)&s->cfg.gem[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.usb); ++i) { + object_property_add_link(obj, "usb[*]", TYPE_DEVICE, + (Object **)&s->cfg.usb[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } + + for (i = 0; i < ARRAY_SIZE(s->cfg.can); ++i) { + object_property_add_link(obj, "can[*]", TYPE_DEVICE, + (Object **)&s->cfg.can[i], + qdev_prop_allow_set_link_before_realize, + OBJ_PROP_LINK_STRONG); + } } -static const VMStateDescription vmstate_crl = { +static const VMStateDescription vmstate_versal_crl = { .name = 
TYPE_XLNX_VERSAL_CRL, .version_id = 1, .minimum_version_id = 1, @@ -394,29 +761,68 @@ static const VMStateDescription vmstate_crl = { } }; -static void crl_class_init(ObjectClass *klass, const void *data) +static const VMStateDescription vmstate_versal2_crl = { + .name = TYPE_XLNX_VERSAL2_CRL, + .version_id = 1, + .minimum_version_id = 1, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_ARRAY(regs, XlnxVersal2CRL, VERSAL2_CRL_R_MAX), + VMSTATE_END_OF_LIST(), + } +}; + +static void versal_crl_class_init(ObjectClass *klass, const void *data) { - ResettableClass *rc = RESETTABLE_CLASS(klass); DeviceClass *dc = DEVICE_CLASS(klass); + XlnxVersalCRLBaseClass *xvcc = XLNX_VERSAL_CRL_BASE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); + + dc->vmsd = &vmstate_versal_crl; + rc->phases.enter = versal_crl_reset_enter; + rc->phases.hold = versal_crl_reset_hold; + xvcc->decode_periph_rst = versal_decode_periph_rst; +} - dc->vmsd = &vmstate_crl; +static void versal2_crl_class_init(ObjectClass *klass, const void *data) +{ + XlnxVersalCRLBaseClass *xvcc = XLNX_VERSAL_CRL_BASE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + ResettableClass *rc = RESETTABLE_CLASS(klass); - rc->phases.enter = crl_reset_enter; - rc->phases.hold = crl_reset_hold; + dc->vmsd = &vmstate_versal2_crl; + rc->phases.enter = versal2_crl_reset_enter; + xvcc->decode_periph_rst = versal2_decode_periph_rst; } -static const TypeInfo crl_info = { - .name = TYPE_XLNX_VERSAL_CRL, +static const TypeInfo crl_base_info = { + .name = TYPE_XLNX_VERSAL_CRL_BASE, .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(XlnxVersalCRLBase), + .class_size = sizeof(XlnxVersalCRLBaseClass), + .abstract = true, +}; + +static const TypeInfo versal_crl_info = { + .name = TYPE_XLNX_VERSAL_CRL, + .parent = TYPE_XLNX_VERSAL_CRL_BASE, .instance_size = sizeof(XlnxVersalCRL), - .class_init = crl_class_init, - .instance_init = crl_init, - .instance_finalize = crl_finalize, + .instance_init = versal_crl_init, + 
.class_init = versal_crl_class_init, +}; + +static const TypeInfo versal2_crl_info = { + .name = TYPE_XLNX_VERSAL2_CRL, + .parent = TYPE_XLNX_VERSAL_CRL_BASE, + .instance_size = sizeof(XlnxVersal2CRL), + .instance_init = versal2_crl_init, + .class_init = versal2_crl_class_init, }; static void crl_register_types(void) { - type_register_static(&crl_info); + type_register_static(&crl_base_info); + type_register_static(&versal_crl_info); + type_register_static(&versal2_crl_info); } type_init(crl_register_types) diff --git a/hw/misc/xlnx-versal-trng.c b/hw/misc/xlnx-versal-trng.c index f34dd3ef35257..2b573a45bdb26 100644 --- a/hw/misc/xlnx-versal-trng.c +++ b/hw/misc/xlnx-versal-trng.c @@ -627,7 +627,6 @@ static void trng_finalize(Object *obj) { XlnxVersalTRng *s = XLNX_VERSAL_TRNG(obj); - register_finalize_block(s->reg_array); g_rand_free(s->prng); s->prng = NULL; } diff --git a/hw/misc/xlnx-versal-xramc.c b/hw/misc/xlnx-versal-xramc.c index 07370b80c0d91..d90f3e87c74ca 100644 --- a/hw/misc/xlnx-versal-xramc.c +++ b/hw/misc/xlnx-versal-xramc.c @@ -190,24 +190,19 @@ static void xram_ctrl_init(Object *obj) { XlnxXramCtrl *s = XLNX_XRAM_CTRL(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; - s->reg_array = + reg_array = register_init_block32(DEVICE(obj), xram_ctrl_regs_info, ARRAY_SIZE(xram_ctrl_regs_info), s->regs_info, s->regs, &xram_ctrl_ops, XLNX_XRAM_CTRL_ERR_DEBUG, XRAM_CTRL_R_MAX * 4); - sysbus_init_mmio(sbd, &s->reg_array->mem); + sysbus_init_mmio(sbd, ®_array->mem); sysbus_init_irq(sbd, &s->irq); } -static void xram_ctrl_finalize(Object *obj) -{ - XlnxXramCtrl *s = XLNX_XRAM_CTRL(obj); - register_finalize_block(s->reg_array); -} - static const VMStateDescription vmstate_xram_ctrl = { .name = TYPE_XLNX_XRAM_CTRL, .version_id = 1, @@ -241,7 +236,6 @@ static const TypeInfo xram_ctrl_info = { .instance_size = sizeof(XlnxXramCtrl), .class_init = xram_ctrl_class_init, .instance_init = xram_ctrl_init, - .instance_finalize = 
xram_ctrl_finalize, }; static void xram_ctrl_register_types(void) diff --git a/hw/misc/xlnx-zynqmp-apu-ctrl.c b/hw/misc/xlnx-zynqmp-apu-ctrl.c index e85da32d99c22..08777496d5624 100644 --- a/hw/misc/xlnx-zynqmp-apu-ctrl.c +++ b/hw/misc/xlnx-zynqmp-apu-ctrl.c @@ -179,16 +179,17 @@ static void zynqmp_apu_handle_wfi(void *opaque, int irq, int level) static void zynqmp_apu_init(Object *obj) { XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(obj); + RegisterInfoArray *reg_array; int i; - s->reg_array = + reg_array = register_init_block32(DEVICE(obj), zynqmp_apu_regs_info, ARRAY_SIZE(zynqmp_apu_regs_info), s->regs_info, s->regs, &zynqmp_apu_ops, XILINX_ZYNQMP_APU_ERR_DEBUG, APU_R_MAX * 4); - sysbus_init_mmio(SYS_BUS_DEVICE(obj), &s->reg_array->mem); + sysbus_init_mmio(SYS_BUS_DEVICE(obj), ®_array->mem); sysbus_init_irq(SYS_BUS_DEVICE(obj), &s->irq_imr); for (i = 0; i < APU_MAX_CPU; ++i) { @@ -208,12 +209,6 @@ static void zynqmp_apu_init(Object *obj) qdev_init_gpio_in_named(DEVICE(obj), zynqmp_apu_handle_wfi, "wfi_in", 4); } -static void zynqmp_apu_finalize(Object *obj) -{ - XlnxZynqMPAPUCtrl *s = XLNX_ZYNQMP_APU_CTRL(obj); - register_finalize_block(s->reg_array); -} - static const VMStateDescription vmstate_zynqmp_apu = { .name = TYPE_XLNX_ZYNQMP_APU_CTRL, .version_id = 1, @@ -241,7 +236,6 @@ static const TypeInfo zynqmp_apu_info = { .instance_size = sizeof(XlnxZynqMPAPUCtrl), .class_init = zynqmp_apu_class_init, .instance_init = zynqmp_apu_init, - .instance_finalize = zynqmp_apu_finalize, }; static void zynqmp_apu_register_types(void) diff --git a/hw/misc/xlnx-zynqmp-crf.c b/hw/misc/xlnx-zynqmp-crf.c index cccca0e814e22..d9c1bd50e4ff1 100644 --- a/hw/misc/xlnx-zynqmp-crf.c +++ b/hw/misc/xlnx-zynqmp-crf.c @@ -211,24 +211,19 @@ static void crf_init(Object *obj) { XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; - s->reg_array = + reg_array = register_init_block32(DEVICE(obj), crf_regs_info, 
ARRAY_SIZE(crf_regs_info), s->regs_info, s->regs, &crf_ops, XLNX_ZYNQMP_CRF_ERR_DEBUG, CRF_R_MAX * 4); - sysbus_init_mmio(sbd, &s->reg_array->mem); + sysbus_init_mmio(sbd, ®_array->mem); sysbus_init_irq(sbd, &s->irq_ir); } -static void crf_finalize(Object *obj) -{ - XlnxZynqMPCRF *s = XLNX_ZYNQMP_CRF(obj); - register_finalize_block(s->reg_array); -} - static const VMStateDescription vmstate_crf = { .name = TYPE_XLNX_ZYNQMP_CRF, .version_id = 1, @@ -255,7 +250,6 @@ static const TypeInfo crf_info = { .instance_size = sizeof(XlnxZynqMPCRF), .class_init = crf_class_init, .instance_init = crf_init, - .instance_finalize = crf_finalize, }; static void crf_register_types(void) diff --git a/hw/net/Kconfig b/hw/net/Kconfig index 7f80218d10ff5..2b513d689584b 100644 --- a/hw/net/Kconfig +++ b/hw/net/Kconfig @@ -82,9 +82,6 @@ config OPENCORES_ETH config XGMAC bool -config MIPSNET - bool - config ALLWINNER_EMAC bool diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 50025d5a6f2b3..44446666deb26 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -1756,6 +1756,7 @@ static void gem_realize(DeviceState *dev, Error **errp) sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq[i]); } + gem_init_register_masks(s); qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_gem_info, &s->conf, @@ -1776,7 +1777,6 @@ static void gem_init(Object *obj) DB_PRINT("\n"); - gem_init_register_masks(s); memory_region_init_io(&s->iomem, OBJECT(s), &gem_ops, s, "enet", sizeof(s->regs)); diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index 5b6ba9df6c42f..6b08e977a1d79 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -842,7 +842,6 @@ ssize_t can_sja_receive(CanBusClientState *client, const qemu_can_frame *frames, s->status_pel |= 0x01; /* Set the Receive Buffer Status. 
DS-p23 */ s->interrupt_pel |= 0x01; s->status_pel &= ~(1 << 4); - s->status_pel |= (1 << 0); can_sja_update_pel_irq(s); } else { /* BasicCAN mode */ diff --git a/hw/net/can/xlnx-versal-canfd.c b/hw/net/can/xlnx-versal-canfd.c index 3eb111949f89f..5735639b85a68 100644 --- a/hw/net/can/xlnx-versal-canfd.c +++ b/hw/net/can/xlnx-versal-canfd.c @@ -35,12 +35,8 @@ #include "hw/irq.h" #include "hw/register.h" #include "qapi/error.h" -#include "qemu/bitops.h" #include "qemu/log.h" -#include "qemu/cutils.h" -#include "qemu/event_notifier.h" #include "hw/qdev-properties.h" -#include "qom/object_interfaces.h" #include "migration/vmstate.h" #include "hw/net/xlnx-versal-canfd.h" #include "trace.h" @@ -1414,44 +1410,20 @@ static uint64_t canfd_srr_pre_write(RegisterInfo *reg, uint64_t val64) return s->regs[R_SOFTWARE_RESET_REGISTER]; } -static uint64_t filter_mask(RegisterInfo *reg, uint64_t val64) +static void filter_reg_write(XlnxVersalCANFDState *s, hwaddr addr, + unsigned bank_idx, uint32_t val) { - XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); - uint32_t reg_idx = (reg->access->addr) / 4; - uint32_t val = val64; - uint32_t filter_offset = (reg_idx - R_AFMR_REGISTER) / 2; - - if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & - (1 << filter_offset))) { - s->regs[reg_idx] = val; - } else { - g_autofree char *path = object_get_canonical_path(OBJECT(s)); - - qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d not enabled\n", - path, filter_offset + 1); - } - - return s->regs[reg_idx]; -} - -static uint64_t filter_id(RegisterInfo *reg, uint64_t val64) -{ - XlnxVersalCANFDState *s = XILINX_CANFD(reg->opaque); - hwaddr reg_idx = (reg->access->addr) / 4; - uint32_t val = val64; - uint32_t filter_offset = (reg_idx - R_AFIR_REGISTER) / 2; + size_t reg_idx = addr / sizeof(uint32_t); - if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & - (1 << filter_offset))) { + if (!(s->regs[R_ACCEPTANCE_FILTER_CONTROL_REGISTER] & (1 << bank_idx))) { s->regs[reg_idx] = val; } else 
{ g_autofree char *path = object_get_canonical_path(OBJECT(s)); - qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter %d not enabled\n", - path, filter_offset + 1); + qemu_log_mask(LOG_GUEST_ERROR, "%s: Acceptance filter register 0x%" + HWADDR_PRIx " changed while filter %u enabled\n", + path, addr, bank_idx + 1); } - - return s->regs[reg_idx]; } static uint64_t canfd_tx_fifo_status_prew(RegisterInfo *reg, uint64_t val64) @@ -1579,125 +1551,6 @@ static uint64_t canfd_write_check_prew(RegisterInfo *reg, uint64_t val64) return 0; } -static const RegisterAccessInfo canfd_tx_regs[] = { - { .name = "TB_ID_REGISTER", .addr = A_TB_ID_REGISTER, - },{ .name = "TB0_DLC_REGISTER", .addr = A_TB0_DLC_REGISTER, - },{ .name = "TB_DW0_REGISTER", .addr = A_TB_DW0_REGISTER, - },{ .name = "TB_DW1_REGISTER", .addr = A_TB_DW1_REGISTER, - },{ .name = "TB_DW2_REGISTER", .addr = A_TB_DW2_REGISTER, - },{ .name = "TB_DW3_REGISTER", .addr = A_TB_DW3_REGISTER, - },{ .name = "TB_DW4_REGISTER", .addr = A_TB_DW4_REGISTER, - },{ .name = "TB_DW5_REGISTER", .addr = A_TB_DW5_REGISTER, - },{ .name = "TB_DW6_REGISTER", .addr = A_TB_DW6_REGISTER, - },{ .name = "TB_DW7_REGISTER", .addr = A_TB_DW7_REGISTER, - },{ .name = "TB_DW8_REGISTER", .addr = A_TB_DW8_REGISTER, - },{ .name = "TB_DW9_REGISTER", .addr = A_TB_DW9_REGISTER, - },{ .name = "TB_DW10_REGISTER", .addr = A_TB_DW10_REGISTER, - },{ .name = "TB_DW11_REGISTER", .addr = A_TB_DW11_REGISTER, - },{ .name = "TB_DW12_REGISTER", .addr = A_TB_DW12_REGISTER, - },{ .name = "TB_DW13_REGISTER", .addr = A_TB_DW13_REGISTER, - },{ .name = "TB_DW14_REGISTER", .addr = A_TB_DW14_REGISTER, - },{ .name = "TB_DW15_REGISTER", .addr = A_TB_DW15_REGISTER, - } -}; - -static const RegisterAccessInfo canfd_rx0_regs[] = { - { .name = "RB_ID_REGISTER", .addr = A_RB_ID_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DLC_REGISTER", .addr = A_RB_DLC_REGISTER, - .ro = 0xfe1fffff, - },{ .name = "RB_DW0_REGISTER", .addr = A_RB_DW0_REGISTER, - .ro = 0xffffffff, - },{ .name = 
"RB_DW1_REGISTER", .addr = A_RB_DW1_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW2_REGISTER", .addr = A_RB_DW2_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW3_REGISTER", .addr = A_RB_DW3_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW4_REGISTER", .addr = A_RB_DW4_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW5_REGISTER", .addr = A_RB_DW5_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW6_REGISTER", .addr = A_RB_DW6_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW7_REGISTER", .addr = A_RB_DW7_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW8_REGISTER", .addr = A_RB_DW8_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW9_REGISTER", .addr = A_RB_DW9_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW10_REGISTER", .addr = A_RB_DW10_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW11_REGISTER", .addr = A_RB_DW11_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW12_REGISTER", .addr = A_RB_DW12_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW13_REGISTER", .addr = A_RB_DW13_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW14_REGISTER", .addr = A_RB_DW14_REGISTER, - .ro = 0xffffffff, - },{ .name = "RB_DW15_REGISTER", .addr = A_RB_DW15_REGISTER, - .ro = 0xffffffff, - } -}; - -static const RegisterAccessInfo canfd_rx1_regs[] = { - { .name = "RB_ID_REGISTER_1", .addr = A_RB_ID_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DLC_REGISTER_1", .addr = A_RB_DLC_REGISTER_1, - .ro = 0xfe1fffff, - },{ .name = "RB0_DW0_REGISTER_1", .addr = A_RB0_DW0_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW1_REGISTER_1", .addr = A_RB_DW1_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW2_REGISTER_1", .addr = A_RB_DW2_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW3_REGISTER_1", .addr = A_RB_DW3_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW4_REGISTER_1", .addr = A_RB_DW4_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW5_REGISTER_1", .addr = A_RB_DW5_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW6_REGISTER_1", .addr = A_RB_DW6_REGISTER_1, - 
.ro = 0xffffffff, - },{ .name = "RB_DW7_REGISTER_1", .addr = A_RB_DW7_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW8_REGISTER_1", .addr = A_RB_DW8_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW9_REGISTER_1", .addr = A_RB_DW9_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW10_REGISTER_1", .addr = A_RB_DW10_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW11_REGISTER_1", .addr = A_RB_DW11_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW12_REGISTER_1", .addr = A_RB_DW12_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW13_REGISTER_1", .addr = A_RB_DW13_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW14_REGISTER_1", .addr = A_RB_DW14_REGISTER_1, - .ro = 0xffffffff, - },{ .name = "RB_DW15_REGISTER_1", .addr = A_RB_DW15_REGISTER_1, - .ro = 0xffffffff, - } -}; - -/* Acceptance filter registers. */ -static const RegisterAccessInfo canfd_af_regs[] = { - { .name = "AFMR_REGISTER", .addr = A_AFMR_REGISTER, - .pre_write = filter_mask, - },{ .name = "AFIR_REGISTER", .addr = A_AFIR_REGISTER, - .pre_write = filter_id, - } -}; - -static const RegisterAccessInfo canfd_txe_regs[] = { - { .name = "TXE_FIFO_TB_ID_REGISTER", .addr = A_TXE_FIFO_TB_ID_REGISTER, - .ro = 0xffffffff, - },{ .name = "TXE_FIFO_TB_DLC_REGISTER", .addr = A_TXE_FIFO_TB_DLC_REGISTER, - .ro = 0xffffffff, - } -}; - static const RegisterAccessInfo canfd_regs_info[] = { { .name = "SOFTWARE_RESET_REGISTER", .addr = A_SOFTWARE_RESET_REGISTER, .pre_write = canfd_srr_pre_write, @@ -1767,7 +1620,143 @@ static void xlnx_versal_canfd_ptimer_cb(void *opaque) /* No action required on the timer rollover. 
*/ } +static bool canfd_decode_reg_bank(XlnxVersalCANFDState *s, hwaddr addr, + hwaddr first_reg, hwaddr last_reg, + size_t num_banks, unsigned *idx, + hwaddr *offset) +{ + hwaddr base = addr - first_reg; + hwaddr span = last_reg - first_reg + sizeof(uint32_t); + unsigned index = base / span; + + if (index >= num_banks) { + return false; + } + if (idx) { + *idx = index; + } + + *offset = base % span; + *offset += first_reg; + + return true; +} + +/* + * Decode the given addr into a (idx, offset) pair: + * - idx is the bank index of the register at addr, + * - offset is the register offset relative to bank 0 + * + * @return true is the decoding succeded, false otherwise + */ +static bool canfd_decode_addr(XlnxVersalCANFDState *s, hwaddr addr, + unsigned *idx, hwaddr *offset) +{ + if (addr <= A_RX_FIFO_WATERMARK_REGISTER) { + /* from 0x0 to 0xec. Handled by the register API */ + g_assert_not_reached(); + } else if (addr < A_TB_ID_REGISTER) { + /* no register in this gap */ + return false; + } else if (addr < A_AFMR_REGISTER) { + /* TX registers */ + return canfd_decode_reg_bank(s, addr, + A_TB_ID_REGISTER, A_TB_DW15_REGISTER, + s->cfg.tx_fifo, idx, offset); + } else if (addr < A_TXE_FIFO_TB_ID_REGISTER) { + /* Filter registers */ + return canfd_decode_reg_bank(s, addr, + A_AFMR_REGISTER, A_AFIR_REGISTER, + 32, idx, offset); + } else if (addr < A_RB_ID_REGISTER) { + /* TX event registers */ + return canfd_decode_reg_bank(s, addr, + A_TXE_FIFO_TB_ID_REGISTER, + A_TXE_FIFO_TB_DLC_REGISTER, + 32, idx, offset); + } else if (addr < A_RB_ID_REGISTER_1) { + /* RX0 registers */ + return canfd_decode_reg_bank(s, addr, + A_RB_ID_REGISTER, + A_RB_DW15_REGISTER, + s->cfg.rx0_fifo, idx, offset); + } else if (addr <= A_RB_DW15_REGISTER_1) { + /* RX1 registers */ + return canfd_decode_reg_bank(s, addr, + A_RB_ID_REGISTER_1, + A_RB_DW15_REGISTER_1, + s->cfg.rx1_fifo, idx, offset); + } + + /* decode error */ + return false; +} + +static uint64_t canfd_read(void *opaque, hwaddr addr, 
unsigned size) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(opaque); + hwaddr reg_offset; + uint64_t ret; + + if (!canfd_decode_addr(s, addr, NULL, ®_offset)) { + qemu_log_mask(LOG_GUEST_ERROR, TYPE_XILINX_CANFD + ": read to unknown register at address 0x%" + HWADDR_PRIx "\n", addr); + return 0; + } + + switch (reg_offset) { + default: + ret = s->regs[addr / sizeof(uint32_t)]; + } + + return ret; +} + +static void canfd_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + XlnxVersalCANFDState *s = XILINX_CANFD(opaque); + unsigned bank_idx; + hwaddr reg_offset; + + if (!canfd_decode_addr(s, addr, &bank_idx, ®_offset)) { + qemu_log_mask(LOG_GUEST_ERROR, TYPE_XILINX_CANFD + ": write to unknown register at address 0x%" + HWADDR_PRIx "\n", addr); + return; + } + + if (addr >= A_TXE_FIFO_TB_ID_REGISTER) { + /* All registers from TX event regs to the end are read-only */ + qemu_log_mask(LOG_GUEST_ERROR, TYPE_XILINX_CANFD + ": write to read-only register at 0x%" HWADDR_PRIx "\n", + addr); + return; + } + + switch (reg_offset) { + case A_AFMR_REGISTER: + case A_AFIR_REGISTER: + filter_reg_write(s, addr, bank_idx, value); + break; + + default: + s->regs[addr / sizeof(uint32_t)] = value; + } +} + static const MemoryRegionOps canfd_ops = { + .read = canfd_read, + .write = canfd_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static const MemoryRegionOps canfd_regs_ops = { .read = register_read_memory, .write = register_write_memory, .endianness = DEVICE_LITTLE_ENDIAN, @@ -1856,142 +1845,16 @@ static int xlnx_canfd_connect_to_bus(XlnxVersalCANFDState *s, return can_bus_insert_client(bus, &s->bus_client); } -#define NUM_REG_PER_AF ARRAY_SIZE(canfd_af_regs) -#define NUM_AF 32 -#define NUM_REG_PER_TXE ARRAY_SIZE(canfd_txe_regs) -#define NUM_TXE 32 - -static int canfd_populate_regarray(XlnxVersalCANFDState *s, - RegisterInfoArray *r_array, int pos, - const RegisterAccessInfo *rae, - int num_rae) 
-{ - int i; - - for (i = 0; i < num_rae; i++) { - int index = rae[i].addr / 4; - RegisterInfo *r = &s->reg_info[index]; - - object_initialize(r, sizeof(*r), TYPE_REGISTER); - - *r = (RegisterInfo) { - .data = &s->regs[index], - .data_size = sizeof(uint32_t), - .access = &rae[i], - .opaque = OBJECT(s), - }; - - r_array->r[i + pos] = r; - } - return i + pos; -} - -static void canfd_create_rai(RegisterAccessInfo *rai_array, - const RegisterAccessInfo *canfd_regs, - int template_rai_array_sz, - int num_template_to_copy) -{ - int i; - int reg_num; - - for (reg_num = 0; reg_num < num_template_to_copy; reg_num++) { - int pos = reg_num * template_rai_array_sz; - - memcpy(rai_array + pos, canfd_regs, - template_rai_array_sz * sizeof(RegisterAccessInfo)); - - for (i = 0; i < template_rai_array_sz; i++) { - const char *name = canfd_regs[i].name; - uint64_t addr = canfd_regs[i].addr; - rai_array[i + pos].name = g_strdup_printf("%s%d", name, reg_num); - rai_array[i + pos].addr = addr + pos * 4; - } - } -} - -static RegisterInfoArray *canfd_create_regarray(XlnxVersalCANFDState *s) -{ - const char *device_prefix = object_get_typename(OBJECT(s)); - uint64_t memory_size = XLNX_VERSAL_CANFD_R_MAX * 4; - int num_regs; - int pos = 0; - RegisterInfoArray *r_array; - - num_regs = ARRAY_SIZE(canfd_regs_info) + - s->cfg.tx_fifo * NUM_REGS_PER_MSG_SPACE + - s->cfg.rx0_fifo * NUM_REGS_PER_MSG_SPACE + - NUM_AF * NUM_REG_PER_AF + - NUM_TXE * NUM_REG_PER_TXE; - - s->tx_regs = g_new0(RegisterAccessInfo, - s->cfg.tx_fifo * ARRAY_SIZE(canfd_tx_regs)); - - canfd_create_rai(s->tx_regs, canfd_tx_regs, - ARRAY_SIZE(canfd_tx_regs), s->cfg.tx_fifo); - - s->rx0_regs = g_new0(RegisterAccessInfo, - s->cfg.rx0_fifo * ARRAY_SIZE(canfd_rx0_regs)); - - canfd_create_rai(s->rx0_regs, canfd_rx0_regs, - ARRAY_SIZE(canfd_rx0_regs), s->cfg.rx0_fifo); - - s->af_regs = g_new0(RegisterAccessInfo, - NUM_AF * ARRAY_SIZE(canfd_af_regs)); - - canfd_create_rai(s->af_regs, canfd_af_regs, - ARRAY_SIZE(canfd_af_regs), 
NUM_AF); - - s->txe_regs = g_new0(RegisterAccessInfo, - NUM_TXE * ARRAY_SIZE(canfd_txe_regs)); - - canfd_create_rai(s->txe_regs, canfd_txe_regs, - ARRAY_SIZE(canfd_txe_regs), NUM_TXE); - - if (s->cfg.enable_rx_fifo1) { - num_regs += s->cfg.rx1_fifo * NUM_REGS_PER_MSG_SPACE; - - s->rx1_regs = g_new0(RegisterAccessInfo, - s->cfg.rx1_fifo * ARRAY_SIZE(canfd_rx1_regs)); - - canfd_create_rai(s->rx1_regs, canfd_rx1_regs, - ARRAY_SIZE(canfd_rx1_regs), s->cfg.rx1_fifo); - } - - r_array = g_new0(RegisterInfoArray, 1); - r_array->r = g_new0(RegisterInfo * , num_regs); - r_array->num_elements = num_regs; - r_array->prefix = device_prefix; - - pos = canfd_populate_regarray(s, r_array, pos, - canfd_regs_info, - ARRAY_SIZE(canfd_regs_info)); - pos = canfd_populate_regarray(s, r_array, pos, - s->tx_regs, s->cfg.tx_fifo * - NUM_REGS_PER_MSG_SPACE); - pos = canfd_populate_regarray(s, r_array, pos, - s->rx0_regs, s->cfg.rx0_fifo * - NUM_REGS_PER_MSG_SPACE); - if (s->cfg.enable_rx_fifo1) { - pos = canfd_populate_regarray(s, r_array, pos, - s->rx1_regs, s->cfg.rx1_fifo * - NUM_REGS_PER_MSG_SPACE); - } - pos = canfd_populate_regarray(s, r_array, pos, - s->af_regs, NUM_AF * NUM_REG_PER_AF); - pos = canfd_populate_regarray(s, r_array, pos, - s->txe_regs, NUM_TXE * NUM_REG_PER_TXE); - - memory_region_init_io(&r_array->mem, OBJECT(s), &canfd_ops, r_array, - device_prefix, memory_size); - return r_array; -} - static void canfd_realize(DeviceState *dev, Error **errp) { XlnxVersalCANFDState *s = XILINX_CANFD(dev); RegisterInfoArray *reg_array; - reg_array = canfd_create_regarray(s); + reg_array = register_init_block32(dev, canfd_regs_info, + ARRAY_SIZE(canfd_regs_info), s->reg_info, + s->regs, &canfd_regs_ops, false, + A_RX_FIFO_WATERMARK_REGISTER + + sizeof(uint32_t)); memory_region_add_subregion(&s->iomem, 0x00, ®_array->mem); sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq_canfd_int); @@ -2024,8 +1887,8 @@ static void canfd_init(Object *obj) { 
XlnxVersalCANFDState *s = XILINX_CANFD(obj); - memory_region_init(&s->iomem, obj, TYPE_XILINX_CANFD, - XLNX_VERSAL_CANFD_R_MAX * 4); + memory_region_init_io(&s->iomem, obj, &canfd_ops, s, TYPE_XILINX_CANFD, + XLNX_VERSAL_CANFD_R_MAX * 4); } static const VMStateDescription vmstate_canfd = { diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 24138587905b4..8fef598b4988c 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -341,11 +341,6 @@ e1000e_intmgr_collect_delayed_causes(E1000ECore *core) { uint32_t res; - if (msix_enabled(core->owner)) { - assert(core->delayed_causes == 0); - return 0; - } - res = core->delayed_causes; core->delayed_causes = 0; @@ -2827,8 +2822,9 @@ e1000e_update_rx_offloads(E1000ECore *core) trace_e1000e_rx_set_cso(cso_state); if (core->has_vnet) { - qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, - cso_state, 0, 0, 0, 0, 0, 0); + NetOffloads ol = { .csum = cso_state }; + + qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, &ol); } } diff --git a/hw/net/igb_core.c b/hw/net/igb_core.c index 39e3ce1c8febb..45d8fd795b844 100644 --- a/hw/net/igb_core.c +++ b/hw/net/igb_core.c @@ -3058,8 +3058,9 @@ igb_update_rx_offloads(IGBCore *core) trace_e1000e_rx_set_cso(cso_state); if (core->has_vnet) { - qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, - cso_state, 0, 0, 0, 0, 0, 0); + NetOffloads ol = {.csum = cso_state }; + + qemu_set_offload(qemu_get_queue(core->owner_nic)->peer, &ol); } } diff --git a/hw/net/igbvf.c b/hw/net/igbvf.c index 31d72c4977d9a..9b0db8f841105 100644 --- a/hw/net/igbvf.c +++ b/hw/net/igbvf.c @@ -251,10 +251,12 @@ static void igbvf_pci_realize(PCIDevice *dev, Error **errp) memory_region_init_io(&s->mmio, OBJECT(dev), &mmio_ops, s, "igbvf-mmio", IGBVF_MMIO_SIZE); - pcie_sriov_vf_register_bar(dev, IGBVF_MMIO_BAR_IDX, &s->mmio); + pci_register_bar(dev, IGBVF_MMIO_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, &s->mmio); memory_region_init(&s->msix, OBJECT(dev), 
"igbvf-msix", IGBVF_MSIX_SIZE); - pcie_sriov_vf_register_bar(dev, IGBVF_MSIX_BAR_IDX, &s->msix); + pci_register_bar(dev, IGBVF_MSIX_BAR_IDX, PCI_BASE_ADDRESS_MEM_TYPE_64 | + PCI_BASE_ADDRESS_MEM_PREFETCH, &s->msix); ret = msix_init(dev, IGBVF_MSIX_VEC_NUM, &s->msix, IGBVF_MSIX_BAR_IDX, 0, &s->msix, IGBVF_MSIX_BAR_IDX, 0x2000, 0x70, errp); diff --git a/hw/net/meson.build b/hw/net/meson.build index e6759e26ca6cc..913eaedbc5293 100644 --- a/hw/net/meson.build +++ b/hw/net/meson.build @@ -23,7 +23,6 @@ system_ss.add(when: 'CONFIG_LAN9118_PHY', if_true: files('lan9118_phy.c')) system_ss.add(when: 'CONFIG_NE2000_ISA', if_true: files('ne2000-isa.c')) system_ss.add(when: 'CONFIG_OPENCORES_ETH', if_true: files('opencores_eth.c')) system_ss.add(when: 'CONFIG_XGMAC', if_true: files('xgmac.c')) -system_ss.add(when: 'CONFIG_MIPSNET', if_true: files('mipsnet.c')) system_ss.add(when: 'CONFIG_XILINX_AXI', if_true: files('xilinx_axienet.c')) system_ss.add(when: 'CONFIG_ALLWINNER_EMAC', if_true: files('allwinner_emac.c')) system_ss.add(when: 'CONFIG_ALLWINNER_SUN8I_EMAC', if_true: files('allwinner-sun8i-emac.c')) diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c deleted file mode 100644 index 583aa1c7de6df..0000000000000 --- a/hw/net/mipsnet.c +++ /dev/null @@ -1,297 +0,0 @@ -#include "qemu/osdep.h" -#include "hw/irq.h" -#include "hw/qdev-properties.h" -#include "net/net.h" -#include "qemu/module.h" -#include "trace.h" -#include "hw/sysbus.h" -#include "migration/vmstate.h" -#include "qom/object.h" - -/* MIPSnet register offsets */ - -#define MIPSNET_DEV_ID 0x00 -#define MIPSNET_BUSY 0x08 -#define MIPSNET_RX_DATA_COUNT 0x0c -#define MIPSNET_TX_DATA_COUNT 0x10 -#define MIPSNET_INT_CTL 0x14 -# define MIPSNET_INTCTL_TXDONE 0x00000001 -# define MIPSNET_INTCTL_RXDONE 0x00000002 -# define MIPSNET_INTCTL_TESTBIT 0x80000000 -#define MIPSNET_INTERRUPT_INFO 0x18 -#define MIPSNET_RX_DATA_BUFFER 0x1c -#define MIPSNET_TX_DATA_BUFFER 0x20 - -#define MAX_ETH_FRAME_SIZE 1514 - -#define TYPE_MIPS_NET 
"mipsnet" -OBJECT_DECLARE_SIMPLE_TYPE(MIPSnetState, MIPS_NET) - -struct MIPSnetState { - SysBusDevice parent_obj; - - uint32_t busy; - uint32_t rx_count; - uint32_t rx_read; - uint32_t tx_count; - uint32_t tx_written; - uint32_t intctl; - uint8_t rx_buffer[MAX_ETH_FRAME_SIZE]; - uint8_t tx_buffer[MAX_ETH_FRAME_SIZE]; - MemoryRegion io; - qemu_irq irq; - NICState *nic; - NICConf conf; -}; - -static void mipsnet_reset(MIPSnetState *s) -{ - s->busy = 1; - s->rx_count = 0; - s->rx_read = 0; - s->tx_count = 0; - s->tx_written = 0; - s->intctl = 0; - memset(s->rx_buffer, 0, MAX_ETH_FRAME_SIZE); - memset(s->tx_buffer, 0, MAX_ETH_FRAME_SIZE); -} - -static void mipsnet_update_irq(MIPSnetState *s) -{ - int isr = !!s->intctl; - trace_mipsnet_irq(isr, s->intctl); - qemu_set_irq(s->irq, isr); -} - -static int mipsnet_buffer_full(MIPSnetState *s) -{ - if (s->rx_count >= MAX_ETH_FRAME_SIZE) { - return 1; - } - return 0; -} - -static int mipsnet_can_receive(NetClientState *nc) -{ - MIPSnetState *s = qemu_get_nic_opaque(nc); - - if (s->busy) { - return 0; - } - return !mipsnet_buffer_full(s); -} - -static ssize_t mipsnet_receive(NetClientState *nc, - const uint8_t *buf, size_t size) -{ - MIPSnetState *s = qemu_get_nic_opaque(nc); - - trace_mipsnet_receive(size); - if (!mipsnet_can_receive(nc)) { - return 0; - } - - if (size >= sizeof(s->rx_buffer)) { - return 0; - } - s->busy = 1; - - /* Just accept everything. */ - - /* Write packet data. */ - memcpy(s->rx_buffer, buf, size); - - s->rx_count = size; - s->rx_read = 0; - - /* Now we can signal we have received something. 
*/ - s->intctl |= MIPSNET_INTCTL_RXDONE; - mipsnet_update_irq(s); - - return size; -} - -static uint64_t mipsnet_ioport_read(void *opaque, hwaddr addr, - unsigned int size) -{ - MIPSnetState *s = opaque; - int ret = 0; - - addr &= 0x3f; - switch (addr) { - case MIPSNET_DEV_ID: - ret = be32_to_cpu(0x4d495053); /* MIPS */ - break; - case MIPSNET_DEV_ID + 4: - ret = be32_to_cpu(0x4e455430); /* NET0 */ - break; - case MIPSNET_BUSY: - ret = s->busy; - break; - case MIPSNET_RX_DATA_COUNT: - ret = s->rx_count; - break; - case MIPSNET_TX_DATA_COUNT: - ret = s->tx_count; - break; - case MIPSNET_INT_CTL: - ret = s->intctl; - s->intctl &= ~MIPSNET_INTCTL_TESTBIT; - break; - case MIPSNET_INTERRUPT_INFO: - /* XXX: This seems to be a per-VPE interrupt number. */ - ret = 0; - break; - case MIPSNET_RX_DATA_BUFFER: - if (s->rx_count) { - s->rx_count--; - ret = s->rx_buffer[s->rx_read++]; - if (mipsnet_can_receive(s->nic->ncs)) { - qemu_flush_queued_packets(qemu_get_queue(s->nic)); - } - } - break; - /* Reads as zero. */ - case MIPSNET_TX_DATA_BUFFER: - default: - break; - } - trace_mipsnet_read(addr, ret); - return ret; -} - -static void mipsnet_ioport_write(void *opaque, hwaddr addr, - uint64_t val, unsigned int size) -{ - MIPSnetState *s = opaque; - - addr &= 0x3f; - trace_mipsnet_write(addr, val); - switch (addr) { - case MIPSNET_TX_DATA_COUNT: - s->tx_count = (val <= MAX_ETH_FRAME_SIZE) ? val : 0; - s->tx_written = 0; - break; - case MIPSNET_INT_CTL: - if (val & MIPSNET_INTCTL_TXDONE) { - s->intctl &= ~MIPSNET_INTCTL_TXDONE; - } else if (val & MIPSNET_INTCTL_RXDONE) { - s->intctl &= ~MIPSNET_INTCTL_RXDONE; - } else if (val & MIPSNET_INTCTL_TESTBIT) { - mipsnet_reset(s); - s->intctl |= MIPSNET_INTCTL_TESTBIT; - } else if (!val) { - /* ACK testbit interrupt, flag was cleared on read. 
*/ - } - s->busy = !!s->intctl; - mipsnet_update_irq(s); - if (mipsnet_can_receive(s->nic->ncs)) { - qemu_flush_queued_packets(qemu_get_queue(s->nic)); - } - break; - case MIPSNET_TX_DATA_BUFFER: - s->tx_buffer[s->tx_written++] = val; - if ((s->tx_written >= MAX_ETH_FRAME_SIZE) - || (s->tx_written == s->tx_count)) { - /* Send buffer. */ - trace_mipsnet_send(s->tx_written); - qemu_send_packet(qemu_get_queue(s->nic), - s->tx_buffer, s->tx_written); - s->tx_count = s->tx_written = 0; - s->intctl |= MIPSNET_INTCTL_TXDONE; - s->busy = 1; - mipsnet_update_irq(s); - } - break; - /* Read-only registers */ - case MIPSNET_DEV_ID: - case MIPSNET_BUSY: - case MIPSNET_RX_DATA_COUNT: - case MIPSNET_INTERRUPT_INFO: - case MIPSNET_RX_DATA_BUFFER: - default: - break; - } -} - -static const VMStateDescription vmstate_mipsnet = { - .name = "mipsnet", - .version_id = 0, - .minimum_version_id = 0, - .fields = (const VMStateField[]) { - VMSTATE_UINT32(busy, MIPSnetState), - VMSTATE_UINT32(rx_count, MIPSnetState), - VMSTATE_UINT32(rx_read, MIPSnetState), - VMSTATE_UINT32(tx_count, MIPSnetState), - VMSTATE_UINT32(tx_written, MIPSnetState), - VMSTATE_UINT32(intctl, MIPSnetState), - VMSTATE_BUFFER(rx_buffer, MIPSnetState), - VMSTATE_BUFFER(tx_buffer, MIPSnetState), - VMSTATE_END_OF_LIST() - } -}; - -static NetClientInfo net_mipsnet_info = { - .type = NET_CLIENT_DRIVER_NIC, - .size = sizeof(NICState), - .receive = mipsnet_receive, -}; - -static const MemoryRegionOps mipsnet_ioport_ops = { - .read = mipsnet_ioport_read, - .write = mipsnet_ioport_write, - .impl.min_access_size = 1, - .impl.max_access_size = 4, -}; - -static void mipsnet_realize(DeviceState *dev, Error **errp) -{ - SysBusDevice *sbd = SYS_BUS_DEVICE(dev); - MIPSnetState *s = MIPS_NET(dev); - - memory_region_init_io(&s->io, OBJECT(dev), &mipsnet_ioport_ops, s, - "mipsnet-io", 36); - sysbus_init_mmio(sbd, &s->io); - sysbus_init_irq(sbd, &s->irq); - - s->nic = qemu_new_nic(&net_mipsnet_info, &s->conf, - 
object_get_typename(OBJECT(dev)), dev->id, - &dev->mem_reentrancy_guard, s); - qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); -} - -static void mipsnet_sysbus_reset(DeviceState *dev) -{ - MIPSnetState *s = MIPS_NET(dev); - mipsnet_reset(s); -} - -static const Property mipsnet_properties[] = { - DEFINE_NIC_PROPERTIES(MIPSnetState, conf), -}; - -static void mipsnet_class_init(ObjectClass *klass, const void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - - dc->realize = mipsnet_realize; - set_bit(DEVICE_CATEGORY_NETWORK, dc->categories); - dc->desc = "MIPS Simulator network device"; - device_class_set_legacy_reset(dc, mipsnet_sysbus_reset); - dc->vmsd = &vmstate_mipsnet; - device_class_set_props(dc, mipsnet_properties); -} - -static const TypeInfo mipsnet_info = { - .name = TYPE_MIPS_NET, - .parent = TYPE_SYS_BUS_DEVICE, - .instance_size = sizeof(MIPSnetState), - .class_init = mipsnet_class_init, -}; - -static void mipsnet_register_types(void) -{ - type_register_static(&mipsnet_info); -} - -type_init(mipsnet_register_types) diff --git a/hw/net/npcm_gmac.c b/hw/net/npcm_gmac.c index a43411258090c..5e32cd3edf3fa 100644 --- a/hw/net/npcm_gmac.c +++ b/hw/net/npcm_gmac.c @@ -516,8 +516,6 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) uint32_t desc_addr; struct NPCMGMACTxDesc tx_desc; uint32_t tx_buf_addr, tx_buf_len; - uint16_t length = 0; - uint8_t *buf = tx_send_buffer; uint32_t prev_buf_size = 0; int csum = 0; @@ -568,22 +566,20 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) tx_buf_addr = tx_desc.tdes2; gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; tx_buf_len = TX_DESC_TDES1_BFFR1_SZ_MASK(tx_desc.tdes1); - buf = &tx_send_buffer[prev_buf_size]; - if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { + if ((prev_buf_size + tx_buf_len) > tx_buffer_size) { tx_buffer_size = prev_buf_size + tx_buf_len; tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); - buf = &tx_send_buffer[prev_buf_size]; } /* step 5 */ 
- if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, + if (dma_memory_read(&address_space_memory, tx_buf_addr, + tx_send_buffer + prev_buf_size, tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", __func__, tx_buf_addr); return; } - length += tx_buf_len; prev_buf_size += tx_buf_len; /* If not chained we'll have a second buffer. */ @@ -591,30 +587,32 @@ static void gmac_try_send_next_packet(NPCMGMACState *gmac) tx_buf_addr = tx_desc.tdes3; gmac->regs[R_NPCM_DMA_CUR_TX_BUF_ADDR] = tx_buf_addr; tx_buf_len = TX_DESC_TDES1_BFFR2_SZ_MASK(tx_desc.tdes1); - buf = &tx_send_buffer[prev_buf_size]; - if ((prev_buf_size + tx_buf_len) > sizeof(buf)) { + if ((prev_buf_size + tx_buf_len) > tx_buffer_size) { tx_buffer_size = prev_buf_size + tx_buf_len; tx_send_buffer = g_realloc(tx_send_buffer, tx_buffer_size); - buf = &tx_send_buffer[prev_buf_size]; } - if (dma_memory_read(&address_space_memory, tx_buf_addr, buf, + if (dma_memory_read(&address_space_memory, tx_buf_addr, + tx_send_buffer + prev_buf_size, tx_buf_len, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", __func__, tx_buf_addr); return; } - length += tx_buf_len; prev_buf_size += tx_buf_len; } if (tx_desc.tdes1 & TX_DESC_TDES1_LAST_SEG_MASK) { + /* + * This will truncate the packet at 64K. + * TODO: find out if this is the correct behaviour. 
+ */ + uint16_t length = prev_buf_size; net_checksum_calculate(tx_send_buffer, length, csum); qemu_send_packet(qemu_get_queue(gmac->nic), tx_send_buffer, length); trace_npcm_gmac_packet_sent(DEVICE(gmac)->canonical_path, length); - buf = tx_send_buffer; - length = 0; + prev_buf_size = 0; } /* step 6 */ diff --git a/hw/net/trace-events b/hw/net/trace-events index 72b69c4a8bb89..e82d7490c33b6 100644 --- a/hw/net/trace-events +++ b/hw/net/trace-events @@ -20,13 +20,6 @@ lan9118_phy_reset(void) "" lance_mem_readw(uint64_t addr, uint32_t ret) "addr=0x%"PRIx64"val=0x%04x" lance_mem_writew(uint64_t addr, uint32_t val) "addr=0x%"PRIx64"val=0x%04x" -# mipsnet.c -mipsnet_send(uint32_t size) "sending len=%u" -mipsnet_receive(uint32_t size) "receiving len=%u" -mipsnet_read(uint64_t addr, uint32_t val) "read addr=0x%" PRIx64 " val=0x%x" -mipsnet_write(uint64_t addr, uint64_t val) "write addr=0x%" PRIx64 " val=0x%" PRIx64 -mipsnet_irq(uint32_t isr, uint32_t intctl) "set irq to %d (0x%02x)" - # ne2000.c ne2000_read(uint64_t addr, uint64_t val) "read addr=0x%" PRIx64 " val=0x%" PRIx64 ne2000_write(uint64_t addr, uint64_t val) "write addr=0x%" PRIx64 " val=0x%" PRIx64 diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c index 7d49f82906a9b..0740d5a2ebe6a 100644 --- a/hw/net/vhost_net-stub.c +++ b/hw/net/vhost_net-stub.c @@ -46,9 +46,8 @@ void vhost_net_cleanup(struct vhost_net *net) { } -uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features) +void vhost_net_get_features_ex(struct vhost_net *net, uint64_t *features) { - return features; } int vhost_net_get_config(struct vhost_net *net, uint8_t *config, @@ -62,13 +61,12 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data, return 0; } -void vhost_net_ack_features(struct vhost_net *net, uint64_t features) +void vhost_net_ack_features_ex(struct vhost_net *net, const uint64_t *features) { } -uint64_t vhost_net_get_acked_features(VHostNetState *net) +void 
vhost_net_get_acked_features_ex(VHostNetState *net, uint64_t *features) { - return 0; } bool vhost_net_virtqueue_pending(VHostNetState *net, int idx) diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index 540492b37ddca..a8ee18a912661 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -35,10 +35,9 @@ #include "hw/virtio/virtio-bus.h" #include "linux-headers/linux/vhost.h" -uint64_t vhost_net_get_features(struct vhost_net *net, uint64_t features) +void vhost_net_get_features_ex(struct vhost_net *net, uint64_t *features) { - return vhost_get_features(&net->dev, net->feature_bits, - features); + vhost_get_features_ex(&net->dev, net->feature_bits, features); } int vhost_net_get_config(struct vhost_net *net, uint8_t *config, uint32_t config_len) @@ -51,10 +50,11 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data, return vhost_dev_set_config(&net->dev, data, offset, size, flags); } -void vhost_net_ack_features(struct vhost_net *net, uint64_t features) +void vhost_net_ack_features_ex(struct vhost_net *net, const uint64_t *features) { - net->dev.acked_features = net->dev.backend_features; - vhost_ack_features(&net->dev, net->feature_bits, features); + virtio_features_copy(net->dev.acked_features_ex, + net->dev.backend_features_ex); + vhost_ack_features_ex(&net->dev, net->feature_bits, features); } uint64_t vhost_net_get_max_queues(VHostNetState *net) @@ -62,9 +62,9 @@ uint64_t vhost_net_get_max_queues(VHostNetState *net) return net->dev.max_queues; } -uint64_t vhost_net_get_acked_features(VHostNetState *net) +void vhost_net_get_acked_features_ex(VHostNetState *net, uint64_t *features) { - return net->dev.acked_features; + virtio_features_copy(features, net->dev.acked_features_ex); } void vhost_net_save_acked_features(NetClientState *nc) @@ -234,7 +234,8 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) int r; bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL; struct vhost_net *net = g_new0(struct vhost_net, 
1); - uint64_t features = 0; + uint64_t missing_features[VIRTIO_FEATURES_NU64S]; + uint64_t features[VIRTIO_FEATURES_NU64S]; Error *local_err = NULL; if (!options->net_backend) { @@ -247,6 +248,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) net->save_acked_features = options->save_acked_features; net->max_tx_queue_size = options->max_tx_queue_size; net->is_vhost_user = options->is_vhost_user; + virtio_features_clear(features); net->dev.max_queues = 1; net->dev.vqs = net->vqs; @@ -261,7 +263,7 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) net->backend = r; net->dev.protocol_features = 0; } else { - net->dev.backend_features = 0; + virtio_features_clear(net->dev.backend_features_ex); net->dev.protocol_features = 0; net->backend = -1; @@ -281,26 +283,29 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) sizeof(struct virtio_net_hdr_mrg_rxbuf))) { net->dev.features &= ~(1ULL << VIRTIO_NET_F_MRG_RXBUF); } - if (~net->dev.features & net->dev.backend_features) { - fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64 - " for backend\n", - (uint64_t)(~net->dev.features & net->dev.backend_features)); + + if (virtio_features_andnot(missing_features, + net->dev.backend_features_ex, + net->dev.features_ex)) { + fprintf(stderr, "vhost lacks feature mask 0x" VIRTIO_FEATURES_FMT + " for backend\n", VIRTIO_FEATURES_PR(missing_features)); goto fail; } } /* Set sane init value. Override when guest acks. 
*/ if (options->get_acked_features) { - features = options->get_acked_features(net->nc); - if (~net->dev.features & features) { - fprintf(stderr, "vhost lacks feature mask 0x%" PRIx64 - " for backend\n", - (uint64_t)(~net->dev.features & features)); + virtio_features_from_u64(features, + options->get_acked_features(net->nc)); + if (virtio_features_andnot(missing_features, features, + net->dev.features_ex)) { + fprintf(stderr, "vhost lacks feature mask 0x" VIRTIO_FEATURES_FMT + " for backend\n", VIRTIO_FEATURES_PR(missing_features)); goto fail; } } - vhost_net_ack_features(net, features); + vhost_net_ack_features_ex(net, features); return net; diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index c4c49b0f9caa1..33116712eb4ab 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -90,6 +90,25 @@ VIRTIO_NET_RSS_HASH_TYPE_TCP_EX | \ VIRTIO_NET_RSS_HASH_TYPE_UDP_EX) +/* + * Features starting from VIRTIO_NET_FEATURES_MAP_MIN bit correspond + * to guest offloads in the VIRTIO_NET_OFFLOAD_MAP range + */ +#define VIRTIO_NET_OFFLOAD_MAP_MIN 46 +#define VIRTIO_NET_OFFLOAD_MAP_LENGTH 4 +#define VIRTIO_NET_OFFLOAD_MAP MAKE_64BIT_MASK( \ + VIRTIO_NET_OFFLOAD_MAP_MIN, \ + VIRTIO_NET_OFFLOAD_MAP_LENGTH) +#define VIRTIO_NET_FEATURES_MAP_MIN 65 +#define VIRTIO_NET_F2O_SHIFT (VIRTIO_NET_OFFLOAD_MAP_MIN - \ + VIRTIO_NET_FEATURES_MAP_MIN + 64) + +static bool virtio_has_tunnel_hdr(const uint64_t *features) +{ + return virtio_has_feature_ex(features, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO) || + virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO); +} + static const VirtIOFeature feature_sizes[] = { {.flags = 1ULL << VIRTIO_NET_F_MAC, .end = endof(struct virtio_net_config, mac)}, @@ -636,8 +655,18 @@ static int peer_has_uso(VirtIONet *n) return qemu_has_uso(qemu_get_queue(n->nic)->peer); } +static bool peer_has_tunnel(VirtIONet *n) +{ + if (!peer_has_vnet_hdr(n)) { + return false; + } + + return qemu_has_tunnel(qemu_get_queue(n->nic)->peer); +} + static void 
virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, - int version_1, int hash_report) + int version_1, int hash_report, + int tunnel) { int i; NetClientState *nc; @@ -645,9 +674,11 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, n->mergeable_rx_bufs = mergeable_rx_bufs; if (version_1) { - n->guest_hdr_len = hash_report ? - sizeof(struct virtio_net_hdr_v1_hash) : - sizeof(struct virtio_net_hdr_mrg_rxbuf); + n->guest_hdr_len = tunnel ? + sizeof(struct virtio_net_hdr_v1_hash_tunnel) : + (hash_report ? + sizeof(struct virtio_net_hdr_v1_hash) : + sizeof(struct virtio_net_hdr_mrg_rxbuf)); n->rss_data.populate_hash = !!hash_report; } else { n->guest_hdr_len = n->mergeable_rx_bufs ? @@ -773,17 +804,31 @@ static uint64_t virtio_net_bad_features(VirtIODevice *vdev) static void virtio_net_apply_guest_offloads(VirtIONet *n) { - qemu_set_offload(qemu_get_queue(n->nic)->peer, - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO4)), - !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO6))); + NetOffloads ol = { + .csum = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_CSUM)), + .tso4 = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO4)), + .tso6 = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_TSO6)), + .ecn = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_ECN)), + .ufo = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_UFO)), + .uso4 = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO4)), + .uso6 = !!(n->curr_guest_offloads & (1ULL << VIRTIO_NET_F_GUEST_USO6)), + .tnl = !!(n->curr_guest_offloads & + (1ULL << 
VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED)), + .tnl_csum = !!(n->curr_guest_offloads & + (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED)), + }; + + qemu_set_offload(qemu_get_queue(n->nic)->peer, &ol); +} + +static uint64_t virtio_net_features_to_offload(const uint64_t *features) +{ + return (features[0] & ~VIRTIO_NET_OFFLOAD_MAP) | + ((features[1] << VIRTIO_NET_F2O_SHIFT) & VIRTIO_NET_OFFLOAD_MAP); } -static uint64_t virtio_net_guest_offloads_by_features(uint64_t features) +static uint64_t +virtio_net_guest_offloads_by_features(const uint64_t *features) { static const uint64_t guest_offloads_mask = (1ULL << VIRTIO_NET_F_GUEST_CSUM) | @@ -792,15 +837,17 @@ static uint64_t virtio_net_guest_offloads_by_features(uint64_t features) (1ULL << VIRTIO_NET_F_GUEST_ECN) | (1ULL << VIRTIO_NET_F_GUEST_UFO) | (1ULL << VIRTIO_NET_F_GUEST_USO4) | - (1ULL << VIRTIO_NET_F_GUEST_USO6); + (1ULL << VIRTIO_NET_F_GUEST_USO6) | + (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED) | + (1ULL << VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED); - return guest_offloads_mask & features; + return guest_offloads_mask & virtio_net_features_to_offload(features); } uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n) { VirtIODevice *vdev = VIRTIO_DEVICE(n); - return virtio_net_guest_offloads_by_features(vdev->guest_features); + return virtio_net_guest_offloads_by_features(vdev->guest_features_ex); } typedef struct { @@ -879,34 +926,40 @@ static void failover_add_primary(VirtIONet *n, Error **errp) error_propagate(errp, err); } -static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) +static void virtio_net_set_features(VirtIODevice *vdev, + const uint64_t *in_features) { + uint64_t features[VIRTIO_FEATURES_NU64S]; VirtIONet *n = VIRTIO_NET(vdev); Error *err = NULL; int i; + virtio_features_copy(features, in_features); if (n->mtu_bypass_backend && !virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_MTU)) { - features &= ~(1ULL << VIRTIO_NET_F_MTU); + 
virtio_clear_feature_ex(features, VIRTIO_NET_F_MTU); } virtio_net_set_multiqueue(n, - virtio_has_feature(features, VIRTIO_NET_F_RSS) || - virtio_has_feature(features, VIRTIO_NET_F_MQ)); + virtio_has_feature_ex(features, + VIRTIO_NET_F_RSS) || + virtio_has_feature_ex(features, + VIRTIO_NET_F_MQ)); virtio_net_set_mrg_rx_bufs(n, - virtio_has_feature(features, + virtio_has_feature_ex(features, VIRTIO_NET_F_MRG_RXBUF), - virtio_has_feature(features, + virtio_has_feature_ex(features, VIRTIO_F_VERSION_1), - virtio_has_feature(features, - VIRTIO_NET_F_HASH_REPORT)); + virtio_has_feature_ex(features, + VIRTIO_NET_F_HASH_REPORT), + virtio_has_tunnel_hdr(features)); - n->rsc4_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) && - virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO4); - n->rsc6_enabled = virtio_has_feature(features, VIRTIO_NET_F_RSC_EXT) && - virtio_has_feature(features, VIRTIO_NET_F_GUEST_TSO6); - n->rss_data.redirect = virtio_has_feature(features, VIRTIO_NET_F_RSS); + n->rsc4_enabled = virtio_has_feature_ex(features, VIRTIO_NET_F_RSC_EXT) && + virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_TSO4); + n->rsc6_enabled = virtio_has_feature_ex(features, VIRTIO_NET_F_RSC_EXT) && + virtio_has_feature_ex(features, VIRTIO_NET_F_GUEST_TSO6); + n->rss_data.redirect = virtio_has_feature_ex(features, VIRTIO_NET_F_RSS); if (n->has_vnet_hdr) { n->curr_guest_offloads = @@ -920,7 +973,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) if (!get_vhost_net(nc->peer)) { continue; } - vhost_net_ack_features(get_vhost_net(nc->peer), features); + vhost_net_ack_features_ex(get_vhost_net(nc->peer), features); /* * keep acked_features in NetVhostUserState up-to-date so it @@ -929,11 +982,14 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) vhost_net_save_acked_features(nc->peer); } - if (!virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { - memset(n->vlans, 0xff, MAX_VLAN >> 3); + if 
(virtio_has_feature_ex(features, VIRTIO_NET_F_CTRL_VLAN) != + virtio_has_feature_ex(vdev->guest_features_ex, + VIRTIO_NET_F_CTRL_VLAN)) { + bool vlan = virtio_has_feature_ex(features, VIRTIO_NET_F_CTRL_VLAN); + memset(n->vlans, vlan ? 0 : 0xff, MAX_VLAN >> 3); } - if (virtio_has_feature(features, VIRTIO_NET_F_STANDBY)) { + if (virtio_has_feature_ex(features, VIRTIO_NET_F_STANDBY)) { qapi_event_send_failover_negotiated(n->netclient_name); qatomic_set(&n->failover_primary_hidden, false); failover_add_primary(n, &err); @@ -1288,6 +1344,8 @@ static bool virtio_net_load_ebpf_fds(VirtIONet *n, Error **errp) static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp) { + Error *err = NULL; + if (!virtio_net_attach_ebpf_to_backend(n->nic, -1)) { return true; } @@ -1305,7 +1363,11 @@ static bool virtio_net_load_ebpf(VirtIONet *n, Error **errp) return virtio_net_load_ebpf_fds(n, errp); } - ebpf_rss_load(&n->ebpf_rss, &error_warn); + ebpf_rss_load(&n->ebpf_rss, &err); + /* Beware, ebpf_rss_load() can return false with @err unset */ + if (err) { + warn_report_err(err); + } return true; } @@ -1898,10 +1960,10 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, virtio_error(vdev, "virtio-net unexpected empty queue: " "i %zd mergeable %d offset %zd, size %zd, " "guest hdr len %zd, host hdr len %zd " - "guest features 0x%" PRIx64, + "guest features 0x" VIRTIO_FEATURES_FMT, i, n->mergeable_rx_bufs, offset, size, n->guest_hdr_len, n->host_hdr_len, - vdev->guest_features); + VIRTIO_FEATURES_PR(vdev->guest_features_ex)); } err = -1; goto err; @@ -3008,8 +3070,8 @@ static int virtio_net_pre_load_queues(VirtIODevice *vdev, uint32_t n) return 0; } -static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, - Error **errp) +static void virtio_net_get_features(VirtIODevice *vdev, uint64_t *features, + Error **errp) { VirtIONet *n = VIRTIO_NET(vdev); NetClientState *nc = qemu_get_queue(n->nic); @@ -3023,68 +3085,83 @@ static uint64_t 
virtio_net_get_features(VirtIODevice *vdev, uint64_t features, (supported_hash_types & peer_hash_types) == supported_hash_types; /* Firstly sync all virtio-net possible supported features */ - features |= n->host_features; + virtio_features_or(features, features, n->host_features_ex); - virtio_add_feature(&features, VIRTIO_NET_F_MAC); + virtio_add_feature_ex(features, VIRTIO_NET_F_MAC); if (!peer_has_vnet_hdr(n)) { - virtio_clear_feature(&features, VIRTIO_NET_F_CSUM); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO4); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_TSO6); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_ECN); + virtio_clear_feature_ex(features, VIRTIO_NET_F_CSUM); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_TSO4); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_TSO6); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_ECN); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_CSUM); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO4); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_TSO6); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ECN); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_CSUM); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_TSO4); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_TSO6); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_ECN); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_USO); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO4); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO6); - virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO); + virtio_clear_feature_ex(features, + 
VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM); + virtio_clear_feature_ex(features, + VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM); + + virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT); } if (!peer_has_vnet_hdr(n) || !peer_has_ufo(n)) { - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_UFO); - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_UFO); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_UFO); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_UFO); } - if (!peer_has_uso(n)) { - virtio_clear_feature(&features, VIRTIO_NET_F_HOST_USO); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO4); - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_USO6); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_USO); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO4); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_USO6); + } + + if (!peer_has_tunnel(n)) { + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO); + virtio_clear_feature_ex(features, + VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM); + virtio_clear_feature_ex(features, + VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM); } if (!get_vhost_net(nc->peer)) { if (!use_own_hash) { - virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); - virtio_clear_feature(&features, VIRTIO_NET_F_RSS); - } else if (virtio_has_feature(features, VIRTIO_NET_F_RSS)) { + virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT); + virtio_clear_feature_ex(features, VIRTIO_NET_F_RSS); + } else if (virtio_has_feature_ex(features, VIRTIO_NET_F_RSS)) { virtio_net_load_ebpf(n, errp); } - return features; + return; } if (!use_peer_hash) { - virtio_clear_feature(&features, VIRTIO_NET_F_HASH_REPORT); + virtio_clear_feature_ex(features, VIRTIO_NET_F_HASH_REPORT); if (!use_own_hash || !virtio_net_attach_ebpf_to_backend(n->nic, -1)) { if (!virtio_net_load_ebpf(n, errp)) { - return features; + return; } - virtio_clear_feature(&features, 
VIRTIO_NET_F_RSS); + virtio_clear_feature_ex(features, VIRTIO_NET_F_RSS); } } - features = vhost_net_get_features(get_vhost_net(nc->peer), features); - vdev->backend_features = features; + vhost_net_get_features_ex(get_vhost_net(nc->peer), features); + virtio_features_copy(vdev->backend_features_ex, features); if (n->mtu_bypass_backend && (n->host_features & 1ULL << VIRTIO_NET_F_MTU)) { - features |= (1ULL << VIRTIO_NET_F_MTU); + virtio_add_feature_ex(features, VIRTIO_NET_F_MTU); } /* @@ -3099,10 +3176,8 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, * support it. */ if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) { - virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE); + virtio_clear_feature_ex(features, VIRTIO_NET_F_GUEST_ANNOUNCE); } - - return features; } static int virtio_net_post_load_device(void *opaque, int version_id) @@ -3110,13 +3185,15 @@ static int virtio_net_post_load_device(void *opaque, int version_id) VirtIONet *n = opaque; VirtIODevice *vdev = VIRTIO_DEVICE(n); int i, link_down; + bool has_tunnel_hdr = virtio_has_tunnel_hdr(vdev->guest_features_ex); trace_virtio_net_post_load_device(); virtio_net_set_mrg_rx_bufs(n, n->mergeable_rx_bufs, virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1), virtio_vdev_has_feature(vdev, - VIRTIO_NET_F_HASH_REPORT)); + VIRTIO_NET_F_HASH_REPORT), + has_tunnel_hdr); /* MAC_TABLE_ENTRIES may be different from the saved image */ if (n->mac_table.in_use > MAC_TABLE_ENTRIES) { @@ -3936,12 +4013,13 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) n->vqs[0].tx_waiting = 0; n->tx_burst = n->net_conf.txburst; - virtio_net_set_mrg_rx_bufs(n, 0, 0, 0); + virtio_net_set_mrg_rx_bufs(n, 0, 0, 0, 0); n->promisc = 1; /* for compatibility */ n->mac_table.macs = g_malloc0(MAC_TABLE_ENTRIES * ETH_ALEN); n->vlans = g_malloc0(MAX_VLAN >> 3); + memset(n->vlans, 0xff, MAX_VLAN >> 3); nc = qemu_get_queue(n->nic); nc->rxfilter_notify_enabled = 1; @@ 
-4041,7 +4119,6 @@ static void virtio_net_reset(VirtIODevice *vdev) memset(n->mac_table.macs, 0, MAC_TABLE_ENTRIES * ETH_ALEN); memcpy(&n->mac[0], &n->nic->conf->macaddr, sizeof(n->mac)); qemu_format_nic_info_str(qemu_get_queue(n->nic), n->mac); - memset(n->vlans, 0, MAX_VLAN >> 3); /* Flush any async TX */ for (i = 0; i < n->max_queue_pairs; i++) { @@ -4220,6 +4297,22 @@ static const Property virtio_net_properties[] = { rss_data.specified_hash_types, VIRTIO_NET_HASH_REPORT_UDPv6_EX - 1, ON_OFF_AUTO_AUTO), + VIRTIO_DEFINE_PROP_FEATURE("host_tunnel", VirtIONet, + host_features_ex, + VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO, + false), + VIRTIO_DEFINE_PROP_FEATURE("host_tunnel_csum", VirtIONet, + host_features_ex, + VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM, + false), + VIRTIO_DEFINE_PROP_FEATURE("guest_tunnel", VirtIONet, + host_features_ex, + VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO, + false), + VIRTIO_DEFINE_PROP_FEATURE("guest_tunnel_csum", VirtIONet, + host_features_ex, + VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM, + false), }; static void virtio_net_class_init(ObjectClass *klass, const void *data) @@ -4234,8 +4327,8 @@ static void virtio_net_class_init(ObjectClass *klass, const void *data) vdc->unrealize = virtio_net_device_unrealize; vdc->get_config = virtio_net_get_config; vdc->set_config = virtio_net_set_config; - vdc->get_features = virtio_net_get_features; - vdc->set_features = virtio_net_set_features; + vdc->get_features_ex = virtio_net_get_features; + vdc->set_features_ex = virtio_net_set_features; vdc->bad_features = virtio_net_bad_features; vdc->reset = virtio_net_reset; vdc->queue_reset = virtio_net_queue_reset; diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index af73aa8ef285d..03732375a76d4 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -1322,14 +1322,11 @@ static void vmxnet3_update_features(VMXNET3State *s) s->lro_supported, rxcso_supported, s->rx_vlan_stripping); if (s->peer_has_vhdr) { - qemu_set_offload(qemu_get_queue(s->nic)->peer, - rxcso_supported, - 
s->lro_supported, - s->lro_supported, - 0, - 0, - 0, - 0); + NetOffloads ol = { .csum = rxcso_supported, + .tso4 = s->lro_supported, + .tso6 = s->lro_supported }; + + qemu_set_offload(qemu_get_queue(s->nic)->peer, &ol); } } diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index e764ec7683ab4..cd81f7399754b 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -6816,7 +6816,7 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) switch (sel) { case NVME_NS_ATTACHMENT_ATTACH: - if (nvme_ns(n, nsid)) { + if (nvme_ns(ctrl, nsid)) { return NVME_NS_ALREADY_ATTACHED | NVME_DNR; } @@ -6824,7 +6824,7 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) return NVME_NS_PRIVATE | NVME_DNR; } - if (!nvme_csi_supported(n, ns->csi)) { + if (!nvme_csi_supported(ctrl, ns->csi)) { return NVME_IOCS_NOT_SUPPORTED | NVME_DNR; } @@ -6834,6 +6834,10 @@ static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req) break; case NVME_NS_ATTACHMENT_DETACH: + if (!nvme_ns(ctrl, nsid)) { + return NVME_NS_NOT_ATTACHED | NVME_DNR; + } + nvme_detach_ns(ctrl, ns); nvme_update_dsm_limits(ctrl, NULL); @@ -8335,6 +8339,11 @@ static bool nvme_check_params(NvmeCtrl *n, Error **errp) host_memory_backend_set_mapped(n->pmr.dev, true); } + if (!n->params.mdts || ((1 << n->params.mdts) + 1) > IOV_MAX) { + error_setg(errp, "mdts exceeds IOV_MAX"); + return false; + } + if (n->params.zasl > n->params.mdts) { error_setg(errp, "zoned.zasl (Zone Append Size Limit) must be less " "than or equal to mdts (Maximum Data Transfer Size)"); @@ -8699,12 +8708,8 @@ static bool nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp) msix_table_offset); memory_region_add_subregion(&n->bar0, 0, &n->iomem); - if (pci_is_vf(pci_dev)) { - pcie_sriov_vf_register_bar(pci_dev, 0, &n->bar0); - } else { - pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | - PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); - } + pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY | + 
PCI_BASE_ADDRESS_MEM_TYPE_64, &n->bar0); ret = msix_init(pci_dev, nr_vectors, &n->bar0, 0, msix_table_offset, @@ -8776,7 +8781,7 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) uint8_t *pci_conf = pci_dev->config; uint64_t cap = ldq_le_p(&n->bar.cap); NvmeSecCtrlEntry *sctrl = nvme_sctrl(n); - uint32_t ctratt; + uint32_t ctratt = le32_to_cpu(id->ctratt); uint16_t oacs; memcpy(n->cse.acs, nvme_cse_acs_default, sizeof(n->cse.acs)); @@ -8794,10 +8799,11 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->oaes = cpu_to_le32(NVME_OAES_NS_ATTR); - ctratt = NVME_CTRATT_ELBAS; + ctratt |= NVME_CTRATT_ELBAS; if (n->params.ctratt.mem) { ctratt |= NVME_CTRATT_MEM; } + id->ctratt = cpu_to_le32(ctratt); id->rab = 6; @@ -8880,17 +8886,6 @@ static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev) id->psd[0].enlat = cpu_to_le32(0x10); id->psd[0].exlat = cpu_to_le32(0x4); - id->cmic |= NVME_CMIC_MULTI_CTRL; - ctratt |= NVME_CTRATT_ENDGRPS; - - id->endgidmax = cpu_to_le16(0x1); - - if (n->subsys->endgrp.fdp.enabled) { - ctratt |= NVME_CTRATT_FDPS; - } - - id->ctratt = cpu_to_le32(ctratt); - NVME_CAP_SET_MQES(cap, n->params.mqes); NVME_CAP_SET_CQR(cap, 1); NVME_CAP_SET_TO(cap, 0xf); @@ -8923,6 +8918,20 @@ static int nvme_init_subsys(NvmeCtrl *n, Error **errp) } n->subsys = NVME_SUBSYS(dev); + } else { + NvmeIdCtrl *id = &n->id_ctrl; + uint32_t ctratt = le32_to_cpu(id->ctratt); + + id->cmic |= NVME_CMIC_MULTI_CTRL; + ctratt |= NVME_CTRATT_ENDGRPS; + + id->endgidmax = cpu_to_le16(0x1); + + if (n->subsys->endgrp.fdp.enabled) { + ctratt |= NVME_CTRATT_FDPS; + } + + id->ctratt = cpu_to_le32(ctratt); } cntlid = nvme_subsys_register_ctrl(n, errp); diff --git a/hw/nvram/aspeed_otp.c b/hw/nvram/aspeed_otp.c new file mode 100644 index 0000000000000..dcf8ed3917e29 --- /dev/null +++ b/hw/nvram/aspeed_otp.c @@ -0,0 +1,190 @@ +/* + * ASPEED OTP (One-Time Programmable) memory + * + * Copyright (C) 2025 Aspeed + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + 
+#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "system/block-backend.h" +#include "hw/qdev-properties.h" +#include "hw/nvram/aspeed_otp.h" +#include "hw/nvram/trace.h" + +static uint64_t aspeed_otp_read(void *opaque, hwaddr offset, unsigned size) +{ + AspeedOTPState *s = opaque; + uint64_t val = 0; + + memcpy(&val, s->storage + offset, size); + + return val; +} + +static bool valid_program_data(uint32_t otp_addr, + uint32_t value, uint32_t prog_bit) +{ + uint32_t programmed_bits, has_programmable_bits; + bool is_odd = otp_addr & 1; + + /* + * prog_bit uses 0s to indicate target bits to program: + * - if OTP word is even-indexed, programmed bits flip 0->1 + * - if odd, bits flip 1->0 + * Bit programming is one-way only and irreversible. + */ + if (is_odd) { + programmed_bits = ~value & prog_bit; + } else { + programmed_bits = value & (~prog_bit); + } + + /* If any bit can be programmed, accept the request */ + has_programmable_bits = value ^ (~prog_bit); + + if (programmed_bits) { + trace_aspeed_otp_prog_conflict(otp_addr, programmed_bits); + for (int i = 0; i < 32; ++i) { + if (programmed_bits & (1U << i)) { + trace_aspeed_otp_prog_bit(i); + } + } + } + + return has_programmable_bits != 0; +} + +static bool program_otpmem_data(void *opaque, uint32_t otp_addr, + uint32_t prog_bit, uint32_t *value) +{ + AspeedOTPState *s = opaque; + bool is_odd = otp_addr & 1; + uint32_t otp_offset = otp_addr << 2; + + memcpy(value, s->storage + otp_offset, sizeof(uint32_t)); + + if (!valid_program_data(otp_addr, *value, prog_bit)) { + return false; + } + + if (is_odd) { + *value &= ~prog_bit; + } else { + *value |= ~prog_bit; + } + + return true; +} + +static void aspeed_otp_write(void *opaque, hwaddr otp_addr, + uint64_t val, unsigned size) +{ + AspeedOTPState *s = opaque; + uint32_t otp_offset, value; + + if (!program_otpmem_data(s, otp_addr, val, &value)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to program data, value = %x, bit = 
%"PRIx64"\n", + __func__, value, val); + return; + } + + otp_offset = otp_addr << 2; + memcpy(s->storage + otp_offset, &value, size); + + if (s->blk) { + if (blk_pwrite(s->blk, otp_offset, size, &value, 0) < 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Failed to write %x to %x\n", + __func__, value, otp_offset); + + return; + } + } + trace_aspeed_otp_prog(otp_offset, val, value); +} + +static bool aspeed_otp_init_storage(AspeedOTPState *s, Error **errp) +{ + uint32_t *p; + int i, num; + uint64_t perm; + + if (s->blk) { + perm = BLK_PERM_CONSISTENT_READ | + (blk_supports_write_perm(s->blk) ? BLK_PERM_WRITE : 0); + if (blk_set_perm(s->blk, perm, BLK_PERM_ALL, errp) < 0) { + return false; + } + if (blk_pread(s->blk, 0, s->size, s->storage, 0) < 0) { + error_setg(errp, "Failed to read the initial flash content"); + return false; + } + } else { + num = s->size / sizeof(uint32_t); + p = (uint32_t *)s->storage; + for (i = 0; i < num; i++) { + p[i] = (i % 2 == 0) ? 0x00000000 : 0xFFFFFFFF; + } + } + return true; +} + +static const MemoryRegionOps aspeed_otp_ops = { + .read = aspeed_otp_read, + .write = aspeed_otp_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .valid.unaligned = true, + .impl.unaligned = true +}; + +static void aspeed_otp_realize(DeviceState *dev, Error **errp) +{ + AspeedOTPState *s = ASPEED_OTP(dev); + + if (s->size == 0) { + error_setg(errp, "aspeed.otp: 'size' property must be set"); + return; + } + + s->storage = blk_blockalign(s->blk, s->size); + + if (!aspeed_otp_init_storage(s, errp)) { + return; + } + + memory_region_init_io(&s->mmio, OBJECT(dev), &aspeed_otp_ops, + s, "aspeed.otp", s->size); + address_space_init(&s->as, &s->mmio, NULL); +} + +static const Property aspeed_otp_properties[] = { + DEFINE_PROP_UINT64("size", AspeedOTPState, size, 0), + DEFINE_PROP_DRIVE("drive", AspeedOTPState, blk), +}; + +static void aspeed_otp_class_init(ObjectClass *klass, const void *data) +{ + 
DeviceClass *dc = DEVICE_CLASS(klass); + dc->realize = aspeed_otp_realize; + device_class_set_props(dc, aspeed_otp_properties); +} + +static const TypeInfo aspeed_otp_info = { + .name = TYPE_ASPEED_OTP, + .parent = TYPE_DEVICE, + .instance_size = sizeof(AspeedOTPState), + .class_init = aspeed_otp_class_init, +}; + +static void aspeed_otp_register_types(void) +{ + type_register_static(&aspeed_otp_info); +} + +type_init(aspeed_otp_register_types) diff --git a/hw/nvram/meson.build b/hw/nvram/meson.build index 10f3639db6aea..b66f23605b772 100644 --- a/hw/nvram/meson.build +++ b/hw/nvram/meson.build @@ -19,3 +19,7 @@ system_ss.add(when: 'CONFIG_XLNX_BBRAM', if_true: files('xlnx-bbram.c')) specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr_nvram.c')) specific_ss.add(when: 'CONFIG_ACPI', if_true: files('fw_cfg-acpi.c')) + +system_ss.add(when: 'CONFIG_ASPEED_SOC', if_true: files( + 'aspeed_otp.c', + )) \ No newline at end of file diff --git a/hw/nvram/trace-events b/hw/nvram/trace-events index 5e33b24d47a0d..7084bf70d35fa 100644 --- a/hw/nvram/trace-events +++ b/hw/nvram/trace-events @@ -1,5 +1,10 @@ # See docs/devel/tracing.rst for syntax documentation. 
+# aspeed_otp.c +aspeed_otp_prog(uint32_t addr, uint32_t prog_value, uint32_t value) "OTP Memory program: addr 0x%" PRIx32 " prog_value 0x%" PRIx32 " value 0x%" PRIx32 +aspeed_otp_prog_conflict(uint32_t addr, uint32_t bits) "Conflict at addr=0x%x, bits=0x%08x" +aspeed_otp_prog_bit(int bit) "Programmed bit %d" + # ds1225y.c nvram_read(uint32_t addr, uint32_t ret) "read addr %d: 0x%02x" nvram_write(uint32_t addr, uint32_t old, uint32_t val) "write addr %d: 0x%02x -> 0x%02x" diff --git a/hw/nvram/xlnx-bbram.c b/hw/nvram/xlnx-bbram.c index 5702bb3f31087..22aefbc240d51 100644 --- a/hw/nvram/xlnx-bbram.c +++ b/hw/nvram/xlnx-bbram.c @@ -456,8 +456,9 @@ static void bbram_ctrl_init(Object *obj) { XlnxBBRam *s = XLNX_BBRAM(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + RegisterInfoArray *reg_array; - s->reg_array = + reg_array = register_init_block32(DEVICE(obj), bbram_ctrl_regs_info, ARRAY_SIZE(bbram_ctrl_regs_info), s->regs_info, s->regs, @@ -465,17 +466,10 @@ static void bbram_ctrl_init(Object *obj) XLNX_BBRAM_ERR_DEBUG, R_MAX * 4); - sysbus_init_mmio(sbd, &s->reg_array->mem); + sysbus_init_mmio(sbd, ®_array->mem); sysbus_init_irq(sbd, &s->irq_bbram); } -static void bbram_ctrl_finalize(Object *obj) -{ - XlnxBBRam *s = XLNX_BBRAM(obj); - - register_finalize_block(s->reg_array); -} - static void bbram_prop_set_drive(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -542,7 +536,6 @@ static const TypeInfo bbram_ctrl_info = { .instance_size = sizeof(XlnxBBRam), .class_init = bbram_ctrl_class_init, .instance_init = bbram_ctrl_init, - .instance_finalize = bbram_ctrl_finalize, }; static void bbram_ctrl_register_types(void) diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c index 90962198008e7..6f17f32a0c327 100644 --- a/hw/nvram/xlnx-versal-efuse-ctrl.c +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -728,7 +728,6 @@ static void efuse_ctrl_finalize(Object *obj) { XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); - 
register_finalize_block(s->reg_array); g_free(s->extra_pg0_lock_spec); } diff --git a/hw/nvram/xlnx-zynqmp-efuse.c b/hw/nvram/xlnx-zynqmp-efuse.c index 5a218c32e84c9..ce35bb0cc1f72 100644 --- a/hw/nvram/xlnx-zynqmp-efuse.c +++ b/hw/nvram/xlnx-zynqmp-efuse.c @@ -816,13 +816,6 @@ static void zynqmp_efuse_init(Object *obj) sysbus_init_irq(sbd, &s->irq); } -static void zynqmp_efuse_finalize(Object *obj) -{ - XlnxZynqMPEFuse *s = XLNX_ZYNQMP_EFUSE(obj); - - register_finalize_block(s->reg_array); -} - static const VMStateDescription vmstate_efuse = { .name = TYPE_XLNX_ZYNQMP_EFUSE, .version_id = 1, @@ -857,7 +850,6 @@ static const TypeInfo efuse_info = { .instance_size = sizeof(XlnxZynqMPEFuse), .class_init = zynqmp_efuse_class_init, .instance_init = zynqmp_efuse_init, - .instance_finalize = zynqmp_efuse_finalize, }; static void efuse_register_types(void) diff --git a/hw/openrisc/cputimer.c b/hw/openrisc/cputimer.c index 6331997d56ba0..51da226fcdeb2 100644 --- a/hw/openrisc/cputimer.c +++ b/hw/openrisc/cputimer.c @@ -105,7 +105,7 @@ static void openrisc_timer_cb(void *opaque) CPUState *cs = CPU(cpu); cpu->env.ttmr |= TTMR_IP; - cs->interrupt_request |= CPU_INTERRUPT_TIMER; + cpu_set_interrupt(cs, CPU_INTERRUPT_TIMER); } switch (cpu->env.ttmr & TTMR_M) { diff --git a/hw/openrisc/openrisc_sim.c b/hw/openrisc/openrisc_sim.c index 880c8ebbb8b85..b7d9cdd900731 100644 --- a/hw/openrisc/openrisc_sim.c +++ b/hw/openrisc/openrisc_sim.c @@ -247,10 +247,10 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base, OpenRISCCPU *cpus[], int irq_pin, int uart_idx) { + g_autofree char *alias = g_strdup_printf("serial%d", uart_idx); void *fdt = state->fdt; char *nodename; qemu_irq serial_irq; - char alias[sizeof("serial0")]; int i; if (num_cpus > 1) { @@ -281,7 +281,6 @@ static void openrisc_sim_serial_init(Or1ksimState *state, hwaddr base, /* The /chosen node is created during fdt creation. 
*/ qemu_fdt_setprop_string(fdt, "/chosen", "stdout-path", nodename); } - snprintf(alias, sizeof(alias), "serial%d", uart_idx); qemu_fdt_setprop_string(fdt, "/aliases", alias, nodename); g_free(nodename); diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 3a29dfefc2c35..1bcceddbc4d7d 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -34,7 +34,6 @@ typedef struct PXBBus PXBBus; DECLARE_INSTANCE_CHECKER(PXBBus, PXB_BUS, TYPE_PXB_BUS) -#define TYPE_PXB_PCIE_BUS "pxb-pcie-bus" DECLARE_INSTANCE_CHECKER(PXBBus, PXB_PCIE_BUS, TYPE_PXB_PCIE_BUS) diff --git a/hw/pci-host/Kconfig b/hw/pci-host/Kconfig index 9824fa188d6b8..8cbb8304a3ff6 100644 --- a/hw/pci-host/Kconfig +++ b/hw/pci-host/Kconfig @@ -46,6 +46,10 @@ config PCI_I440FX select PCI select PAM +config PCI_EXPRESS_ASPEED + bool + select PCI_EXPRESS + config PCI_EXPRESS_Q35 bool select PCI_EXPRESS diff --git a/hw/pci-host/aspeed_pcie.c b/hw/pci-host/aspeed_pcie.c new file mode 100644 index 0000000000000..f7593444fc42f --- /dev/null +++ b/hw/pci-host/aspeed_pcie.c @@ -0,0 +1,1015 @@ +/* + * ASPEED PCIe Host Controller + * + * Copyright (C) 2025 ASPEED Technology Inc. + * Copyright (c) 2022 Cédric Le Goater + * + * Authors: + * Cédric Le Goater + * Jamin Lin + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Based on previous work from Cédric Le Goater. + * Modifications extend support for the ASPEED AST2600 and AST2700 platforms. + */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/qdev-properties.h" +#include "hw/registerfields.h" +#include "hw/irq.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pcie_port.h" +#include "hw/pci-host/aspeed_pcie.h" +#include "hw/pci/msi.h" +#include "trace.h" + +/* + * PCIe Root Device + * This device exists only on AST2600. 
+ */ + +static void aspeed_pcie_root_device_class_init(ObjectClass *klass, + const void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + dc->desc = "ASPEED PCIe Root Device"; + k->vendor_id = PCI_VENDOR_ID_ASPEED; + k->device_id = 0x2600; + k->class_id = PCI_CLASS_BRIDGE_HOST; + k->subsystem_vendor_id = k->vendor_id; + k->subsystem_id = k->device_id; + k->revision = 0; + + /* + * PCI-facing part of the host bridge, + * not usable without the host-facing part + */ + dc->user_creatable = false; +} + +static const TypeInfo aspeed_pcie_root_device_info = { + .name = TYPE_ASPEED_PCIE_ROOT_DEVICE, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(AspeedPCIERootDeviceState), + .class_init = aspeed_pcie_root_device_class_init, + .interfaces = (const InterfaceInfo[]) { + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + { }, + }, +}; + +/* + * PCIe Root Port + */ + +static void aspeed_pcie_root_port_class_init(ObjectClass *klass, + const void *data) +{ + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + dc->desc = "ASPEED PCIe Root Port"; + k->vendor_id = PCI_VENDOR_ID_ASPEED; + k->device_id = 0x1150; + dc->user_creatable = true; + + rpc->aer_offset = 0x100; +} + +static const TypeInfo aspeed_pcie_root_port_info = { + .name = TYPE_ASPEED_PCIE_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(AspeedPCIERootPortState), + .class_init = aspeed_pcie_root_port_class_init, +}; + +/* + * PCIe Root Complex (RC) + */ + +#define ASPEED_PCIE_CFG_RC_MAX_MSI 64 + +static void aspeed_pcie_rc_set_irq(void *opaque, int irq, int level) +{ + AspeedPCIERcState *rc = (AspeedPCIERcState *) opaque; + AspeedPCIECfgState *cfg = + container_of(rc, AspeedPCIECfgState, rc); + bool intx; + + assert(irq < PCI_NUM_PINS); + + if (level) { + cfg->regs[cfg->rc_regs->int_sts_reg] |= BIT(irq); + } 
else { + cfg->regs[cfg->rc_regs->int_sts_reg] &= ~BIT(irq); + } + + intx = !!(cfg->regs[cfg->rc_regs->int_sts_reg] & + cfg->regs[cfg->rc_regs->int_en_reg]); + trace_aspeed_pcie_rc_intx_set_irq(cfg->id, irq, intx); + qemu_set_irq(rc->irq, intx); +} + +static int aspeed_pcie_rc_map_irq(PCIDevice *pci_dev, int irq_num) +{ + return irq_num % PCI_NUM_PINS; +} + +static void aspeed_pcie_rc_msi_notify(AspeedPCIERcState *rc, uint64_t data) +{ + AspeedPCIECfgState *cfg = + container_of(rc, AspeedPCIECfgState, rc); + uint32_t reg; + + /* Written data is the HW IRQ number */ + assert(data < ASPEED_PCIE_CFG_RC_MAX_MSI); + + reg = (data < 32) ? + cfg->rc_regs->msi_sts0_reg : cfg->rc_regs->msi_sts1_reg; + cfg->regs[reg] |= BIT(data % 32); + + trace_aspeed_pcie_rc_msi_set_irq(cfg->id, data, 1); + qemu_set_irq(rc->irq, 1); +} + +static void aspeed_pcie_rc_msi_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedPCIERcState *rc = ASPEED_PCIE_RC(opaque); + AspeedPCIECfgState *cfg = + container_of(rc, AspeedPCIECfgState, rc); + + trace_aspeed_pcie_rc_msi_notify(cfg->id, addr + rc->msi_addr, data); + aspeed_pcie_rc_msi_notify(rc, data); +} + +static const MemoryRegionOps aspeed_pcie_rc_msi_ops = { + .write = aspeed_pcie_rc_msi_write, + .read = NULL, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 4, + }, +}; + +static AddressSpace *aspeed_pcie_rc_get_as(PCIBus *bus, void *opaque, int devfn) +{ + AspeedPCIERcState *rc = ASPEED_PCIE_RC(opaque); + return &rc->iommu_as; +} + +static const PCIIOMMUOps aspeed_pcie_rc_iommu_ops = { + .get_address_space = aspeed_pcie_rc_get_as, +}; + +static void aspeed_pcie_rc_realize(DeviceState *dev, Error **errp) +{ + PCIExpressHost *pex = PCIE_HOST_BRIDGE(dev); + AspeedPCIERcState *rc = ASPEED_PCIE_RC(dev); + AspeedPCIECfgState *cfg = + container_of(rc, AspeedPCIECfgState, rc); + PCIHostState *pci = 
PCI_HOST_BRIDGE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + g_autofree char *ioport_window_name = NULL; + g_autofree char *mmio_window_name = NULL; + g_autofree char *iommu_root_name = NULL; + g_autofree char *dram_alias_name = NULL; + g_autofree char *root_bus_name = NULL; + + /* PCI configuration space */ + pcie_host_mmcfg_init(pex, PCIE_MMCFG_SIZE_MAX); + sysbus_init_mmio(sbd, &pex->mmio); + + /* MMIO and IO region */ + memory_region_init(&rc->mmio, OBJECT(rc), "mmio", UINT64_MAX); + memory_region_init(&rc->io, OBJECT(rc), "io", 0x10000); + + mmio_window_name = g_strdup_printf("pcie.%d.mmio_window", cfg->id); + memory_region_init_io(&rc->mmio_window, OBJECT(rc), &unassigned_io_ops, + OBJECT(rc), mmio_window_name, UINT64_MAX); + ioport_window_name = g_strdup_printf("pcie.%d.ioport_window", cfg->id); + memory_region_init_io(&rc->io_window, OBJECT(rc), &unassigned_io_ops, + OBJECT(rc), ioport_window_name, 0x10000); + + memory_region_add_subregion(&rc->mmio_window, 0, &rc->mmio); + memory_region_add_subregion(&rc->io_window, 0, &rc->io); + sysbus_init_mmio(sbd, &rc->mmio_window); + sysbus_init_mmio(sbd, &rc->io_window); + + sysbus_init_irq(sbd, &rc->irq); + root_bus_name = g_strdup_printf("pcie.rc%d", cfg->id); + pci->bus = pci_register_root_bus(dev, root_bus_name, + aspeed_pcie_rc_set_irq, + aspeed_pcie_rc_map_irq, rc, &rc->mmio, + &rc->io, 0, 4, TYPE_PCIE_BUS); + pci->bus->flags |= PCI_BUS_EXTENDED_CONFIG_SPACE; + + /* + * PCIe memory view setup + * + * Background: + * - On AST2700, all Root Complexes use the same MSI address. This MSI + * address is not normal system RAM - it is a PCI system memory address. + * If we map the MSI/MSI-X window into real system memory, a write from + * one EP can be seen by all RCs and wrongly trigger interrupts on them. + * + * Design: + * - MSI/MSI-X here is just a placeholder address so RC and EP can talk. + * We make a separate MMIO space (iommu_root) for the MSI window so the + * writes stay local to each RC. 
+ * + * DMA: + * - EPs still need access to real system memory for DMA. We add a DRAM + * alias in the PCI space so DMA works as expected. + */ + iommu_root_name = g_strdup_printf("pcie.%d.iommu_root", cfg->id); + memory_region_init(&rc->iommu_root, OBJECT(rc), iommu_root_name, + UINT64_MAX); + address_space_init(&rc->iommu_as, &rc->iommu_root, iommu_root_name); + /* setup MSI */ + memory_region_init_io(&rc->msi_window, OBJECT(rc), + &aspeed_pcie_rc_msi_ops, rc, + "msi_window", 4); + memory_region_add_subregion(&rc->iommu_root, rc->msi_addr, + &rc->msi_window); + /* setup DRAM for DMA */ + assert(rc->dram_mr != NULL); + dram_alias_name = g_strdup_printf("pcie.%d.dram_alias", cfg->id); + memory_region_init_alias(&rc->dram_alias, OBJECT(rc), dram_alias_name, + rc->dram_mr, 0, memory_region_size(rc->dram_mr)); + memory_region_add_subregion(&rc->iommu_root, rc->dram_base, + &rc->dram_alias); + pci_setup_iommu(pci->bus, &aspeed_pcie_rc_iommu_ops, rc); + + /* setup root device */ + if (rc->has_rd) { + object_initialize_child(OBJECT(rc), "root_device", &rc->root_device, + TYPE_ASPEED_PCIE_ROOT_DEVICE); + qdev_prop_set_int32(DEVICE(&rc->root_device), "addr", + PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(&rc->root_device), "multifunction", false); + if (!qdev_realize(DEVICE(&rc->root_device), BUS(pci->bus), errp)) { + return; + } + } + + /* setup root port */ + qdev_prop_set_int32(DEVICE(&rc->root_port), "addr", rc->rp_addr); + qdev_prop_set_uint16(DEVICE(&rc->root_port), "chassis", cfg->id); + if (!qdev_realize(DEVICE(&rc->root_port), BUS(pci->bus), errp)) { + return; + } +} + +static const char *aspeed_pcie_rc_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + AspeedPCIERcState *rc = ASPEED_PCIE_RC(host_bridge); + AspeedPCIECfgState *cfg = + container_of(rc, AspeedPCIECfgState, rc); + + snprintf(rc->name, sizeof(rc->name), "%04x:%02x", cfg->id, rc->bus_nr); + + return rc->name; +} + +static void aspeed_pcie_rc_instance_init(Object *obj) +{ + 
AspeedPCIERcState *rc = ASPEED_PCIE_RC(obj); + AspeedPCIERootPortState *root_port = &rc->root_port; + + object_initialize_child(obj, "root_port", root_port, + TYPE_ASPEED_PCIE_ROOT_PORT); +} + +static const Property aspeed_pcie_rc_props[] = { + DEFINE_PROP_UINT32("bus-nr", AspeedPCIERcState, bus_nr, 0), + DEFINE_PROP_BOOL("has-rd", AspeedPCIERcState, has_rd, 0), + DEFINE_PROP_UINT32("rp-addr", AspeedPCIERcState, rp_addr, 0), + DEFINE_PROP_UINT32("msi-addr", AspeedPCIERcState, msi_addr, 0), + DEFINE_PROP_UINT64("dram-base", AspeedPCIERcState, dram_base, 0), + DEFINE_PROP_LINK("dram", AspeedPCIERcState, dram_mr, TYPE_MEMORY_REGION, + MemoryRegion *), +}; + +static void aspeed_pcie_rc_class_init(ObjectClass *klass, const void *data) +{ + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->desc = "ASPEED PCIe RC"; + dc->realize = aspeed_pcie_rc_realize; + dc->fw_name = "pci"; + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); + + hc->root_bus_path = aspeed_pcie_rc_root_bus_path; + device_class_set_props(dc, aspeed_pcie_rc_props); + + msi_nonbroken = true; +} + +static const TypeInfo aspeed_pcie_rc_info = { + .name = TYPE_ASPEED_PCIE_RC, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(AspeedPCIERcState), + .instance_init = aspeed_pcie_rc_instance_init, + .class_init = aspeed_pcie_rc_class_init, +}; + +/* + * PCIe Config + * + * AHB to PCIe Bus Bridge (H2X) + * + * On the AST2600: + * NOTE: rc_l is not supported by this model. + * - Registers 0x00 - 0x7F are shared by both PCIe0 (rc_l) and PCIe1 (rc_h). + * - Registers 0x80 - 0xBF are specific to PCIe0. + * - Registers 0xC0 - 0xFF are specific to PCIe1. + * + * On the AST2700: + * - The register range 0x00 - 0xFF is assigned to a single PCIe configuration. + * - There are three PCIe Root Complexes (RCs), each with its own dedicated H2X + * register set of size 0x100 (covering offsets 0x00 to 0xFF). 
+ */ + +/* AST2600 */ +REG32(H2X_CTRL, 0x00) + FIELD(H2X_CTRL, CLEAR_RX, 4, 1) +REG32(H2X_TX_CLEAR, 0x08) + FIELD(H2X_TX_CLEAR, IDLE, 0, 1) +REG32(H2X_RDATA, 0x0C) +REG32(H2X_TX_DESC0, 0x10) +REG32(H2X_TX_DESC1, 0x14) +REG32(H2X_TX_DESC2, 0x18) +REG32(H2X_TX_DESC3, 0x1C) +REG32(H2X_TX_DATA, 0x20) +REG32(H2X_TX_STS, 0x24) + FIELD(H2X_TX_STS, IDLE, 31, 1) + FIELD(H2X_TX_STS, RC_L_TX_COMP, 24, 1) + FIELD(H2X_TX_STS, RC_H_TX_COMP, 25, 1) + FIELD(H2X_TX_STS, TRIG, 0, 1) +REG32(H2X_RC_H_CTRL, 0xC0) +REG32(H2X_RC_H_INT_EN, 0xC4) +REG32(H2X_RC_H_INT_STS, 0xC8) + SHARED_FIELD(H2X_RC_INT_INTDONE, 4, 1) + SHARED_FIELD(H2X_RC_INT_INTX, 0, 4) +REG32(H2X_RC_H_RDATA, 0xCC) +REG32(H2X_RC_H_MSI_EN0, 0xE0) +REG32(H2X_RC_H_MSI_EN1, 0xE4) +REG32(H2X_RC_H_MSI_STS0, 0xE8) +REG32(H2X_RC_H_MSI_STS1, 0xEC) + +/* AST2700 */ +REG32(H2X_CFGE_INT_STS, 0x08) + FIELD(H2X_CFGE_INT_STS, TX_IDEL, 0, 1) + FIELD(H2X_CFGE_INT_STS, RX_BUSY, 1, 1) +REG32(H2X_CFGI_TLP, 0x20) + FIELD(H2X_CFGI_TLP, ADDR, 0, 16) + FIELD(H2X_CFGI_TLP, BEN, 16, 4) + FIELD(H2X_CFGI_TLP, WR, 20, 1) +REG32(H2X_CFGI_WDATA, 0x24) +REG32(H2X_CFGI_CTRL, 0x28) + FIELD(H2X_CFGI_CTRL, FIRE, 0, 1) +REG32(H2X_CFGI_RDATA, 0x2C) +REG32(H2X_CFGE_TLP1, 0x30) +REG32(H2X_CFGE_TLPN, 0x34) +REG32(H2X_CFGE_CTRL, 0x38) + FIELD(H2X_CFGE_CTRL, FIRE, 0, 1) +REG32(H2X_CFGE_RDATA, 0x3C) +REG32(H2X_INT_EN, 0x40) +REG32(H2X_INT_STS, 0x48) + FIELD(H2X_INT_STS, INTX, 0, 4) +REG32(H2X_MSI_EN0, 0x50) +REG32(H2X_MSI_EN1, 0x54) +REG32(H2X_MSI_STS0, 0x58) +REG32(H2X_MSI_STS1, 0x5C) + +#define TLP_FMTTYPE_CFGRD0 0x04 /* Configuration Read Type 0 */ +#define TLP_FMTTYPE_CFGWR0 0x44 /* Configuration Write Type 0 */ +#define TLP_FMTTYPE_CFGRD1 0x05 /* Configuration Read Type 1 */ +#define TLP_FMTTYPE_CFGWR1 0x45 /* Configuration Write Type 1 */ + +#define PCIE_CFG_FMTTYPE_MASK(x) (((x) >> 24) & 0xff) +#define PCIE_CFG_BYTE_EN(x) ((x) & 0xf) + +static const AspeedPCIERegMap aspeed_regmap = { + .rc = { + .int_en_reg = R_H2X_RC_H_INT_EN, + .int_sts_reg = 
R_H2X_RC_H_INT_STS, + .msi_sts0_reg = R_H2X_RC_H_MSI_STS0, + .msi_sts1_reg = R_H2X_RC_H_MSI_STS1, + }, +}; + +static const AspeedPCIERegMap aspeed_2700_regmap = { + .rc = { + .int_en_reg = R_H2X_INT_EN, + .int_sts_reg = R_H2X_INT_STS, + .msi_sts0_reg = R_H2X_MSI_STS0, + .msi_sts1_reg = R_H2X_MSI_STS1, + }, +}; + +static uint64_t aspeed_pcie_cfg_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(opaque); + uint32_t reg = addr >> 2; + uint32_t value = 0; + + value = s->regs[reg]; + + trace_aspeed_pcie_cfg_read(s->id, addr, value); + + return value; +} + +static void aspeed_pcie_cfg_translate_write(uint8_t byte_en, uint32_t *addr, + uint64_t *val, int *len) +{ + uint64_t packed_val = 0; + int first_bit = -1; + int index = 0; + int i; + + *len = ctpop8(byte_en); + + if (*len == 0 || *len > 4) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid byte enable: 0x%x\n", + __func__, byte_en); + return; + } + + /* Special case: full 4-byte write must be 4-byte aligned */ + if (byte_en == 0x0f) { + if ((*addr & 0x3) != 0) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: 4-byte write not 4-byte aligned: addr=0x%x\n", + __func__, *addr); + return; + } + *val &= 0xffffffffULL; + return; + } + + for (i = 0; i < 4; i++) { + if (byte_en & (1 << i)) { + if (first_bit < 0) { + first_bit = i; + } + packed_val |= ((*val >> (i * 8)) & 0xff) << (index * 8); + index++; + } + } + + *addr += first_bit; + *val = packed_val; +} + +static void aspeed_pcie_cfg_readwrite(AspeedPCIECfgState *s, + const AspeedPCIECfgTxDesc *desc) +{ + AspeedPCIERcState *rc = &s->rc; + PCIHostState *pci = NULL; + PCIDevice *pdev = NULL; + uint32_t cfg_addr; + uint32_t offset; + uint8_t byte_en; + bool is_write; + uint8_t devfn; + uint64_t val; + uint8_t bus; + int len; + + val = ~0; + is_write = !!(desc->desc0 & BIT(30)); + cfg_addr = desc->desc2; + + bus = (cfg_addr >> 24) & 0xff; + devfn = (cfg_addr >> 16) & 0xff; + offset = cfg_addr & 0xffc; + + pci = PCI_HOST_BRIDGE(rc); + + 
/* + * On the AST2600, the RC_H bus number range from 0x80 to 0xFF, with the + * root device and root port assigned to bus 0x80 instead of the standard + * 0x00. To allow the PCI subsystem to correctly discover devices on the + * root bus, bus 0x80 is remapped to 0x00. + */ + if (bus == rc->bus_nr) { + bus = 0; + } + + pdev = pci_find_device(pci->bus, bus, devfn); + if (!pdev) { + s->regs[desc->rdata_reg] = ~0; + goto out; + } + + switch (PCIE_CFG_FMTTYPE_MASK(desc->desc0)) { + case TLP_FMTTYPE_CFGWR0: + case TLP_FMTTYPE_CFGWR1: + byte_en = PCIE_CFG_BYTE_EN(desc->desc1); + val = desc->wdata; + aspeed_pcie_cfg_translate_write(byte_en, &offset, &val, &len); + pci_host_config_write_common(pdev, offset, pci_config_size(pdev), + val, len); + break; + case TLP_FMTTYPE_CFGRD0: + case TLP_FMTTYPE_CFGRD1: + val = pci_host_config_read_common(pdev, offset, + pci_config_size(pdev), 4); + s->regs[desc->rdata_reg] = val; + break; + default: + qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid CFG type. DESC0=0x%x\n", + __func__, desc->desc0); + } + +out: + trace_aspeed_pcie_cfg_rw(s->id, is_write ? 
"write" : "read", bus, devfn, + cfg_addr, val); +} + +static void aspeed_pcie_cfg_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(opaque); + AspeedPCIECfgTxDesc desc; + uint32_t reg = addr >> 2; + uint32_t rc_reg; + + trace_aspeed_pcie_cfg_write(s->id, addr, data); + + switch (reg) { + case R_H2X_CTRL: + if (data & R_H2X_CTRL_CLEAR_RX_MASK) { + s->regs[R_H2X_RDATA] = ~0; + } + break; + case R_H2X_TX_CLEAR: + if (data & R_H2X_TX_CLEAR_IDLE_MASK) { + s->regs[R_H2X_TX_STS] &= ~R_H2X_TX_STS_IDLE_MASK; + } + break; + case R_H2X_TX_STS: + if (data & R_H2X_TX_STS_TRIG_MASK) { + desc.desc0 = s->regs[R_H2X_TX_DESC0]; + desc.desc1 = s->regs[R_H2X_TX_DESC1]; + desc.desc2 = s->regs[R_H2X_TX_DESC2]; + desc.desc3 = s->regs[R_H2X_TX_DESC3]; + desc.wdata = s->regs[R_H2X_TX_DATA]; + desc.rdata_reg = R_H2X_RC_H_RDATA; + aspeed_pcie_cfg_readwrite(s, &desc); + rc_reg = s->rc_regs->int_sts_reg; + s->regs[rc_reg] |= H2X_RC_INT_INTDONE_MASK; + s->regs[R_H2X_TX_STS] |= + BIT(R_H2X_TX_STS_RC_H_TX_COMP_SHIFT); + s->regs[R_H2X_TX_STS] |= R_H2X_TX_STS_IDLE_MASK; + } + break; + /* preserve INTx status */ + case R_H2X_RC_H_INT_STS: + if (data & H2X_RC_INT_INTDONE_MASK) { + s->regs[R_H2X_TX_STS] &= ~R_H2X_TX_STS_RC_H_TX_COMP_MASK; + } + s->regs[reg] &= ~data | H2X_RC_INT_INTX_MASK; + break; + /* + * These status registers are used for notify sources ISR are executed. + * If one source ISR is executed, it will clear one bit. + * If it clear all bits, it means to initialize this register status + * rather than sources ISR are executed. 
+ */ + case R_H2X_RC_H_MSI_STS0: + case R_H2X_RC_H_MSI_STS1: + if (data == 0) { + return ; + } + + s->regs[reg] &= ~data; + if (data == 0xffffffff) { + return; + } + + if (!s->regs[R_H2X_RC_H_MSI_STS0] && + !s->regs[R_H2X_RC_H_MSI_STS1]) { + trace_aspeed_pcie_rc_msi_clear_irq(s->id, 0); + qemu_set_irq(s->rc.irq, 0); + } + break; + default: + s->regs[reg] = data; + break; + } +} + +static const MemoryRegionOps aspeed_pcie_cfg_ops = { + .read = aspeed_pcie_cfg_read, + .write = aspeed_pcie_cfg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_pcie_cfg_instance_init(Object *obj) +{ + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(obj); + + object_initialize_child(obj, "rc", &s->rc, TYPE_ASPEED_PCIE_RC); + object_property_add_alias(obj, "dram", OBJECT(&s->rc), "dram"); + object_property_add_alias(obj, "dram-base", OBJECT(&s->rc), "dram-base"); + + return; +} + +static void aspeed_pcie_cfg_reset(DeviceState *dev) +{ + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(dev); + AspeedPCIECfgClass *apc = ASPEED_PCIE_CFG_GET_CLASS(s); + + memset(s->regs, 0, apc->nr_regs << 2); + memset(s->tlpn_fifo, 0, sizeof(s->tlpn_fifo)); + s->tlpn_idx = 0; +} + +static void aspeed_pcie_cfg_realize(DeviceState *dev, Error **errp) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(dev); + AspeedPCIECfgClass *apc = ASPEED_PCIE_CFG_GET_CLASS(s); + g_autofree char *name = NULL; + + s->rc_regs = &apc->reg_map->rc; + s->regs = g_new(uint32_t, apc->nr_regs); + name = g_strdup_printf(TYPE_ASPEED_PCIE_CFG ".regs.%d", s->id); + memory_region_init_io(&s->mmio, OBJECT(s), apc->reg_ops, s, name, + apc->nr_regs << 2); + sysbus_init_mmio(sbd, &s->mmio); + + object_property_set_int(OBJECT(&s->rc), "bus-nr", + apc->rc_bus_nr, + &error_abort); + object_property_set_bool(OBJECT(&s->rc), "has-rd", + apc->rc_has_rd, + &error_abort); + object_property_set_int(OBJECT(&s->rc), "rp-addr", + apc->rc_rp_addr, + 
&error_abort); + object_property_set_int(OBJECT(&s->rc), "msi-addr", + apc->rc_msi_addr, + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&s->rc), errp)) { + return; + } +} + +static void aspeed_pcie_cfg_unrealize(DeviceState *dev) +{ + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(dev); + + g_free(s->regs); + s->regs = NULL; +} + +static const Property aspeed_pcie_cfg_props[] = { + DEFINE_PROP_UINT32("id", AspeedPCIECfgState, id, 0), +}; + +static void aspeed_pcie_cfg_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedPCIECfgClass *apc = ASPEED_PCIE_CFG_CLASS(klass); + + dc->desc = "ASPEED PCIe Config"; + dc->realize = aspeed_pcie_cfg_realize; + dc->unrealize = aspeed_pcie_cfg_unrealize; + device_class_set_legacy_reset(dc, aspeed_pcie_cfg_reset); + device_class_set_props(dc, aspeed_pcie_cfg_props); + + apc->reg_ops = &aspeed_pcie_cfg_ops; + apc->reg_map = &aspeed_regmap; + apc->nr_regs = 0x100 >> 2; + apc->rc_msi_addr = 0x1e77005C; + apc->rc_bus_nr = 0x80; + apc->rc_has_rd = true; + apc->rc_rp_addr = PCI_DEVFN(8, 0); +} + +static const TypeInfo aspeed_pcie_cfg_info = { + .name = TYPE_ASPEED_PCIE_CFG, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_init = aspeed_pcie_cfg_instance_init, + .instance_size = sizeof(AspeedPCIECfgState), + .class_init = aspeed_pcie_cfg_class_init, + .class_size = sizeof(AspeedPCIECfgClass), +}; + +static void aspeed_2700_pcie_cfg_write(void *opaque, hwaddr addr, + uint64_t data, unsigned int size) +{ + AspeedPCIECfgState *s = ASPEED_PCIE_CFG(opaque); + AspeedPCIECfgTxDesc desc; + uint32_t reg = addr >> 2; + + trace_aspeed_pcie_cfg_write(s->id, addr, data); + + switch (reg) { + case R_H2X_CFGE_INT_STS: + if (data & R_H2X_CFGE_INT_STS_TX_IDEL_MASK) { + s->regs[R_H2X_CFGE_INT_STS] &= ~R_H2X_CFGE_INT_STS_TX_IDEL_MASK; + } + + if (data & R_H2X_CFGE_INT_STS_RX_BUSY_MASK) { + s->regs[R_H2X_CFGE_INT_STS] &= ~R_H2X_CFGE_INT_STS_RX_BUSY_MASK; + } + break; + case R_H2X_CFGI_CTRL: + if (data & 
R_H2X_CFGI_CTRL_FIRE_MASK) { + /* + * Internal access to bridge + * Type and BDF are 0 + */ + desc.desc0 = 0x04000001 | + (ARRAY_FIELD_EX32(s->regs, H2X_CFGI_TLP, WR) << 30); + desc.desc1 = 0x00401000 | + ARRAY_FIELD_EX32(s->regs, H2X_CFGI_TLP, BEN); + desc.desc2 = 0x00000000 | + ARRAY_FIELD_EX32(s->regs, H2X_CFGI_TLP, ADDR); + desc.wdata = s->regs[R_H2X_CFGI_WDATA]; + desc.rdata_reg = R_H2X_CFGI_RDATA; + aspeed_pcie_cfg_readwrite(s, &desc); + } + break; + case R_H2X_CFGE_TLPN: + s->tlpn_fifo[s->tlpn_idx] = data; + s->tlpn_idx = (s->tlpn_idx + 1) % ARRAY_SIZE(s->tlpn_fifo); + break; + case R_H2X_CFGE_CTRL: + if (data & R_H2X_CFGE_CTRL_FIRE_MASK) { + desc.desc0 = s->regs[R_H2X_CFGE_TLP1]; + desc.desc1 = s->tlpn_fifo[0]; + desc.desc2 = s->tlpn_fifo[1]; + desc.wdata = s->tlpn_fifo[2]; + desc.rdata_reg = R_H2X_CFGE_RDATA; + aspeed_pcie_cfg_readwrite(s, &desc); + s->regs[R_H2X_CFGE_INT_STS] |= R_H2X_CFGE_INT_STS_TX_IDEL_MASK; + s->regs[R_H2X_CFGE_INT_STS] |= R_H2X_CFGE_INT_STS_RX_BUSY_MASK; + s->tlpn_idx = 0; + } + break; + + case R_H2X_INT_STS: + s->regs[reg] &= ~data | R_H2X_INT_STS_INTX_MASK; + break; + /* + * These status registers are used for notify sources ISR are executed. + * If one source ISR is executed, it will clear one bit. + * If it clear all bits, it means to initialize this register status + * rather than sources ISR are executed. 
+ */ + case R_H2X_MSI_STS0: + case R_H2X_MSI_STS1: + if (data == 0) { + return ; + } + + s->regs[reg] &= ~data; + if (data == 0xffffffff) { + return; + } + + if (!s->regs[R_H2X_MSI_STS0] && + !s->regs[R_H2X_MSI_STS1]) { + trace_aspeed_pcie_rc_msi_clear_irq(s->id, 0); + qemu_set_irq(s->rc.irq, 0); + } + break; + default: + s->regs[reg] = data; + break; + } +} + +static const MemoryRegionOps aspeed_2700_pcie_cfg_ops = { + .read = aspeed_pcie_cfg_read, + .write = aspeed_2700_pcie_cfg_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_2700_pcie_cfg_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedPCIECfgClass *apc = ASPEED_PCIE_CFG_CLASS(klass); + + dc->desc = "ASPEED 2700 PCIe Config"; + apc->reg_ops = &aspeed_2700_pcie_cfg_ops; + apc->reg_map = &aspeed_2700_regmap; + apc->nr_regs = 0x100 >> 2; + apc->rc_msi_addr = 0x000000F0; + apc->rc_bus_nr = 0; + apc->rc_has_rd = false; + apc->rc_rp_addr = PCI_DEVFN(0, 0); +} + +static const TypeInfo aspeed_2700_pcie_cfg_info = { + .name = TYPE_ASPEED_2700_PCIE_CFG, + .parent = TYPE_ASPEED_PCIE_CFG, + .class_init = aspeed_2700_pcie_cfg_class_init, +}; + +/* + * PCIe PHY + * + * PCIe Host Controller (PCIEH) + */ + +/* AST2600 */ +REG32(PEHR_ID, 0x00) + FIELD(PEHR_ID, DEV, 16, 16) +REG32(PEHR_CLASS_CODE, 0x04) +REG32(PEHR_DATALINK, 0x10) +REG32(PEHR_PROTECT, 0x7C) + FIELD(PEHR_PROTECT, LOCK, 0, 8) +REG32(PEHR_LINK, 0xC0) + FIELD(PEHR_LINK, STS, 5, 1) + +/* AST2700 */ +REG32(PEHR_2700_LINK_GEN2, 0x344) + FIELD(PEHR_2700_LINK_GEN2, STS, 18, 1) +REG32(PEHR_2700_LINK_GEN4, 0x358) + FIELD(PEHR_2700_LINK_GEN4, STS, 8, 1) + +#define ASPEED_PCIE_PHY_UNLOCK 0xA8 + +static uint64_t aspeed_pcie_phy_read(void *opaque, hwaddr addr, + unsigned int size) +{ + AspeedPCIEPhyState *s = ASPEED_PCIE_PHY(opaque); + uint32_t reg = addr >> 2; + uint32_t value = 0; + + value = s->regs[reg]; + + 
trace_aspeed_pcie_phy_read(s->id, addr, value); + + return value; +} + +static void aspeed_pcie_phy_write(void *opaque, hwaddr addr, uint64_t data, + unsigned int size) +{ + AspeedPCIEPhyState *s = ASPEED_PCIE_PHY(opaque); + uint32_t reg = addr >> 2; + + trace_aspeed_pcie_phy_write(s->id, addr, data); + + switch (reg) { + case R_PEHR_PROTECT: + data &= R_PEHR_PROTECT_LOCK_MASK; + s->regs[reg] = !!(data == ASPEED_PCIE_PHY_UNLOCK); + break; + default: + s->regs[reg] = data; + break; + } +} + +static const MemoryRegionOps aspeed_pcie_phy_ops = { + .read = aspeed_pcie_phy_read, + .write = aspeed_pcie_phy_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 4, + }, +}; + +static void aspeed_pcie_phy_reset(DeviceState *dev) +{ + AspeedPCIEPhyState *s = ASPEED_PCIE_PHY(dev); + AspeedPCIEPhyClass *apc = ASPEED_PCIE_PHY_GET_CLASS(s); + + memset(s->regs, 0, apc->nr_regs << 2); + + s->regs[R_PEHR_ID] = + (0x1150 << R_PEHR_ID_DEV_SHIFT) | PCI_VENDOR_ID_ASPEED; + s->regs[R_PEHR_CLASS_CODE] = 0x06040006; + s->regs[R_PEHR_DATALINK] = 0xD7040022; + s->regs[R_PEHR_LINK] = R_PEHR_LINK_STS_MASK; +} + +static void aspeed_pcie_phy_realize(DeviceState *dev, Error **errp) +{ + AspeedPCIEPhyState *s = ASPEED_PCIE_PHY(dev); + AspeedPCIEPhyClass *apc = ASPEED_PCIE_PHY_GET_CLASS(s); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + g_autofree char *name = NULL; + + s->regs = g_new(uint32_t, apc->nr_regs); + name = g_strdup_printf(TYPE_ASPEED_PCIE_PHY ".regs.%d", s->id); + memory_region_init_io(&s->mmio, OBJECT(s), &aspeed_pcie_phy_ops, s, name, + apc->nr_regs << 2); + sysbus_init_mmio(sbd, &s->mmio); +} + +static void aspeed_pcie_phy_unrealize(DeviceState *dev) +{ + AspeedPCIEPhyState *s = ASPEED_PCIE_PHY(dev); + + g_free(s->regs); + s->regs = NULL; +} + +static const Property aspeed_pcie_phy_props[] = { + DEFINE_PROP_UINT32("id", AspeedPCIEPhyState, id, 0), +}; + +static void aspeed_pcie_phy_class_init(ObjectClass *klass, const void *data) +{ + 
DeviceClass *dc = DEVICE_CLASS(klass); + AspeedPCIEPhyClass *apc = ASPEED_PCIE_PHY_CLASS(klass); + + dc->desc = "ASPEED PCIe Phy"; + dc->realize = aspeed_pcie_phy_realize; + dc->unrealize = aspeed_pcie_phy_unrealize; + device_class_set_legacy_reset(dc, aspeed_pcie_phy_reset); + device_class_set_props(dc, aspeed_pcie_phy_props); + + apc->nr_regs = 0x100 >> 2; +} + +static const TypeInfo aspeed_pcie_phy_info = { + .name = TYPE_ASPEED_PCIE_PHY, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(AspeedPCIEPhyState), + .class_init = aspeed_pcie_phy_class_init, + .class_size = sizeof(AspeedPCIEPhyClass), +}; + +static void aspeed_2700_pcie_phy_reset(DeviceState *dev) +{ + AspeedPCIEPhyState *s = ASPEED_PCIE_PHY(dev); + AspeedPCIEPhyClass *apc = ASPEED_PCIE_PHY_GET_CLASS(s); + + memset(s->regs, 0, apc->nr_regs << 2); + + s->regs[R_PEHR_ID] = + (0x1150 << R_PEHR_ID_DEV_SHIFT) | PCI_VENDOR_ID_ASPEED; + s->regs[R_PEHR_CLASS_CODE] = 0x06040011; + s->regs[R_PEHR_2700_LINK_GEN2] = R_PEHR_2700_LINK_GEN2_STS_MASK; + s->regs[R_PEHR_2700_LINK_GEN4] = R_PEHR_2700_LINK_GEN4_STS_MASK; +} + +static void aspeed_2700_pcie_phy_class_init(ObjectClass *klass, + const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + AspeedPCIEPhyClass *apc = ASPEED_PCIE_PHY_CLASS(klass); + + dc->desc = "ASPEED AST2700 PCIe Phy"; + device_class_set_legacy_reset(dc, aspeed_2700_pcie_phy_reset); + + apc->nr_regs = 0x800 >> 2; +} + +static const TypeInfo aspeed_2700_pcie_phy_info = { + .name = TYPE_ASPEED_2700_PCIE_PHY, + .parent = TYPE_ASPEED_PCIE_PHY, + .class_init = aspeed_2700_pcie_phy_class_init, +}; + +static void aspeed_pcie_register_types(void) +{ + type_register_static(&aspeed_pcie_rc_info); + type_register_static(&aspeed_pcie_root_device_info); + type_register_static(&aspeed_pcie_root_port_info); + type_register_static(&aspeed_pcie_cfg_info); + type_register_static(&aspeed_2700_pcie_cfg_info); + type_register_static(&aspeed_pcie_phy_info); + 
type_register_static(&aspeed_2700_pcie_phy_info); +} + +type_init(aspeed_pcie_register_types); + diff --git a/hw/pci-host/astro.c b/hw/pci-host/astro.c index 859e308c577c5..1024ede7b68cc 100644 --- a/hw/pci-host/astro.c +++ b/hw/pci-host/astro.c @@ -424,22 +424,23 @@ static void elroy_reset(DeviceState *dev) } } -static void elroy_pcihost_init(Object *obj) +static void elroy_pcihost_realize(DeviceState *dev, Error **errp) { - ElroyState *s = ELROY_PCI_HOST_BRIDGE(obj); - PCIHostState *phb = PCI_HOST_BRIDGE(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + ElroyState *s = ELROY_PCI_HOST_BRIDGE(dev); + PCIHostState *phb = PCI_HOST_BRIDGE(dev); + SysBusDevice *sbd = SYS_BUS_DEVICE(dev); + Object *obj = OBJECT(s); /* Elroy config access from CPU. */ - memory_region_init_io(&s->this_mem, OBJECT(s), &elroy_chip_ops, + memory_region_init_io(&s->this_mem, obj, &elroy_chip_ops, s, "elroy", 0x2000); /* Elroy PCI config. */ - memory_region_init_io(&phb->conf_mem, OBJECT(phb), - &elroy_config_addr_ops, DEVICE(s), + memory_region_init_io(&phb->conf_mem, obj, + &elroy_config_addr_ops, dev, "pci-conf-idx", 8); - memory_region_init_io(&phb->data_mem, OBJECT(phb), - &elroy_config_data_ops, DEVICE(s), + memory_region_init_io(&phb->data_mem, obj, + &elroy_config_data_ops, dev, "pci-conf-data", 8); memory_region_add_subregion(&s->this_mem, 0x40, &phb->conf_mem); @@ -447,8 +448,8 @@ static void elroy_pcihost_init(Object *obj) &phb->data_mem); /* Elroy PCI bus memory. 
*/ - memory_region_init(&s->pci_mmio, OBJECT(s), "pci-mmio", UINT64_MAX); - memory_region_init_io(&s->pci_io, OBJECT(s), &unassigned_io_ops, obj, + memory_region_init(&s->pci_mmio, obj, "pci-mmio", UINT64_MAX); + memory_region_init_io(&s->pci_io, obj, &unassigned_io_ops, obj, "pci-isa-mmio", ((uint32_t) IOS_DIST_BASE_SIZE) / ROPES_PER_IOC); @@ -459,7 +460,7 @@ static void elroy_pcihost_init(Object *obj) sysbus_init_mmio(sbd, &s->this_mem); - qdev_init_gpio_in(DEVICE(obj), elroy_set_irq, ELROY_IRQS); + qdev_init_gpio_in(dev, elroy_set_irq, ELROY_IRQS); } static const VMStateDescription vmstate_elroy = { @@ -487,6 +488,7 @@ static void elroy_pcihost_class_init(ObjectClass *klass, const void *data) DeviceClass *dc = DEVICE_CLASS(klass); device_class_set_legacy_reset(dc, elroy_reset); + dc->realize = elroy_pcihost_realize; dc->vmsd = &vmstate_elroy; dc->user_creatable = false; } @@ -494,7 +496,6 @@ static void elroy_pcihost_class_init(ObjectClass *klass, const void *data) static const TypeInfo elroy_pcihost_info = { .name = TYPE_ELROY_PCI_HOST_BRIDGE, .parent = TYPE_PCI_HOST_BRIDGE, - .instance_init = elroy_pcihost_init, .instance_size = sizeof(ElroyState), .class_init = elroy_pcihost_class_init, }; diff --git a/hw/pci-host/dino.c b/hw/pci-host/dino.c index 11b353be2eac3..924053499c164 100644 --- a/hw/pci-host/dino.c +++ b/hw/pci-host/dino.c @@ -413,43 +413,7 @@ static void dino_pcihost_reset(DeviceState *dev) static void dino_pcihost_realize(DeviceState *dev, Error **errp) { DinoState *s = DINO_PCI_HOST_BRIDGE(dev); - - /* Set up PCI view of memory: Bus master address space. 
*/ - memory_region_init(&s->bm, OBJECT(s), "bm-dino", 4 * GiB); - memory_region_init_alias(&s->bm_ram_alias, OBJECT(s), - "bm-system", s->memory_as, 0, - 0xf0000000 + DINO_MEM_CHUNK_SIZE); - memory_region_init_alias(&s->bm_pci_alias, OBJECT(s), - "bm-pci", &s->pci_mem, - 0xf0000000 + DINO_MEM_CHUNK_SIZE, - 30 * DINO_MEM_CHUNK_SIZE); - memory_region_init_alias(&s->bm_cpu_alias, OBJECT(s), - "bm-cpu", s->memory_as, 0xfff00000, - 0xfffff); - memory_region_add_subregion(&s->bm, 0, - &s->bm_ram_alias); - memory_region_add_subregion(&s->bm, - 0xf0000000 + DINO_MEM_CHUNK_SIZE, - &s->bm_pci_alias); - memory_region_add_subregion(&s->bm, 0xfff00000, - &s->bm_cpu_alias); - - address_space_init(&s->bm_as, &s->bm, "pci-bm"); -} - -static void dino_pcihost_unrealize(DeviceState *dev) -{ - DinoState *s = DINO_PCI_HOST_BRIDGE(dev); - - address_space_destroy(&s->bm_as); -} - -static void dino_pcihost_init(Object *obj) -{ - DinoState *s = DINO_PCI_HOST_BRIDGE(obj); - PCIHostState *phb = PCI_HOST_BRIDGE(obj); - SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - int i; + PCIHostState *phb = PCI_HOST_BRIDGE(dev); /* Dino PCI access from main memory. */ memory_region_init_io(&s->this_mem, OBJECT(s), &dino_chip_ops, @@ -476,7 +440,7 @@ static void dino_pcihost_init(Object *obj) PCI_DEVFN(0, 0), 32, TYPE_PCI_BUS); /* Set up windows into PCI bus memory. */ - for (i = 1; i < 31; i++) { + for (int i = 1; i < 31; i++) { uint32_t addr = 0xf0000000 + i * DINO_MEM_CHUNK_SIZE; char *name = g_strdup_printf("PCI Outbound Window %d", i); memory_region_init_alias(&s->pci_mem_alias[i], OBJECT(s), @@ -487,9 +451,38 @@ static void dino_pcihost_init(Object *obj) pci_setup_iommu(phb->bus, &dino_iommu_ops, s); - sysbus_init_mmio(sbd, &s->this_mem); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->this_mem); - qdev_init_gpio_in(DEVICE(obj), dino_set_irq, DINO_IRQS); + qdev_init_gpio_in(dev, dino_set_irq, DINO_IRQS); + + /* Set up PCI view of memory: Bus master address space. 
*/ + memory_region_init(&s->bm, OBJECT(s), "bm-dino", 4 * GiB); + memory_region_init_alias(&s->bm_ram_alias, OBJECT(s), + "bm-system", s->memory_as, 0, + 0xf0000000 + DINO_MEM_CHUNK_SIZE); + memory_region_init_alias(&s->bm_pci_alias, OBJECT(s), + "bm-pci", &s->pci_mem, + 0xf0000000 + DINO_MEM_CHUNK_SIZE, + 30 * DINO_MEM_CHUNK_SIZE); + memory_region_init_alias(&s->bm_cpu_alias, OBJECT(s), + "bm-cpu", s->memory_as, 0xfff00000, + 0xfffff); + memory_region_add_subregion(&s->bm, 0, + &s->bm_ram_alias); + memory_region_add_subregion(&s->bm, + 0xf0000000 + DINO_MEM_CHUNK_SIZE, + &s->bm_pci_alias); + memory_region_add_subregion(&s->bm, 0xfff00000, + &s->bm_cpu_alias); + + address_space_init(&s->bm_as, &s->bm, "pci-bm"); +} + +static void dino_pcihost_unrealize(DeviceState *dev) +{ + DinoState *s = DINO_PCI_HOST_BRIDGE(dev); + + address_space_destroy(&s->bm_as); } static const Property dino_pcihost_properties[] = { @@ -511,7 +504,6 @@ static void dino_pcihost_class_init(ObjectClass *klass, const void *data) static const TypeInfo dino_pcihost_info = { .name = TYPE_DINO_PCI_HOST_BRIDGE, .parent = TYPE_PCI_HOST_BRIDGE, - .instance_init = dino_pcihost_init, .instance_size = sizeof(DinoState), .class_init = dino_pcihost_class_init, }; diff --git a/hw/pci-host/meson.build b/hw/pci-host/meson.build index 937a0f72acf94..86b754d0b0d98 100644 --- a/hw/pci-host/meson.build +++ b/hw/pci-host/meson.build @@ -2,6 +2,7 @@ pci_ss = ss.source_set() pci_ss.add(when: 'CONFIG_PAM', if_true: files('pam.c')) pci_ss.add(when: 'CONFIG_PCI_BONITO', if_true: files('bonito.c')) pci_ss.add(when: 'CONFIG_GT64120', if_true: files('gt64120.c')) +pci_ss.add(when: 'CONFIG_PCI_EXPRESS_ASPEED', if_true: files('aspeed_pcie.c')) pci_ss.add(when: 'CONFIG_PCI_EXPRESS_DESIGNWARE', if_true: files('designware.c')) pci_ss.add(when: 'CONFIG_PCI_EXPRESS_GENERIC_BRIDGE', if_true: files('gpex.c')) pci_ss.add(when: ['CONFIG_PCI_EXPRESS_GENERIC_BRIDGE', 'CONFIG_ACPI'], if_true: files('gpex-acpi.c')) diff --git 
a/hw/pci-host/raven.c b/hw/pci-host/raven.c index f8c0be5d21c35..eacffc86d849a 100644 --- a/hw/pci-host/raven.c +++ b/hw/pci-host/raven.c @@ -72,37 +72,29 @@ struct PRePPCIState { #define PCI_IO_BASE_ADDR 0x80000000 /* Physical address on main bus */ -static inline uint32_t raven_pci_io_config(hwaddr addr) +static inline uint32_t raven_idsel_to_addr(hwaddr addr) { - int i; - - for (i = 0; i < 11; i++) { - if ((addr & (1 << (11 + i))) != 0) { - break; - } - } - return (addr & 0x7ff) | (i << 11); + return (ctz16(addr >> 11) << 11) | (addr & 0x7ff); } -static void raven_pci_io_write(void *opaque, hwaddr addr, - uint64_t val, unsigned int size) +static void raven_mmcfg_write(void *opaque, hwaddr addr, uint64_t val, + unsigned int size) { - PREPPCIState *s = opaque; - PCIHostState *phb = PCI_HOST_BRIDGE(s); - pci_data_write(phb->bus, raven_pci_io_config(addr), val, size); + PCIBus *hbus = opaque; + + pci_data_write(hbus, raven_idsel_to_addr(addr), val, size); } -static uint64_t raven_pci_io_read(void *opaque, hwaddr addr, - unsigned int size) +static uint64_t raven_mmcfg_read(void *opaque, hwaddr addr, unsigned int size) { - PREPPCIState *s = opaque; - PCIHostState *phb = PCI_HOST_BRIDGE(s); - return pci_data_read(phb->bus, raven_pci_io_config(addr), size); + PCIBus *hbus = opaque; + + return pci_data_read(hbus, raven_idsel_to_addr(addr), size); } -static const MemoryRegionOps raven_pci_io_ops = { - .read = raven_pci_io_read, - .write = raven_pci_io_write, +static const MemoryRegionOps raven_mmcfg_ops = { + .read = raven_mmcfg_read, + .write = raven_mmcfg_write, .endianness = DEVICE_LITTLE_ENDIAN, }; @@ -260,8 +252,8 @@ static void raven_pcihost_realizefn(DeviceState *d, Error **errp) "pci-conf-data", 4); memory_region_add_subregion(&s->pci_io, 0xcfc, &h->data_mem); - memory_region_init_io(&h->mmcfg, OBJECT(s), &raven_pci_io_ops, s, - "pciio", 0x00400000); + memory_region_init_io(&h->mmcfg, OBJECT(h), &raven_mmcfg_ops, h->bus, + "pci-mmcfg", 0x00400000); 
 memory_region_add_subregion(address_space_mem, 0x80800000, &h->mmcfg); memory_region_init_io(&s->pci_intack, OBJECT(s), &raven_intack_ops, s, diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events index 0a816b9aa129b..a6fd88c2c4667 100644 --- a/hw/pci-host/trace-events +++ b/hw/pci-host/trace-events @@ -1,5 +1,16 @@ # See docs/devel/tracing.rst for syntax documentation. +# aspeed_pcie.c +aspeed_pcie_rc_intx_set_irq(uint32_t id, int num, int level) "%d: num %d set IRQ level %d" +aspeed_pcie_rc_msi_notify(uint32_t id, uint64_t addr, uint64_t data) "%d: addr 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_pcie_rc_msi_set_irq(uint32_t id, uint64_t num, int level) "%d: num 0x%" PRIx64 " set IRQ level %d" +aspeed_pcie_rc_msi_clear_irq(uint32_t id, int level) "%d: clear IRQ level %d" +aspeed_pcie_cfg_read(uint32_t id, uint64_t addr, uint32_t value) "%d: addr 0x%" PRIx64 " value 0x%" PRIx32 +aspeed_pcie_cfg_write(uint32_t id, uint64_t addr, uint32_t value) "%d: addr 0x%" PRIx64 " value 0x%" PRIx32 +aspeed_pcie_cfg_rw(uint32_t id, const char *dir, uint8_t bus, uint8_t devfn, uint64_t addr, uint64_t data) "%d: %s bus:0x%x devfn:0x%x addr 0x%" PRIx64 " data 0x%" PRIx64 +aspeed_pcie_phy_read(uint32_t id, uint64_t addr, uint32_t value) "%d: addr 0x%" PRIx64 " value 0x%" PRIx32 +aspeed_pcie_phy_write(uint32_t id, uint64_t addr, uint32_t value) "%d: addr 0x%" PRIx64 " value 0x%" PRIx32 + # bonito.c bonito_spciconf_small_access(uint64_t addr, unsigned size) "PCI config address is smaller then 32-bit, addr: 0x%"PRIx64", size: %u" diff --git a/hw/pci/pci.c b/hw/pci/pci.c index c70b5ceebaf1f..acc03fd4707cd 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -926,7 +926,7 @@ void pci_device_save(PCIDevice *s, QEMUFile *f) * This makes us compatible with old devices * which never set or clear this bit. 
*/ s->config[PCI_STATUS] &= ~PCI_STATUS_INTERRUPT; - vmstate_save_state(f, &vmstate_pci_device, s, NULL); + vmstate_save_state(f, &vmstate_pci_device, s, NULL, &error_fatal); /* Restore the interrupt status bit. */ pci_update_irq_status(s); } @@ -934,7 +934,8 @@ void pci_device_save(PCIDevice *s, QEMUFile *f) int pci_device_load(PCIDevice *s, QEMUFile *f) { int ret; - ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id); + ret = vmstate_load_state(f, &vmstate_pci_device, s, s->version_id, + &error_fatal); /* Restore the interrupt status bit. */ pci_update_irq_status(s); return ret; @@ -984,14 +985,15 @@ static int pci_parse_devaddr(const char *addr, int *domp, int *busp, slot = val; - if (funcp != NULL) { - if (*e != '.') + if (funcp != NULL && *e != '\0') { + if (*e != '.') { return -1; - + } p = e + 1; val = strtoul(p, &e, 16); - if (e == p) + if (e == p) { return -1; + } func = val; } @@ -1490,9 +1492,6 @@ void pci_register_bar(PCIDevice *pci_dev, int region_num, : pci_get_bus(pci_dev)->address_space_mem; if (pci_is_vf(pci_dev)) { - PCIDevice *pf = pci_dev->exp.sriov_vf.pf; - assert(!pf || type == pf->exp.sriov_pf.vf_bar_type[region_num]); - r->addr = pci_bar_address(pci_dev, region_num, r->type, r->size); if (r->addr != PCI_BAR_UNMAPPED) { memory_region_add_subregion_overlap(r->address_space, @@ -2054,13 +2053,15 @@ bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, int dom, busnr, devfn; PCIDevice *pci_dev; unsigned slot; + unsigned func; + PCIBus *bus; if (!nd) { return false; } - if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, NULL) < 0) { + if (!devaddr || pci_parse_devaddr(devaddr, &dom, &busnr, &slot, &func) < 0) { error_report("Invalid PCI device address %s for device %s", devaddr, model); exit(1); @@ -2071,7 +2072,7 @@ bool pci_init_nic_in_slot(PCIBus *rootbus, const char *model, exit(1); } - devfn = PCI_DEVFN(slot, 0); + devfn = PCI_DEVFN(slot, func); bus = pci_find_bus_nr(rootbus, busnr); if (!bus) { @@ -2909,6 
+2910,19 @@ static void pci_device_get_iommu_bus_devfn(PCIDevice *dev, } } + /* + * When multiple PCI Express Root Buses are defined using pxb-pcie, + * the IOMMU configuration may be specific to each root bus. However, + * pxb-pcie acts as a special root complex whose parent is effectively + * the default root complex(pcie.0). Ensure that we retrieve the + * correct IOMMU ops(if any) in such cases. + */ + if (pci_bus_is_express(iommu_bus) && pci_bus_is_root(iommu_bus)) { + if (parent_bus->iommu_per_bus) { + break; + } + } + iommu_bus = parent_bus; } @@ -2951,7 +2965,7 @@ int pci_iommu_init_iotlb_notifier(PCIDevice *dev, IOMMUNotifier *n, PCIBus *iommu_bus; int devfn; - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && iommu_bus->iommu_ops->init_iotlb_notifier) { iommu_bus->iommu_ops->init_iotlb_notifier(bus, iommu_bus->iommu_opaque, devfn, n, fn, opaque); @@ -3009,7 +3023,7 @@ int pci_pri_request_page(PCIDevice *dev, uint32_t pasid, bool priv_req, return -EPERM; } - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && iommu_bus->iommu_ops->pri_request_page) { return iommu_bus->iommu_ops->pri_request_page(bus, iommu_bus->iommu_opaque, @@ -3033,7 +3047,7 @@ int pci_pri_register_notifier(PCIDevice *dev, uint32_t pasid, return -EPERM; } - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && iommu_bus->iommu_ops->pri_register_notifier) { iommu_bus->iommu_ops->pri_register_notifier(bus, iommu_bus->iommu_opaque, @@ -3050,7 +3064,7 @@ void pci_pri_unregister_notifier(PCIDevice *dev, uint32_t pasid) PCIBus *iommu_bus; int devfn; - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && 
iommu_bus->iommu_ops->pri_unregister_notifier) { iommu_bus->iommu_ops->pri_unregister_notifier(bus, iommu_bus->iommu_opaque, @@ -3082,7 +3096,7 @@ ssize_t pci_ats_request_translation(PCIDevice *dev, uint32_t pasid, return -EPERM; } - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && iommu_bus->iommu_ops->ats_request_translation) { return iommu_bus->iommu_ops->ats_request_translation(bus, iommu_bus->iommu_opaque, @@ -3106,7 +3120,7 @@ int pci_iommu_register_iotlb_notifier(PCIDevice *dev, uint32_t pasid, return -EPERM; } - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && iommu_bus->iommu_ops->register_iotlb_notifier) { iommu_bus->iommu_ops->register_iotlb_notifier(bus, iommu_bus->iommu_opaque, devfn, @@ -3128,7 +3142,7 @@ int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid, return -EPERM; } - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, &bus, &devfn); if (iommu_bus && iommu_bus->iommu_ops->unregister_iotlb_notifier) { iommu_bus->iommu_ops->unregister_iotlb_notifier(bus, iommu_bus->iommu_opaque, @@ -3142,11 +3156,9 @@ int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid, int pci_iommu_get_iotlb_info(PCIDevice *dev, uint8_t *addr_width, uint32_t *min_page_size) { - PCIBus *bus; PCIBus *iommu_bus; - int devfn; - pci_device_get_iommu_bus_devfn(dev, &bus, &iommu_bus, &devfn); + pci_device_get_iommu_bus_devfn(dev, &iommu_bus, NULL, NULL); if (iommu_bus && iommu_bus->iommu_ops->get_iotlb_info) { iommu_bus->iommu_ops->get_iotlb_info(iommu_bus->iommu_opaque, addr_width, min_page_size); @@ -3169,6 +3181,24 @@ void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque) bus->iommu_opaque = opaque; } +/* + * Similar to pci_setup_iommu(), but sets iommu_per_bus to true, + * 
indicating that the IOMMU is specific to this bus. This is used by + * IOMMU implementations that are tied to a specific PCIe root complex. + * + * In QEMU, pxb-pcie behaves as a special root complex whose parent is + * effectively the default root complex (pcie.0). The iommu_per_bus + * is checked in pci_device_get_iommu_bus_devfn() to ensure the correct + * IOMMU ops are returned, avoiding the use of the parent’s IOMMU when + * it's not appropriate. + */ +void pci_setup_iommu_per_bus(PCIBus *bus, const PCIIOMMUOps *ops, + void *opaque) +{ + pci_setup_iommu(bus, ops, opaque); + bus->iommu_per_bus = true; +} + static void pci_dev_get_w64(PCIBus *b, PCIDevice *dev, void *opaque) { Range *range = opaque; diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index eaeb68894e6ec..b302de64191b2 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -1266,6 +1266,14 @@ void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap, dev->exp.pri_cap = offset; } +uint32_t pcie_pri_get_req_alloc(const PCIDevice *dev) +{ + if (!pcie_pri_enabled(dev)) { + return 0; + } + return pci_get_long(dev->config + dev->exp.pri_cap + PCI_PRI_ALLOC_REQ); +} + bool pcie_pri_enabled(const PCIDevice *dev) { if (!pci_is_express(dev) || !dev->exp.pri_cap) { diff --git a/hw/pci/pcie_sriov.c b/hw/pci/pcie_sriov.c index 3ad18744f4a8e..c4f88f097571a 100644 --- a/hw/pci/pcie_sriov.c +++ b/hw/pci/pcie_sriov.c @@ -64,6 +64,27 @@ static void unregister_vfs(PCIDevice *dev) pci_set_word(dev->wmask + dev->exp.sriov_cap + PCI_SRIOV_NUM_VF, 0xffff); } +static void consume_config(PCIDevice *dev) +{ + uint8_t *cfg = dev->config + dev->exp.sriov_cap; + + if (pci_get_word(cfg + PCI_SRIOV_CTRL) & PCI_SRIOV_CTRL_VFE) { + register_vfs(dev); + } else { + uint8_t *wmask = dev->wmask + dev->exp.sriov_cap; + uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF); + uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI; + + unregister_vfs(dev); + + if (num_vfs <= pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)) { + 
wmask_val |= PCI_SRIOV_CTRL_VFE; + } + + pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val); + } +} + static bool pcie_sriov_pf_init_common(PCIDevice *dev, uint16_t offset, uint16_t vf_dev_id, uint16_t init_vfs, uint16_t total_vfs, uint16_t vf_offset, @@ -174,7 +195,9 @@ bool pcie_sriov_pf_init(PCIDevice *dev, uint16_t offset, void pcie_sriov_pf_exit(PCIDevice *dev) { - uint8_t *cfg = dev->config + dev->exp.sriov_cap; + if (dev->exp.sriov_cap == 0) { + return; + } if (dev->exp.sriov_pf.vf_user_created) { uint16_t ven_id = pci_get_word(dev->config + PCI_VENDOR_ID); @@ -190,6 +213,8 @@ void pcie_sriov_pf_exit(PCIDevice *dev) pci_config_set_device_id(dev->exp.sriov_pf.vf[i]->config, vf_dev_id); } } else { + uint8_t *cfg = dev->config + dev->exp.sriov_cap; + unparent_vfs(dev, pci_get_word(cfg + PCI_SRIOV_TOTAL_VF)); } } @@ -221,17 +246,6 @@ void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, dev->exp.sriov_pf.vf_bar_type[region_num] = type; } -void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num, - MemoryRegion *memory) -{ - uint8_t type; - - assert(dev->exp.sriov_vf.pf); - type = dev->exp.sriov_vf.pf->exp.sriov_pf.vf_bar_type[region_num]; - - return pci_register_bar(dev, region_num, type, memory); -} - static gint compare_vf_devfns(gconstpointer a, gconstpointer b) { return (*(PCIDevice **)a)->devfn - (*(PCIDevice **)b)->devfn; @@ -416,30 +430,13 @@ void pcie_sriov_config_write(PCIDevice *dev, uint32_t address, trace_sriov_config_write(dev->name, PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn), off, val, len); - if (range_covers_byte(off, len, PCI_SRIOV_CTRL)) { - if (val & PCI_SRIOV_CTRL_VFE) { - register_vfs(dev); - } else { - unregister_vfs(dev); - } - } else if (range_covers_byte(off, len, PCI_SRIOV_NUM_VF)) { - uint8_t *cfg = dev->config + sriov_cap; - uint8_t *wmask = dev->wmask + sriov_cap; - uint16_t num_vfs = pci_get_word(cfg + PCI_SRIOV_NUM_VF); - uint16_t wmask_val = PCI_SRIOV_CTRL_MSE | PCI_SRIOV_CTRL_ARI; - - if (num_vfs <= pci_get_word(cfg + 
PCI_SRIOV_TOTAL_VF)) { - wmask_val |= PCI_SRIOV_CTRL_VFE; - } - - pci_set_word(wmask + PCI_SRIOV_CTRL, wmask_val); - } + consume_config(dev); } void pcie_sriov_pf_post_load(PCIDevice *dev) { if (dev->exp.sriov_cap) { - register_vfs(dev); + consume_config(dev); } } diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index ced6bbc7404e3..7091d72fd88f8 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -44,6 +44,11 @@ config POWERNV select SSI_M25P80 select PNV_SPI +config PPC405 + bool + default y + depends on PPC + config PPC440 bool default y diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 723c97fad2e16..3d69428f31c0d 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -20,6 +20,7 @@ #include "qemu/guest-random.h" #include "exec/target_page.h" #include "qapi/error.h" +#include "cpu-models.h" #include "e500.h" #include "e500-ccsr.h" #include "net/net.h" @@ -942,9 +943,8 @@ void ppce500_init(MachineState *machine) env = &cpu->env; cs = CPU(cpu); - if (env->mmu_model != POWERPC_MMU_BOOKE206) { - error_report("MMU model %i not supported by this machine", - env->mmu_model); + if (!(POWERPC_CPU_GET_CLASS(cpu)->svr & POWERPC_SVR_E500)) { + error_report("This machine needs a CPU from the e500 family"); exit(1); } diff --git a/hw/ppc/meson.build b/hw/ppc/meson.build index 9893f8adebb0e..170b90ae7d05e 100644 --- a/hw/ppc/meson.build +++ b/hw/ppc/meson.build @@ -57,6 +57,8 @@ ppc_ss.add(when: 'CONFIG_POWERNV', if_true: files( 'pnv_n1_chiplet.c', )) # PowerPC 4xx boards +ppc_ss.add(when: 'CONFIG_PPC405', if_true: files( + 'ppe42_machine.c')) ppc_ss.add(when: 'CONFIG_PPC440', if_true: files( 'ppc440_bamboo.c', 'ppc440_uc.c')) diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 4a49e9d1a8650..f0469cdb8b657 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -21,6 +21,7 @@ #include "qemu/osdep.h" #include "qemu/datadir.h" +#include "qemu/log.h" #include "qemu/units.h" #include "qemu/cutils.h" #include "qapi/error.h" @@ -490,6 +491,37 @@ static void pnv_chip_power10_dt_populate(PnvChip *chip, 
void *fdt) pnv_dt_lpc(chip, fdt, 0, PNV10_LPCM_BASE(chip), PNV10_LPCM_SIZE); } +static void pnv_chip_power11_dt_populate(PnvChip *chip, void *fdt) +{ + static const char compat[] = "ibm,power11-xscom\0ibm,xscom"; + int i; + + pnv_dt_xscom(chip, fdt, 0, + cpu_to_be64(PNV11_XSCOM_BASE(chip)), + cpu_to_be64(PNV11_XSCOM_SIZE), + compat, sizeof(compat)); + + for (i = 0; i < chip->nr_cores; i++) { + PnvCore *pnv_core = chip->cores[i]; + int offset; + + offset = pnv_dt_core(chip, pnv_core, fdt); + + _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", + pa_features_31, sizeof(pa_features_31)))); + + if (pnv_core->big_core) { + i++; /* Big-core groups two QEMU cores */ + } + } + + if (chip->ram_size) { + pnv_dt_memory(fdt, chip->chip_id, chip->ram_start, chip->ram_size); + } + + pnv_dt_lpc(chip, fdt, 0, PNV11_LPCM_BASE(chip), PNV11_LPCM_SIZE); +} + static void pnv_dt_rtc(ISADevice *d, void *fdt, int lpc_off) { uint32_t io_base = d->ioport_id; @@ -822,6 +854,26 @@ static ISABus *pnv_chip_power10_isa_create(PnvChip *chip, Error **errp) return pnv_lpc_isa_create(&chip10->lpc, false, errp); } +static ISABus *pnv_chip_power11_isa_create(PnvChip *chip, Error **errp) +{ + Pnv11Chip *chip11 = PNV11_CHIP(chip); + qemu_irq irq; + + irq = qdev_get_gpio_in(DEVICE(&chip11->psi), PSIHB9_IRQ_LPCHC); + qdev_connect_gpio_out_named(DEVICE(&chip11->lpc), "LPCHC", 0, irq); + + irq = qdev_get_gpio_in(DEVICE(&chip11->psi), PSIHB9_IRQ_LPC_SIRQ0); + qdev_connect_gpio_out_named(DEVICE(&chip11->lpc), "SERIRQ", 0, irq); + irq = qdev_get_gpio_in(DEVICE(&chip11->psi), PSIHB9_IRQ_LPC_SIRQ1); + qdev_connect_gpio_out_named(DEVICE(&chip11->lpc), "SERIRQ", 1, irq); + irq = qdev_get_gpio_in(DEVICE(&chip11->psi), PSIHB9_IRQ_LPC_SIRQ2); + qdev_connect_gpio_out_named(DEVICE(&chip11->lpc), "SERIRQ", 2, irq); + irq = qdev_get_gpio_in(DEVICE(&chip11->psi), PSIHB9_IRQ_LPC_SIRQ3); + qdev_connect_gpio_out_named(DEVICE(&chip11->lpc), "SERIRQ", 3, irq); + + return pnv_lpc_isa_create(&chip11->lpc, false, errp); +} + 
static ISABus *pnv_isa_create(PnvChip *chip, Error **errp) { return PNV_CHIP_GET_CLASS(chip)->isa_create(chip, errp); @@ -885,6 +937,12 @@ static uint64_t pnv_chip_power10_xscom_core_base(PnvChip *chip, return PNV10_XSCOM_EC_BASE(core_id); } +static uint64_t pnv_chip_power11_xscom_core_base(PnvChip *chip, + uint32_t core_id) +{ + return PNV11_XSCOM_EC_BASE(core_id); +} + static bool pnv_match_cpu(const char *default_type, const char *cpu_type) { PowerPCCPUClass *ppc_default = @@ -914,6 +972,16 @@ static void pnv_chip_power10_pic_print_info(PnvChip *chip, GString *buf) pnv_chip_power9_pic_print_info_child, buf); } +static void pnv_chip_power11_pic_print_info(PnvChip *chip, GString *buf) +{ + Pnv11Chip *chip11 = PNV11_CHIP(chip); + + pnv_xive2_pic_print_info(&chip11->xive, buf); + pnv_psi_pic_print_info(&chip11->psi, buf); + object_child_foreach_recursive(OBJECT(chip), + pnv_chip_power9_pic_print_info_child, buf); +} + /* Always give the first 1GB to chip 0 else we won't boot */ static uint64_t pnv_chip_get_ram_size(PnvMachineState *pnv, int chip_id) { @@ -1421,6 +1489,60 @@ static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, xive_tctx_pic_print_info(XIVE_TCTX(pnv_cpu_state(cpu)->intc), buf); } +static void *pnv_chip_power10_intc_get(PnvChip *chip) +{ + return &PNV10_CHIP(chip)->xive; +} + +static void pnv_chip_power11_intc_create(PnvChip *chip, PowerPCCPU *cpu, + Error **errp) +{ + Pnv11Chip *chip11 = PNV11_CHIP(chip); + Error *local_err = NULL; + Object *obj; + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + /* + * The core creates its interrupt presenter but the XIVE2 interrupt + * controller object is initialized afterwards. Hopefully, it's + * only used at runtime. 
+ */ + obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(&chip11->xive), + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + pnv_cpu->intc = obj; +} + +static void pnv_chip_power11_intc_reset(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + xive_tctx_reset(XIVE_TCTX(pnv_cpu->intc)); +} + +static void pnv_chip_power11_intc_destroy(PnvChip *chip, PowerPCCPU *cpu) +{ + PnvCPUState *pnv_cpu = pnv_cpu_state(cpu); + + xive_tctx_destroy(XIVE_TCTX(pnv_cpu->intc)); + pnv_cpu->intc = NULL; +} + +static void pnv_chip_power11_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, + GString *buf) +{ + xive_tctx_pic_print_info(XIVE_TCTX(pnv_cpu_state(cpu)->intc), buf); +} + +static void *pnv_chip_power11_intc_get(PnvChip *chip) +{ + return &PNV11_CHIP(chip)->xive; +} + /* * Allowed core identifiers on a POWER8 Processor Chip : * @@ -1451,6 +1573,8 @@ static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, #define POWER10_CORE_MASK (0xffffffffffffffull) +#define POWER11_CORE_MASK (0xffffffffffffffull) + static void pnv_chip_power8_instance_init(Object *obj) { Pnv8Chip *chip8 = PNV8_CHIP(obj); @@ -1794,12 +1918,83 @@ static void pnv_chip_power9_pec_realize(PnvChip *chip, Error **errp) } } +static uint64_t pnv_handle_sprd_load(CPUPPCState *env) +{ + PowerPCCPU *cpu = env_archcpu(env); + PnvCore *pc = pnv_cpu_state(cpu)->pnv_core; + uint64_t sprc = env->spr[SPR_POWER_SPRC]; + + if (pc->big_core) { + pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1); + } + + switch (sprc & 0x3e0) { + case 0: /* SCRATCH0-3 */ + case 1: /* SCRATCH4-7 */ + return pc->scratch[(sprc >> 3) & 0x7]; + + case 0x1e0: /* core thread state */ + if (env->excp_model == POWERPC_EXCP_POWER9) { + /* + * Only implement for POWER9 because skiboot uses it to check + * big-core mode. Other bits are unimplemented so we would + * prefer to get unimplemented message on POWER10 if it were + * used anywhere. 
+ */ + if (pc->big_core) { + return PPC_BIT(63); + } else { + return 0; + } + } + /* fallthru */ + + default: + qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x" + TARGET_FMT_lx"\n", sprc); + break; + } + return 0; +} + +static void pnv_handle_sprd_store(CPUPPCState *env, uint64_t val) +{ + PowerPCCPU *cpu = env_archcpu(env); + uint64_t sprc = env->spr[SPR_POWER_SPRC]; + PnvCore *pc = pnv_cpu_state(cpu)->pnv_core; + int nr; + + if (pc->big_core) { + pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1); + } + + switch (sprc & 0x3e0) { + case 0: /* SCRATCH0-3 */ + case 1: /* SCRATCH4-7 */ + /* + * Log stores to SCRATCH, because some firmware uses these for + * debugging and logging, but they would normally be read by the BMC, + * which is not implemented in QEMU yet. This gives a way to get at the + * information. Could also dump these upon checkstop. + */ + nr = (sprc >> 3) & 0x7; + pc->scratch[nr] = val; + break; + default: + qemu_log_mask(LOG_UNIMP, "mtSPRD: Unimplemented SPRC:0x" + TARGET_FMT_lx"\n", sprc); + break; + } +} + static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) { PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); Pnv9Chip *chip9 = PNV9_CHIP(dev); PnvChip *chip = PNV_CHIP(dev); Pnv9Psi *psi9 = &chip9->psi; + PowerPCCPU *cpu; + PowerPCCPUClass *cpu_class; Error *local_err = NULL; int i; @@ -1827,6 +2022,12 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp) return; } + /* Set handlers for Special registers, such as SPRD */ + cpu = chip->cores[0]->threads[0]; + cpu_class = POWERPC_CPU_GET_CLASS(cpu); + cpu_class->load_sprd = pnv_handle_sprd_load; + cpu_class->store_sprd = pnv_handle_sprd_store; + /* XIVE interrupt controller (POWER9) */ object_property_set_int(OBJECT(&chip9->xive), "ic-bar", PNV9_XIVE_IC_BASE(chip), &error_fatal); @@ -2078,6 +2279,8 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); PnvChip *chip = PNV_CHIP(dev); Pnv10Chip 
*chip10 = PNV10_CHIP(dev); + PowerPCCPU *cpu; + PowerPCCPUClass *cpu_class; Error *local_err = NULL; int i; @@ -2105,6 +2308,12 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) return; } + /* Set handlers for Special registers, such as SPRD */ + cpu = chip->cores[0]->threads[0]; + cpu_class = POWERPC_CPU_GET_CLASS(cpu); + cpu_class->load_sprd = pnv_handle_sprd_load; + cpu_class->store_sprd = pnv_handle_sprd_store; + /* XIVE2 interrupt controller (POWER10) */ object_property_set_int(OBJECT(&chip10->xive), "ic-bar", PNV10_XIVE2_IC_BASE(chip), &error_fatal); @@ -2264,6 +2473,302 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp) } } +static void pnv_chip_power11_instance_init(Object *obj) +{ + PnvChip *chip = PNV_CHIP(obj); + Pnv11Chip *chip11 = PNV11_CHIP(obj); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); + int i; + + object_initialize_child(obj, "adu", &chip11->adu, TYPE_PNV_ADU); + + /* + * Use Power10 device models for PSI/LPC/OCC/SBE/HOMER as corresponding + * device models for Power11 are same + */ + object_initialize_child(obj, "psi", &chip11->psi, TYPE_PNV10_PSI); + object_initialize_child(obj, "lpc", &chip11->lpc, TYPE_PNV10_LPC); + object_initialize_child(obj, "occ", &chip11->occ, TYPE_PNV10_OCC); + object_initialize_child(obj, "sbe", &chip11->sbe, TYPE_PNV10_SBE); + object_initialize_child(obj, "homer", &chip11->homer, TYPE_PNV10_HOMER); + + object_initialize_child(obj, "xive", &chip11->xive, TYPE_PNV_XIVE2); + object_property_add_alias(obj, "xive-fabric", OBJECT(&chip11->xive), + "xive-fabric"); + object_initialize_child(obj, "chiptod", &chip11->chiptod, + TYPE_PNV11_CHIPTOD); + object_initialize_child(obj, "n1-chiplet", &chip11->n1_chiplet, + TYPE_PNV_N1_CHIPLET); + + chip->num_pecs = pcc->num_pecs; + + for (i = 0; i < chip->num_pecs; i++) { + object_initialize_child(obj, "pec[*]", &chip11->pecs[i], + TYPE_PNV_PHB5_PEC); + } + + for (i = 0; i < pcc->i2c_num_engines; i++) { + object_initialize_child(obj, 
"i2c[*]", &chip11->i2c[i], TYPE_PNV_I2C); + } + + for (i = 0; i < PNV10_CHIP_MAX_PIB_SPIC; i++) { + object_initialize_child(obj, "pib_spic[*]", &chip11->pib_spic[i], + TYPE_PNV_SPI); + } +} + +static void pnv_chip_power11_quad_realize(Pnv11Chip *chip11, Error **errp) +{ + PnvChip *chip = PNV_CHIP(chip11); + int i; + + chip11->nr_quads = DIV_ROUND_UP(chip->nr_cores, 4); + chip11->quads = g_new0(PnvQuad, chip11->nr_quads); + + for (i = 0; i < chip11->nr_quads; i++) { + PnvQuad *eq = &chip11->quads[i]; + + pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4], + PNV_QUAD_TYPE_NAME("power11")); + + pnv_xscom_add_subregion(chip, PNV11_XSCOM_EQ_BASE(eq->quad_id), + &eq->xscom_regs); + + pnv_xscom_add_subregion(chip, PNV11_XSCOM_QME_BASE(eq->quad_id), + &eq->xscom_qme_regs); + } +} + +static void pnv_chip_power11_phb_realize(PnvChip *chip, Error **errp) +{ + Pnv11Chip *chip11 = PNV11_CHIP(chip); + int i; + + for (i = 0; i < chip->num_pecs; i++) { + PnvPhb4PecState *pec = &chip11->pecs[i]; + PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec); + uint32_t pec_cplt_base; + uint32_t pec_nest_base; + uint32_t pec_pci_base; + + object_property_set_int(OBJECT(pec), "index", i, &error_fatal); + object_property_set_int(OBJECT(pec), "chip-id", chip->chip_id, + &error_fatal); + object_property_set_link(OBJECT(pec), "chip", OBJECT(chip), + &error_fatal); + if (!qdev_realize(DEVICE(pec), NULL, errp)) { + return; + } + + pec_cplt_base = pecc->xscom_cplt_base(pec); + pec_nest_base = pecc->xscom_nest_base(pec); + pec_pci_base = pecc->xscom_pci_base(pec); + + pnv_xscom_add_subregion(chip, pec_cplt_base, + &pec->nest_pervasive.xscom_ctrl_regs_mr); + pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr); + pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr); + } +} + +static void pnv_chip_power11_realize(DeviceState *dev, Error **errp) +{ + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(dev); + PnvChip *chip = PNV_CHIP(dev); + Pnv11Chip *chip11 = PNV11_CHIP(dev); + PowerPCCPU 
*cpu; + PowerPCCPUClass *cpu_class; + Error *local_err = NULL; + int i; + + /* XSCOM bridge is first */ + pnv_xscom_init(chip, PNV11_XSCOM_SIZE, PNV11_XSCOM_BASE(chip)); + + pcc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Set handlers for Special registers, such as SPRD */ + cpu = chip->cores[0]->threads[0]; + cpu_class = POWERPC_CPU_GET_CLASS(cpu); + cpu_class->load_sprd = pnv_handle_sprd_load; + cpu_class->store_sprd = pnv_handle_sprd_store; + + /* ADU */ + object_property_set_link(OBJECT(&chip11->adu), "lpc", OBJECT(&chip11->lpc), + &error_abort); + if (!qdev_realize(DEVICE(&chip11->adu), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_ADU_BASE, + &chip11->adu.xscom_regs); + + pnv_chip_power11_quad_realize(chip11, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* XIVE2 interrupt controller */ + object_property_set_int(OBJECT(&chip11->xive), "ic-bar", + PNV11_XIVE2_IC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip11->xive), "esb-bar", + PNV11_XIVE2_ESB_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip11->xive), "end-bar", + PNV11_XIVE2_END_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip11->xive), "nvpg-bar", + PNV11_XIVE2_NVPG_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip11->xive), "nvc-bar", + PNV11_XIVE2_NVC_BASE(chip), &error_fatal); + object_property_set_int(OBJECT(&chip11->xive), "tm-bar", + PNV11_XIVE2_TM_BASE(chip), &error_fatal); + object_property_set_link(OBJECT(&chip11->xive), "chip", OBJECT(chip), + &error_abort); + if (!sysbus_realize(SYS_BUS_DEVICE(&chip11->xive), errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_XIVE2_BASE, + &chip11->xive.xscom_regs); + + /* Processor Service Interface (PSI) Host Bridge */ + object_property_set_int(OBJECT(&chip11->psi), "bar", + PNV11_PSIHB_BASE(chip), &error_fatal); + /* PSI can be configured to 
use 64k ESB pages on Power11 */ + object_property_set_int(OBJECT(&chip11->psi), "shift", XIVE_ESB_64K, + &error_fatal); + if (!qdev_realize(DEVICE(&chip11->psi), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_PSIHB_BASE, + &PNV_PSI(&chip11->psi)->xscom_regs); + + /* LPC */ + if (!qdev_realize(DEVICE(&chip11->lpc), NULL, errp)) { + return; + } + memory_region_add_subregion(get_system_memory(), PNV11_LPCM_BASE(chip), + &chip11->lpc.xscom_regs); + + chip->fw_mr = &chip11->lpc.isa_fw; + chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0", + (uint64_t) PNV11_LPCM_BASE(chip)); + + /* ChipTOD */ + object_property_set_bool(OBJECT(&chip11->chiptod), "primary", + chip->chip_id == 0, &error_abort); + object_property_set_bool(OBJECT(&chip11->chiptod), "secondary", + chip->chip_id == 1, &error_abort); + object_property_set_link(OBJECT(&chip11->chiptod), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip11->chiptod), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_CHIPTOD_BASE, + &chip11->chiptod.xscom_regs); + + /* HOMER (must be created before OCC) */ + object_property_set_link(OBJECT(&chip11->homer), "chip", OBJECT(chip), + &error_abort); + if (!qdev_realize(DEVICE(&chip11->homer), NULL, errp)) { + return; + } + /* Homer Xscom region */ + pnv_xscom_add_subregion(chip, PNV11_XSCOM_PBA_BASE, + &chip11->homer.pba_regs); + /* Homer RAM region */ + memory_region_add_subregion(get_system_memory(), chip11->homer.base, + &chip11->homer.mem); + + /* Create the simplified OCC model */ + object_property_set_link(OBJECT(&chip11->occ), "homer", + OBJECT(&chip11->homer), &error_abort); + if (!qdev_realize(DEVICE(&chip11->occ), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_OCC_BASE, + &chip11->occ.xscom_regs); + qdev_connect_gpio_out(DEVICE(&chip11->occ), 0, qdev_get_gpio_in( + DEVICE(&chip11->psi), PSIHB9_IRQ_OCC)); + + /* OCC SRAM model */ + 
memory_region_add_subregion(get_system_memory(), + PNV11_OCC_SENSOR_BASE(chip), + &chip11->occ.sram_regs); + + /* SBE */ + if (!qdev_realize(DEVICE(&chip11->sbe), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_SBE_CTRL_BASE, + &chip11->sbe.xscom_ctrl_regs); + pnv_xscom_add_subregion(chip, PNV11_XSCOM_SBE_MBOX_BASE, + &chip11->sbe.xscom_mbox_regs); + qdev_connect_gpio_out(DEVICE(&chip11->sbe), 0, qdev_get_gpio_in( + DEVICE(&chip11->psi), PSIHB9_IRQ_PSU)); + + /* N1 chiplet */ + if (!qdev_realize(DEVICE(&chip11->n1_chiplet), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_N1_CHIPLET_CTRL_REGS_BASE, + &chip11->n1_chiplet.nest_pervasive.xscom_ctrl_regs_mr); + + pnv_xscom_add_subregion(chip, PNV11_XSCOM_N1_PB_SCOM_EQ_BASE, + &chip11->n1_chiplet.xscom_pb_eq_mr); + + pnv_xscom_add_subregion(chip, PNV11_XSCOM_N1_PB_SCOM_ES_BASE, + &chip11->n1_chiplet.xscom_pb_es_mr); + + /* PHBs */ + pnv_chip_power11_phb_realize(chip, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * I2C + */ + for (i = 0; i < pcc->i2c_num_engines; i++) { + Object *obj = OBJECT(&chip11->i2c[i]); + + object_property_set_int(obj, "engine", i + 1, &error_fatal); + object_property_set_int(obj, "num-busses", + pcc->i2c_ports_per_engine[i], + &error_fatal); + object_property_set_link(obj, "chip", OBJECT(chip), &error_abort); + if (!qdev_realize(DEVICE(obj), NULL, errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_I2CM_BASE + + (chip11->i2c[i].engine - 1) * + PNV11_XSCOM_I2CM_SIZE, + &chip11->i2c[i].xscom_regs); + qdev_connect_gpio_out(DEVICE(&chip11->i2c[i]), 0, + qdev_get_gpio_in(DEVICE(&chip11->psi), + PSIHB9_IRQ_SBE_I2C)); + } + /* PIB SPI Controller */ + for (i = 0; i < PNV10_CHIP_MAX_PIB_SPIC; i++) { + object_property_set_int(OBJECT(&chip11->pib_spic[i]), "spic_num", + i, &error_fatal); + /* pib_spic[2] connected to 25csm04 which implements 1 byte transfer */ + 
object_property_set_int(OBJECT(&chip11->pib_spic[i]), "transfer_len", + (i == 2) ? 1 : 4, &error_fatal); + object_property_set_int(OBJECT(&chip11->pib_spic[i]), "chip-id", + chip->chip_id, &error_fatal); + if (!sysbus_realize(SYS_BUS_DEVICE(OBJECT + (&chip11->pib_spic[i])), errp)) { + return; + } + pnv_xscom_add_subregion(chip, PNV11_XSCOM_PIB_SPIC_BASE + + i * PNV11_XSCOM_PIB_SPIC_SIZE, + &chip11->pib_spic[i].xscom_spic_regs); + } +} + static void pnv_rainier_i2c_init(PnvMachineState *pnv) { int i; @@ -2315,6 +2820,7 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, const void *data) k->intc_reset = pnv_chip_power10_intc_reset; k->intc_destroy = pnv_chip_power10_intc_destroy; k->intc_print_info = pnv_chip_power10_intc_print_info; + k->intc_get = pnv_chip_power10_intc_get; k->isa_create = pnv_chip_power10_isa_create; k->dt_populate = pnv_chip_power10_dt_populate; k->pic_print_info = pnv_chip_power10_pic_print_info; @@ -2329,6 +2835,40 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, const void *data) &k->parent_realize); } +static uint32_t pnv_chip_power11_xscom_pcba(PnvChip *chip, uint64_t addr) +{ + addr &= (PNV11_XSCOM_SIZE - 1); + return addr >> 3; +} + +static void pnv_chip_power11_class_init(ObjectClass *klass, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvChipClass *k = PNV_CHIP_CLASS(klass); + static const int i2c_ports_per_engine[PNV10_CHIP_MAX_I2C] = {14, 14, 2, 16}; + + k->chip_cfam_id = 0x220da04980000000ull; /* P11 DD2.0 (with NX) */ + k->cores_mask = POWER11_CORE_MASK; + k->get_pir_tir = pnv_get_pir_tir_p10; + k->intc_create = pnv_chip_power11_intc_create; + k->intc_reset = pnv_chip_power11_intc_reset; + k->intc_destroy = pnv_chip_power11_intc_destroy; + k->intc_print_info = pnv_chip_power11_intc_print_info; + k->intc_get = pnv_chip_power11_intc_get; + k->isa_create = pnv_chip_power11_isa_create; + k->dt_populate = pnv_chip_power11_dt_populate; + k->pic_print_info = pnv_chip_power11_pic_print_info; + 
k->xscom_core_base = pnv_chip_power11_xscom_core_base; + k->xscom_pcba = pnv_chip_power11_xscom_pcba; + dc->desc = "PowerNV Chip Power11"; + k->num_pecs = PNV10_CHIP_MAX_PEC; + k->i2c_num_engines = PNV10_CHIP_MAX_I2C; + k->i2c_ports_per_engine = i2c_ports_per_engine; + + device_class_set_parent_realize(dc, pnv_chip_power11_realize, + &k->parent_realize); +} + static void pnv_chip_core_sanitize(PnvMachineState *pnv, PnvChip *chip, Error **errp) { @@ -2608,65 +3148,88 @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf) } } -static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, - XiveTCTXMatch *match) +static bool pnv_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) { PnvMachineState *pnv = PNV_MACHINE(xfb); - int total_count = 0; int i; for (i = 0; i < pnv->num_chips; i++) { Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); XivePresenter *xptr = XIVE_PRESENTER(&chip9->xive); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - int count; - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, - cam_ignore, priority, logic_serv, match); + xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, + cam_ignore, priority, logic_serv, match); + } - if (count < 0) { - return count; - } + return !!match->count; +} + +static bool pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) +{ + PnvMachineState *pnv = PNV_MACHINE(xfb); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); + XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - total_count += count; + 
xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, + cam_ignore, priority, logic_serv, match); } - return total_count; + return !!match->count; } -static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, +static int pnv10_xive_broadcast(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, - XiveTCTXMatch *match) + bool crowd, bool cam_ignore, + uint8_t priority) { PnvMachineState *pnv = PNV_MACHINE(xfb); - int total_count = 0; int i; for (i = 0; i < pnv->num_chips; i++) { Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - int count; - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, - cam_ignore, priority, logic_serv, match); + xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority); + } + return 0; +} - if (count < 0) { - return count; - } +static bool pnv11_xive_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, + XiveTCTXMatch *match) +{ + PnvMachineState *pnv = PNV_MACHINE(xfb); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv11Chip *chip11 = PNV11_CHIP(pnv->chips[i]); + XivePresenter *xptr = XIVE_PRESENTER(&chip11->xive); + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - total_count += count; + xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, + cam_ignore, priority, logic_serv, match); } - return total_count; + return !!match->count; } -static int pnv10_xive_broadcast(XiveFabric *xfb, +static int pnv11_xive_broadcast(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, bool crowd, bool cam_ignore, uint8_t priority) @@ -2675,8 +3238,8 @@ static int pnv10_xive_broadcast(XiveFabric *xfb, int i; for (i = 0; i < pnv->num_chips; i++) { - Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); - XivePresenter *xptr = 
XIVE_PRESENTER(&chip10->xive); + Pnv11Chip *chip11 = PNV11_CHIP(pnv->chips[i]); + XivePresenter *xptr = XIVE_PRESENTER(&chip11->xive); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority); @@ -2858,6 +3421,46 @@ static void pnv_machine_p10_rainier_class_init(ObjectClass *oc, pmc->i2c_init = pnv_rainier_i2c_init; } +static void pnv_machine_power11_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc); + XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc); + static const char compat[] = "qemu,powernv11\0ibm,powernv"; + + static GlobalProperty phb_compat[] = { + { TYPE_PNV_PHB, "version", "5" }, + { TYPE_PNV_PHB_ROOT_PORT, "version", "5" }, + }; + + compat_props_add(mc->compat_props, phb_compat, G_N_ELEMENTS(phb_compat)); + + pmc->compat = compat; + pmc->compat_size = sizeof(compat); + pmc->max_smt_threads = 4; + pmc->has_lpar_per_thread = true; + pmc->quirk_tb_big_core = true; + pmc->dt_power_mgt = pnv_dt_power_mgt; + + xfc->match_nvt = pnv11_xive_match_nvt; + xfc->broadcast = pnv11_xive_broadcast; + + mc->desc = "IBM PowerNV (Non-Virtualized) Power11"; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("power11_v2.0"); + + object_class_property_add_bool(oc, "big-core", + pnv_machine_get_big_core, + pnv_machine_set_big_core); + object_class_property_set_description(oc, "big-core", + "Use big-core (aka fused-core) mode"); + + object_class_property_add_bool(oc, "lpar-per-core", + pnv_machine_get_lpar_per_core, + pnv_machine_set_lpar_per_core); + object_class_property_set_description(oc, "lpar-per-core", + "Use 1 LPAR per core mode"); +} + static void pnv_cpu_do_nmi_on_cpu(CPUState *cs, run_on_cpu_data arg) { CPUPPCState *env = cpu_env(cs); @@ -2963,7 +3566,23 @@ static void pnv_machine_class_init(ObjectClass *oc, const void *data) .parent = TYPE_PNV10_CHIP, \ } +#define DEFINE_PNV11_CHIP_TYPE(type, class_initfn) \ + { \ + .name 
= type, \ + .class_init = class_initfn, \ + .parent = TYPE_PNV11_CHIP, \ + } + static const TypeInfo types[] = { + { + .name = MACHINE_TYPE_NAME("powernv11"), + .parent = TYPE_PNV_MACHINE, + .class_init = pnv_machine_power11_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_XIVE_FABRIC }, + { }, + }, + }, { .name = MACHINE_TYPE_NAME("powernv10-rainier"), .parent = MACHINE_TYPE_NAME("powernv10"), @@ -3018,6 +3637,17 @@ static const TypeInfo types[] = { .abstract = true, }, + /* + * P11 chip and variants + */ + { + .name = TYPE_PNV11_CHIP, + .parent = TYPE_PNV_CHIP, + .instance_init = pnv_chip_power11_instance_init, + .instance_size = sizeof(Pnv11Chip), + }, + DEFINE_PNV11_CHIP_TYPE(TYPE_PNV_CHIP_POWER11, pnv_chip_power11_class_init), + /* * P10 chip and variants */ diff --git a/hw/ppc/pnv_chiptod.c b/hw/ppc/pnv_chiptod.c index b9e9c7ba3dbbe..f887a18cde8d1 100644 --- a/hw/ppc/pnv_chiptod.c +++ b/hw/ppc/pnv_chiptod.c @@ -210,6 +210,22 @@ static void chiptod_power10_broadcast_ttype(PnvChipTOD *sender, } } +static void chiptod_power11_broadcast_ttype(PnvChipTOD *sender, + uint32_t trigger) +{ + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + int i; + + for (i = 0; i < pnv->num_chips; i++) { + Pnv11Chip *chip11 = PNV11_CHIP(pnv->chips[i]); + PnvChipTOD *chiptod = &chip11->chiptod; + + if (chiptod != sender) { + chiptod_receive_ttype(chiptod, trigger); + } + } +} + static PnvCore *pnv_chip_get_core_by_xscom_base(PnvChip *chip, uint32_t xscom_base) { @@ -283,6 +299,12 @@ static PnvCore *chiptod_power10_tx_ttype_target(PnvChipTOD *chiptod, } } +static PnvCore *chiptod_power11_tx_ttype_target(PnvChipTOD *chiptod, + uint64_t val) +{ + return chiptod_power10_tx_ttype_target(chiptod, val); +} + static void pnv_chiptod_xscom_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) { @@ -520,6 +542,42 @@ static const TypeInfo pnv_chiptod_power10_type_info = { } }; +static int pnv_chiptod_power11_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) 
+{ + const char compat[] = "ibm,power-chiptod\0ibm,power11-chiptod"; + + return pnv_chiptod_dt_xscom(dev, fdt, xscom_offset, compat, sizeof(compat)); +} + +static void pnv_chiptod_power11_class_init(ObjectClass *klass, const void *data) +{ + PnvChipTODClass *pctc = PNV_CHIPTOD_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + + dc->desc = "PowerNV ChipTOD Controller (Power11)"; + device_class_set_props(dc, pnv_chiptod_properties); + + xdc->dt_xscom = pnv_chiptod_power11_dt_xscom; + + pctc->broadcast_ttype = chiptod_power11_broadcast_ttype; + pctc->tx_ttype_target = chiptod_power11_tx_ttype_target; + + pctc->xscom_size = PNV_XSCOM_CHIPTOD_SIZE; +} + +static const TypeInfo pnv_chiptod_power11_type_info = { + .name = TYPE_PNV11_CHIPTOD, + .parent = TYPE_PNV_CHIPTOD, + .instance_size = sizeof(PnvChipTOD), + .class_init = pnv_chiptod_power11_class_init, + .interfaces = (const InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + static void pnv_chiptod_reset(void *dev) { PnvChipTOD *chiptod = PNV_CHIPTOD(dev); @@ -579,6 +637,7 @@ static void pnv_chiptod_register_types(void) type_register_static(&pnv_chiptod_type_info); type_register_static(&pnv_chiptod_power9_type_info); type_register_static(&pnv_chiptod_power10_type_info); + type_register_static(&pnv_chiptod_power11_type_info); } type_init(pnv_chiptod_register_types); diff --git a/hw/ppc/pnv_core.c b/hw/ppc/pnv_core.c index 08c20224b97d5..fb2dfc7ba2124 100644 --- a/hw/ppc/pnv_core.c +++ b/hw/ppc/pnv_core.c @@ -473,6 +473,11 @@ static void pnv_core_power10_class_init(ObjectClass *oc, const void *data) pcc->xscom_size = PNV10_XSCOM_EC_SIZE; } +static void pnv_core_power11_class_init(ObjectClass *oc, const void *data) +{ + pnv_core_power10_class_init(oc, data); +} + static void pnv_core_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -504,6 +509,7 @@ static const TypeInfo pnv_core_infos[] = { 
DEFINE_PNV_CORE_TYPE(power8, "power8nvl_v1.0"), DEFINE_PNV_CORE_TYPE(power9, "power9_v2.2"), DEFINE_PNV_CORE_TYPE(power10, "power10_v2.0"), + DEFINE_PNV_CORE_TYPE(power11, "power11_v2.0"), }; DEFINE_TYPES(pnv_core_infos) @@ -725,6 +731,12 @@ static void pnv_quad_power10_class_init(ObjectClass *oc, const void *data) pqc->xscom_qme_size = PNV10_XSCOM_QME_SIZE; } +static void pnv_quad_power11_class_init(ObjectClass *oc, const void *data) +{ + /* Power11 quad is similar to Power10 quad */ + pnv_quad_power10_class_init(oc, data); +} + static void pnv_quad_class_init(ObjectClass *oc, const void *data) { DeviceClass *dc = DEVICE_CLASS(oc); @@ -752,6 +764,11 @@ static const TypeInfo pnv_quad_infos[] = { .name = PNV_QUAD_TYPE_NAME("power10"), .class_init = pnv_quad_power10_class_init, }, + { + .parent = TYPE_PNV_QUAD, + .name = PNV_QUAD_TYPE_NAME("power11"), + .class_init = pnv_quad_power11_class_init, + }, }; DEFINE_TYPES(pnv_quad_infos); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index 43d0d0e7553d9..3e436c704139e 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -190,6 +190,7 @@ static void ppc970_set_irq(void *opaque, int pin, int level) if (level) { trace_ppc_irq_cpu("stop"); cs->halted = 1; + cpu_exit(cs); } else { trace_ppc_irq_cpu("restart"); cs->halted = 0; @@ -386,6 +387,7 @@ static void ppc40x_set_irq(void *opaque, int pin, int level) if (level) { trace_ppc_irq_cpu("stop"); cs->halted = 1; + cpu_exit(cs); } else { trace_ppc_irq_cpu("restart"); cs->halted = 0; diff --git a/hw/ppc/ppc_booke.c b/hw/ppc/ppc_booke.c index 3872ae2822256..13403a56b1a48 100644 --- a/hw/ppc/ppc_booke.c +++ b/hw/ppc/ppc_booke.c @@ -352,7 +352,12 @@ void ppc_booke_timers_init(PowerPCCPU *cpu, uint32_t freq, uint32_t flags) booke_timer = g_new0(booke_timer_t, 1); cpu->env.tb_env = tb_env; - tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED; + if (flags & PPC_TIMER_PPE) { + /* PPE's use a modified version of the booke behavior */ + tb_env->flags = flags | 
PPC_DECR_UNDERFLOW_TRIGGERED; + } else { + tb_env->flags = flags | PPC_TIMER_BOOKE | PPC_DECR_ZERO_TRIGGERED; + } tb_env->tb_freq = freq; tb_env->decr_freq = freq; diff --git a/hw/ppc/ppce500_spin.c b/hw/ppc/ppce500_spin.c index 2310f62a91e4a..bc70e50e926eb 100644 --- a/hw/ppc/ppce500_spin.c +++ b/hw/ppc/ppce500_spin.c @@ -99,8 +99,7 @@ static void spin_kick(CPUState *cs, run_on_cpu_data data) cs->halted = 0; cs->exception_index = -1; - cs->stopped = false; - qemu_cpu_kick(cs); + cpu_resume(cs); } static void spin_write(void *opaque, hwaddr addr, uint64_t value, diff --git a/hw/ppc/ppe42_machine.c b/hw/ppc/ppe42_machine.c new file mode 100644 index 0000000000000..f14a91b4e4dbd --- /dev/null +++ b/hw/ppc/ppe42_machine.c @@ -0,0 +1,101 @@ +/* + * Test Machine for the IBM PPE42 processor + * + * Copyright (c) 2025, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/error-report.h" +#include "system/address-spaces.h" +#include "hw/boards.h" +#include "hw/ppc/ppc.h" +#include "system/system.h" +#include "system/reset.h" +#include "system/kvm.h" +#include "qapi/error.h" + +#define TYPE_PPE42_MACHINE MACHINE_TYPE_NAME("ppe42_machine") +typedef MachineClass Ppe42MachineClass; +typedef struct Ppe42MachineState Ppe42MachineState; +DECLARE_OBJ_CHECKERS(Ppe42MachineState, Ppe42MachineClass, + PPE42_MACHINE, TYPE_PPE42_MACHINE) + +struct Ppe42MachineState { + MachineState parent_obj; + + PowerPCCPU cpu; +}; + +static void main_cpu_reset(void *opaque) +{ + PowerPCCPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +static void ppe42_machine_init(MachineState *machine) +{ + Ppe42MachineState *pms = PPE42_MACHINE(machine); + PowerPCCPU *cpu = &pms->cpu; + + if (kvm_enabled()) { + error_report("machine %s does not support the KVM accelerator", + MACHINE_GET_CLASS(machine)->name); + exit(EXIT_FAILURE); + } + if (machine->ram_size > 512 * KiB) { + error_report("RAM size more than 512 KiB is not 
supported"); + exit(1); + } + + /* init CPU */ + object_initialize_child(OBJECT(pms), "cpu", cpu, machine->cpu_type); + if (!qdev_realize(DEVICE(cpu), NULL, &error_fatal)) { + return; + } + + qemu_register_reset(main_cpu_reset, cpu); + + /* This sets the decrementer timebase */ + ppc_booke_timers_init(cpu, 37500000, PPC_TIMER_PPE); + + /* RAM */ + memory_region_add_subregion(get_system_memory(), 0xfff80000, machine->ram); +} + + +static void ppe42_machine_class_init(ObjectClass *oc, const void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + static const char * const valid_cpu_types[] = { + POWERPC_CPU_TYPE_NAME("PPE42"), + POWERPC_CPU_TYPE_NAME("PPE42X"), + POWERPC_CPU_TYPE_NAME("PPE42XM"), + NULL, + }; + + mc->desc = "PPE42 Test Machine"; + mc->init = ppe42_machine_init; + mc->default_cpu_type = POWERPC_CPU_TYPE_NAME("PPE42XM"); + mc->valid_cpu_types = valid_cpu_types; + mc->default_ram_id = "ram"; + mc->default_ram_size = 512 * KiB; +} + +static const TypeInfo ppe42_machine_info = { + .name = TYPE_PPE42_MACHINE, + .parent = TYPE_MACHINE, + .instance_size = sizeof(Ppe42MachineState), + .class_init = ppe42_machine_class_init, + .class_size = sizeof(Ppe42MachineClass), +}; + +static void ppe42_machine_register_types(void) +{ + type_register_static(&ppe42_machine_info); +} + +type_init(ppe42_machine_register_types); diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index 982e40e53e1a6..c730cb3429e66 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -308,6 +308,13 @@ static void ibm_40p_init(MachineState *machine) sysbus_connect_irq(pcihost, 0, qdev_get_gpio_in(i82378_dev, 15)); isa_bus = ISA_BUS(qdev_get_child_bus(i82378_dev, "isa.0")); + /* system control ports */ + isa_dev = isa_new("prep-systemio"); + dev = DEVICE(isa_dev); + qdev_prop_set_uint32(dev, "ibm-planar-id", 0xfc); + qdev_prop_set_uint32(dev, "equipment", 0xc0); + isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); + /* Memory controller */ isa_dev = isa_new("rs6000-mc"); dev = DEVICE(isa_dev); @@ -333,7 
+340,6 @@ static void ibm_40p_init(MachineState *machine) dev = DEVICE(isa_dev); qdev_prop_set_uint32(dev, "iobase", 0x830); qdev_prop_set_uint32(dev, "irq", 10); - if (machine->audiodev) { qdev_prop_set_string(dev, "audiodev", machine->audiodev); } @@ -344,14 +350,7 @@ static void ibm_40p_init(MachineState *machine) qdev_prop_set_uint32(dev, "config", 12); isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); - isa_dev = isa_new("prep-systemio"); - dev = DEVICE(isa_dev); - qdev_prop_set_uint32(dev, "ibm-planar-id", 0xfc); - qdev_prop_set_uint32(dev, "equipment", 0xc0); - isa_realize_and_unref(isa_dev, isa_bus, &error_fatal); - - dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(1, 0), - "lsi53c810")); + dev = DEVICE(pci_create_simple(pci_bus, PCI_DEVFN(1, 0), "lsi53c810")); lsi53c8xx_handle_legacy_cmdline(dev); qdev_connect_gpio_out(dev, 0, qdev_get_gpio_in(i82378_dev, 13)); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 40f53ad7b3444..e0a2e5a984d64 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -77,7 +77,6 @@ #include "hw/virtio/virtio-scsi.h" #include "hw/virtio/vhost-scsi-common.h" -#include "system/ram_addr.h" #include "system/confidential-guest-support.h" #include "hw/usb.h" #include "qemu/config-file.h" @@ -907,6 +906,7 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) int rtas; GString *hypertas = g_string_sized_new(256); GString *qemu_hypertas = g_string_sized_new(256); + uint64_t max_device_addr = 0; uint32_t lrdr_capacity[] = { 0, 0, @@ -917,13 +917,15 @@ static void spapr_dt_rtas(SpaprMachineState *spapr, void *fdt) /* Do we have device memory? 
*/ if (MACHINE(spapr)->device_memory) { - uint64_t max_device_addr = MACHINE(spapr)->device_memory->base + + max_device_addr = MACHINE(spapr)->device_memory->base + memory_region_size(&MACHINE(spapr)->device_memory->mr); - - lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32); - lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff); + } else if (ms->ram_size == ms->maxram_size) { + max_device_addr = ms->ram_size; } + lrdr_capacity[0] = cpu_to_be32(max_device_addr >> 32); + lrdr_capacity[1] = cpu_to_be32(max_device_addr & 0xffffffff); + _FDT(rtas = fdt_add_subnode(fdt, 0, "rtas")); /* hypertas */ @@ -2815,7 +2817,7 @@ static void spapr_machine_init(MachineState *machine) int i; MemoryRegion *sysmem = get_system_memory(); long load_limit, fw_size; - Error *resize_hpt_err = NULL; + Error *errp = NULL; NICInfo *nd; if (!filename) { @@ -2843,7 +2845,7 @@ static void spapr_machine_init(MachineState *machine) /* Determine capabilities to run with */ spapr_caps_init(spapr); - kvmppc_check_papr_resize_hpt(&resize_hpt_err); + kvmppc_check_papr_resize_hpt(&errp); if (spapr->resize_hpt == SPAPR_RESIZE_HPT_DEFAULT) { /* * If the user explicitly requested a mode we should either @@ -2851,10 +2853,10 @@ static void spapr_machine_init(MachineState *machine) * it's not set explicitly, we reset our mode to something * that works */ - if (resize_hpt_err) { + if (errp) { spapr->resize_hpt = SPAPR_RESIZE_HPT_DISABLED; - error_free(resize_hpt_err); - resize_hpt_err = NULL; + error_free(errp); + errp = NULL; } else { spapr->resize_hpt = smc->resize_hpt_default; } @@ -2862,14 +2864,14 @@ static void spapr_machine_init(MachineState *machine) assert(spapr->resize_hpt != SPAPR_RESIZE_HPT_DEFAULT); - if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && resize_hpt_err) { + if ((spapr->resize_hpt != SPAPR_RESIZE_HPT_DISABLED) && errp) { /* * User requested HPT resize, but this host can't supply it. 
Bail out */ - error_report_err(resize_hpt_err); + error_report_err(errp); exit(1); } - error_free(resize_hpt_err); + error_free(errp); spapr->rma_size = spapr_rma_size(spapr, &error_fatal); @@ -4468,21 +4470,14 @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf) /* * This is a XIVE only operation */ -static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match) +static bool spapr_match_nvt(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match) { SpaprMachineState *spapr = SPAPR_MACHINE(xfb); XivePresenter *xptr = XIVE_PRESENTER(spapr->active_intc); XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); - int count; - - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore, - priority, logic_serv, match); - if (count < 0) { - return count; - } /* * When we implement the save and restore of the thread interrupt @@ -4493,12 +4488,14 @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, * Until this is done, the sPAPR machine should find at least one * matching context always. 
*/ - if (count == 0) { + if (!xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore, + priority, logic_serv, match)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVT %x/%x is not dispatched\n", nvt_blk, nvt_idx); + return false; } - return count; + return true; } int spapr_get_vcpu_id(PowerPCCPU *cpu) @@ -4766,15 +4763,26 @@ static void spapr_machine_latest_class_options(MachineClass *mc) #define DEFINE_SPAPR_MACHINE(major, minor) \ DEFINE_SPAPR_MACHINE_IMPL(false, major, minor) +/* + * pseries-10.2 + */ +static void spapr_machine_10_2_class_options(MachineClass *mc) +{ + /* Defaults for the latest behaviour inherited from the base class */ +} + +DEFINE_SPAPR_MACHINE_AS_LATEST(10, 2); + /* * pseries-10.1 */ static void spapr_machine_10_1_class_options(MachineClass *mc) { - /* Defaults for the latest behaviour inherited from the base class */ + spapr_machine_10_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_1, hw_compat_10_1_len); } -DEFINE_SPAPR_MACHINE_AS_LATEST(10, 1); +DEFINE_SPAPR_MACHINE(10, 1); /* * pseries-10.0 diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index f2f5722d8ad4f..0f94c192fd480 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -27,7 +27,6 @@ #include "qapi/error.h" #include "qapi/visitor.h" #include "system/hw_accel.h" -#include "system/ram_addr.h" #include "target/ppc/cpu.h" #include "target/ppc/mmu-hash64.h" #include "cpu-models.h" diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 1e936f35e4488..8c1e0a4817b17 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -8,7 +8,7 @@ #include "qemu/main-loop.h" #include "qemu/module.h" #include "qemu/error-report.h" -#include "exec/tb-flush.h" +#include "exec/translation-block.h" #include "exec/target_page.h" #include "helper_regs.h" #include "hw/ppc/ppc.h" @@ -301,7 +301,7 @@ static target_ulong h_page_init(PowerPCCPU *cpu, SpaprMachineState *spapr, if (kvm_enabled()) { kvmppc_icbi_range(cpu, pdst, len); } else if 
(tcg_enabled()) { - tb_flush(CPU(cpu)); + tb_invalidate_phys_range(CPU(cpu), dst, dst + len - 1); } else { g_assert_not_reached(); } @@ -509,8 +509,8 @@ static target_ulong h_cede(PowerPCCPU *cpu, SpaprMachineState *spapr, if (!cpu_has_work(cs)) { cs->halted = 1; cs->exception_index = EXCP_HLT; - cs->exit_request = 1; ppc_maybe_interrupt(env); + cpu_exit(cs); } return H_SUCCESS; @@ -531,8 +531,8 @@ static target_ulong h_confer_self(PowerPCCPU *cpu) } cs->halted = 1; cs->exception_index = EXCP_HALTED; - cs->exit_request = 1; ppc_maybe_interrupt(&cpu->env); + cpu_exit(cs); return H_SUCCESS; } @@ -624,8 +624,7 @@ static target_ulong h_confer(PowerPCCPU *cpu, SpaprMachineState *spapr, } cs->exception_index = EXCP_YIELD; - cs->exit_request = 1; - cpu_loop_exit(cs); + cpu_exit(cs); return H_SUCCESS; } diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 1ac1185825e84..f9095552e865f 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -34,7 +34,6 @@ #include "hw/pci/pci_host.h" #include "hw/ppc/spapr.h" #include "hw/pci-host/spapr.h" -#include "system/ram_addr.h" #include #include "trace.h" #include "qemu/error-report.h" diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index e318d0d912f3e..a748a0bf4c987 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -24,7 +24,7 @@ #include "hw/pci-host/spapr.h" #include "hw/pci/msix.h" #include "hw/pci/pci_device.h" -#include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-container-legacy.h" #include "qemu/error-report.h" #include CONFIG_DEVICES /* CONFIG_VFIO_PCI */ @@ -32,7 +32,7 @@ * Interfaces for IBM EEH (Enhanced Error Handling) */ #ifdef CONFIG_VFIO_PCI -static bool vfio_eeh_container_ok(VFIOContainer *container) +static bool vfio_eeh_container_ok(VFIOLegacyContainer *container) { /* * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO @@ -60,7 +60,7 @@ static bool vfio_eeh_container_ok(VFIOContainer *container) return true; } -static int vfio_eeh_container_op(VFIOContainer 
*container, uint32_t op) +static int vfio_eeh_container_op(VFIOLegacyContainer *container, uint32_t op) { struct vfio_eeh_pe_op pe_op = { .argsz = sizeof(pe_op), @@ -83,10 +83,10 @@ static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op) return ret; } -static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) +static VFIOLegacyContainer *vfio_eeh_as_container(AddressSpace *as) { VFIOAddressSpace *space = vfio_address_space_get(as); - VFIOContainerBase *bcontainer = NULL; + VFIOContainer *bcontainer = NULL; if (QLIST_EMPTY(&space->containers)) { /* No containers to act on */ @@ -106,19 +106,19 @@ static VFIOContainer *vfio_eeh_as_container(AddressSpace *as) out: vfio_address_space_put(space); - return container_of(bcontainer, VFIOContainer, bcontainer); + return VFIO_IOMMU_LEGACY(bcontainer); } static bool vfio_eeh_as_ok(AddressSpace *as) { - VFIOContainer *container = vfio_eeh_as_container(as); + VFIOLegacyContainer *container = vfio_eeh_as_container(as); return (container != NULL) && vfio_eeh_container_ok(container); } static int vfio_eeh_as_op(AddressSpace *as, uint32_t op) { - VFIOContainer *container = vfio_eeh_as_container(as); + VFIOLegacyContainer *container = vfio_eeh_as_container(as); if (!container) { return -ENODEV; diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index 78309dbb09dc4..143bc8c379479 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -221,7 +221,7 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr, cs->halted = 1; ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm); kvmppc_set_reg_ppc_online(cpu, 0); - qemu_cpu_kick(cs); + cpu_exit(cs); } static void rtas_ibm_suspend_me(PowerPCCPU *cpu, SpaprMachineState *spapr, diff --git a/hw/remote/memory.c b/hw/remote/memory.c index 00193a552fa7d..8195aa5fb83db 100644 --- a/hw/remote/memory.c +++ b/hw/remote/memory.c @@ -11,7 +11,6 @@ #include "qemu/osdep.h" #include "hw/remote/memory.h" -#include "system/ram_addr.h" #include "qapi/error.h" 
static void remote_sysmem_reset(void) diff --git a/hw/remote/proxy-memory-listener.c b/hw/remote/proxy-memory-listener.c index 30ac74961dd3a..e1a52d24f0bfe 100644 --- a/hw/remote/proxy-memory-listener.c +++ b/hw/remote/proxy-memory-listener.c @@ -12,7 +12,6 @@ #include "qemu/range.h" #include "system/memory.h" #include "exec/cpu-common.h" -#include "system/ram_addr.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/remote/mpqemu-link.h" diff --git a/hw/remote/proxy.c b/hw/remote/proxy.c index b0165aa2a1d71..18e0f7a064f26 100644 --- a/hw/remote/proxy.c +++ b/hw/remote/proxy.c @@ -112,8 +112,12 @@ static void pci_proxy_dev_realize(PCIDevice *device, Error **errp) return; } + if (!qio_channel_set_blocking(dev->ioc, true, errp)) { + object_unref(dev->ioc); + return; + } + qemu_mutex_init(&dev->io_mutex); - qio_channel_set_blocking(dev->ioc, true, NULL); pci_conf[PCI_LATENCY_TIMER] = 0xff; pci_conf[PCI_INTERRUPT_PIN] = 0x01; diff --git a/hw/remote/remote-obj.c b/hw/remote/remote-obj.c index 85882902d7fae..3402068ab9e84 100644 --- a/hw/remote/remote-obj.c +++ b/hw/remote/remote-obj.c @@ -107,7 +107,11 @@ static void remote_object_machine_done(Notifier *notifier, void *data) error_report_err(err); return; } - qio_channel_set_blocking(ioc, false, NULL); + if (!qio_channel_set_blocking(ioc, false, &err)) { + error_report_err(err); + object_unref(OBJECT(ioc)); + return; + } o->dev = dev; diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c index ea6165ebdcea3..216b4876e245f 100644 --- a/hw/remote/vfio-user-obj.c +++ b/hw/remote/vfio-user-obj.c @@ -75,12 +75,17 @@ OBJECT_DECLARE_TYPE(VfuObject, VfuObjectClass, VFU_OBJECT) */ #define VFU_OBJECT_ERROR(o, fmt, ...) \ { \ + error_report((fmt), ## __VA_ARGS__); \ if (vfu_object_auto_shutdown()) { \ - error_setg(&error_abort, (fmt), ## __VA_ARGS__); \ - } else { \ - error_report((fmt), ## __VA_ARGS__); \ + /* \ + * FIXME This looks inappropriate. 
The error is serious \ + * enough programming error to warrant aborting the process \ + * when auto-shutdown is enabled, yet harmless enough to \ + * permit carrying on when it's disabled. Makes no sense. \ + */ \ + abort(); \ } \ - } \ + } struct VfuObjectClass { ObjectClass parent_class; diff --git a/hw/riscv/riscv-iommu.c b/hw/riscv/riscv-iommu.c index 96a7fbdefcf3b..b33c7fe3259ef 100644 --- a/hw/riscv/riscv-iommu.c +++ b/hw/riscv/riscv-iommu.c @@ -558,6 +558,7 @@ static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s, MemTxResult res; dma_addr_t addr; uint64_t intn; + size_t offset; uint32_t n190; uint64_t pte[2]; int fault_type = RISCV_IOMMU_FQ_TTYPE_UADDR_WR; @@ -565,16 +566,18 @@ static MemTxResult riscv_iommu_msi_write(RISCVIOMMUState *s, /* Interrupt File Number */ intn = riscv_iommu_pext_u64(PPN_DOWN(gpa), ctx->msi_addr_mask); - if (intn >= 256) { + offset = intn * sizeof(pte); + + /* fetch MSI PTE */ + addr = PPN_PHYS(get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_PPN)); + if (addr & offset) { /* Interrupt file number out of range */ res = MEMTX_ACCESS_ERROR; cause = RISCV_IOMMU_FQ_CAUSE_MSI_LOAD_FAULT; goto err; } - /* fetch MSI PTE */ - addr = PPN_PHYS(get_field(ctx->msiptp, RISCV_IOMMU_DC_MSIPTP_PPN)); - addr = addr | (intn * sizeof(pte)); + addr |= offset; res = dma_memory_read(s->target_as, addr, &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED); if (res != MEMTX_OK) { @@ -866,6 +869,145 @@ static bool riscv_iommu_validate_process_ctx(RISCVIOMMUState *s, return true; } +/** + * pdt_memory_read: PDT wrapper of dma_memory_read. 
+ * + * @s: IOMMU Device State + * @ctx: Device Translation Context with devid and pasid set + * @addr: address within that address space + * @buf: buffer with the data transferred + * @len: length of the data transferred + * @attrs: memory transaction attributes + */ +static MemTxResult pdt_memory_read(RISCVIOMMUState *s, + RISCVIOMMUContext *ctx, + dma_addr_t addr, + void *buf, dma_addr_t len, + MemTxAttrs attrs) +{ + uint64_t gatp_mode, pte; + struct { + unsigned char step; + unsigned char levels; + unsigned char ptidxbits; + unsigned char ptesize; + } sc; + MemTxResult ret; + dma_addr_t base = addr; + + /* G stages translation mode */ + gatp_mode = get_field(ctx->gatp, RISCV_IOMMU_ATP_MODE_FIELD); + if (gatp_mode == RISCV_IOMMU_DC_IOHGATP_MODE_BARE) { + goto out; + } + + /* G stages translation tables root pointer */ + base = PPN_PHYS(get_field(ctx->gatp, RISCV_IOMMU_ATP_PPN_FIELD)); + + /* Start at step 0 */ + sc.step = 0; + + if (s->fctl & RISCV_IOMMU_FCTL_GXL) { + /* 32bit mode for GXL == 1 */ + switch (gatp_mode) { + case RISCV_IOMMU_DC_IOHGATP_MODE_SV32X4: + if (!(s->cap & RISCV_IOMMU_CAP_SV32X4)) { + return MEMTX_ACCESS_ERROR; + } + sc.levels = 2; + sc.ptidxbits = 10; + sc.ptesize = 4; + break; + default: + return MEMTX_ACCESS_ERROR; + } + } else { + /* 64bit mode for GXL == 0 */ + switch (gatp_mode) { + case RISCV_IOMMU_DC_IOHGATP_MODE_SV39X4: + if (!(s->cap & RISCV_IOMMU_CAP_SV39X4)) { + return MEMTX_ACCESS_ERROR; + } + sc.levels = 3; + sc.ptidxbits = 9; + sc.ptesize = 8; + break; + case RISCV_IOMMU_DC_IOHGATP_MODE_SV48X4: + if (!(s->cap & RISCV_IOMMU_CAP_SV48X4)) { + return MEMTX_ACCESS_ERROR; + } + sc.levels = 4; + sc.ptidxbits = 9; + sc.ptesize = 8; + break; + case RISCV_IOMMU_DC_IOHGATP_MODE_SV57X4: + if (!(s->cap & RISCV_IOMMU_CAP_SV57X4)) { + return MEMTX_ACCESS_ERROR; + } + sc.levels = 5; + sc.ptidxbits = 9; + sc.ptesize = 8; + break; + default: + return MEMTX_ACCESS_ERROR; + } + } + + do { + const unsigned va_bits = (sc.step ? 
0 : 2) + sc.ptidxbits; + const unsigned va_skip = TARGET_PAGE_BITS + sc.ptidxbits * + (sc.levels - 1 - sc.step); + const unsigned idx = (addr >> va_skip) & ((1 << va_bits) - 1); + const dma_addr_t pte_addr = base + idx * sc.ptesize; + + /* Address range check before first level lookup */ + if (!sc.step) { + const uint64_t va_mask = (1ULL << (va_skip + va_bits)) - 1; + if ((addr & va_mask) != addr) { + return MEMTX_ACCESS_ERROR; + } + } + + /* Read page table entry */ + if (sc.ptesize == 4) { + uint32_t pte32 = 0; + ret = ldl_le_dma(s->target_as, pte_addr, &pte32, attrs); + pte = pte32; + } else { + ret = ldq_le_dma(s->target_as, pte_addr, &pte, attrs); + } + if (ret != MEMTX_OK) { + return ret; + } + + sc.step++; + hwaddr ppn = pte >> PTE_PPN_SHIFT; + + if (!(pte & PTE_V)) { + return MEMTX_ACCESS_ERROR; /* Invalid PTE */ + } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { + base = PPN_PHYS(ppn); /* Inner PTE, continue walking */ + } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { + return MEMTX_ACCESS_ERROR; /* Reserved leaf PTE flags: PTE_W */ + } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { + return MEMTX_ACCESS_ERROR; /* Reserved leaf PTE flags: PTE_W + PTE_X */ + } else if (ppn & ((1ULL << (va_skip - TARGET_PAGE_BITS)) - 1)) { + return MEMTX_ACCESS_ERROR; /* Misaligned PPN */ + } else { + /* Leaf PTE, translation completed. 
*/ + base = PPN_PHYS(ppn) | (addr & ((1ULL << va_skip) - 1)); + break; + } + + if (sc.step == sc.levels) { + return MEMTX_ACCESS_ERROR; /* Can't find leaf PTE */ + } + } while (1); + +out: + return dma_memory_read(s->target_as, base, buf, len, attrs); +} + /* * RISC-V IOMMU Device Context Loopkup - Device Directory Tree Walk * @@ -1038,7 +1180,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx) */ const int split = depth * 9 + 8; addr |= ((ctx->process_id >> split) << 3) & ~TARGET_PAGE_MASK; - if (dma_memory_read(s->target_as, addr, &de, sizeof(de), + if (pdt_memory_read(s, ctx, addr, &de, sizeof(de), MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT; } @@ -1053,7 +1195,7 @@ static int riscv_iommu_ctx_fetch(RISCVIOMMUState *s, RISCVIOMMUContext *ctx) /* Leaf entry in PDT */ addr |= (ctx->process_id << 4) & ~TARGET_PAGE_MASK; - if (dma_memory_read(s->target_as, addr, &dc.ta, sizeof(uint64_t) * 2, + if (pdt_memory_read(s, ctx, addr, &dc.ta, sizeof(uint64_t) * 2, MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { return RISCV_IOMMU_FQ_CAUSE_PDT_LOAD_FAULT; } diff --git a/hw/riscv/virt-acpi-build.c b/hw/riscv/virt-acpi-build.c index ee1416d264598..f1406cb68339b 100644 --- a/hw/riscv/virt-acpi-build.c +++ b/hw/riscv/virt-acpi-build.c @@ -270,11 +270,8 @@ spcr_setup(GArray *table_data, BIOSLinker *linker, RISCVVirtState *s) #define RHCT_NODE_ARRAY_OFFSET 56 /* - * ACPI spec, Revision 6.5+ - * 5.2.36 RISC-V Hart Capabilities Table (RHCT) - * REF: https://github.com/riscv-non-isa/riscv-acpi/issues/16 - * https://drive.google.com/file/d/1nP3nFiH4jkPMp6COOxP6123DCZKR-tia/view - * https://drive.google.com/file/d/1sKbOa8m1UZw1JkquZYe3F1zQBN1xXsaf/view + * ACPI spec, Revision 6.6 + * 5.2.37 RISC-V Hart Capabilities Table (RHCT) */ static void build_rhct(GArray *table_data, BIOSLinker *linker, @@ -421,7 +418,10 @@ static void build_rhct(GArray *table_data, acpi_table_end(linker, &table); } -/* FADT */ +/* + * ACPI spec, Revision 
6.6 + * 5.2.9 Fixed ACPI Description Table (FADT)
printf(format, ## __VA_ARGS__) -#else -# define CMOS_DPRINTF(format, ...) do { } while (0) -#endif - #ifdef DEBUG_COALESCED # define DPRINTF_C(format, ...) printf(format, ## __VA_ARGS__) #else @@ -439,8 +433,7 @@ static void cmos_ioport_write(void *opaque, hwaddr addr, if ((addr & 1) == 0) { s->cmos_index = data & 0x7f; } else { - CMOS_DPRINTF("cmos: write index=0x%02x val=0x%02" PRIx64 "\n", - s->cmos_index, data); + trace_mc146818_rtc_ioport_write(s->cmos_index, data); switch(s->cmos_index) { case RTC_SECONDS_ALARM: case RTC_MINUTES_ALARM: @@ -726,21 +719,20 @@ static uint64_t cmos_ioport_read(void *opaque, hwaddr addr, ret = s->cmos_data[s->cmos_index]; break; } - CMOS_DPRINTF("cmos: read index=0x%02x val=0x%02x\n", - s->cmos_index, ret); + trace_mc146818_rtc_ioport_read(s->cmos_index, ret); return ret; } } void mc146818rtc_set_cmos_data(MC146818RtcState *s, int addr, int val) { - if (addr >= 0 && addr <= 127) - s->cmos_data[addr] = val; + assert(addr >= 0 && addr < ARRAY_SIZE(s->cmos_data)); + s->cmos_data[addr] = val; } int mc146818rtc_get_cmos_data(MC146818RtcState *s, int addr) { - assert(addr >= 0 && addr <= 127); + assert(addr >= 0 && addr < ARRAY_SIZE(s->cmos_data)); return s->cmos_data[addr]; } diff --git a/hw/rtc/trace-events b/hw/rtc/trace-events index b9f2852d35fca..d2f36217cb835 100644 --- a/hw/rtc/trace-events +++ b/hw/rtc/trace-events @@ -32,6 +32,10 @@ m48txx_nvram_io_write(uint64_t addr, uint64_t value) "io write addr:0x%04" PRIx6 m48txx_nvram_mem_read(uint32_t addr, uint32_t value) "mem read addr:0x%04x value:0x%02x" m48txx_nvram_mem_write(uint32_t addr, uint32_t value) "mem write addr:0x%04x value:0x%02x" +# mc146818rtc.c +mc146818_rtc_ioport_read(uint8_t addr, uint8_t value) "[0x%02" PRIx8 "] -> 0x%02" PRIx8 +mc146818_rtc_ioport_write(uint8_t addr, uint8_t value) "[0x%02" PRIx8 "] <- 0x%02" PRIx8 + # goldfish_rtc.c goldfish_rtc_read(uint64_t addr, uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 goldfish_rtc_write(uint64_t addr, 
uint64_t value) "addr 0x%02" PRIx64 " value 0x%08" PRIx64 diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c index f87d2748b6378..52820894fa168 100644 --- a/hw/s390x/s390-pci-bus.c +++ b/hw/s390x/s390-pci-bus.c @@ -652,7 +652,16 @@ static const PCIIOMMUOps s390_iommu_ops = { .get_address_space = s390_pci_dma_iommu, }; -static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) +/** + * set_ind_bit_atomic - Atomically set a bit in an indicator + * + * @ind_loc: Address of the indicator + * @to_be_set: Bit to set + * + * Returns true if the bit was set by this function, false if it was + * already set or mapping failed. + */ +static bool set_ind_bit_atomic(uint64_t ind_loc, uint8_t to_be_set) { uint8_t expected, actual; hwaddr len = 1; @@ -662,7 +671,7 @@ static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) ind_addr = cpu_physical_memory_map(ind_loc, &len, true); if (!ind_addr) { s390_pci_generate_error_event(ERR_EVENT_AIRERR, 0, 0, 0, 0); - return -1; + return false; } actual = *ind_addr; do { @@ -671,7 +680,7 @@ static uint8_t set_ind_atomic(uint64_t ind_loc, uint8_t to_be_set) } while (actual != expected); cpu_physical_memory_unmap((void *)ind_addr, len, 1, len); - return actual; + return (actual & to_be_set) ? 
false : true; } static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data, @@ -693,10 +702,10 @@ static void s390_msi_ctrl_write(void *opaque, hwaddr addr, uint64_t data, ind_bit = pbdev->routes.adapter.ind_offset; sum_bit = pbdev->routes.adapter.summary_offset; - set_ind_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8, + set_ind_bit_atomic(pbdev->routes.adapter.ind_addr + (ind_bit + vec) / 8, 0x80 >> ((ind_bit + vec) % 8)); - if (!set_ind_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8, - 0x80 >> (sum_bit % 8))) { + if (set_ind_bit_atomic(pbdev->routes.adapter.summary_addr + sum_bit / 8, + 0x80 >> (sum_bit % 8))) { css_adapter_interrupt(CSS_IO_ADAPTER_PCI, pbdev->isc); } } @@ -891,6 +900,7 @@ static void s390_pcihost_realize(DeviceState *dev, Error **errp) s390_pci_init_default_group(); css_register_io_adapters(CSS_IO_ADAPTER_PCI, true, false, S390_ADAPTER_SUPPRESSIBLE, errp); + s390_pcihost_kvm_realize(); } static void s390_pcihost_unrealize(DeviceState *dev) diff --git a/hw/s390x/s390-pci-vfio.c b/hw/s390x/s390-pci-vfio.c index aaf91319b4e3c..9e31029d7acbe 100644 --- a/hw/s390x/s390-pci-vfio.c +++ b/hw/s390x/s390-pci-vfio.c @@ -20,7 +20,7 @@ #include "hw/s390x/s390-pci-clp.h" #include "hw/s390x/s390-pci-vfio.h" #include "hw/vfio/pci.h" -#include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-container-legacy.h" #include "hw/vfio/vfio-helpers.h" /* @@ -62,7 +62,7 @@ S390PCIDMACount *s390_pci_start_dma_count(S390pciState *s, { S390PCIDMACount *cnt; uint32_t avail; - VFIOPCIDevice *vpdev = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vpdev = VFIO_PCI_DEVICE(pbdev->pdev); int id; assert(vpdev); @@ -108,7 +108,7 @@ static void s390_pci_read_base(S390PCIBusDevice *pbdev, { struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_base *cap; - VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vpci = VFIO_PCI_DEVICE(pbdev->pdev); uint64_t vfio_size; hdr = 
vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE); @@ -162,7 +162,7 @@ static bool get_host_fh(S390PCIBusDevice *pbdev, struct vfio_device_info *info, { struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_base *cap; - VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vpci = VFIO_PCI_DEVICE(pbdev->pdev); hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_BASE); @@ -185,7 +185,7 @@ static void s390_pci_read_group(S390PCIBusDevice *pbdev, struct vfio_device_info_cap_zpci_group *cap; S390pciState *s = s390_get_phb(); ClpRspQueryPciGrp *resgrp; - VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vpci = VFIO_PCI_DEVICE(pbdev->pdev); uint8_t start_gid = pbdev->zpci_fn.pfgid; hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_GROUP); @@ -264,7 +264,7 @@ static void s390_pci_read_util(S390PCIBusDevice *pbdev, { struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_util *cap; - VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vpci = VFIO_PCI_DEVICE(pbdev->pdev); hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_UTIL); @@ -291,7 +291,7 @@ static void s390_pci_read_pfip(S390PCIBusDevice *pbdev, { struct vfio_info_cap_header *hdr; struct vfio_device_info_cap_zpci_pfip *cap; - VFIOPCIDevice *vpci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vpci = VFIO_PCI_DEVICE(pbdev->pdev); hdr = vfio_get_device_info_cap(info, VFIO_DEVICE_INFO_CAP_ZPCI_PFIP); @@ -314,7 +314,7 @@ static void s390_pci_read_pfip(S390PCIBusDevice *pbdev, static struct vfio_device_info *get_device_info(S390PCIBusDevice *pbdev) { - VFIOPCIDevice *vfio_pci = container_of(pbdev->pdev, VFIOPCIDevice, pdev); + VFIOPCIDevice *vfio_pci = VFIO_PCI_DEVICE(pbdev->pdev); return vfio_get_device_info(vfio_pci->vbasedev.fd); } diff --git a/hw/s390x/s390-stattrib-kvm.c b/hw/s390x/s390-stattrib-kvm.c index 
e1fee361dc379..73df1f600b9f8 100644 --- a/hw/s390x/s390-stattrib-kvm.c +++ b/hw/s390x/s390-stattrib-kvm.c @@ -10,13 +10,13 @@ */ #include "qemu/osdep.h" +#include "exec/target_page.h" #include "hw/s390x/s390-virtio-ccw.h" #include "migration/qemu-file.h" #include "hw/s390x/storage-attributes.h" #include "qemu/error-report.h" #include "system/kvm.h" #include "system/memory_mapping.h" -#include "system/ram_addr.h" #include "kvm/kvm_s390x.h" #include "qapi/error.h" diff --git a/hw/s390x/s390-stattrib.c b/hw/s390x/s390-stattrib.c index 13a678a80373b..aa18537291486 100644 --- a/hw/s390x/s390-stattrib.c +++ b/hw/s390x/s390-stattrib.c @@ -11,12 +11,12 @@ #include "qemu/osdep.h" #include "qemu/units.h" +#include "exec/target_page.h" #include "migration/qemu-file.h" #include "migration/register.h" #include "hw/qdev-properties.h" #include "hw/s390x/storage-attributes.h" #include "qemu/error-report.h" -#include "system/ram_addr.h" #include "qapi/error.h" #include "qobject/qdict.h" #include "cpu.h" diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index a79bd13275b9b..64b81345f1e20 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -13,7 +13,6 @@ #include "qemu/osdep.h" #include "qapi/error.h" -#include "system/ram_addr.h" #include "system/confidential-guest-support.h" #include "hw/boards.h" #include "hw/s390x/sclp.h" @@ -716,26 +715,6 @@ static void s390_nmi(NMIState *n, int cpu_index, Error **errp) s390_cpu_restart(S390_CPU(cs)); } -static ram_addr_t s390_fixup_ram_size(ram_addr_t sz) -{ - /* same logic as in sclp.c */ - int increment_size = 20; - ram_addr_t newsz; - - while ((sz >> increment_size) > MAX_STORAGE_INCREMENTS) { - increment_size++; - } - newsz = sz >> increment_size << increment_size; - - if (sz != newsz) { - qemu_printf("Ram size %" PRIu64 "MB was fixed up to %" PRIu64 - "MB to match machine restrictions. 
Consider updating " - "the guest definition.\n", (uint64_t) (sz / MiB), - (uint64_t) (newsz / MiB)); - } - return newsz; -} - static inline bool machine_get_aes_key_wrap(Object *obj, Error **errp) { S390CcwMachineState *ms = S390_CCW_MACHINE(obj); @@ -911,14 +890,26 @@ static const TypeInfo ccw_machine_info = { DEFINE_CCW_MACHINE_IMPL(false, major, minor) +static void ccw_machine_10_2_instance_options(MachineState *machine) +{ +} + +static void ccw_machine_10_2_class_options(MachineClass *mc) +{ +} +DEFINE_CCW_MACHINE_AS_LATEST(10, 2); + static void ccw_machine_10_1_instance_options(MachineState *machine) { + ccw_machine_10_2_instance_options(machine); } static void ccw_machine_10_1_class_options(MachineClass *mc) { + ccw_machine_10_2_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_10_1, hw_compat_10_1_len); } -DEFINE_CCW_MACHINE_AS_LATEST(10, 1); +DEFINE_CCW_MACHINE(10, 1); static void ccw_machine_10_0_instance_options(MachineState *machine) { @@ -1154,19 +1145,6 @@ static void ccw_machine_5_0_class_options(MachineClass *mc) } DEFINE_CCW_MACHINE(5, 0); -static void ccw_machine_4_2_instance_options(MachineState *machine) -{ - ccw_machine_5_0_instance_options(machine); -} - -static void ccw_machine_4_2_class_options(MachineClass *mc) -{ - ccw_machine_5_0_class_options(mc); - mc->fixup_ram_size = s390_fixup_ram_size; - compat_props_add(mc->compat_props, hw_compat_4_2, hw_compat_4_2_len); -} -DEFINE_CCW_MACHINE(4, 2); - static void ccw_machine_register_types(void) { type_register_static(&ccw_machine_info); diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index 9718564fa42fe..c9a9c4bb375a7 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -16,6 +16,7 @@ #include "qemu/units.h" #include "qapi/error.h" #include "hw/boards.h" +#include "system/memory.h" #include "hw/s390x/sclp.h" #include "hw/s390x/event-facility.h" #include "hw/s390x/s390-pci-bus.h" @@ -109,7 +110,7 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) ReadInfo *read_info = 
(ReadInfo *) sccb; MachineState *machine = MACHINE(qdev_get_machine()); int cpu_count; - int rnsize, rnmax; + int rnmax; int required_len = SCCB_REQ_LEN(ReadInfo, machine->possible_cpus->len); int offset_cpu = s390_has_feat(S390_FEAT_EXTENDED_LENGTH_SCCB) ? offsetof(ReadInfo, entries) : @@ -152,21 +153,14 @@ static void read_SCP_info(SCLPDevice *sclp, SCCB *sccb) read_info->mha_pow = s390_get_mha_pow(); read_info->hmfai = cpu_to_be32(s390_get_hmfai()); - - rnsize = 1 << (sclp->increment_size - 20); - if (rnsize <= 128) { - read_info->rnsize = rnsize; - } else { - read_info->rnsize = 0; - read_info->rnsize2 = cpu_to_be32(rnsize); - } + read_info->rnsize = 1; /* * We don't support standby memory. maxram_size is used for sizing the * memory device region, which is not exposed through SCLP but through * diag500. */ - rnmax = machine->ram_size >> sclp->increment_size; + rnmax = machine->ram_size >> 20; if (rnmax < 0x10000) { read_info->rnmax = cpu_to_be16(rnmax); } else { @@ -303,12 +297,15 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code) SCLPDeviceClass *sclp_c = SCLP_GET_CLASS(sclp); SCCBHeader header; g_autofree SCCB *work_sccb = NULL; + AddressSpace *as = CPU(cpu)->as; + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + MemTxResult ret; /* first some basic checks on program checks */ if (env->psw.mask & PSW_MASK_PSTATE) { return -PGM_PRIVILEGED; } - if (cpu_physical_memory_is_io(sccb)) { + if (address_space_is_io(CPU(cpu)->as, sccb)) { return -PGM_ADDRESSING; } if ((sccb & ~0x1fffUL) == 0 || (sccb & ~0x1fffUL) == env->psa @@ -317,7 +314,10 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code) } /* the header contains the actual length of the sccb */ - cpu_physical_memory_read(sccb, &header, sizeof(SCCBHeader)); + ret = address_space_read(as, sccb, attrs, &header, sizeof(SCCBHeader)); + if (ret != MEMTX_OK) { + return -PGM_ADDRESSING; + } /* Valid sccb sizes */ if (be16_to_cpu(header.length) < sizeof(SCCBHeader)) { @@ -330,7 
+330,11 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code) * the host has checked the values */ work_sccb = g_malloc0(be16_to_cpu(header.length)); - cpu_physical_memory_read(sccb, work_sccb, be16_to_cpu(header.length)); + ret = address_space_read(as, sccb, attrs, + work_sccb, be16_to_cpu(header.length)); + if (ret != MEMTX_OK) { + return -PGM_ADDRESSING; + } if (!sclp_command_code_valid(code)) { work_sccb->h.response_code = cpu_to_be16(SCLP_RC_INVALID_SCLP_COMMAND); @@ -344,8 +348,11 @@ int sclp_service_call(S390CPU *cpu, uint64_t sccb, uint32_t code) sclp_c->execute(sclp, work_sccb, code); out_write: - cpu_physical_memory_write(sccb, work_sccb, - be16_to_cpu(work_sccb->h.length)); + ret = address_space_write(as, sccb, attrs, + work_sccb, be16_to_cpu(header.length)); + if (ret != MEMTX_OK) { + return -PGM_PROTECTION; + } sclp_c->service_interrupt(sclp, sccb); @@ -392,25 +399,6 @@ static void sclp_realize(DeviceState *dev, Error **errp) } } -static void sclp_memory_init(SCLPDevice *sclp) -{ - MachineState *machine = MACHINE(qdev_get_machine()); - MachineClass *machine_class = MACHINE_GET_CLASS(qdev_get_machine()); - ram_addr_t initial_mem = machine->ram_size; - int increment_size = 20; - - /* The storage increment size is a multiple of 1M and is a power of 2. - * For some machine types, the number of storage increments must be - * MAX_STORAGE_INCREMENTS or fewer. - * The variable 'increment_size' is an exponent of 2 that can be - * used to calculate the size (in bytes) of an increment. 
*/ - while (machine_class->fixup_ram_size != NULL && - (initial_mem >> increment_size) > MAX_STORAGE_INCREMENTS) { - increment_size++; - } - sclp->increment_size = increment_size; -} - static void sclp_init(Object *obj) { SCLPDevice *sclp = SCLP(obj); @@ -420,8 +408,6 @@ static void sclp_init(Object *obj) object_property_add_child(obj, TYPE_SCLP_EVENT_FACILITY, new); object_unref(new); sclp->event_facility = EVENT_FACILITY(new); - - sclp_memory_init(sclp); } static void sclp_class_init(ObjectClass *oc, const void *data) diff --git a/hw/s390x/sclpcpi.c b/hw/s390x/sclpcpi.c index 7aa039d510035..68fc1b809bf3d 100644 --- a/hw/s390x/sclpcpi.c +++ b/hw/s390x/sclpcpi.c @@ -54,6 +54,7 @@ #include "hw/s390x/event-facility.h" #include "hw/s390x/ebcdic.h" #include "qapi/qapi-visit-machine.h" +#include "qapi/qapi-events-machine-s390x.h" #include "migration/vmstate.h" typedef struct Data { @@ -106,6 +107,9 @@ static int write_event_data(SCLPEvent *event, EventBufferHeader *evt_buf_hdr) e->timestamp = qemu_clock_get_ns(QEMU_CLOCK_HOST); cpim->ebh.flags = SCLP_EVENT_BUFFER_ACCEPTED; + + qapi_event_send_sclp_cpi_info_available(); + return SCLP_RC_NORMAL_COMPLETION; } diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index d2f85b39f30f7..4cb1ced001ae2 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -1130,13 +1130,13 @@ static int virtio_ccw_load_queue(DeviceState *d, int n, QEMUFile *f) static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL); + vmstate_save_state(f, &vmstate_virtio_ccw_dev, dev, NULL, &error_fatal); } static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); - return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1); + return vmstate_load_state(f, &vmstate_virtio_ccw_dev, dev, 1, &error_fatal); } static void virtio_ccw_pre_plugged(DeviceState *d, Error **errp) diff 
--git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index 1ebe0b82a79d1..4ada35b7ec8fb 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -579,11 +579,11 @@ static void mptsas_process_ioc_init(MPTSASState *s, MPIMsgIOCInit *req) } memset(&reply, 0, sizeof(reply)); - reply.WhoInit = s->who_init; + reply.WhoInit = req->WhoInit; reply.MsgLength = sizeof(reply) / 4; reply.Function = req->Function; - reply.MaxDevices = s->max_devices; - reply.MaxBuses = s->max_buses; + reply.MaxDevices = req->MaxDevices; + reply.MaxBuses = req->MaxBuses; reply.MsgContext = req->MsgContext; mptsas_fix_ioc_init_reply_endianness(&reply); diff --git a/hw/scsi/spapr_vscsi.c b/hw/scsi/spapr_vscsi.c index 20f70fb2729de..f0a7dd2b882a1 100644 --- a/hw/scsi/spapr_vscsi.c +++ b/hw/scsi/spapr_vscsi.c @@ -630,7 +630,7 @@ static void vscsi_save_request(QEMUFile *f, SCSIRequest *sreq) vscsi_req *req = sreq->hba_private; assert(req->active); - vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL); + vmstate_save_state(f, &vmstate_spapr_vscsi_req, req, NULL, &error_fatal); trace_spapr_vscsi_save_request(req->qtag, req->cur_desc_num, req->cur_desc_offset); @@ -642,15 +642,17 @@ static void *vscsi_load_request(QEMUFile *f, SCSIRequest *sreq) VSCSIState *s = VIO_SPAPR_VSCSI_DEVICE(bus->qbus.parent); vscsi_req *req; int rc; + Error *local_err = NULL; assert(sreq->tag < VSCSI_REQ_LIMIT); req = &s->reqs[sreq->tag]; assert(!req->active); memset(req, 0, sizeof(*req)); - rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1); + rc = vmstate_load_state(f, &vmstate_spapr_vscsi_req, req, 1, &local_err); if (rc) { fprintf(stderr, "VSCSI: failed loading request tag#%u\n", sreq->tag); + error_report_err(local_err); return NULL; } assert(req->active); diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 34ae14f7bf938..d817fc42b4c24 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -116,11 +116,7 @@ static void virtio_scsi_complete_req(VirtIOSCSIReq *req, QemuMutex *vq_lock) } 
virtqueue_push(vq, &req->elem, req->qsgl.size + req->resp_iov.size); - if (s->dataplane_started && !s->dataplane_fenced) { - virtio_notify_irqfd(vdev, vq); - } else { - virtio_notify(vdev, vq); - } + virtio_notify(vdev, vq); if (vq_lock) { qemu_mutex_unlock(vq_lock); diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index b31da5c399c10..9d61b372e7065 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -233,7 +233,7 @@ static void allwinner_sdhost_send_command(AwSdHostState *s) { SDRequest request; uint8_t resp[16]; - int rlen; + size_t rlen; /* Auto clear load flag */ s->command &= ~SD_CMDR_LOAD; @@ -246,10 +246,7 @@ static void allwinner_sdhost_send_command(AwSdHostState *s) request.arg = s->command_arg; /* Send request to SD bus */ - rlen = sdbus_do_command(&s->sdbus, &request, resp); - if (rlen < 0) { - goto error; - } + rlen = sdbus_do_command(&s->sdbus, &request, resp, sizeof(resp)); /* If the command has a response, store it in the response registers */ if ((s->command & SD_CMDR_RESPONSE)) { diff --git a/hw/sd/bcm2835_sdhost.c b/hw/sd/bcm2835_sdhost.c index 29debdf59e44e..f7cef7bb1cdf9 100644 --- a/hw/sd/bcm2835_sdhost.c +++ b/hw/sd/bcm2835_sdhost.c @@ -113,15 +113,12 @@ static void bcm2835_sdhost_send_command(BCM2835SDHostState *s) { SDRequest request; uint8_t rsp[16]; - int rlen; + size_t rlen; request.cmd = s->cmd & SDCMD_CMD_MASK; request.arg = s->cmdarg; - rlen = sdbus_do_command(&s->sdbus, &request, rsp); - if (rlen < 0) { - goto error; - } + rlen = sdbus_do_command(&s->sdbus, &request, rsp, sizeof(rsp)); if (!(s->cmd & SDCMD_NO_RESPONSE)) { if (rlen == 0 || (rlen == 4 && (s->cmd & SDCMD_LONG_RESPONSE))) { goto error; diff --git a/hw/sd/core.c b/hw/sd/core.c index 4b30218b5202a..d3c9017445e01 100644 --- a/hw/sd/core.c +++ b/hw/sd/core.c @@ -90,7 +90,8 @@ void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts) } } -int sdbus_do_command(SDBus *sdbus, SDRequest *req, uint8_t *response) +size_t sdbus_do_command(SDBus 
*sdbus, SDRequest *req, + uint8_t *resp, size_t respsz) { SDState *card = get_card(sdbus); @@ -98,7 +99,7 @@ int sdbus_do_command(SDBus *sdbus, SDRequest *req, uint8_t *response) if (card) { SDCardClass *sc = SDMMC_COMMON_GET_CLASS(card); - return sc->do_command(card, req, response); + return sc->do_command(card, req, resp, respsz); } return 0; diff --git a/hw/sd/omap_mmc.c b/hw/sd/omap_mmc.c index b7648d41cc53c..5a1d25defaaf5 100644 --- a/hw/sd/omap_mmc.c +++ b/hw/sd/omap_mmc.c @@ -130,7 +130,8 @@ static void omap_mmc_command(OMAPMMCState *host, int cmd, int dir, sd_rsp_type_t resptype, int init) { uint32_t rspstatus, mask; - int rsplen, timeout; + size_t rsplen; + int timeout; SDRequest request; uint8_t response[16]; @@ -157,7 +158,7 @@ static void omap_mmc_command(OMAPMMCState *host, int cmd, int dir, request.arg = host->arg; request.crc = 0; /* FIXME */ - rsplen = sdbus_do_command(&host->sdbus, &request, response); + rsplen = sdbus_do_command(&host->sdbus, &request, response, sizeof(response)); /* TODO: validate CRCs */ switch (resptype) { diff --git a/hw/sd/pl181.c b/hw/sd/pl181.c index b8fc9f86f13e7..5d56ead4d911e 100644 --- a/hw/sd/pl181.c +++ b/hw/sd/pl181.c @@ -173,14 +173,12 @@ static void pl181_do_command(PL181State *s) { SDRequest request; uint8_t response[16]; - int rlen; + size_t rlen; request.cmd = s->cmd & PL181_CMD_INDEX; request.arg = s->cmdarg; trace_pl181_command_send(request.cmd, request.arg); - rlen = sdbus_do_command(&s->sdbus, &request, response); - if (rlen < 0) - goto error; + rlen = sdbus_do_command(&s->sdbus, &request, response, sizeof(response)); if (s->cmd & PL181_CMD_RESPONSE) { if (rlen == 0 || (rlen == 4 && (s->cmd & PL181_CMD_LONGRESP))) goto error; diff --git a/hw/sd/sd.c b/hw/sd/sd.c index c275fdda2d02a..d7a496d77c920 100644 --- a/hw/sd/sd.c +++ b/hw/sd/sd.c @@ -61,6 +61,7 @@ typedef enum { sd_r0 = 0, /* no response */ sd_r1, /* normal response command */ + spi_r2, /* STATUS */ sd_r2_i, /* CID register */ sd_r2_s, /* CSD register 
*/ sd_r3, /* OCR register */ @@ -146,7 +147,6 @@ struct SDState { /* Runtime changeables */ - uint32_t mode; /* current card mode, one of SDCardModes */ int32_t state; /* current card state, one of SDCardStates */ uint32_t vhs; bool wp_switch; @@ -195,7 +195,6 @@ static bool sd_is_emmc(SDState *sd) static const char *sd_version_str(enum SDPhySpecificationVersion version) { static const char *sdphy_version[] = { - [SD_PHY_SPECv1_10_VERS] = "v1.10", [SD_PHY_SPECv2_00_VERS] = "v2.00", [SD_PHY_SPECv3_01_VERS] = "v3.01", }; @@ -247,6 +246,7 @@ static const char *sd_response_name(sd_rsp_type_t rsp) static const char *response_name[] = { [sd_r0] = "RESP#0 (no response)", [sd_r1] = "RESP#1 (normal cmd)", + [spi_r2] = "RESP#2 (STATUS reg)", [sd_r2_i] = "RESP#2 (CID reg)", [sd_r2_s] = "RESP#2 (CSD reg)", [sd_r3] = "RESP#3 (OCR reg)", @@ -313,27 +313,24 @@ static void sd_set_voltage(SDState *sd, uint16_t millivolts) } } -static void sd_set_mode(SDState *sd) +static enum SDCardModes sd_mode(SDState *sd) { switch (sd->state) { case sd_inactive_state: - sd->mode = sd_inactive; - break; - + return sd_inactive; case sd_idle_state: case sd_ready_state: case sd_identification_state: - sd->mode = sd_card_identification_mode; - break; - + return sd_card_identification_mode; case sd_standby_state: case sd_transfer_state: case sd_sendingdata_state: case sd_receivingdata_state: case sd_programming_state: case sd_disconnect_state: - sd->mode = sd_data_transfer_mode; - break; + return sd_data_transfer_mode; + default: + g_assert_not_reached(); } } @@ -409,11 +406,7 @@ static void sd_set_ocr(SDState *sd) static void sd_set_scr(SDState *sd) { sd->scr[0] = 0 << 4; /* SCR structure version 1.0 */ - if (sd->spec_version == SD_PHY_SPECv1_10_VERS) { - sd->scr[0] |= 1; /* Spec Version 1.10 */ - } else { - sd->scr[0] |= 2; /* Spec Version 2.00 or Version 3.0X */ - } + sd->scr[0] |= 2; /* Spec Version 2.00 or Version 3.0X */ sd->scr[1] = (2 << 4) /* SDSC Card (Security Version 1.01) */ | 0b0101; /* 
1-bit or 4-bit width bus modes */ sd->scr[2] = 0x00; /* Extended Security is not supported. */ @@ -729,16 +722,82 @@ static int sd_req_crc_validate(SDRequest *req) return sd_crc7(buffer, 5) != req->crc; /* TODO */ } +static size_t sd_response_size(SDState *sd, sd_rsp_type_t rtype) +{ + switch (rtype) { + case sd_r1: + case sd_r1b: + return sd_is_spi(sd) ? 1 : 4; + + case spi_r2: + assert(sd_is_spi(sd)); + return 2; + + case sd_r2_i: + case sd_r2_s: + assert(!sd_is_spi(sd)); + return 16; + + case sd_r3: + case sd_r7: + return sd_is_spi(sd) ? 5 : 4; + + case sd_r6: + assert(!sd_is_spi(sd)); + return 4; + + case sd_r0: + case sd_illegal: + return sd_is_spi(sd) ? 1 : 0; + + default: + g_assert_not_reached(); + } +} + static void sd_response_r1_make(SDState *sd, uint8_t *response) { - stl_be_p(response, sd->card_status); + if (sd_is_spi(sd)) { + response[0] = sd->state == sd_idle_state + && !FIELD_EX32(sd->ocr, OCR, CARD_POWER_UP); + response[0] |= FIELD_EX32(sd->card_status, CSR, ERASE_RESET) << 1; + response[0] |= FIELD_EX32(sd->card_status, CSR, ILLEGAL_COMMAND) << 2; + response[0] |= FIELD_EX32(sd->card_status, CSR, COM_CRC_ERROR) << 3; + response[0] |= FIELD_EX32(sd->card_status, CSR, ERASE_SEQ_ERROR) << 4; + response[0] |= FIELD_EX32(sd->card_status, CSR, ADDRESS_ERROR) << 5; + response[0] |= FIELD_EX32(sd->card_status, CSR, BLOCK_LEN_ERROR) << 6; + response[0] |= 0 << 7; + } else { + stl_be_p(response, sd->card_status); + } /* Clear the "clear on read" status bits */ sd->card_status &= ~CARD_STATUS_C; } +static void spi_response_r2_make(SDState *sd, uint8_t *resp) +{ + /* Prepend R1 */ + sd_response_r1_make(sd, resp); + + resp[1] = FIELD_EX32(sd->card_status, CSR, CARD_IS_LOCKED) << 0; + resp[1] |= (FIELD_EX32(sd->card_status, CSR, LOCK_UNLOCK_FAILED) + || FIELD_EX32(sd->card_status, CSR, WP_ERASE_SKIP)) << 1; + resp[1] |= FIELD_EX32(sd->card_status, CSR, ERROR) << 2; + resp[1] |= FIELD_EX32(sd->card_status, CSR, CC_ERROR) << 3; + resp[1] |= 
FIELD_EX32(sd->card_status, CSR, CARD_ECC_FAILED) << 4; + resp[1] |= FIELD_EX32(sd->card_status, CSR, WP_VIOLATION) << 5; + resp[1] |= FIELD_EX32(sd->card_status, CSR, ERASE_PARAM) << 6; + resp[1] |= FIELD_EX32(sd->card_status, CSR, OUT_OF_RANGE) << 7; +} + static void sd_response_r3_make(SDState *sd, uint8_t *response) { + if (sd_is_spi(sd)) { + /* Prepend R1 */ + sd_response_r1_make(sd, response); + response++; + } stl_be_p(response, sd->ocr & ACMD41_R3_MASK); } @@ -756,6 +815,11 @@ static void sd_response_r6_make(SDState *sd, uint8_t *response) static void sd_response_r7_make(SDState *sd, uint8_t *response) { + if (sd_is_spi(sd)) { + /* Prepend R1 */ + sd_response_r1_make(sd, response); + response++; + } stl_be_p(response, sd->vhs); } @@ -769,14 +833,14 @@ static uint32_t sd_blk_len(SDState *sd) /* * This requires a disk image that has two boot partitions inserted at the - * beginning of it. The size of the boot partitions is the "boot-size" - * property. + * beginning of it, followed by an RPMB partition. The size of the boot + * partitions is the "boot-partition-size" property. 
*/ -static uint32_t sd_bootpart_offset(SDState *sd) +static uint32_t sd_part_offset(SDState *sd) { unsigned partition_access; - if (!sd->boot_part_size || !sd_is_emmc(sd)) { + if (!sd_is_emmc(sd)) { return 0; } @@ -785,9 +849,9 @@ static uint32_t sd_bootpart_offset(SDState *sd) switch (partition_access) { case EXT_CSD_PART_CONFIG_ACC_DEFAULT: return sd->boot_part_size * 2; - case EXT_CSD_PART_CONFIG_ACC_BOOT0: + case EXT_CSD_PART_CONFIG_ACC_BOOT1: return 0; - case EXT_CSD_PART_CONFIG_ACC_BOOT0 + 1: + case EXT_CSD_PART_CONFIG_ACC_BOOT2: return sd->boot_part_size * 1; default: g_assert_not_reached(); @@ -952,7 +1016,7 @@ static const VMStateDescription sd_vmstate = { .minimum_version_id = 2, .pre_load = sd_vmstate_pre_load, .fields = (const VMStateField[]) { - VMSTATE_UINT32(mode, SDState), + VMSTATE_UNUSED(4), VMSTATE_INT32(state, SDState), VMSTATE_UINT8_ARRAY(cid, SDState, 16), VMSTATE_UINT8_ARRAY(csd, SDState, 16), @@ -988,7 +1052,7 @@ static const VMStateDescription sd_vmstate = { static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len) { trace_sdcard_read_block(addr, len); - addr += sd_bootpart_offset(sd); + addr += sd_part_offset(sd); if (!sd->blk || blk_pread(sd->blk, addr, len, sd->data, 0) < 0) { fprintf(stderr, "sd_blk_read: read error on host side\n"); } @@ -997,7 +1061,7 @@ static void sd_blk_read(SDState *sd, uint64_t addr, uint32_t len) static void sd_blk_write(SDState *sd, uint64_t addr, uint32_t len) { trace_sdcard_write_block(addr, len); - addr += sd_bootpart_offset(sd); + addr += sd_part_offset(sd); if (!sd->blk || blk_pwrite(sd->blk, addr, len, sd->data, 0) < 0) { fprintf(stderr, "sd_blk_write: write error on host side\n"); } @@ -1252,7 +1316,7 @@ static sd_rsp_type_t sd_invalid_state_for_cmd(SDState *sd, SDRequest req) static sd_rsp_type_t sd_invalid_mode_for_cmd(SDState *sd, SDRequest req) { qemu_log_mask(LOG_GUEST_ERROR, "%s: CMD%i in a wrong mode: %s (spec %s)\n", - sd->proto->name, req.cmd, sd_mode_name(sd->mode), + sd->proto->name, 
req.cmd, sd_mode_name(sd_mode(sd)), sd_version_str(sd->spec_version)); return sd_illegal; @@ -1305,7 +1369,7 @@ static sd_rsp_type_t sd_cmd_to_sendingdata(SDState *sd, SDRequest req, const void *data, size_t size) { if (sd->state != sd_transfer_state) { - sd_invalid_state_for_cmd(sd, req); + return sd_invalid_state_for_cmd(sd, req); } sd->state = sd_sendingdata_state; @@ -1341,14 +1405,6 @@ static sd_rsp_type_t sd_cmd_GO_IDLE_STATE(SDState *sd, SDRequest req) return sd_is_spi(sd) ? sd_r1 : sd_r0; } -/* CMD1 */ -static sd_rsp_type_t spi_cmd_SEND_OP_COND(SDState *sd, SDRequest req) -{ - sd->state = sd_transfer_state; - - return sd_r1; -} - /* CMD2 */ static sd_rsp_type_t sd_cmd_ALL_SEND_CID(SDState *sd, SDRequest req) { @@ -1420,11 +1476,17 @@ static sd_rsp_type_t emmc_cmd_sleep_awake(SDState *sd, SDRequest req) /* CMD6 */ static sd_rsp_type_t sd_cmd_SWITCH_FUNCTION(SDState *sd, SDRequest req) { - if (sd->mode != sd_data_transfer_mode) { + if (sd_mode(sd) != sd_data_transfer_mode) { return sd_invalid_mode_for_cmd(sd, req); } - if (sd->state != sd_transfer_state) { - return sd_invalid_state_for_cmd(sd, req); + if (sd_is_spi(sd)) { + if (sd->state == sd_idle_state) { + return sd_invalid_state_for_cmd(sd, req); + } + } else { + if (sd->state != sd_transfer_state) { + return sd_invalid_state_for_cmd(sd, req); + } } sd_function_switch(sd, req.arg); @@ -1488,9 +1550,6 @@ static sd_rsp_type_t sd_cmd_DE_SELECT_CARD(SDState *sd, SDRequest req) /* CMD8 */ static sd_rsp_type_t sd_cmd_SEND_IF_COND(SDState *sd, SDRequest req) { - if (sd->spec_version < SD_PHY_SPECv2_00_VERS) { - return sd_cmd_illegal(sd, req); - } if (sd->state != sd_idle_state) { return sd_invalid_state_for_cmd(sd, req); } @@ -1517,14 +1576,30 @@ static sd_rsp_type_t emmc_cmd_SEND_EXT_CSD(SDState *sd, SDRequest req) sd->ext_csd, sizeof(sd->ext_csd)); } -/* CMD9 */ -static sd_rsp_type_t spi_cmd_SEND_CSD(SDState *sd, SDRequest req) +static sd_rsp_type_t spi_cmd_SEND_CxD(SDState *sd, SDRequest req, + const void 
*data, size_t size) { + /* + * XXX as of v10.1.0-rc1 command is reached in sd_idle_state, + * so disable this check. if (sd->state != sd_standby_state) { return sd_invalid_state_for_cmd(sd, req); } - return sd_cmd_to_sendingdata(sd, req, sd_req_get_address(sd, req), - sd->csd, 16); + */ + + /* + * Since SPI returns CSD and CID on the DAT lines, + * switch to sd_transfer_state. + */ + sd->state = sd_transfer_state; + + return sd_cmd_to_sendingdata(sd, req, 0, data, size); +} + +/* CMD9 */ +static sd_rsp_type_t spi_cmd_SEND_CSD(SDState *sd, SDRequest req) +{ + return spi_cmd_SEND_CxD(sd, req, sd->csd, sizeof(sd->csd)); } static sd_rsp_type_t sd_cmd_SEND_CSD(SDState *sd, SDRequest req) @@ -1539,11 +1614,7 @@ static sd_rsp_type_t sd_cmd_SEND_CSD(SDState *sd, SDRequest req) /* CMD10 */ static sd_rsp_type_t spi_cmd_SEND_CID(SDState *sd, SDRequest req) { - if (sd->state != sd_standby_state) { - return sd_invalid_state_for_cmd(sd, req); - } - return sd_cmd_to_sendingdata(sd, req, sd_req_get_address(sd, req), - sd->cid, 16); + return spi_cmd_SEND_CxD(sd, req, sd->cid, sizeof(sd->cid)); } static sd_rsp_type_t sd_cmd_SEND_CID(SDState *sd, SDRequest req) @@ -1575,7 +1646,7 @@ static sd_rsp_type_t sd_cmd_STOP_TRANSMISSION(SDState *sd, SDRequest req) /* CMD13 */ static sd_rsp_type_t sd_cmd_SEND_STATUS(SDState *sd, SDRequest req) { - if (sd->mode != sd_data_transfer_mode) { + if (sd_mode(sd) != sd_data_transfer_mode) { return sd_invalid_mode_for_cmd(sd, req); } @@ -1592,7 +1663,7 @@ static sd_rsp_type_t sd_cmd_SEND_STATUS(SDState *sd, SDRequest req) } if (sd_is_spi(sd)) { - return sd_r2_s; + return spi_r2; } return sd_req_rca_same(sd, req) ? 
sd_r1 : sd_r0; @@ -1601,7 +1672,7 @@ static sd_rsp_type_t sd_cmd_SEND_STATUS(SDState *sd, SDRequest req) /* CMD15 */ static sd_rsp_type_t sd_cmd_GO_INACTIVE_STATE(SDState *sd, SDRequest req) { - if (sd->mode != sd_data_transfer_mode) { + if (sd_mode(sd) != sd_data_transfer_mode) { return sd_invalid_mode_for_cmd(sd, req); } switch (sd->state) { @@ -1906,8 +1977,14 @@ static sd_rsp_type_t sd_acmd_SET_BUS_WIDTH(SDState *sd, SDRequest req) /* ACMD13 */ static sd_rsp_type_t sd_acmd_SD_STATUS(SDState *sd, SDRequest req) { - return sd_cmd_to_sendingdata(sd, req, 0, - sd->sd_status, sizeof(sd->sd_status)); + sd_rsp_type_t rsp; + + rsp = sd_cmd_to_sendingdata(sd, req, 0, + sd->sd_status, sizeof(sd->sd_status)); + if (sd_is_spi(sd) && rsp != sd_illegal) { + return spi_r2; + } + return rsp; } /* ACMD22 */ @@ -1967,6 +2044,9 @@ static sd_rsp_type_t sd_cmd_SEND_OP_COND(SDState *sd, SDRequest req) sd->state = sd_ready_state; } + if (sd_is_spi(sd)) { + return sd_r1; + } return sd_r3; } @@ -1998,7 +2078,9 @@ static sd_rsp_type_t sd_normal_command(SDState *sd, SDRequest req) if (req.cmd != 55 || sd->expecting_acmd) { trace_sdcard_normal_command(sd->proto->name, sd->last_cmd_name, req.cmd, - req.arg, sd_state_name(sd->state)); + req.arg, + sd_mode_name(sd_mode(sd)), + sd_state_name(sd->state)); } /* Not interpreting this as an app command */ @@ -2084,7 +2166,9 @@ static sd_rsp_type_t sd_app_command(SDState *sd, { sd->last_cmd_name = sd_acmd_name(sd, req.cmd); trace_sdcard_app_command(sd->proto->name, sd->last_cmd_name, - req.cmd, req.arg, sd_state_name(sd->state)); + req.cmd, req.arg, + sd_mode_name(sd_mode(sd)), + sd_state_name(sd->state)); sd->card_status |= APP_CMD; if (sd->proto->acmd[req.cmd].handler) { @@ -2139,8 +2223,9 @@ static bool cmd_valid_while_locked(SDState *sd, unsigned cmd) return cmd_class == 0 || cmd_class == 7; } -static int sd_do_command(SDState *sd, SDRequest *req, - uint8_t *response) { +static size_t sd_do_command(SDState *sd, SDRequest *req, + uint8_t 
*response, size_t respsz) +{ int last_state; sd_rsp_type_t rtype; int rsplen; @@ -2183,7 +2268,6 @@ static int sd_do_command(SDState *sd, SDRequest *req, } last_state = sd->state; - sd_set_mode(sd); if (sd->expecting_acmd) { sd->expecting_acmd = false; @@ -2203,36 +2287,37 @@ static int sd_do_command(SDState *sd, SDRequest *req, } send_response: + rsplen = sd_response_size(sd, rtype); + assert(rsplen <= respsz); + switch (rtype) { case sd_r1: case sd_r1b: sd_response_r1_make(sd, response); - rsplen = 4; + break; + + case spi_r2: + spi_response_r2_make(sd, response); break; case sd_r2_i: memcpy(response, sd->cid, sizeof(sd->cid)); - rsplen = 16; break; case sd_r2_s: memcpy(response, sd->csd, sizeof(sd->csd)); - rsplen = 16; break; case sd_r3: sd_response_r3_make(sd, response); - rsplen = 4; break; case sd_r6: sd_response_r6_make(sd, response); - rsplen = 4; break; case sd_r7: sd_response_r7_make(sd, response); - rsplen = 4; break; case sd_r0: @@ -2244,7 +2329,6 @@ static int sd_do_command(SDState *sd, SDRequest *req, sd->data_offset = 0; /* fall-through */ case sd_illegal: - rsplen = 0; break; default: g_assert_not_reached(); @@ -2510,7 +2594,7 @@ static const SDProto sd_proto_spi = { .name = "SPI", .cmd = { [0] = {0, sd_spi, "GO_IDLE_STATE", sd_cmd_GO_IDLE_STATE}, - [1] = {0, sd_spi, "SEND_OP_COND", spi_cmd_SEND_OP_COND}, + [1] = {0, sd_spi, "SEND_OP_COND", sd_cmd_SEND_OP_COND}, [5] = {9, sd_spi, "IO_SEND_OP_COND", sd_cmd_optional}, [6] = {10, sd_spi, "SWITCH_FUNCTION", sd_cmd_SWITCH_FUNCTION}, [8] = {0, sd_spi, "SEND_IF_COND", sd_cmd_SEND_IF_COND}, @@ -2546,7 +2630,7 @@ static const SDProto sd_proto_spi = { [13] = {8, sd_spi, "SD_STATUS", sd_acmd_SD_STATUS}, [22] = {8, sd_spi, "SEND_NUM_WR_BLOCKS", sd_acmd_SEND_NUM_WR_BLOCKS}, [23] = {8, sd_spi, "SET_WR_BLK_ERASE_COUNT", sd_acmd_SET_WR_BLK_ERASE_COUNT}, - [41] = {8, sd_spi, "SEND_OP_COND", spi_cmd_SEND_OP_COND}, + [41] = {8, sd_spi, "SEND_OP_COND", sd_cmd_SEND_OP_COND}, [42] = {8, sd_spi, "SET_CLR_CARD_DETECT", 
sd_acmd_SET_CLR_CARD_DETECT}, [51] = {8, sd_spi, "SEND_SCR", sd_acmd_SEND_SCR}, }, @@ -2681,7 +2765,7 @@ static void sd_realize(DeviceState *dev, Error **errp) int ret; switch (sd->spec_version) { - case SD_PHY_SPECv1_10_VERS + case SD_PHY_SPECv2_00_VERS ... SD_PHY_SPECv3_01_VERS: break; default: @@ -2726,6 +2810,15 @@ static void sd_realize(DeviceState *dev, Error **errp) } blk_set_dev_ops(sd->blk, &sd_block_ops, sd); } + if (sd->boot_part_size % (128 * KiB) || + sd->boot_part_size > 255 * 128 * KiB) { + g_autofree char *size_str = size_to_str(sd->boot_part_size); + + error_setg(errp, "Invalid boot partition size: %s", size_str); + error_append_hint(errp, + "The boot partition size must be multiples of 128K" + "and not larger than 32640K.\n"); + } } static void emmc_realize(DeviceState *dev, Error **errp) diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 226ff133ff9fd..89b595ce4a5a7 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -337,7 +337,7 @@ static void sdhci_send_command(SDHCIState *s) { SDRequest request; uint8_t response[16]; - int rlen; + size_t rlen; bool timeout = false; s->errintsts = 0; @@ -346,7 +346,7 @@ static void sdhci_send_command(SDHCIState *s) request.arg = s->argument; trace_sdhci_send_command(request.cmd, request.arg); - rlen = sdbus_do_command(&s->sdbus, &request, response); + rlen = sdbus_do_command(&s->sdbus, &request, response, sizeof(response)); if (s->cmdreg & SDHC_CMD_RESPONSE) { if (rlen == 4) { @@ -400,7 +400,7 @@ static void sdhci_end_transfer(SDHCIState *s) request.cmd = 0x0C; request.arg = 0; trace_sdhci_end_transfer(request.cmd, request.arg); - sdbus_do_command(&s->sdbus, &request, response); + sdbus_do_command(&s->sdbus, &request, response, sizeof(response)); /* Auto CMD12 response goes to the upper Response register */ s->rspreg[3] = ldl_be_p(response); } @@ -1578,10 +1578,6 @@ static void sdhci_sysbus_finalize(Object *obj) { SDHCIState *s = SYSBUS_SDHCI(obj); - if (s->dma_mr) { - object_unparent(OBJECT(s->dma_mr)); - } - 
sdhci_uninitfn(s); } diff --git a/hw/sd/sdmmc-internal.h b/hw/sd/sdmmc-internal.h index 91eb5b6b2fc06..ce6bc4e6ec477 100644 --- a/hw/sd/sdmmc-internal.h +++ b/hw/sd/sdmmc-internal.h @@ -116,7 +116,8 @@ DECLARE_OBJ_CHECKERS(SDState, SDCardClass, SDMMC_COMMON, TYPE_SDMMC_COMMON) #define EXT_CSD_PART_CONFIG_ACC_MASK (0x7) #define EXT_CSD_PART_CONFIG_ACC_DEFAULT (0x0) -#define EXT_CSD_PART_CONFIG_ACC_BOOT0 (0x1) +#define EXT_CSD_PART_CONFIG_ACC_BOOT1 (0x1) +#define EXT_CSD_PART_CONFIG_ACC_BOOT2 (0x2) #define EXT_CSD_PART_CONFIG_EN_MASK (0x7 << 3) #define EXT_CSD_PART_CONFIG_EN_BOOT0 (0x1 << 3) diff --git a/hw/sd/ssi-sd.c b/hw/sd/ssi-sd.c index 6c90a86ab4164..3aacbd0387153 100644 --- a/hw/sd/ssi-sd.c +++ b/hw/sd/ssi-sd.c @@ -70,23 +70,6 @@ struct ssi_sd_state { #define TYPE_SSI_SD "ssi-sd" OBJECT_DECLARE_SIMPLE_TYPE(ssi_sd_state, SSI_SD) -/* State word bits. */ -#define SSI_SDR_LOCKED 0x0001 -#define SSI_SDR_WP_ERASE 0x0002 -#define SSI_SDR_ERROR 0x0004 -#define SSI_SDR_CC_ERROR 0x0008 -#define SSI_SDR_ECC_FAILED 0x0010 -#define SSI_SDR_WP_VIOLATION 0x0020 -#define SSI_SDR_ERASE_PARAM 0x0040 -#define SSI_SDR_OUT_OF_RANGE 0x0080 -#define SSI_SDR_IDLE 0x0100 -#define SSI_SDR_ERASE_RESET 0x0200 -#define SSI_SDR_ILLEGAL_COMMAND 0x0400 -#define SSI_SDR_COM_CRC_ERROR 0x0800 -#define SSI_SDR_ERASE_SEQ_ERROR 0x1000 -#define SSI_SDR_ADDRESS_ERROR 0x2000 -#define SSI_SDR_PARAMETER_ERROR 0x4000 - /* multiple block write */ #define SSI_TOKEN_MULTI_WRITE 0xfc /* terminate multiple block write */ @@ -104,7 +87,11 @@ static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) { ssi_sd_state *s = SSI_SD(dev); SDRequest request; - uint8_t longresp[16]; + uint8_t longresp[5]; + + if (!sdbus_get_inserted(&s->sdbus)) { + return SSI_DUMMY; + } /* * Special case: allow CMD12 (STOP TRANSMISSION) while reading data. 
@@ -146,8 +133,9 @@ static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) /* manually issue cmd12 to stop the transfer */ request.cmd = 12; request.arg = 0; - s->arglen = sdbus_do_command(&s->sdbus, &request, longresp); - if (s->arglen <= 0) { + s->arglen = sdbus_do_command(&s->sdbus, &request, + longresp, sizeof(longresp)); + if (s->arglen == 0) { s->arglen = 1; /* a zero value indicates the card is busy */ s->response[0] = 0; @@ -170,73 +158,15 @@ static uint32_t ssi_sd_transfer(SSIPeripheral *dev, uint32_t val) /* FIXME: Check CRC. */ request.cmd = s->cmd; request.arg = ldl_be_p(s->cmdarg); - DPRINTF("CMD%d arg 0x%08x\n", s->cmd, request.arg); - s->arglen = sdbus_do_command(&s->sdbus, &request, longresp); - if (s->arglen <= 0) { - s->arglen = 1; - s->response[0] = 4; - DPRINTF("SD command failed\n"); - } else if (s->cmd == 8 || s->cmd == 58) { - /* CMD8/CMD58 returns R3/R7 response */ - DPRINTF("Returned R3/R7\n"); - s->arglen = 5; - s->response[0] = 1; - memcpy(&s->response[1], longresp, 4); - } else if (s->arglen != 4) { - BADF("Unexpected response to cmd %d\n", s->cmd); - /* Illegal command is about as near as we can get. */ - s->arglen = 1; - s->response[0] = 4; - } else { - /* All other commands return status. */ - uint32_t cardstatus; - uint16_t status; - /* CMD13 returns a 2-byte statuse work. Other commands - only return the first byte. */ - s->arglen = (s->cmd == 13) ? 
2 : 1; - - /* handle R1b */ - if (s->cmd == 28 || s->cmd == 29 || s->cmd == 38) { - s->stopping = 1; - } + s->arglen = sdbus_do_command(&s->sdbus, &request, + longresp, sizeof(longresp)); + DPRINTF("CMD%d arg 0x%08x = %d\n", s->cmd, request.arg, s->arglen); + assert(s->arglen > 0); + memcpy(s->response, longresp, s->arglen); - cardstatus = ldl_be_p(longresp); - status = 0; - if (((cardstatus >> 9) & 0xf) < 4) - status |= SSI_SDR_IDLE; - if (cardstatus & ERASE_RESET) - status |= SSI_SDR_ERASE_RESET; - if (cardstatus & ILLEGAL_COMMAND) - status |= SSI_SDR_ILLEGAL_COMMAND; - if (cardstatus & COM_CRC_ERROR) - status |= SSI_SDR_COM_CRC_ERROR; - if (cardstatus & ERASE_SEQ_ERROR) - status |= SSI_SDR_ERASE_SEQ_ERROR; - if (cardstatus & ADDRESS_ERROR) - status |= SSI_SDR_ADDRESS_ERROR; - if (cardstatus & CARD_IS_LOCKED) - status |= SSI_SDR_LOCKED; - if (cardstatus & (LOCK_UNLOCK_FAILED | WP_ERASE_SKIP)) - status |= SSI_SDR_WP_ERASE; - if (cardstatus & SD_ERROR) - status |= SSI_SDR_ERROR; - if (cardstatus & CC_ERROR) - status |= SSI_SDR_CC_ERROR; - if (cardstatus & CARD_ECC_FAILED) - status |= SSI_SDR_ECC_FAILED; - if (cardstatus & WP_VIOLATION) - status |= SSI_SDR_WP_VIOLATION; - if (cardstatus & ERASE_PARAM) - status |= SSI_SDR_ERASE_PARAM; - if (cardstatus & (OUT_OF_RANGE | CID_CSD_OVERWRITE)) - status |= SSI_SDR_OUT_OF_RANGE; - /* ??? Don't know what Parameter Error really means, so - assume it's set if the second byte is nonzero. 
*/ - if (status & 0xff) - status |= SSI_SDR_PARAMETER_ERROR; - s->response[0] = status >> 8; - s->response[1] = status; - DPRINTF("Card status 0x%02x\n", status); + /* handle R1b (busy signal) */ + if (s->cmd == 28 || s->cmd == 29 || s->cmd == 38) { + s->stopping = 1; } s->mode = SSI_SD_PREP_RESP; s->response_pos = 0; @@ -333,7 +263,7 @@ static int ssi_sd_post_load(void *opaque, int version_id) return -EINVAL; } if (s->mode == SSI_SD_CMDARG && - (s->arglen < 0 || s->arglen >= ARRAY_SIZE(s->cmdarg))) { + (s->arglen >= ARRAY_SIZE(s->cmdarg))) { return -EINVAL; } if (s->mode == SSI_SD_RESPONSE && diff --git a/hw/sd/trace-events b/hw/sd/trace-events index db0644256d913..8d49840917e22 100644 --- a/hw/sd/trace-events +++ b/hw/sd/trace-events @@ -37,8 +37,8 @@ sdhci_write_dataport(uint16_t data_count) "write buffer filled with %u bytes of sdhci_capareg(const char *desc, uint16_t val) "%s: %u" # sd.c -sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *state) "%s %20s/ CMD%02d arg 0x%08x (state %s)" -sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *state) "%s %23s/ACMD%02d arg 0x%08x (state %s)" +sdcard_normal_command(const char *proto, const char *cmd_desc, uint8_t cmd, uint32_t arg, const char *mode, const char *state) "%s %20s/ CMD%02d arg 0x%08x (mode %s, state %s)" +sdcard_app_command(const char *proto, const char *acmd_desc, uint8_t acmd, uint32_t arg, const char *mode, const char *state) "%s %23s/ACMD%02d arg 0x%08x (mode %s, state %s)" sdcard_response(const char *rspdesc, int rsplen) "%s (sz:%d)" sdcard_powerup(void) "" sdcard_inquiry_cmd41(void) "" diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 1ac063cfb4ba8..13e21a9c43e9c 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -179,6 +179,10 @@ static const QemuOptDesc qemu_smbios_type0_opts[] = { .name = "uefi", .type = QEMU_OPT_BOOL, .help = "uefi support", + },{ + .name = "vm", + .type = 
QEMU_OPT_BOOL, + .help = "virtual machine", }, { /* end of list */ } }; @@ -574,10 +578,14 @@ static void smbios_build_type_0_table(void) t->bios_characteristics = cpu_to_le64(0x08); /* Not supported */ t->bios_characteristics_extension_bytes[0] = 0; - t->bios_characteristics_extension_bytes[1] = 0x14; /* TCD/SVVP | VM */ + + t->bios_characteristics_extension_bytes[1] = 0x04; /* TCD/SVVP */ if (smbios_type0.uefi) { t->bios_characteristics_extension_bytes[1] |= 0x08; /* |= UEFI */ } + if (smbios_type0.vm) { + t->bios_characteristics_extension_bytes[1] |= 0x10; /* |= VM */ + } if (smbios_type0.have_major_minor) { t->system_bios_major_release = smbios_type0.major; @@ -1405,6 +1413,7 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) save_opt(&smbios_type0.version, opts, "version"); save_opt(&smbios_type0.date, opts, "date"); smbios_type0.uefi = qemu_opt_get_bool(opts, "uefi", false); + smbios_type0.vm = qemu_opt_get_bool(opts, "vm", true); val = qemu_opt_get(opts, "release"); if (val) { diff --git a/hw/sparc/leon3.c b/hw/sparc/leon3.c index 0aeaad3becc9a..09d2cec488c91 100644 --- a/hw/sparc/leon3.c +++ b/hw/sparc/leon3.c @@ -192,7 +192,7 @@ static void leon3_cache_control_int(CPUSPARCState *env) static void leon3_irq_ack(CPUSPARCState *env, int intno) { - CPUState *cpu = CPU(env_cpu(env)); + CPUState *cpu = env_cpu(env); grlib_irqmp_ack(env->irq_manager, cpu->cpu_index, intno); } diff --git a/hw/ssi/aspeed_smc.c b/hw/ssi/aspeed_smc.c index 614528b8ef654..e33496f502987 100644 --- a/hw/ssi/aspeed_smc.c +++ b/hw/ssi/aspeed_smc.c @@ -1857,7 +1857,8 @@ static void aspeed_1030_fmc_class_init(ObjectClass *klass, const void *data) asc->resets = aspeed_1030_fmc_resets; asc->flash_window_base = 0x80000000; asc->flash_window_size = 0x10000000; - asc->features = ASPEED_SMC_FEATURE_DMA; + asc->features = ASPEED_SMC_FEATURE_DMA | + ASPEED_SMC_FEATURE_WDT_CONTROL; asc->dma_flash_mask = 0x0FFFFFFC; asc->dma_dram_mask = 0x000BFFFC; asc->dma_start_length = 1; diff --git 
a/hw/timer/hpet.c b/hw/timer/hpet.c index cb48cc151f13b..1acba4fa9db08 100644 --- a/hw/timer/hpet.c +++ b/hw/timer/hpet.c @@ -38,6 +38,9 @@ #include "hw/timer/i8254.h" #include "system/address-spaces.h" #include "qom/object.h" +#include "qemu/lockable.h" +#include "qemu/seqlock.h" +#include "qemu/main-loop.h" #include "trace.h" struct hpet_fw_config hpet_fw_cfg = {.count = UINT8_MAX}; @@ -69,9 +72,11 @@ struct HPETState { SysBusDevice parent_obj; /*< public >*/ + QemuMutex lock; MemoryRegion iomem; uint64_t hpet_offset; bool hpet_offset_saved; + QemuSeqLock state_version; qemu_irq irqs[HPET_NUM_IRQ_ROUTES]; uint32_t flags; uint8_t rtc_irq_level; @@ -218,12 +223,15 @@ static void update_irq(struct HPETTimer *timer, int set) timer->fsb & 0xffffffff, MEMTXATTRS_UNSPECIFIED, NULL); } else if (timer->config & HPET_TN_TYPE_LEVEL) { + BQL_LOCK_GUARD(); qemu_irq_raise(s->irqs[route]); } else { + BQL_LOCK_GUARD(); qemu_irq_pulse(s->irqs[route]); } } else { if (!timer_fsb_route(timer)) { + BQL_LOCK_GUARD(); qemu_irq_lower(s->irqs[route]); } } @@ -428,6 +436,25 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, trace_hpet_ram_read(addr); addr &= ~4; + if (addr == HPET_COUNTER) { + unsigned version; + + /* + * Write update is rare, so busywait here is unlikely to happen + */ + do { + version = seqlock_read_begin(&s->state_version); + if (unlikely(!hpet_enabled(s))) { + cur_tick = s->hpet_counter; + } else { + cur_tick = hpet_get_ticks(s); + } + } while (seqlock_read_retry(&s->state_version, version)); + trace_hpet_ram_read_reading_counter(addr & 4, cur_tick); + return cur_tick >> shift; + } + + QEMU_LOCK_GUARD(&s->lock); /*address range of all global regs*/ if (addr <= 0xff) { switch (addr) { @@ -435,14 +462,6 @@ static uint64_t hpet_ram_read(void *opaque, hwaddr addr, return s->capability >> shift; case HPET_CFG: return s->config >> shift; - case HPET_COUNTER: - if (hpet_enabled(s)) { - cur_tick = hpet_get_ticks(s); - } else { - cur_tick = s->hpet_counter; - } - 
trace_hpet_ram_read_reading_counter(addr & 4, cur_tick); - return cur_tick >> shift; case HPET_STATUS: return s->isr >> shift; default: @@ -482,6 +501,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr, int len = MIN(size * 8, 64 - shift); uint64_t old_val, new_val, cleared; + QEMU_LOCK_GUARD(&s->lock); trace_hpet_ram_write(addr, value); addr &= ~4; @@ -494,6 +514,7 @@ static void hpet_ram_write(void *opaque, hwaddr addr, old_val = s->config; new_val = deposit64(old_val, shift, len, value); new_val = hpet_fixup_reg(new_val, old_val, HPET_CFG_WRITE_MASK); + seqlock_write_begin(&s->state_version); s->config = new_val; if (activating_bit(old_val, new_val, HPET_CFG_ENABLE)) { /* Enable main counter and interrupt generation. */ @@ -512,13 +533,17 @@ static void hpet_ram_write(void *opaque, hwaddr addr, hpet_del_timer(&s->timer[i]); } } + seqlock_write_end(&s->state_version); + /* i8254 and RTC output pins are disabled * when HPET is in legacy mode */ if (activating_bit(old_val, new_val, HPET_CFG_LEGACY)) { + BQL_LOCK_GUARD(); qemu_set_irq(s->pit_enabled, 0); qemu_irq_lower(s->irqs[0]); qemu_irq_lower(s->irqs[RTC_ISA_IRQ]); } else if (deactivating_bit(old_val, new_val, HPET_CFG_LEGACY)) { + BQL_LOCK_GUARD(); qemu_irq_lower(s->irqs[0]); qemu_set_irq(s->pit_enabled, 1); qemu_set_irq(s->irqs[RTC_ISA_IRQ], s->rtc_irq_level); @@ -664,11 +689,13 @@ static void hpet_handle_legacy_irq(void *opaque, int n, int level) if (n == HPET_LEGACY_PIT_INT) { if (!hpet_in_legacy_mode(s)) { + BQL_LOCK_GUARD(); qemu_set_irq(s->irqs[0], level); } } else { s->rtc_irq_level = level; if (!hpet_in_legacy_mode(s)) { + BQL_LOCK_GUARD(); qemu_set_irq(s->irqs[RTC_ISA_IRQ], level); } } @@ -679,8 +706,11 @@ static void hpet_init(Object *obj) SysBusDevice *sbd = SYS_BUS_DEVICE(obj); HPETState *s = HPET(obj); + qemu_mutex_init(&s->lock); + seqlock_init(&s->state_version); /* HPET Area */ memory_region_init_io(&s->iomem, obj, &hpet_ram_ops, s, "hpet", HPET_LEN); + 
memory_region_enable_lockless_io(&s->iomem); sysbus_init_mmio(sbd, &s->iomem); } diff --git a/hw/timer/i8254.c b/hw/timer/i8254.c index 4b25c487f7971..7033ebf50da40 100644 --- a/hw/timer/i8254.c +++ b/hw/timer/i8254.c @@ -29,6 +29,7 @@ #include "hw/timer/i8254.h" #include "hw/timer/i8254_internal.h" #include "qom/object.h" +#include "trace.h" //#define DEBUG_PIT @@ -130,6 +131,8 @@ static void pit_ioport_write(void *opaque, hwaddr addr, int channel, access; PITChannelState *s; + trace_pit_ioport_write(addr, val); + addr &= 3; if (addr == 3) { channel = val >> 6; @@ -248,6 +251,9 @@ static uint64_t pit_ioport_read(void *opaque, hwaddr addr, break; } } + + trace_pit_ioport_read(addr, ret); + return ret; } diff --git a/hw/timer/i8254_common.c b/hw/timer/i8254_common.c index ad091594cdeeb..419d4cd6e572e 100644 --- a/hw/timer/i8254_common.c +++ b/hw/timer/i8254_common.c @@ -32,9 +32,8 @@ #include "migration/vmstate.h" /* val must be 0 or 1 */ -void pit_set_gate(ISADevice *dev, int channel, int val) +void pit_set_gate(PITCommonState *pit, int channel, int val) { - PITCommonState *pit = PIT_COMMON(dev); PITChannelState *s = &pit->channels[channel]; PITCommonClass *c = PIT_COMMON_GET_CLASS(pit); @@ -139,9 +138,8 @@ void pit_get_channel_info_common(PITCommonState *s, PITChannelState *sc, info->out = pit_get_out(sc, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL)); } -void pit_get_channel_info(ISADevice *dev, int channel, PITChannelInfo *info) +void pit_get_channel_info(PITCommonState *pit, int channel, PITChannelInfo *info) { - PITCommonState *pit = PIT_COMMON(dev); PITChannelState *s = &pit->channels[channel]; PITCommonClass *c = PIT_COMMON_GET_CLASS(pit); diff --git a/hw/timer/trace-events b/hw/timer/trace-events index c5b6db49f5879..2bb51f95ea821 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -49,6 +49,10 @@ cmsdk_apb_dualtimer_read(uint64_t offset, uint64_t data, unsigned size) "CMSDK A cmsdk_apb_dualtimer_write(uint64_t offset, uint64_t data, unsigned size) 
"CMSDK APB dualtimer write: offset 0x%" PRIx64 " data 0x%" PRIx64 " size %u" cmsdk_apb_dualtimer_reset(void) "CMSDK APB dualtimer: reset" +# i8254.c +pit_ioport_read(uint8_t addr, uint32_t value) "[0x%" PRIx8 "] -> 0x%" PRIx32 +pit_ioport_write(uint8_t addr, uint32_t value) "[0x%" PRIx8 "] <- 0x%" PRIx32 + # imx_gpt.c imx_gpt_set_freq(uint32_t clksrc, uint32_t freq) "Setting clksrc %u to %u Hz" imx_gpt_read(const char *name, uint64_t value) "%s -> 0x%08" PRIx64 diff --git a/hw/uefi/meson.build b/hw/uefi/meson.build index 91eb95f89e6dc..c8f38dfae2476 100644 --- a/hw/uefi/meson.build +++ b/hw/uefi/meson.build @@ -1,4 +1,4 @@ -system_ss.add(files('hardware-info.c')) +system_ss.add(files('hardware-info.c', 'ovmf-log.c')) uefi_vars_ss = ss.source_set() if (config_all_devices.has_key('CONFIG_UEFI_VARS')) diff --git a/hw/uefi/ovmf-log.c b/hw/uefi/ovmf-log.c new file mode 100644 index 0000000000000..98ebb0209491c --- /dev/null +++ b/hw/uefi/ovmf-log.c @@ -0,0 +1,286 @@ +/* + * SPDX-License-Identifier: GPL-2.0-or-later + * + * print ovmf debug log + * + * see OvmfPkg/Library/MemDebugLogLib/ in edk2 + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qemu/target-info-qapi.h" +#include "hw/boards.h" +#include "hw/i386/x86.h" +#include "hw/arm/virt.h" +#include "system/dma.h" +#include "monitor/hmp.h" +#include "monitor/monitor.h" +#include "qapi/error.h" +#include "qapi/type-helpers.h" +#include "qapi/qapi-commands-machine.h" +#include "qobject/qdict.h" + + +/* ----------------------------------------------------------------------- */ +/* copy from edk2 */ + +#define MEM_DEBUG_LOG_MAGIC1 0x3167646d666d766f /* "ovmfmdg1" */ +#define MEM_DEBUG_LOG_MAGIC2 0x3267646d666d766f /* "ovmfmdg2" */ + +/* + * Mem Debug Log buffer header. + * The Log buffer is circular. Only the most + * recent messages are retained. Older messages + * will be discarded if the buffer overflows. + * The Debug Log starts just after the header. 
+ */ +typedef struct { + /* + * Magic values + * These fields are used by tools to locate the buffer in + * memory. These MUST be the first two fields of the structure. + * Use a 128 bit Magic to vastly reduce the possibility of + * a collision with random data in memory. + */ + uint64_t Magic1; + uint64_t Magic2; + /* + * Header Size + * This MUST be the third field of the structure + */ + uint64_t HeaderSize; + /* + * Debug log size (minus header) + */ + uint64_t DebugLogSize; + /* + * edk2 uses this for locking access. + */ + uint64_t MemDebugLogLock; + /* + * Debug log head offset + */ + uint64_t DebugLogHeadOffset; + /* + * Debug log tail offset + */ + uint64_t DebugLogTailOffset; + /* + * Flag to indicate if the buffer wrapped and was thus truncated. + */ + uint64_t Truncated; + /* + * Firmware Build Version (PcdFirmwareVersionString) + */ + char FirmwareVersion[128]; +} MEM_DEBUG_LOG_HDR; + + +/* ----------------------------------------------------------------------- */ +/* qemu monitor command */ + +typedef struct { + uint64_t magic1; + uint64_t magic2; +} MemDebugLogMagic; + +/* find log buffer in guest memory by searching for the magic cookie */ +static dma_addr_t find_ovmf_log_range(dma_addr_t start, dma_addr_t end) +{ + static const MemDebugLogMagic magic = { + .magic1 = MEM_DEBUG_LOG_MAGIC1, + .magic2 = MEM_DEBUG_LOG_MAGIC2, + }; + MemDebugLogMagic check; + dma_addr_t step = 4 * KiB; + dma_addr_t offset; + + for (offset = start; offset < end; offset += step) { + if (dma_memory_read(&address_space_memory, offset, + &check, sizeof(check), + MEMTXATTRS_UNSPECIFIED)) { + /* dma error -> stop searching */ + break; + } + if (memcmp(&magic, &check, sizeof(check)) == 0) { + return offset; + } + } + return (dma_addr_t)-1; +} + +static dma_addr_t find_ovmf_log(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + dma_addr_t start, end, offset; + + if (target_arch() == SYS_EMU_TARGET_X86_64 && + object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE)) { + 
X86MachineState *x86ms = X86_MACHINE(ms); + + /* early log buffer, static allocation in memfd, sec + early pei */ + offset = find_ovmf_log_range(0x800000, 0x900000); + if (offset != -1) { + return offset; + } + + /* + * normal log buffer, dynamically allocated close to end of low memory, + * late pei + dxe phase + */ + end = x86ms->below_4g_mem_size; + start = end - MIN(end, 128 * MiB); + return find_ovmf_log_range(start, end); + } + + if (target_arch() == SYS_EMU_TARGET_AARCH64 && + object_dynamic_cast(OBJECT(ms), TYPE_VIRT_MACHINE)) { + VirtMachineState *vms = VIRT_MACHINE(ms); + + /* edk2 ArmVirt firmware allocations are in the first 128 MB */ + start = vms->memmap[VIRT_MEM].base; + end = start + 128 * MiB; + return find_ovmf_log_range(start, end); + } + + return (dma_addr_t)-1; +} + +static void handle_ovmf_log_range(GString *out, + dma_addr_t start, + dma_addr_t end, + Error **errp) +{ + if (start > end) { + return; + } + + size_t len = end - start; + g_string_set_size(out, out->len + len); + if (dma_memory_read(&address_space_memory, start, + out->str + (out->len - len), + len, MEMTXATTRS_UNSPECIFIED)) { + error_setg(errp, "can not read firmware log buffer contents"); + return; + } +} + +FirmwareLog *qmp_query_firmware_log(bool have_max_size, uint64_t max_size, + Error **errp) +{ + MEM_DEBUG_LOG_HDR header; + dma_addr_t offset, base; + FirmwareLog *ret; + g_autoptr(GString) log = g_string_new(""); + + offset = find_ovmf_log(); + if (offset == -1) { + error_setg(errp, "firmware log buffer not found"); + return NULL; + } + + if (dma_memory_read(&address_space_memory, offset, + &header, sizeof(header), + MEMTXATTRS_UNSPECIFIED)) { + error_setg(errp, "can not read firmware log buffer header"); + return NULL; + } + + if (header.DebugLogHeadOffset > header.DebugLogSize || + header.DebugLogTailOffset > header.DebugLogSize) { + error_setg(errp, "firmware log buffer header is invalid"); + return NULL; + } + + if (have_max_size) { + if (max_size > MiB) { + 
error_setg(errp, "parameter 'max-size' exceeds 1MiB"); + return NULL; + } + } else { + max_size = MiB; + } + + /* adjust header.DebugLogHeadOffset so we return at most maxsize bytes */ + if (header.DebugLogHeadOffset > header.DebugLogTailOffset) { + /* wrap around */ + if (header.DebugLogTailOffset > max_size) { + header.DebugLogHeadOffset = header.DebugLogTailOffset - max_size; + } else { + uint64_t max_chunk = max_size - header.DebugLogTailOffset; + if (header.DebugLogSize > max_chunk && + header.DebugLogHeadOffset < header.DebugLogSize - max_chunk) { + header.DebugLogHeadOffset = header.DebugLogSize - max_chunk; + } + } + } else { + if (header.DebugLogTailOffset > max_size && + header.DebugLogHeadOffset < header.DebugLogTailOffset - max_size) { + header.DebugLogHeadOffset = header.DebugLogTailOffset - max_size; + } + } + + base = offset + header.HeaderSize; + if (header.DebugLogHeadOffset > header.DebugLogTailOffset) { + /* wrap around */ + handle_ovmf_log_range(log, + base + header.DebugLogHeadOffset, + base + header.DebugLogSize, + errp); + if (*errp) { + return NULL; + } + handle_ovmf_log_range(log, + base + 0, + base + header.DebugLogTailOffset, + errp); + if (*errp) { + return NULL; + } + } else { + handle_ovmf_log_range(log, + base + header.DebugLogHeadOffset, + base + header.DebugLogTailOffset, + errp); + if (*errp) { + return NULL; + } + } + + ret = g_new0(FirmwareLog, 1); + if (header.FirmwareVersion[0] != '\0') { + ret->version = g_strndup(header.FirmwareVersion, + sizeof(header.FirmwareVersion)); + } + ret->log = g_base64_encode((const guchar *)log->str, log->len); + return ret; +} + +void hmp_info_firmware_log(Monitor *mon, const QDict *qdict) +{ + g_autofree gchar *log_esc = NULL; + g_autofree guchar *log_out = NULL; + Error *err = NULL; + FirmwareLog *log; + gsize log_len; + int64_t maxsize; + + maxsize = qdict_get_try_int(qdict, "max-size", -1); + log = qmp_query_firmware_log(maxsize != -1, (uint64_t)maxsize, &err); + if (err) { + 
hmp_handle_error(mon, err); + return; + } + + g_assert(log != NULL); + g_assert(log->log != NULL); + + if (log->version) { + g_autofree gchar *esc = g_strescape(log->version, NULL); + monitor_printf(mon, "[ firmware version: %s ]\n", esc); + } + + log_out = g_base64_decode(log->log, &log_len); + log_esc = g_strescape((gchar *)log_out, "\r\n"); + monitor_printf(mon, "%s\n", log_esc); +} diff --git a/hw/uefi/var-service-core.c b/hw/uefi/var-service-core.c index feec5a59583b5..6ab8df091aaf4 100644 --- a/hw/uefi/var-service-core.c +++ b/hw/uefi/var-service-core.c @@ -259,8 +259,8 @@ static void uefi_vars_write(void *opaque, hwaddr addr, uint64_t val, unsigned si uv->buf_size = val; g_free(uv->buffer); g_free(uv->pio_xfer_buffer); - uv->buffer = g_malloc(uv->buf_size); - uv->pio_xfer_buffer = g_malloc(uv->buf_size); + uv->buffer = g_malloc0(uv->buf_size); + uv->pio_xfer_buffer = g_malloc0(uv->buf_size); break; case UEFI_VARS_REG_DMA_BUFFER_ADDR_LO: uv->buf_addr_lo = val; diff --git a/hw/uefi/var-service-json.c b/hw/uefi/var-service-json.c index ad3462cd15575..f5f155683334c 100644 --- a/hw/uefi/var-service-json.c +++ b/hw/uefi/var-service-json.c @@ -172,7 +172,7 @@ static GString *uefi_vars_to_json(uefi_vars_state *uv) void uefi_vars_json_init(uefi_vars_state *uv, Error **errp) { if (uv->jsonfile) { - uv->jsonfd = qemu_create(uv->jsonfile, O_RDWR, 0666, errp); + uv->jsonfd = qemu_create(uv->jsonfile, O_RDWR | O_BINARY, 0666, errp); } } diff --git a/hw/uefi/var-service-vars.c b/hw/uefi/var-service-vars.c index 37d05b71cf702..8533533ea5c8c 100644 --- a/hw/uefi/var-service-vars.c +++ b/hw/uefi/var-service-vars.c @@ -357,6 +357,9 @@ uefi_vars_mm_get_next_variable(uefi_vars_state *uv, mm_header *mhdr, if (uefi_strlen(name, nv->name_size) == 0) { /* empty string -> first */ var = QTAILQ_FIRST(&uv->variables); + while (var && !check_access(uv, var)) { + var = QTAILQ_NEXT(var, next); + } if (!var) { return uefi_vars_mm_error(mhdr, mvar, EFI_NOT_FOUND); } @@ -702,12 +705,14 @@ 
uint32_t uefi_vars_mm_vars_proto(uefi_vars_state *uv) case SMM_VARIABLE_FUNCTION_READY_TO_BOOT: trace_uefi_event("ready-to-boot"); uv->ready_to_boot = true; + mvar->status = EFI_SUCCESS; length = 0; break; case SMM_VARIABLE_FUNCTION_EXIT_BOOT_SERVICE: trace_uefi_event("exit-boot-service"); uv->exit_boot_service = true; + mvar->status = EFI_SUCCESS; length = 0; break; diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c index 81cc09dcac90f..1df24541814c2 100644 --- a/hw/usb/dev-network.c +++ b/hw/usb/dev-network.c @@ -1383,7 +1383,7 @@ static void usb_net_realize(USBDevice *dev, Error **errp) qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); snprintf(s->usbstring_mac, sizeof(s->usbstring_mac), "%02x%02x%02x%02x%02x%02x", - 0x40, + s->conf.macaddr.a[0], s->conf.macaddr.a[1], s->conf.macaddr.a[2], s->conf.macaddr.a[3], diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c index 4822c704f6922..e207d0587a117 100644 --- a/hw/usb/hcd-uhci.c +++ b/hw/usb/hcd-uhci.c @@ -735,6 +735,7 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr, bool spd; bool queuing = (q != NULL); uint8_t pid = td->token & 0xff; + uint8_t ep_id = (td->token >> 15) & 0xf; UHCIAsync *async; async = uhci_async_find_td(s, td_addr); @@ -778,9 +779,14 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr, switch (pid) { case USB_TOKEN_OUT: - case USB_TOKEN_SETUP: case USB_TOKEN_IN: break; + case USB_TOKEN_SETUP: + /* SETUP is only valid to endpoint 0 */ + if (ep_id == 0) { + break; + } + /* fallthrough */ default: /* invalid pid : frame interrupted */ s->status |= UHCI_STS_HCPERR; @@ -829,7 +835,7 @@ static int uhci_handle_td(UHCIState *s, UHCIQueue *q, uint32_t qh_addr, return uhci_handle_td_error(s, td, td_addr, USB_RET_NODEV, int_mask); } - ep = usb_ep_get(dev, pid, (td->token >> 15) & 0xf); + ep = usb_ep_get(dev, pid, ep_id); q = uhci_queue_new(s, qh_addr, td, ep); } async = uhci_async_alloc(q, td_addr); diff --git 
a/hw/vfio-user/container.c b/hw/vfio-user/container.c index d589dd90f501b..e45192fef6531 100644 --- a/hw/vfio-user/container.c +++ b/hw/vfio-user/container.c @@ -22,30 +22,28 @@ * will fire during memory update transactions. These depend on BQL being held, * so do any resulting map/demap ops async while keeping BQL. */ -static void vfio_user_listener_begin(VFIOContainerBase *bcontainer) +static void vfio_user_listener_begin(VFIOContainer *bcontainer) { - VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, - bcontainer); + VFIOUserContainer *container = VFIO_IOMMU_USER(bcontainer); container->proxy->async_ops = true; } -static void vfio_user_listener_commit(VFIOContainerBase *bcontainer) +static void vfio_user_listener_commit(VFIOContainer *bcontainer) { - VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, - bcontainer); + VFIOUserContainer *container = VFIO_IOMMU_USER(bcontainer); /* wait here for any async requests sent during the transaction */ container->proxy->async_ops = false; vfio_user_wait_reqs(container->proxy); } -static int vfio_user_dma_unmap(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, +static int vfio_user_dma_unmap(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, IOMMUTLBEntry *iotlb, bool unmap_all) { - VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, - bcontainer); + VFIOUserContainer *container = VFIO_IOMMU_USER(bcontainer); + Error *local_err = NULL; int ret = 0; @@ -82,12 +80,12 @@ static int vfio_user_dma_unmap(const VFIOContainerBase *bcontainer, return ret; } -static int vfio_user_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly, +static int vfio_user_dma_map(const VFIOContainer *bcontainer, hwaddr iova, + uint64_t size, void *vaddr, bool readonly, MemoryRegion *mrp) { - VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, - bcontainer); + VFIOUserContainer 
*container = VFIO_IOMMU_USER(bcontainer); + int fd = memory_region_get_fd(mrp); Error *local_err = NULL; int ret = 0; @@ -156,14 +154,14 @@ static int vfio_user_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, } static int -vfio_user_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, +vfio_user_set_dirty_page_tracking(const VFIOContainer *bcontainer, bool start, Error **errp) { error_setg_errno(errp, ENOTSUP, "Not supported"); return -ENOTSUP; } -static int vfio_user_query_dirty_bitmap(const VFIOContainerBase *bcontainer, +static int vfio_user_query_dirty_bitmap(const VFIOContainer *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) { @@ -171,10 +169,9 @@ static int vfio_user_query_dirty_bitmap(const VFIOContainerBase *bcontainer, return -ENOTSUP; } -static bool vfio_user_setup(VFIOContainerBase *bcontainer, Error **errp) +static bool vfio_user_setup(VFIOContainer *bcontainer, Error **errp) { - VFIOUserContainer *container = container_of(bcontainer, VFIOUserContainer, - bcontainer); + VFIOUserContainer *container = VFIO_IOMMU_USER(bcontainer); assert(container->proxy->dma_pgsizes != 0); bcontainer->pgsizes = container->proxy->dma_pgsizes; @@ -205,7 +202,7 @@ static VFIOUserContainer * vfio_user_container_connect(AddressSpace *as, VFIODevice *vbasedev, Error **errp) { - VFIOContainerBase *bcontainer; + VFIOContainer *bcontainer; VFIOUserContainer *container; VFIOAddressSpace *space; VFIOIOMMUClass *vioc; @@ -218,7 +215,7 @@ vfio_user_container_connect(AddressSpace *as, VFIODevice *vbasedev, goto put_space_exit; } - bcontainer = &container->bcontainer; + bcontainer = VFIO_IOMMU(container); ret = ram_block_uncoordinated_discard_disable(true); if (ret) { @@ -263,7 +260,7 @@ vfio_user_container_connect(AddressSpace *as, VFIODevice *vbasedev, static void vfio_user_container_disconnect(VFIOUserContainer *container) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); 
VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); VFIOAddressSpace *space = bcontainer->space; @@ -291,7 +288,7 @@ static bool vfio_user_device_get(VFIOUserContainer *container, vbasedev->fd = -1; - vfio_device_prepare(vbasedev, &container->bcontainer, &info); + vfio_device_prepare(vbasedev, VFIO_IOMMU(container), &info); return true; } @@ -315,8 +312,7 @@ static bool vfio_user_device_attach(const char *name, VFIODevice *vbasedev, static void vfio_user_device_detach(VFIODevice *vbasedev) { - VFIOUserContainer *container = container_of(vbasedev->bcontainer, - VFIOUserContainer, bcontainer); + VFIOUserContainer *container = VFIO_IOMMU_USER(vbasedev->bcontainer); vfio_device_unprepare(vbasedev); diff --git a/hw/vfio-user/container.h b/hw/vfio-user/container.h index 2bb1fa13431c2..a2b42e3169dfb 100644 --- a/hw/vfio-user/container.h +++ b/hw/vfio-user/container.h @@ -9,14 +9,15 @@ #include "qemu/osdep.h" -#include "hw/vfio/vfio-container-base.h" +#include "hw/vfio/vfio-container.h" #include "hw/vfio-user/proxy.h" /* MMU container sub-class for vfio-user. 
*/ -typedef struct VFIOUserContainer { - VFIOContainerBase bcontainer; +struct VFIOUserContainer { + VFIOContainer parent_obj; + VFIOUserProxy *proxy; -} VFIOUserContainer; +}; OBJECT_DECLARE_SIMPLE_TYPE(VFIOUserContainer, VFIO_IOMMU_USER); diff --git a/hw/vfio-user/device.c b/hw/vfio-user/device.c index 0609a7dc25428..64ef35b320942 100644 --- a/hw/vfio-user/device.c +++ b/hw/vfio-user/device.c @@ -134,7 +134,7 @@ static int vfio_user_device_io_get_region_info(VFIODevice *vbasedev, VFIOUserFDs fds = { 0, 1, fd}; int ret; - if (info->index > vbasedev->num_regions) { + if (info->index > vbasedev->num_initial_regions) { return -EINVAL; } diff --git a/hw/vfio-user/pci.c b/hw/vfio-user/pci.c index be71c777291f0..b53ed3b456f9c 100644 --- a/hw/vfio-user/pci.c +++ b/hw/vfio-user/pci.c @@ -20,7 +20,8 @@ OBJECT_DECLARE_SIMPLE_TYPE(VFIOUserPCIDevice, VFIO_USER_PCI) struct VFIOUserPCIDevice { - VFIOPCIDevice device; + VFIOPCIDevice parent_obj; + SocketAddress *socket; bool send_queued; /* all sends are queued */ uint32_t wait_time; /* timeout for message replies */ @@ -64,7 +65,7 @@ static void vfio_user_msix_setup(VFIOPCIDevice *vdev) vdev->msix->pba_region = pba_reg; vfio_reg = vdev->bars[vdev->msix->pba_bar].mr; - msix_reg = &vdev->pdev.msix_pba_mmio; + msix_reg = &PCI_DEVICE(vdev)->msix_pba_mmio; memory_region_init_io(pba_reg, OBJECT(vdev), &vfio_user_pba_ops, vdev, "VFIO MSIX PBA", int128_get64(msix_reg->size)); memory_region_add_subregion_overlap(vfio_reg, vdev->msix->pba_offset, @@ -85,7 +86,7 @@ static void vfio_user_msix_teardown(VFIOPCIDevice *vdev) static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); VFIOUserProxy *proxy = vdev->vbasedev.proxy; VFIOUserDMARW *res; MemTxResult r; @@ -133,7 +134,7 @@ static void vfio_user_dma_read(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) static void vfio_user_dma_write(VFIOPCIDevice *vdev, VFIOUserDMARW *msg) { - PCIDevice *pdev = &vdev->pdev; 
+ PCIDevice *pdev = PCI_DEVICE(vdev); VFIOUserProxy *proxy = vdev->vbasedev.proxy; MemTxResult r; @@ -213,8 +214,9 @@ static void vfio_user_compute_needs_reset(VFIODevice *vbasedev) static Object *vfio_user_pci_get_object(VFIODevice *vbasedev) { - VFIOUserPCIDevice *vdev = container_of(vbasedev, VFIOUserPCIDevice, - device.vbasedev); + VFIOUserPCIDevice *vdev = VFIO_USER_PCI(container_of(vbasedev, + VFIOPCIDevice, + vbasedev)); return OBJECT(vdev); } @@ -232,9 +234,10 @@ static void vfio_user_pci_realize(PCIDevice *pdev, Error **errp) { ERRP_GUARD(); VFIOUserPCIDevice *udev = VFIO_USER_PCI(pdev); - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIODevice *vbasedev = &vdev->vbasedev; const char *sock_name; + AddressSpace *as; SocketAddress addr; VFIOUserProxy *proxy; @@ -341,10 +344,10 @@ static void vfio_user_pci_realize(PCIDevice *pdev, Error **errp) vfio_pci_put_device(vdev); } -static void vfio_user_instance_init(Object *obj) +static void vfio_user_pci_init(Object *obj) { PCIDevice *pci_dev = PCI_DEVICE(obj); - VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(obj); VFIODevice *vbasedev = &vdev->vbasedev; device_add_bootindex_property(obj, &vdev->bootindex, @@ -367,9 +370,9 @@ static void vfio_user_instance_init(Object *obj) pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; } -static void vfio_user_instance_finalize(Object *obj) +static void vfio_user_pci_finalize(Object *obj) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(obj); VFIODevice *vbasedev = &vdev->vbasedev; if (vdev->msix != NULL) { @@ -385,7 +388,7 @@ static void vfio_user_instance_finalize(Object *obj) static void vfio_user_pci_reset(DeviceState *dev) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(dev); VFIODevice *vbasedev = &vdev->vbasedev; vfio_pci_pre_reset(vdev); @@ -397,7 +400,7 @@ static void vfio_user_pci_reset(DeviceState *dev) 
vfio_pci_post_reset(vdev); } -static const Property vfio_user_pci_dev_properties[] = { +static const Property vfio_user_pci_properties[] = { DEFINE_PROP_UINT32("x-pci-vendor-id", VFIOPCIDevice, vendor_id, PCI_ANY_ID), DEFINE_PROP_UINT32("x-pci-device-id", VFIOPCIDevice, @@ -406,6 +409,8 @@ static const Property vfio_user_pci_dev_properties[] = { sub_vendor_id, PCI_ANY_ID), DEFINE_PROP_UINT32("x-pci-sub-device-id", VFIOPCIDevice, sub_device_id, PCI_ANY_ID), + DEFINE_PROP_UINT32("x-pci-class-code", VFIOPCIDevice, + class_code, PCI_ANY_ID), DEFINE_PROP_BOOL("x-send-queued", VFIOUserPCIDevice, send_queued, false), DEFINE_PROP_UINT32("x-msg-timeout", VFIOUserPCIDevice, wait_time, 5000), DEFINE_PROP_BOOL("x-no-posted-writes", VFIOUserPCIDevice, no_post, false), @@ -417,7 +422,7 @@ static void vfio_user_pci_set_socket(Object *obj, Visitor *v, const char *name, VFIOUserPCIDevice *udev = VFIO_USER_PCI(obj); bool success; - if (udev->device.vbasedev.proxy) { + if (VFIO_PCI_DEVICE(udev)->vbasedev.proxy) { error_setg(errp, "Proxy is connected"); return; } @@ -441,13 +446,13 @@ static void vfio_user_pci_set_socket(Object *obj, Visitor *v, const char *name, } } -static void vfio_user_pci_dev_class_init(ObjectClass *klass, const void *data) +static void vfio_user_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); device_class_set_legacy_reset(dc, vfio_user_pci_reset); - device_class_set_props(dc, vfio_user_pci_dev_properties); + device_class_set_props(dc, vfio_user_pci_properties); object_class_property_add(klass, "socket", "SocketAddress", NULL, vfio_user_pci_set_socket, NULL, NULL); @@ -458,18 +463,18 @@ static void vfio_user_pci_dev_class_init(ObjectClass *klass, const void *data) pdc->realize = vfio_user_pci_realize; } -static const TypeInfo vfio_user_pci_dev_info = { +static const TypeInfo vfio_user_pci_info = { .name = TYPE_VFIO_USER_PCI, - .parent = TYPE_VFIO_PCI_BASE, + .parent = 
TYPE_VFIO_PCI_DEVICE, .instance_size = sizeof(VFIOUserPCIDevice), - .class_init = vfio_user_pci_dev_class_init, - .instance_init = vfio_user_instance_init, - .instance_finalize = vfio_user_instance_finalize, + .class_init = vfio_user_pci_class_init, + .instance_init = vfio_user_pci_init, + .instance_finalize = vfio_user_pci_finalize, }; static void register_vfio_user_dev_type(void) { - type_register_static(&vfio_user_pci_dev_info); + type_register_static(&vfio_user_pci_info); } - type_init(register_vfio_user_dev_type) +type_init(register_vfio_user_dev_type) diff --git a/hw/vfio-user/proxy.c b/hw/vfio-user/proxy.c index 2275d3fe395f2..bbd7ec243d653 100644 --- a/hw/vfio-user/proxy.c +++ b/hw/vfio-user/proxy.c @@ -885,11 +885,12 @@ VFIOUserProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp) sioc = qio_channel_socket_new(); ioc = QIO_CHANNEL(sioc); - if (qio_channel_socket_connect_sync(sioc, addr, errp)) { - object_unref(OBJECT(ioc)); - return NULL; + if (qio_channel_socket_connect_sync(sioc, addr, errp) < 0) { + goto fail; + } + if (!qio_channel_set_blocking(ioc, false, errp)) { + goto fail; } - qio_channel_set_blocking(ioc, false, NULL); proxy = g_malloc0(sizeof(VFIOUserProxy)); proxy->sockname = g_strdup_printf("unix:%s", sockname); @@ -923,6 +924,10 @@ VFIOUserProxy *vfio_user_connect_dev(SocketAddress *addr, Error **errp) QLIST_INSERT_HEAD(&vfio_user_sockets, proxy, next); return proxy; + +fail: + object_unref(OBJECT(ioc)); + return NULL; } void vfio_user_set_handler(VFIODevice *vbasedev, diff --git a/hw/vfio/Kconfig b/hw/vfio/Kconfig index 91d9023b79b59..27de24e4db1f0 100644 --- a/hw/vfio/Kconfig +++ b/hw/vfio/Kconfig @@ -17,22 +17,6 @@ config VFIO_CCW select VFIO depends on LINUX && S390_CCW_VIRTIO -config VFIO_PLATFORM - bool - default y - select VFIO - depends on LINUX && PLATFORM_BUS - -config VFIO_XGMAC - bool - default y - depends on VFIO_PLATFORM - -config VFIO_AMD_XGBE - bool - default y - depends on VFIO_PLATFORM - config VFIO_AP bool default 
y diff --git a/hw/vfio/amd-xgbe.c b/hw/vfio/amd-xgbe.c deleted file mode 100644 index 58f590e385b90..0000000000000 --- a/hw/vfio/amd-xgbe.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * AMD XGBE VFIO device - * - * Copyright Linaro Limited, 2015 - * - * Authors: - * Eric Auger - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include "hw/vfio/vfio-amd-xgbe.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "qemu/error-report.h" - -static void amd_xgbe_realize(DeviceState *dev, Error **errp) -{ - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); - VFIOAmdXgbeDeviceClass *k = VFIO_AMD_XGBE_DEVICE_GET_CLASS(dev); - - warn_report("-device vfio-amd-xgbe is deprecated"); - vdev->compat = g_strdup("amd,xgbe-seattle-v1a"); - vdev->num_compat = 1; - - k->parent_realize(dev, errp); -} - -static const VMStateDescription vfio_platform_amd_xgbe_vmstate = { - .name = "vfio-amd-xgbe", - .unmigratable = 1, -}; - -static void vfio_amd_xgbe_class_init(ObjectClass *klass, const void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - VFIOAmdXgbeDeviceClass *vcxc = - VFIO_AMD_XGBE_DEVICE_CLASS(klass); - device_class_set_parent_realize(dc, amd_xgbe_realize, - &vcxc->parent_realize); - dc->desc = "VFIO AMD XGBE"; - dc->vmsd = &vfio_platform_amd_xgbe_vmstate; -} - -static const TypeInfo vfio_amd_xgbe_dev_info = { - .name = TYPE_VFIO_AMD_XGBE, - .parent = TYPE_VFIO_PLATFORM, - .instance_size = sizeof(VFIOAmdXgbeDevice), - .class_init = vfio_amd_xgbe_class_init, - .class_size = sizeof(VFIOAmdXgbeDeviceClass), -}; - -static void register_amd_xgbe_dev_type(void) -{ - type_register_static(&vfio_amd_xgbe_dev_info); -} - -type_init(register_amd_xgbe_dev_type) diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c deleted file mode 100644 index 03f2ff57630b5..0000000000000 --- a/hw/vfio/calxeda-xgmac.c +++ /dev/null @@ -1,61 +0,0 @@ -/* - * calxeda xgmac 
VFIO device - * - * Copyright Linaro Limited, 2014 - * - * Authors: - * Eric Auger - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - */ - -#include "qemu/osdep.h" -#include "hw/vfio/vfio-calxeda-xgmac.h" -#include "migration/vmstate.h" -#include "qemu/module.h" -#include "qemu/error-report.h" - -static void calxeda_xgmac_realize(DeviceState *dev, Error **errp) -{ - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); - VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev); - - warn_report("-device vfio-calxeda-xgmac is deprecated"); - vdev->compat = g_strdup("calxeda,hb-xgmac"); - vdev->num_compat = 1; - - k->parent_realize(dev, errp); -} - -static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = { - .name = "vfio-calxeda-xgmac", - .unmigratable = 1, -}; - -static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, const void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - VFIOCalxedaXgmacDeviceClass *vcxc = - VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass); - device_class_set_parent_realize(dc, calxeda_xgmac_realize, - &vcxc->parent_realize); - dc->desc = "VFIO Calxeda XGMAC"; - dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate; -} - -static const TypeInfo vfio_calxeda_xgmac_dev_info = { - .name = TYPE_VFIO_CALXEDA_XGMAC, - .parent = TYPE_VFIO_PLATFORM, - .instance_size = sizeof(VFIOCalxedaXgmacDevice), - .class_init = vfio_calxeda_xgmac_class_init, - .class_size = sizeof(VFIOCalxedaXgmacDeviceClass), -}; - -static void register_calxeda_xgmac_dev_type(void) -{ - type_register_static(&vfio_calxeda_xgmac_dev_info); -} - -type_init(register_calxeda_xgmac_dev_type) diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c index 9560b8d851b6b..4d9588e7aa1c0 100644 --- a/hw/vfio/ccw.c +++ b/hw/vfio/ccw.c @@ -484,9 +484,9 @@ static bool vfio_ccw_get_region(VFIOCCWDevice *vcdev, Error **errp) * We always expect at least the I/O region to be present. 
We also * may have a variable number of regions governed by capabilities. */ - if (vdev->num_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) { + if (vdev->num_initial_regions < VFIO_CCW_CONFIG_REGION_INDEX + 1) { error_setg(errp, "vfio: too few regions (%u), expected at least %u", - vdev->num_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1); + vdev->num_initial_regions, VFIO_CCW_CONFIG_REGION_INDEX + 1); return false; } diff --git a/hw/vfio/container-base.c b/hw/vfio/container-base.c deleted file mode 100644 index 56304978e1e8e..0000000000000 --- a/hw/vfio/container-base.c +++ /dev/null @@ -1,347 +0,0 @@ -/* - * VFIO BASE CONTAINER - * - * Copyright (C) 2023 Intel Corporation. - * Copyright Red Hat, Inc. 2023 - * - * Authors: Yi Liu - * Eric Auger - * - * SPDX-License-Identifier: GPL-2.0-or-later - */ - -#include -#include - -#include "qemu/osdep.h" -#include "system/tcg.h" -#include "system/ram_addr.h" -#include "qapi/error.h" -#include "qemu/error-report.h" -#include "hw/vfio/vfio-container-base.h" -#include "hw/vfio/vfio-device.h" /* vfio_device_reset_handler */ -#include "system/reset.h" -#include "vfio-helpers.h" - -#include "trace.h" - -static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = - QLIST_HEAD_INITIALIZER(vfio_address_spaces); - -VFIOAddressSpace *vfio_address_space_get(AddressSpace *as) -{ - VFIOAddressSpace *space; - - QLIST_FOREACH(space, &vfio_address_spaces, list) { - if (space->as == as) { - return space; - } - } - - /* No suitable VFIOAddressSpace, create a new one */ - space = g_malloc0(sizeof(*space)); - space->as = as; - QLIST_INIT(&space->containers); - - if (QLIST_EMPTY(&vfio_address_spaces)) { - qemu_register_reset(vfio_device_reset_handler, NULL); - } - - QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); - - return space; -} - -void vfio_address_space_put(VFIOAddressSpace *space) -{ - if (!QLIST_EMPTY(&space->containers)) { - return; - } - - QLIST_REMOVE(space, list); - g_free(space); - - if (QLIST_EMPTY(&vfio_address_spaces)) { - 
qemu_unregister_reset(vfio_device_reset_handler, NULL); - } -} - -void vfio_address_space_insert(VFIOAddressSpace *space, - VFIOContainerBase *bcontainer) -{ - QLIST_INSERT_HEAD(&space->containers, bcontainer, next); - bcontainer->space = space; -} - -int vfio_container_dma_map(VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - void *vaddr, bool readonly, MemoryRegion *mr) -{ - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - RAMBlock *rb = mr->ram_block; - int mfd = rb ? qemu_ram_get_fd(rb) : -1; - - if (mfd >= 0 && vioc->dma_map_file) { - unsigned long start = vaddr - qemu_ram_get_host_addr(rb); - unsigned long offset = qemu_ram_get_fd_offset(rb); - - return vioc->dma_map_file(bcontainer, iova, size, mfd, start + offset, - readonly); - } - g_assert(vioc->dma_map); - return vioc->dma_map(bcontainer, iova, size, vaddr, readonly, mr); -} - -int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb, bool unmap_all) -{ - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - - g_assert(vioc->dma_unmap); - return vioc->dma_unmap(bcontainer, iova, size, iotlb, unmap_all); -} - -bool vfio_container_add_section_window(VFIOContainerBase *bcontainer, - MemoryRegionSection *section, - Error **errp) -{ - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - - if (!vioc->add_window) { - return true; - } - - return vioc->add_window(bcontainer, section, errp); -} - -void vfio_container_del_section_window(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) -{ - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - - if (!vioc->del_window) { - return; - } - - return vioc->del_window(bcontainer, section); -} - -int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, - bool start, Error **errp) -{ - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - int ret; - - if (!bcontainer->dirty_pages_supported) { - return 0; - } - - 
g_assert(vioc->set_dirty_page_tracking); - if (bcontainer->dirty_pages_started == start) { - return 0; - } - - ret = vioc->set_dirty_page_tracking(bcontainer, start, errp); - if (!ret) { - bcontainer->dirty_pages_started = start; - } - - return ret; -} - -static bool vfio_container_devices_dirty_tracking_is_started( - const VFIOContainerBase *bcontainer) -{ - VFIODevice *vbasedev; - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (!vbasedev->dirty_tracking) { - return false; - } - } - - return true; -} - -bool vfio_container_dirty_tracking_is_started( - const VFIOContainerBase *bcontainer) -{ - return vfio_container_devices_dirty_tracking_is_started(bcontainer) || - bcontainer->dirty_pages_started; -} - -bool vfio_container_devices_dirty_tracking_is_supported( - const VFIOContainerBase *bcontainer) -{ - VFIODevice *vbasedev; - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) { - return false; - } - if (!vbasedev->dirty_pages_supported) { - return false; - } - } - - return true; -} - -static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, - hwaddr size, void *bitmap) -{ - uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + - sizeof(struct vfio_device_feature_dma_logging_report), - sizeof(uint64_t))] = {}; - struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; - struct vfio_device_feature_dma_logging_report *report = - (struct vfio_device_feature_dma_logging_report *)feature->data; - - report->iova = iova; - report->length = size; - report->page_size = qemu_real_host_page_size(); - report->bitmap = (uintptr_t)bitmap; - - feature->argsz = sizeof(buf); - feature->flags = VFIO_DEVICE_FEATURE_GET | - VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT; - - return vbasedev->io_ops->device_feature(vbasedev, feature); -} - -static int vfio_container_iommu_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - VFIOBitmap 
*vbmap, hwaddr iova, hwaddr size, Error **errp) -{ - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - - g_assert(vioc->query_dirty_bitmap); - return vioc->query_dirty_bitmap(bcontainer, vbmap, iova, size, - errp); -} - -static int vfio_container_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) -{ - VFIODevice *vbasedev; - int ret; - - QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - ret = vfio_device_dma_logging_report(vbasedev, iova, size, - vbmap->bitmap); - if (ret) { - error_setg_errno(errp, -ret, - "%s: Failed to get DMA logging report, iova: " - "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx, - vbasedev->name, iova, size); - - return ret; - } - } - - return 0; -} - -int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova, - uint64_t size, ram_addr_t ram_addr, Error **errp) -{ - bool all_device_dirty_tracking = - vfio_container_devices_dirty_tracking_is_supported(bcontainer); - uint64_t dirty_pages; - VFIOBitmap vbmap; - int ret; - - if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) { - cpu_physical_memory_set_dirty_range(ram_addr, size, - tcg_enabled() ? 
DIRTY_CLIENTS_ALL : - DIRTY_CLIENTS_NOCODE); - return 0; - } - - ret = vfio_bitmap_alloc(&vbmap, size); - if (ret) { - error_setg_errno(errp, -ret, - "Failed to allocate dirty tracking bitmap"); - return ret; - } - - if (all_device_dirty_tracking) { - ret = vfio_container_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size, - errp); - } else { - ret = vfio_container_iommu_query_dirty_bitmap(bcontainer, &vbmap, iova, size, - errp); - } - - if (ret) { - goto out; - } - - dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr, - vbmap.pages); - - trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, ram_addr, - dirty_pages); -out: - g_free(vbmap.bitmap); - - return ret; -} - -static gpointer copy_iova_range(gconstpointer src, gpointer data) -{ - Range *source = (Range *)src; - Range *dest = g_new(Range, 1); - - range_set_bounds(dest, range_lob(source), range_upb(source)); - return dest; -} - -GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer) -{ - assert(bcontainer); - return g_list_copy_deep(bcontainer->iova_ranges, copy_iova_range, NULL); -} - -static void vfio_container_instance_finalize(Object *obj) -{ - VFIOContainerBase *bcontainer = VFIO_IOMMU(obj); - VFIOGuestIOMMU *giommu, *tmp; - - QLIST_SAFE_REMOVE(bcontainer, next); - - QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) { - memory_region_unregister_iommu_notifier( - MEMORY_REGION(giommu->iommu_mr), &giommu->n); - QLIST_REMOVE(giommu, giommu_next); - g_free(giommu); - } - - g_list_free_full(bcontainer->iova_ranges, g_free); -} - -static void vfio_container_instance_init(Object *obj) -{ - VFIOContainerBase *bcontainer = VFIO_IOMMU(obj); - - bcontainer->error = NULL; - bcontainer->dirty_pages_supported = false; - bcontainer->dma_max_mappings = 0; - bcontainer->iova_ranges = NULL; - QLIST_INIT(&bcontainer->giommu_list); - QLIST_INIT(&bcontainer->vrdl_list); -} - -static const TypeInfo types[] = { - { - .name = TYPE_VFIO_IOMMU, - 
.parent = TYPE_OBJECT, - .instance_init = vfio_container_instance_init, - .instance_finalize = vfio_container_instance_finalize, - .instance_size = sizeof(VFIOContainerBase), - .class_size = sizeof(VFIOIOMMUClass), - .abstract = true, - }, -}; - -DEFINE_TYPES(types) diff --git a/hw/vfio/container-legacy.c b/hw/vfio/container-legacy.c new file mode 100644 index 0000000000000..8e9639603e08b --- /dev/null +++ b/hw/vfio/container-legacy.c @@ -0,0 +1,1266 @@ +/* + * generic functions used by VFIO devices + * + * Copyright Red Hat, Inc. 2012 + * + * Authors: + * Alex Williamson + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on qemu-kvm device-assignment: + * Adapted for KVM by Qumranet. + * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) + * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) + * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) + * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) + * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) + */ + +#include "qemu/osdep.h" +#include +#include + +#include "hw/vfio/vfio-device.h" +#include "system/address-spaces.h" +#include "system/memory.h" +#include "system/physmem.h" +#include "qemu/error-report.h" +#include "qemu/range.h" +#include "system/reset.h" +#include "trace.h" +#include "qapi/error.h" +#include "migration/cpr.h" +#include "migration/blocker.h" +#include "pci.h" +#include "hw/vfio/vfio-container-legacy.h" +#include "vfio-helpers.h" +#include "vfio-listener.h" + +#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio" + +typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList; +static VFIOGroupList vfio_group_list = + QLIST_HEAD_INITIALIZER(vfio_group_list); + +static int vfio_ram_block_discard_disable(VFIOLegacyContainer *container, + bool state) +{ + switch (container->iommu_type) { + case VFIO_TYPE1v2_IOMMU: + case 
VFIO_TYPE1_IOMMU: + /* + * We support coordinated discarding of RAM via the RamDiscardManager. + */ + return ram_block_uncoordinated_discard_disable(state); + default: + /* + * VFIO_SPAPR_TCE_IOMMU most probably works just fine with + * RamDiscardManager, however, it is completely untested. + * + * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does + * completely the opposite of managing mapping/pinning dynamically as + * required by RamDiscardManager. We would have to special-case sections + * with a RamDiscardManager. + */ + return ram_block_discard_disable(state); + } +} + +static int vfio_dma_unmap_bitmap(const VFIOLegacyContainer *container, + hwaddr iova, uint64_t size, + IOMMUTLBEntry *iotlb) +{ + const VFIOContainer *bcontainer = VFIO_IOMMU(container); + struct vfio_iommu_type1_dma_unmap *unmap; + struct vfio_bitmap *bitmap; + VFIOBitmap vbmap; + int ret; + + ret = vfio_bitmap_alloc(&vbmap, size); + if (ret) { + return ret; + } + + unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); + + unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); + unmap->iova = iova; + unmap->size = size; + unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; + bitmap = (struct vfio_bitmap *)&unmap->data; + + /* + * physical_memory_set_dirty_lebitmap() supports pages in bitmap of + * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize + * to qemu_real_host_page_size. 
+ */ + bitmap->pgsize = qemu_real_host_page_size(); + bitmap->size = vbmap.size; + bitmap->data = (__u64 *)vbmap.bitmap; + + if (vbmap.size > bcontainer->max_dirty_bitmap_size) { + error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); + ret = -E2BIG; + goto unmap_exit; + } + + ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); + if (!ret) { + physical_memory_set_dirty_lebitmap(vbmap.bitmap, + iotlb->translated_addr, vbmap.pages); + } else { + error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); + } + +unmap_exit: + g_free(unmap); + g_free(vbmap.bitmap); + + return ret; +} + +static int vfio_legacy_dma_unmap_one(const VFIOLegacyContainer *container, + hwaddr iova, uint64_t size, + uint32_t flags, IOMMUTLBEntry *iotlb) +{ + const VFIOContainer *bcontainer = VFIO_IOMMU(container); + struct vfio_iommu_type1_dma_unmap unmap = { + .argsz = sizeof(unmap), + .flags = flags, + .iova = iova, + .size = size, + }; + bool need_dirty_sync = false; + int ret; + Error *local_err = NULL; + + g_assert(!cpr_is_incoming()); + + if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) { + if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) && + bcontainer->dirty_pages_supported) { + return vfio_dma_unmap_bitmap(container, iova, size, iotlb); + } + + need_dirty_sync = true; + } + + if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { + return -errno; + } + + if (need_dirty_sync) { + ret = vfio_container_query_dirty_bitmap(bcontainer, iova, size, + iotlb->translated_addr, &local_err); + if (ret) { + error_report_err(local_err); + return ret; + } + } + + return 0; +} + +/* + * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 + */ +static int vfio_legacy_dma_unmap(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + IOMMUTLBEntry *iotlb, bool unmap_all) +{ + const VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + uint32_t flags = 0; + int ret; + + if (unmap_all) { + if 
(container->unmap_all_supported) { + flags = VFIO_DMA_UNMAP_FLAG_ALL; + } else { + /* The unmap ioctl doesn't accept a full 64-bit span. */ + Int128 llsize = int128_rshift(int128_2_64(), 1); + size = int128_get64(llsize); + + ret = vfio_legacy_dma_unmap_one(container, 0, size, flags, iotlb); + if (ret) { + return ret; + } + + iova = size; + } + } + + return vfio_legacy_dma_unmap_one(container, iova, size, flags, iotlb); +} + +static int vfio_legacy_dma_map(const VFIOContainer *bcontainer, hwaddr iova, + uint64_t size, void *vaddr, bool readonly, + MemoryRegion *mr) +{ + const VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + struct vfio_iommu_type1_dma_map map = { + .argsz = sizeof(map), + .flags = VFIO_DMA_MAP_FLAG_READ, + .vaddr = (__u64)(uintptr_t)vaddr, + .iova = iova, + .size = size, + }; + + if (!readonly) { + map.flags |= VFIO_DMA_MAP_FLAG_WRITE; + } + + /* + * Try the mapping, if it fails with EBUSY, unmap the region and try + * again. This shouldn't be necessary, but we sometimes see it in + * the VGA ROM space. 
+ */ + if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || + (errno == EBUSY && + vfio_legacy_dma_unmap(bcontainer, iova, size, NULL, false) == 0 && + ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { + return 0; + } + + return -errno; +} + +static int +vfio_legacy_set_dirty_page_tracking(const VFIOContainer *bcontainer, + bool start, Error **errp) +{ + const VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + int ret; + struct vfio_iommu_type1_dirty_bitmap dirty = { + .argsz = sizeof(dirty), + }; + + if (start) { + dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; + } else { + dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; + } + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); + if (ret) { + ret = -errno; + error_setg_errno(errp, errno, "Failed to set dirty tracking flag 0x%x", + dirty.flags); + } + + return ret; +} + +static int vfio_legacy_query_dirty_bitmap(const VFIOContainer *bcontainer, + VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) +{ + const VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + struct vfio_iommu_type1_dirty_bitmap *dbitmap; + struct vfio_iommu_type1_dirty_bitmap_get *range; + int ret; + + dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); + + dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); + dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; + range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; + range->iova = iova; + range->size = size; + + /* + * physical_memory_set_dirty_lebitmap() supports pages in bitmap of + * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize + * to qemu_real_host_page_size. 
+ */ + range->bitmap.pgsize = qemu_real_host_page_size(); + range->bitmap.size = vbmap->size; + range->bitmap.data = (__u64 *)vbmap->bitmap; + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); + if (ret) { + ret = -errno; + error_setg_errno(errp, errno, + "Failed to get dirty bitmap for iova: 0x%"PRIx64 + " size: 0x%"PRIx64, (uint64_t)range->iova, + (uint64_t)range->size); + } + + g_free(dbitmap); + + return ret; +} + +static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, + VFIOContainer *bcontainer) +{ + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_cap_iova_range *cap; + + hdr = vfio_get_iommu_type1_info_cap(info, + VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE); + if (!hdr) { + return false; + } + + cap = (void *)hdr; + + for (int i = 0; i < cap->nr_iovas; i++) { + Range *range = g_new(Range, 1); + + range_set_bounds(range, cap->iova_ranges[i].start, + cap->iova_ranges[i].end); + bcontainer->iova_ranges = + range_list_insert(bcontainer->iova_ranges, range); + } + + return true; +} + +static void vfio_group_add_kvm_device(VFIOGroup *group) +{ + Error *err = NULL; + + if (vfio_kvm_device_add_fd(group->fd, &err)) { + error_reportf_err(err, "group ID %d: ", group->groupid); + } +} + +static void vfio_group_del_kvm_device(VFIOGroup *group) +{ + Error *err = NULL; + + if (vfio_kvm_device_del_fd(group->fd, &err)) { + error_reportf_err(err, "group ID %d: ", group->groupid); + } +} + +/* + * vfio_get_iommu_type - selects the richest iommu_type (v2 first) + */ +static int vfio_get_iommu_type(int container_fd, + Error **errp) +{ + int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, + VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; + int i; + + for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { + if (ioctl(container_fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { + return iommu_types[i]; + } + } + error_setg(errp, "No available IOMMU models"); + return -EINVAL; +} + +/* + * vfio_get_iommu_ops - get a VFIOIOMMUClass associated 
with a type + */ +static const char *vfio_get_iommu_class_name(int iommu_type) +{ + switch (iommu_type) { + case VFIO_TYPE1v2_IOMMU: + case VFIO_TYPE1_IOMMU: + return TYPE_VFIO_IOMMU_LEGACY; + break; + case VFIO_SPAPR_TCE_v2_IOMMU: + case VFIO_SPAPR_TCE_IOMMU: + return TYPE_VFIO_IOMMU_SPAPR; + break; + default: + g_assert_not_reached(); + }; +} + +static bool vfio_set_iommu(int container_fd, int group_fd, + int *iommu_type, Error **errp) +{ + if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd)) { + error_setg_errno(errp, errno, "Failed to set group container"); + return false; + } + + while (ioctl(container_fd, VFIO_SET_IOMMU, *iommu_type)) { + if (*iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { + /* + * On sPAPR, despite the IOMMU subdriver always advertises v1 and + * v2, the running platform may not support v2 and there is no + * way to guess it until an IOMMU group gets added to the container. + * So in case it fails with v2, try v1 as a fallback. + */ + *iommu_type = VFIO_SPAPR_TCE_IOMMU; + continue; + } + error_setg_errno(errp, errno, "Failed to set iommu for container"); + return false; + } + + return true; +} + +static VFIOLegacyContainer *vfio_create_container(int fd, VFIOGroup *group, + Error **errp) +{ + int iommu_type; + const char *vioc_name; + VFIOLegacyContainer *container; + + iommu_type = vfio_get_iommu_type(fd, errp); + if (iommu_type < 0) { + return NULL; + } + + /* + * During CPR, just set the container type and skip the ioctls, as the + * container and group are already configured in the kernel. 
+ */ + if (!cpr_is_incoming() && + !vfio_set_iommu(fd, group->fd, &iommu_type, errp)) { + return NULL; + } + + vioc_name = vfio_get_iommu_class_name(iommu_type); + + container = VFIO_IOMMU_LEGACY(object_new(vioc_name)); + container->fd = fd; + container->iommu_type = iommu_type; + return container; +} + +static int vfio_get_iommu_info(VFIOLegacyContainer *container, + struct vfio_iommu_type1_info **info) +{ + + size_t argsz = sizeof(struct vfio_iommu_type1_info); + + *info = g_new0(struct vfio_iommu_type1_info, 1); +again: + (*info)->argsz = argsz; + + if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { + g_free(*info); + *info = NULL; + return -errno; + } + + if (((*info)->argsz > argsz)) { + argsz = (*info)->argsz; + *info = g_realloc(*info, argsz); + goto again; + } + + return 0; +} + +static struct vfio_info_cap_header * +vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) +{ + struct vfio_info_cap_header *hdr; + void *ptr = info; + + if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { + return NULL; + } + + for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { + if (hdr->id == id) { + return hdr; + } + } + + return NULL; +} + +static void vfio_get_iommu_info_migration(VFIOLegacyContainer *container, + struct vfio_iommu_type1_info *info) +{ + struct vfio_info_cap_header *hdr; + struct vfio_iommu_type1_info_cap_migration *cap_mig; + VFIOContainer *bcontainer = VFIO_IOMMU(container); + + hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); + if (!hdr) { + return; + } + + cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, + header); + + /* + * physical_memory_set_dirty_lebitmap() supports pages in bitmap of + * qemu_real_host_page_size to mark those dirty. 
+ */ + if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { + bcontainer->dirty_pages_supported = true; + bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; + bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; + } +} + +static bool vfio_legacy_setup(VFIOContainer *bcontainer, Error **errp) +{ + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + g_autofree struct vfio_iommu_type1_info *info = NULL; + int ret; + + ret = vfio_get_iommu_info(container, &info); + if (ret) { + error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); + return false; + } + + if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { + bcontainer->pgsizes = info->iova_pgsizes; + } else { + bcontainer->pgsizes = qemu_real_host_page_size(); + } + + if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { + bcontainer->dma_max_mappings = 65535; + } + + vfio_get_info_iova_range(info, bcontainer); + + ret = ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UNMAP_ALL); + container->unmap_all_supported = !!ret; + + vfio_get_iommu_info_migration(container, info); + return true; +} + +static bool vfio_container_attach_discard_disable( + VFIOLegacyContainer *container, VFIOGroup *group, Error **errp) +{ + int ret; + + /* + * VFIO is currently incompatible with discarding of RAM insofar as the + * madvise to purge (zap) the page from QEMU's address space does not + * interact with the memory API and therefore leaves stale virtual to + * physical mappings in the IOMMU if the page was previously pinned. We + * therefore set discarding broken for each group added to a container, + * whether the container is used individually or shared. 
This provides + * us with options to allow devices within a group to opt-in and allow + * discarding, so long as it is done consistently for a group (for instance + * if the device is an mdev device where it is known that the host vendor + * driver will never pin pages outside of the working set of the guest + * driver, which would thus not be discarding candidates). + * + * The first opportunity to induce pinning occurs here where we attempt to + * attach the group to existing containers within the AddressSpace. If any + * pages are already zapped from the virtual address space, such as from + * previous discards, new pinning will cause valid mappings to be + * re-established. Likewise, when the overall MemoryListener for a new + * container is registered, a replay of mappings within the AddressSpace + * will occur, re-establishing any previously zapped pages as well. + * + * Especially virtio-balloon is currently only prevented from discarding + * new memory, it will not yet set ram_block_discard_set_required() and + * therefore, neither stops us here or deals with the sudden memory + * consumption of inflated memory. + * + * We do support discarding of memory coordinated via the RamDiscardManager + * with some IOMMU types. vfio_ram_block_discard_disable() handles the + * details once we know which type of IOMMU we are using. 
+ */ + + ret = vfio_ram_block_discard_disable(container, true); + if (ret) { + error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); + if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { + error_report("vfio: error disconnecting group %d from" + " container", group->groupid); + } + } + return !ret; +} + +static bool vfio_container_group_add(VFIOLegacyContainer *container, + VFIOGroup *group, Error **errp) +{ + if (!vfio_container_attach_discard_disable(container, group, errp)) { + return false; + } + group->container = container; + QLIST_INSERT_HEAD(&container->group_list, group, container_next); + vfio_group_add_kvm_device(group); + /* + * Remember the container fd for each group, so we can attach to the same + * container after CPR. + */ + cpr_resave_fd("vfio_container_for_group", group->groupid, container->fd); + return true; +} + +static void vfio_container_group_del(VFIOLegacyContainer *container, + VFIOGroup *group) +{ + QLIST_REMOVE(group, container_next); + group->container = NULL; + vfio_group_del_kvm_device(group); + vfio_ram_block_discard_disable(container, false); + cpr_delete_fd("vfio_container_for_group", group->groupid); +} + +static bool vfio_container_connect(VFIOGroup *group, AddressSpace *as, + Error **errp) +{ + VFIOLegacyContainer *container; + VFIOContainer *bcontainer; + int ret, fd = -1; + VFIOAddressSpace *space; + VFIOIOMMUClass *vioc = NULL; + bool new_container = false; + bool group_was_added = false; + + space = vfio_address_space_get(as); + fd = cpr_find_fd("vfio_container_for_group", group->groupid); + + if (!cpr_is_incoming()) { + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = VFIO_IOMMU_LEGACY(bcontainer); + if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { + return vfio_container_group_add(container, group, errp); + } + } + + fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp); + if (fd < 0) { + goto fail; + } + } else { + /* + * For incoming CPR, the group is already 
attached in the kernel. + * If a container with matching fd is found, then update the + * userland group list and return. If not, then after the loop, + * create the container struct and group list. + */ + QLIST_FOREACH(bcontainer, &space->containers, next) { + container = VFIO_IOMMU_LEGACY(bcontainer); + + if (vfio_cpr_container_match(container, group, fd)) { + return vfio_container_group_add(container, group, errp); + } + } + } + + ret = ioctl(fd, VFIO_GET_API_VERSION); + if (ret != VFIO_API_VERSION) { + error_setg(errp, "supported vfio version: %d, " + "reported version: %d", VFIO_API_VERSION, ret); + goto fail; + } + + container = vfio_create_container(fd, group, errp); + if (!container) { + goto fail; + } + new_container = true; + bcontainer = VFIO_IOMMU(container); + + if (!vfio_legacy_cpr_register_container(container, errp)) { + goto fail; + } + + vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + assert(vioc->setup); + + if (!vioc->setup(bcontainer, errp)) { + goto fail; + } + + vfio_address_space_insert(space, bcontainer); + + if (!vfio_container_group_add(container, group, errp)) { + goto fail; + } + group_was_added = true; + + /* + * If CPR, register the listener later, after all state that may + * affect regions and mapping boundaries has been cpr load'ed. Later, + * the listener will invoke its callback on each flat section and call + * dma_map to supply the new vaddr, and the calls will match the mappings + * remembered by the kernel. 
+ */ + if (!cpr_is_incoming()) { + if (!vfio_listener_register(bcontainer, errp)) { + goto fail; + } + } + + bcontainer->initialized = true; + + return true; + +fail: + if (new_container) { + vfio_listener_unregister(bcontainer); + } + + if (group_was_added) { + vfio_container_group_del(container, group); + } + if (vioc && vioc->release) { + vioc->release(bcontainer); + } + if (new_container) { + vfio_legacy_cpr_unregister_container(container); + object_unref(container); + } + if (fd >= 0) { + close(fd); + } + vfio_address_space_put(space); + + return false; +} + +static void vfio_container_disconnect(VFIOGroup *group) +{ + VFIOLegacyContainer *container = group->container; + VFIOContainer *bcontainer = VFIO_IOMMU(container); + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + + QLIST_REMOVE(group, container_next); + group->container = NULL; + cpr_delete_fd("vfio_container_for_group", group->groupid); + + /* + * Explicitly release the listener first before unset container, + * since unset may destroy the backend container if it's the last + * group. + */ + if (QLIST_EMPTY(&container->group_list)) { + vfio_listener_unregister(bcontainer); + if (vioc->release) { + vioc->release(bcontainer); + } + } + + if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { + error_report("vfio: error disconnecting group %d from container", + group->groupid); + } + + if (QLIST_EMPTY(&container->group_list)) { + VFIOAddressSpace *space = bcontainer->space; + + trace_vfio_container_disconnect(container->fd); + vfio_legacy_cpr_unregister_container(container); + close(container->fd); + object_unref(container); + + vfio_address_space_put(space); + } +} + +static VFIOGroup *vfio_group_get(int groupid, AddressSpace *as, Error **errp) +{ + ERRP_GUARD(); + VFIOGroup *group; + char path[32]; + struct vfio_group_status status = { .argsz = sizeof(status) }; + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == groupid) { + /* Found it. 
Now is it already in the right context? */ + if (VFIO_IOMMU(group->container)->space->as == as) { + return group; + } else { + error_setg(errp, "group %d used in multiple address spaces", + group->groupid); + return NULL; + } + } + } + + group = g_malloc0(sizeof(*group)); + + snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); + group->fd = cpr_open_fd(path, O_RDWR, "vfio_group", groupid, errp); + if (group->fd < 0) { + goto free_group_exit; + } + + if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { + error_setg_errno(errp, errno, "failed to get group %d status", groupid); + goto close_fd_exit; + } + + if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { + error_setg(errp, "group %d is not viable", groupid); + error_append_hint(errp, + "Please ensure all devices within the iommu_group " + "are bound to their vfio bus driver.\n"); + goto close_fd_exit; + } + + group->groupid = groupid; + QLIST_INIT(&group->device_list); + + if (!vfio_container_connect(group, as, errp)) { + error_prepend(errp, "failed to setup container for group %d: ", + groupid); + goto close_fd_exit; + } + + QLIST_INSERT_HEAD(&vfio_group_list, group, next); + + return group; + +close_fd_exit: + cpr_delete_fd("vfio_group", groupid); + close(group->fd); + +free_group_exit: + g_free(group); + + return NULL; +} + +static void vfio_group_put(VFIOGroup *group) +{ + if (!group || !QLIST_EMPTY(&group->device_list)) { + return; + } + + if (!group->ram_block_discard_allowed) { + vfio_ram_block_discard_disable(group->container, false); + } + vfio_group_del_kvm_device(group); + vfio_container_disconnect(group); + QLIST_REMOVE(group, next); + trace_vfio_group_put(group->fd); + cpr_delete_fd("vfio_group", group->groupid); + close(group->fd); + g_free(group); +} + +static bool vfio_device_get(VFIOGroup *group, const char *name, + VFIODevice *vbasedev, Error **errp) +{ + g_autofree struct vfio_device_info *info = NULL; + int fd; + + fd = vfio_cpr_group_get_device_fd(group->fd, name); + if (fd < 0) { + 
error_setg_errno(errp, errno, "error getting device from group %d", + group->groupid); + error_append_hint(errp, + "Verify all devices in group %d are bound to vfio- " + "or pci-stub and not already in use\n", group->groupid); + return false; + } + + info = vfio_get_device_info(fd); + if (!info) { + error_setg_errno(errp, errno, "error getting device info"); + goto fail; + } + + /* + * Set discarding of RAM as not broken for this group if the driver knows + * the device operates compatibly with discarding. Setting must be + * consistent per group, but since compatibility is really only possible + * with mdev currently, we expect singleton groups. + */ + if (vbasedev->ram_block_discard_allowed != + group->ram_block_discard_allowed) { + if (!QLIST_EMPTY(&group->device_list)) { + error_setg(errp, "Inconsistent setting of support for discarding " + "RAM (e.g., balloon) within group"); + goto fail; + } + + if (!group->ram_block_discard_allowed) { + group->ram_block_discard_allowed = true; + vfio_ram_block_discard_disable(group->container, false); + } + } + + vfio_device_prepare(vbasedev, VFIO_IOMMU(group->container), info); + + vbasedev->fd = fd; + vbasedev->group = group; + QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); + + trace_vfio_device_get(name, info->flags, info->num_regions, info->num_irqs); + + return true; + +fail: + close(fd); + cpr_delete_fd(name, 0); + return false; +} + +static void vfio_device_put(VFIODevice *vbasedev) +{ + if (!vbasedev->group) { + return; + } + QLIST_REMOVE(vbasedev, next); + vbasedev->group = NULL; + trace_vfio_device_put(vbasedev->fd); + cpr_delete_fd(vbasedev->name, 0); + close(vbasedev->fd); +} + +static int vfio_device_get_groupid(VFIODevice *vbasedev, Error **errp) +{ + char *tmp, group_path[PATH_MAX]; + g_autofree char *group_name = NULL; + int ret, groupid; + ssize_t len; + + tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); + len = readlink(tmp, group_path, sizeof(group_path)); + g_free(tmp); + + if (len <= 
0 || len >= sizeof(group_path)) { + ret = len < 0 ? -errno : -ENAMETOOLONG; + error_setg_errno(errp, -ret, "no iommu_group found"); + return ret; + } + + group_path[len] = 0; + + group_name = g_path_get_basename(group_path); + if (sscanf(group_name, "%d", &groupid) != 1) { + error_setg_errno(errp, errno, "failed to read %s", group_path); + return -errno; + } + return groupid; +} + +/* + * vfio_device_attach: attach a device to a security context + * @name and @vbasedev->name are likely to be different depending + * on the type of the device, hence the need for passing @name + */ +static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp) +{ + int groupid = vfio_device_get_groupid(vbasedev, errp); + VFIODevice *vbasedev_iter; + VFIOGroup *group; + + if (groupid < 0) { + return false; + } + + trace_vfio_device_attach(vbasedev->name, groupid); + + group = vfio_group_get(groupid, as, errp); + if (!group) { + return false; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { + error_setg(errp, "device is already attached"); + goto group_put_exit; + } + } + if (!vfio_device_get(group, name, vbasedev, errp)) { + goto group_put_exit; + } + + if (!vfio_device_hiod_create_and_realize(vbasedev, + TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO, + errp)) { + goto device_put_exit; + } + + if (vbasedev->mdev) { + error_setg(&vbasedev->cpr.mdev_blocker, + "CPR does not support vfio mdev %s", vbasedev->name); + if (migrate_add_blocker_modes(&vbasedev->cpr.mdev_blocker, errp, + MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC, + -1) < 0) { + goto hiod_unref_exit; + } + } + + return true; + +hiod_unref_exit: + object_unref(vbasedev->hiod); +device_put_exit: + vfio_device_put(vbasedev); +group_put_exit: + vfio_group_put(group); + return false; +} + +static void vfio_legacy_detach_device(VFIODevice *vbasedev) +{ + VFIOGroup *group = vbasedev->group; + + 
trace_vfio_device_detach(vbasedev->name, group->groupid); + + vfio_device_unprepare(vbasedev); + + migrate_del_blocker(&vbasedev->cpr.mdev_blocker); + object_unref(vbasedev->hiod); + vfio_device_put(vbasedev); + vfio_group_put(group); +} + +static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) +{ + VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + VFIOGroup *group; + struct vfio_pci_hot_reset_info *info = NULL; + struct vfio_pci_dependent_device *devices; + struct vfio_pci_hot_reset *reset; + int32_t *fds; + int ret, i, count; + bool multi = false; + + trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); + + if (!single) { + vfio_pci_pre_reset(vdev); + } + vdev->vbasedev.needs_reset = false; + + ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); + + if (ret) { + goto out_single; + } + devices = &info->devices[0]; + + trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); + + /* Verify that we have all the groups required */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + trace_vfio_pci_hot_reset_dep_devices(host.domain, + host.bus, host.slot, host.function, devices[i].group_id); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + if (!vdev->has_pm_reset) { + error_report("vfio: Cannot reset device %s, " + "depends on group %d which is not owned.", + vdev->vbasedev.name, devices[i].group_id); + } + ret = -EPERM; + goto out; + } + + /* Prep dependent devices for reset and clear our marker. 
*/ + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + !vfio_pci_from_vfio_device(vbasedev_iter)) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + if (single) { + ret = -EINVAL; + goto out_single; + } + vfio_pci_pre_reset(tmp); + tmp->vbasedev.needs_reset = false; + multi = true; + break; + } + } + } + + if (!single && !multi) { + ret = -EINVAL; + goto out_single; + } + + /* Determine how many group fds need to be passed */ + count = 0; + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + count++; + break; + } + } + } + + reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); + reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); + fds = &reset->group_fds[0]; + + /* Fill in group fds */ + QLIST_FOREACH(group, &vfio_group_list, next) { + for (i = 0; i < info->count; i++) { + if (group->groupid == devices[i].group_id) { + fds[reset->count++] = group->fd; + break; + } + } + } + + /* Bus reset! */ + ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); + g_free(reset); + if (ret) { + ret = -errno; + } + + trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, + ret ? 
strerror(errno) : "Success"); + +out: + /* Re-enable INTx on affected devices */ + for (i = 0; i < info->count; i++) { + PCIHostDeviceAddress host; + VFIOPCIDevice *tmp; + VFIODevice *vbasedev_iter; + + host.domain = devices[i].segment; + host.bus = devices[i].bus; + host.slot = PCI_SLOT(devices[i].devfn); + host.function = PCI_FUNC(devices[i].devfn); + + if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { + continue; + } + + QLIST_FOREACH(group, &vfio_group_list, next) { + if (group->groupid == devices[i].group_id) { + break; + } + } + + if (!group) { + break; + } + + QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { + if (!vbasedev_iter->dev->realized || + !vfio_pci_from_vfio_device(vbasedev_iter)) { + continue; + } + tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); + if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { + vfio_pci_post_reset(tmp); + break; + } + } + } +out_single: + if (!single) { + vfio_pci_post_reset(vdev); + } + g_free(info); + + return ret; +} + +static void vfio_iommu_legacy_class_init(ObjectClass *klass, const void *data) +{ + VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); + + vioc->setup = vfio_legacy_setup; + vioc->dma_map = vfio_legacy_dma_map; + vioc->dma_unmap = vfio_legacy_dma_unmap; + vioc->attach_device = vfio_legacy_attach_device; + vioc->detach_device = vfio_legacy_detach_device; + vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; + vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; + vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; +}; + +static bool hiod_legacy_vfio_realize(HostIOMMUDevice *hiod, void *opaque, + Error **errp) +{ + VFIODevice *vdev = opaque; + + hiod->name = g_strdup(vdev->name); + hiod->agent = opaque; + + return true; +} + +static int hiod_legacy_vfio_get_cap(HostIOMMUDevice *hiod, int cap, + Error **errp) +{ + switch (cap) { + case HOST_IOMMU_DEVICE_CAP_AW_BITS: + return vfio_device_get_aw_bits(hiod->agent); + default: + error_setg(errp, "%s: unsupported 
capability %x", hiod->name, cap); + return -EINVAL; + } +} + +static GList * +hiod_legacy_vfio_get_iova_ranges(HostIOMMUDevice *hiod) +{ + VFIODevice *vdev = hiod->agent; + + g_assert(vdev); + return vfio_container_get_iova_ranges(vdev->bcontainer); +} + +static uint64_t +hiod_legacy_vfio_get_page_size_mask(HostIOMMUDevice *hiod) +{ + VFIODevice *vdev = hiod->agent; + + g_assert(vdev); + return vfio_container_get_page_size_mask(vdev->bcontainer); +} + +static void vfio_iommu_legacy_instance_init(Object *obj) +{ + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(obj); + + QLIST_INIT(&container->group_list); +} + +static void hiod_legacy_vfio_class_init(ObjectClass *oc, const void *data) +{ + HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc); + + hioc->realize = hiod_legacy_vfio_realize; + hioc->get_cap = hiod_legacy_vfio_get_cap; + hioc->get_iova_ranges = hiod_legacy_vfio_get_iova_ranges; + hioc->get_page_size_mask = hiod_legacy_vfio_get_page_size_mask; +}; + +static const TypeInfo types[] = { + { + .name = TYPE_VFIO_IOMMU_LEGACY, + .parent = TYPE_VFIO_IOMMU, + .instance_init = vfio_iommu_legacy_instance_init, + .instance_size = sizeof(VFIOLegacyContainer), + .class_init = vfio_iommu_legacy_class_init, + }, { + .name = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO, + .parent = TYPE_HOST_IOMMU_DEVICE, + .class_init = hiod_legacy_vfio_class_init, + } +}; + +DEFINE_TYPES(types) diff --git a/hw/vfio/container.c b/hw/vfio/container.c index 3e13feaa74c30..9ddec300e35c7 100644 --- a/hw/vfio/container.c +++ b/hw/vfio/container.c @@ -1,1280 +1,352 @@ /* - * generic functions used by VFIO devices + * VFIO BASE CONTAINER * - * Copyright Red Hat, Inc. 2012 + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 2023 * - * Authors: - * Alex Williamson + * Authors: Yi Liu + * Eric Auger * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. 
- * - * Based on qemu-kvm device-assignment: - * Adapted for KVM by Qumranet. - * Copyright (c) 2007, Neocleus, Alex Novik (alex@neocleus.com) - * Copyright (c) 2007, Neocleus, Guy Zana (guy@neocleus.com) - * Copyright (C) 2008, Qumranet, Amit Shah (amit.shah@qumranet.com) - * Copyright (C) 2008, Red Hat, Amit Shah (amit.shah@redhat.com) - * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) + * SPDX-License-Identifier: GPL-2.0-or-later */ -#include "qemu/osdep.h" #include #include -#include "hw/vfio/vfio-device.h" -#include "system/address-spaces.h" -#include "system/memory.h" +#include "qemu/osdep.h" +#include "system/tcg.h" #include "system/ram_addr.h" -#include "qemu/error-report.h" -#include "qemu/range.h" -#include "system/reset.h" -#include "trace.h" #include "qapi/error.h" -#include "migration/cpr.h" -#include "migration/blocker.h" -#include "pci.h" +#include "qemu/error-report.h" #include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-device.h" /* vfio_device_reset_handler */ +#include "system/physmem.h" +#include "system/reset.h" #include "vfio-helpers.h" -#include "vfio-listener.h" - -#define TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO TYPE_HOST_IOMMU_DEVICE "-legacy-vfio" - -typedef QLIST_HEAD(VFIOGroupList, VFIOGroup) VFIOGroupList; -static VFIOGroupList vfio_group_list = - QLIST_HEAD_INITIALIZER(vfio_group_list); - -static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state) -{ - switch (container->iommu_type) { - case VFIO_TYPE1v2_IOMMU: - case VFIO_TYPE1_IOMMU: - /* - * We support coordinated discarding of RAM via the RamDiscardManager. - */ - return ram_block_uncoordinated_discard_disable(state); - default: - /* - * VFIO_SPAPR_TCE_IOMMU most probably works just fine with - * RamDiscardManager, however, it is completely untested. - * - * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does - * completely the opposite of managing mapping/pinning dynamically as - * required by RamDiscardManager. 
We would have to special-case sections - * with a RamDiscardManager. - */ - return ram_block_discard_disable(state); - } -} - -static int vfio_dma_unmap_bitmap(const VFIOContainer *container, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb) -{ - const VFIOContainerBase *bcontainer = &container->bcontainer; - struct vfio_iommu_type1_dma_unmap *unmap; - struct vfio_bitmap *bitmap; - VFIOBitmap vbmap; - int ret; - - ret = vfio_bitmap_alloc(&vbmap, size); - if (ret) { - return ret; - } - - unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap)); - - unmap->argsz = sizeof(*unmap) + sizeof(*bitmap); - unmap->iova = iova; - unmap->size = size; - unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP; - bitmap = (struct vfio_bitmap *)&unmap->data; - - /* - * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of - * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize - * to qemu_real_host_page_size. - */ - bitmap->pgsize = qemu_real_host_page_size(); - bitmap->size = vbmap.size; - bitmap->data = (__u64 *)vbmap.bitmap; - - if (vbmap.size > bcontainer->max_dirty_bitmap_size) { - error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size); - ret = -E2BIG; - goto unmap_exit; - } - - ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap); - if (!ret) { - cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, - iotlb->translated_addr, vbmap.pages); - } else { - error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m"); - } - -unmap_exit: - g_free(unmap); - g_free(vbmap.bitmap); - - return ret; -} - -static int vfio_legacy_dma_unmap_one(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb) -{ - const VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - struct vfio_iommu_type1_dma_unmap unmap = { - .argsz = sizeof(unmap), - .flags = 0, - .iova = iova, - .size = size, - }; - bool need_dirty_sync = false; - int ret; - Error *local_err = NULL; - - g_assert(!cpr_is_incoming()); - 
- if (iotlb && vfio_container_dirty_tracking_is_started(bcontainer)) { - if (!vfio_container_devices_dirty_tracking_is_supported(bcontainer) && - bcontainer->dirty_pages_supported) { - return vfio_dma_unmap_bitmap(container, iova, size, iotlb); - } - need_dirty_sync = true; - } - - while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { - /* - * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c - * v4.15) where an overflow in its wrap-around check prevents us from - * unmapping the last page of the address space. Test for the error - * condition and re-try the unmap excluding the last page. The - * expectation is that we've never mapped the last page anyway and this - * unmap request comes via vIOMMU support which also makes it unlikely - * that this page is used. This bug was introduced well after type1 v2 - * support was introduced, so we shouldn't need to test for v1. A fix - * is queued for kernel v5.0 so this workaround can be removed once - * affected kernels are sufficiently deprecated. 
- */ - if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) && - container->iommu_type == VFIO_TYPE1v2_IOMMU) { - trace_vfio_legacy_dma_unmap_overflow_workaround(); - unmap.size -= 1ULL << ctz64(bcontainer->pgsizes); - continue; - } - return -errno; - } - - if (need_dirty_sync) { - ret = vfio_container_query_dirty_bitmap(bcontainer, iova, size, - iotlb->translated_addr, &local_err); - if (ret) { - error_report_err(local_err); - return ret; - } - } +#include "trace.h" - return 0; -} +static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces = + QLIST_HEAD_INITIALIZER(vfio_address_spaces); -/* - * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 - */ -static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb, bool unmap_all) +VFIOAddressSpace *vfio_address_space_get(AddressSpace *as) { - int ret; - - if (unmap_all) { - /* The unmap ioctl doesn't accept a full 64-bit span. */ - Int128 llsize = int128_rshift(int128_2_64(), 1); - - ret = vfio_legacy_dma_unmap_one(bcontainer, 0, int128_get64(llsize), - iotlb); + VFIOAddressSpace *space; - if (ret == 0) { - ret = vfio_legacy_dma_unmap_one(bcontainer, int128_get64(llsize), - int128_get64(llsize), iotlb); + QLIST_FOREACH(space, &vfio_address_spaces, list) { + if (space->as == as) { + return space; } - - } else { - ret = vfio_legacy_dma_unmap_one(bcontainer, iova, size, iotlb); - } - - return ret; -} - -static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly, - MemoryRegion *mr) -{ - const VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - struct vfio_iommu_type1_dma_map map = { - .argsz = sizeof(map), - .flags = VFIO_DMA_MAP_FLAG_READ, - .vaddr = (__u64)(uintptr_t)vaddr, - .iova = iova, - .size = size, - }; - - if (!readonly) { - map.flags |= VFIO_DMA_MAP_FLAG_WRITE; - } - - /* - * Try the mapping, if it fails with EBUSY, 
unmap the region and try - * again. This shouldn't be necessary, but we sometimes see it in - * the VGA ROM space. - */ - if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 || - (errno == EBUSY && - vfio_legacy_dma_unmap(bcontainer, iova, size, NULL, false) == 0 && - ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) { - return 0; - } - - return -errno; -} - -static int -vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, - bool start, Error **errp) -{ - const VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - int ret; - struct vfio_iommu_type1_dirty_bitmap dirty = { - .argsz = sizeof(dirty), - }; - - if (start) { - dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START; - } else { - dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP; - } - - ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty); - if (ret) { - ret = -errno; - error_setg_errno(errp, errno, "Failed to set dirty tracking flag 0x%x", - dirty.flags); - } - - return ret; -} - -static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) -{ - const VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - struct vfio_iommu_type1_dirty_bitmap *dbitmap; - struct vfio_iommu_type1_dirty_bitmap_get *range; - int ret; - - dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); - - dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); - dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; - range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; - range->iova = iova; - range->size = size; - - /* - * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of - * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize - * to qemu_real_host_page_size. 
- */ - range->bitmap.pgsize = qemu_real_host_page_size(); - range->bitmap.size = vbmap->size; - range->bitmap.data = (__u64 *)vbmap->bitmap; - - ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); - if (ret) { - ret = -errno; - error_setg_errno(errp, errno, - "Failed to get dirty bitmap for iova: 0x%"PRIx64 - " size: 0x%"PRIx64, (uint64_t)range->iova, - (uint64_t)range->size); - } - - g_free(dbitmap); - - return ret; -} - -static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info, - VFIOContainerBase *bcontainer) -{ - struct vfio_info_cap_header *hdr; - struct vfio_iommu_type1_info_cap_iova_range *cap; - - hdr = vfio_get_iommu_type1_info_cap(info, - VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE); - if (!hdr) { - return false; } - cap = (void *)hdr; - - for (int i = 0; i < cap->nr_iovas; i++) { - Range *range = g_new(Range, 1); + /* No suitable VFIOAddressSpace, create a new one */ + space = g_malloc0(sizeof(*space)); + space->as = as; + QLIST_INIT(&space->containers); - range_set_bounds(range, cap->iova_ranges[i].start, - cap->iova_ranges[i].end); - bcontainer->iova_ranges = - range_list_insert(bcontainer->iova_ranges, range); + if (QLIST_EMPTY(&vfio_address_spaces)) { + qemu_register_reset(vfio_device_reset_handler, NULL); } - return true; -} - -static void vfio_group_add_kvm_device(VFIOGroup *group) -{ - Error *err = NULL; + QLIST_INSERT_HEAD(&vfio_address_spaces, space, list); - if (vfio_kvm_device_add_fd(group->fd, &err)) { - error_reportf_err(err, "group ID %d: ", group->groupid); - } + return space; } -static void vfio_group_del_kvm_device(VFIOGroup *group) +void vfio_address_space_put(VFIOAddressSpace *space) { - Error *err = NULL; - - if (vfio_kvm_device_del_fd(group->fd, &err)) { - error_reportf_err(err, "group ID %d: ", group->groupid); + if (!QLIST_EMPTY(&space->containers)) { + return; } -} -/* - * vfio_get_iommu_type - selects the richest iommu_type (v2 first) - */ -static int vfio_get_iommu_type(int container_fd, - Error **errp) -{ - int 
iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU, - VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU }; - int i; + QLIST_REMOVE(space, list); + g_free(space); - for (i = 0; i < ARRAY_SIZE(iommu_types); i++) { - if (ioctl(container_fd, VFIO_CHECK_EXTENSION, iommu_types[i])) { - return iommu_types[i]; - } + if (QLIST_EMPTY(&vfio_address_spaces)) { + qemu_unregister_reset(vfio_device_reset_handler, NULL); } - error_setg(errp, "No available IOMMU models"); - return -EINVAL; } -/* - * vfio_get_iommu_ops - get a VFIOIOMMUClass associated with a type - */ -static const char *vfio_get_iommu_class_name(int iommu_type) +void vfio_address_space_insert(VFIOAddressSpace *space, + VFIOContainer *bcontainer) { - switch (iommu_type) { - case VFIO_TYPE1v2_IOMMU: - case VFIO_TYPE1_IOMMU: - return TYPE_VFIO_IOMMU_LEGACY; - break; - case VFIO_SPAPR_TCE_v2_IOMMU: - case VFIO_SPAPR_TCE_IOMMU: - return TYPE_VFIO_IOMMU_SPAPR; - break; - default: - g_assert_not_reached(); - }; + QLIST_INSERT_HEAD(&space->containers, bcontainer, next); + bcontainer->space = space; } -static bool vfio_set_iommu(int container_fd, int group_fd, - int *iommu_type, Error **errp) +int vfio_container_dma_map(VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + void *vaddr, bool readonly, MemoryRegion *mr) { - if (ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container_fd)) { - error_setg_errno(errp, errno, "Failed to set group container"); - return false; - } - - while (ioctl(container_fd, VFIO_SET_IOMMU, *iommu_type)) { - if (*iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { - /* - * On sPAPR, despite the IOMMU subdriver always advertises v1 and - * v2, the running platform may not support v2 and there is no - * way to guess it until an IOMMU group gets added to the container. - * So in case it fails with v2, try v1 as a fallback. 
- */ - *iommu_type = VFIO_SPAPR_TCE_IOMMU; - continue; - } - error_setg_errno(errp, errno, "Failed to set iommu for container"); - return false; - } - - return true; -} - -static VFIOContainer *vfio_create_container(int fd, VFIOGroup *group, - Error **errp) -{ - int iommu_type; - const char *vioc_name; - VFIOContainer *container; + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); + RAMBlock *rb = mr->ram_block; + int mfd = rb ? qemu_ram_get_fd(rb) : -1; - iommu_type = vfio_get_iommu_type(fd, errp); - if (iommu_type < 0) { - return NULL; - } + if (mfd >= 0 && vioc->dma_map_file) { + unsigned long start = vaddr - qemu_ram_get_host_addr(rb); + unsigned long offset = qemu_ram_get_fd_offset(rb); - /* - * During CPR, just set the container type and skip the ioctls, as the - * container and group are already configured in the kernel. - */ - if (!cpr_is_incoming() && - !vfio_set_iommu(fd, group->fd, &iommu_type, errp)) { - return NULL; + return vioc->dma_map_file(bcontainer, iova, size, mfd, start + offset, + readonly); } - - vioc_name = vfio_get_iommu_class_name(iommu_type); - - container = VFIO_IOMMU_LEGACY(object_new(vioc_name)); - container->fd = fd; - container->iommu_type = iommu_type; - return container; + g_assert(vioc->dma_map); + return vioc->dma_map(bcontainer, iova, size, vaddr, readonly, mr); } -static int vfio_get_iommu_info(VFIOContainer *container, - struct vfio_iommu_type1_info **info) +int vfio_container_dma_unmap(VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + IOMMUTLBEntry *iotlb, bool unmap_all) { + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - size_t argsz = sizeof(struct vfio_iommu_type1_info); - - *info = g_new0(struct vfio_iommu_type1_info, 1); -again: - (*info)->argsz = argsz; - - if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) { - g_free(*info); - *info = NULL; - return -errno; - } - - if (((*info)->argsz > argsz)) { - argsz = (*info)->argsz; - *info = g_realloc(*info, argsz); - goto again; - } - - return 0; 
+ g_assert(vioc->dma_unmap); + return vioc->dma_unmap(bcontainer, iova, size, iotlb, unmap_all); } -static struct vfio_info_cap_header * -vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id) +bool vfio_container_add_section_window(VFIOContainer *bcontainer, + MemoryRegionSection *section, + Error **errp) { - struct vfio_info_cap_header *hdr; - void *ptr = info; + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) { - return NULL; + if (!vioc->add_window) { + return true; } - for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) { - if (hdr->id == id) { - return hdr; - } - } - - return NULL; + return vioc->add_window(bcontainer, section, errp); } -static void vfio_get_iommu_info_migration(VFIOContainer *container, - struct vfio_iommu_type1_info *info) +void vfio_container_del_section_window(VFIOContainer *bcontainer, + MemoryRegionSection *section) { - struct vfio_info_cap_header *hdr; - struct vfio_iommu_type1_info_cap_migration *cap_mig; - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION); - if (!hdr) { + if (!vioc->del_window) { return; } - cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration, - header); - - /* - * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of - * qemu_real_host_page_size to mark those dirty. 
- */ - if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) { - bcontainer->dirty_pages_supported = true; - bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size; - bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap; - } + return vioc->del_window(bcontainer, section); } -static bool vfio_legacy_setup(VFIOContainerBase *bcontainer, Error **errp) +int vfio_container_set_dirty_page_tracking(VFIOContainer *bcontainer, + bool start, Error **errp) { - VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - g_autofree struct vfio_iommu_type1_info *info = NULL; + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); int ret; - ret = vfio_get_iommu_info(container, &info); - if (ret) { - error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info"); - return false; + if (!bcontainer->dirty_pages_supported) { + return 0; } - if (info->flags & VFIO_IOMMU_INFO_PGSIZES) { - bcontainer->pgsizes = info->iova_pgsizes; - } else { - bcontainer->pgsizes = qemu_real_host_page_size(); + g_assert(vioc->set_dirty_page_tracking); + if (bcontainer->dirty_pages_started == start) { + return 0; } - if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) { - bcontainer->dma_max_mappings = 65535; + ret = vioc->set_dirty_page_tracking(bcontainer, start, errp); + if (!ret) { + bcontainer->dirty_pages_started = start; } - vfio_get_info_iova_range(info, bcontainer); - - vfio_get_iommu_info_migration(container, info); - return true; + return ret; } -static bool vfio_container_attach_discard_disable(VFIOContainer *container, - VFIOGroup *group, Error **errp) +static bool vfio_container_devices_dirty_tracking_is_started( + const VFIOContainer *bcontainer) { - int ret; + VFIODevice *vbasedev; - /* - * VFIO is currently incompatible with discarding of RAM insofar as the - * madvise to purge (zap) the page from QEMU's address space does not - * interact with the memory API and therefore leaves stale virtual to - * physical mappings in the IOMMU if 
the page was previously pinned. We - * therefore set discarding broken for each group added to a container, - * whether the container is used individually or shared. This provides - * us with options to allow devices within a group to opt-in and allow - * discarding, so long as it is done consistently for a group (for instance - * if the device is an mdev device where it is known that the host vendor - * driver will never pin pages outside of the working set of the guest - * driver, which would thus not be discarding candidates). - * - * The first opportunity to induce pinning occurs here where we attempt to - * attach the group to existing containers within the AddressSpace. If any - * pages are already zapped from the virtual address space, such as from - * previous discards, new pinning will cause valid mappings to be - * re-established. Likewise, when the overall MemoryListener for a new - * container is registered, a replay of mappings within the AddressSpace - * will occur, re-establishing any previously zapped pages as well. - * - * Especially virtio-balloon is currently only prevented from discarding - * new memory, it will not yet set ram_block_discard_set_required() and - * therefore, neither stops us here or deals with the sudden memory - * consumption of inflated memory. - * - * We do support discarding of memory coordinated via the RamDiscardManager - * with some IOMMU types. vfio_ram_block_discard_disable() handles the - * details once we know which type of IOMMU we are using. 
- */ - - ret = vfio_ram_block_discard_disable(container, true); - if (ret) { - error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken"); - if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { - error_report("vfio: error disconnecting group %d from" - " container", group->groupid); + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + if (!vbasedev->dirty_tracking) { + return false; } } - return !ret; -} -static bool vfio_container_group_add(VFIOContainer *container, VFIOGroup *group, - Error **errp) -{ - if (!vfio_container_attach_discard_disable(container, group, errp)) { - return false; - } - group->container = container; - QLIST_INSERT_HEAD(&container->group_list, group, container_next); - vfio_group_add_kvm_device(group); - /* - * Remember the container fd for each group, so we can attach to the same - * container after CPR. - */ - cpr_resave_fd("vfio_container_for_group", group->groupid, container->fd); return true; } -static void vfio_container_group_del(VFIOContainer *container, VFIOGroup *group) +bool vfio_container_dirty_tracking_is_started( + const VFIOContainer *bcontainer) { - QLIST_REMOVE(group, container_next); - group->container = NULL; - vfio_group_del_kvm_device(group); - vfio_ram_block_discard_disable(container, false); - cpr_delete_fd("vfio_container_for_group", group->groupid); + return vfio_container_devices_dirty_tracking_is_started(bcontainer) || + bcontainer->dirty_pages_started; } -static bool vfio_container_connect(VFIOGroup *group, AddressSpace *as, - Error **errp) +bool vfio_container_devices_dirty_tracking_is_supported( + const VFIOContainer *bcontainer) { - VFIOContainer *container; - VFIOContainerBase *bcontainer; - int ret, fd = -1; - VFIOAddressSpace *space; - VFIOIOMMUClass *vioc = NULL; - bool new_container = false; - bool group_was_added = false; - - space = vfio_address_space_get(as); - fd = cpr_find_fd("vfio_container_for_group", group->groupid); - - if (!cpr_is_incoming()) { - 
QLIST_FOREACH(bcontainer, &space->containers, next) { - container = container_of(bcontainer, VFIOContainer, bcontainer); - if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) { - return vfio_container_group_add(container, group, errp); - } - } + VFIODevice *vbasedev; - fd = qemu_open("/dev/vfio/vfio", O_RDWR, errp); - if (fd < 0) { - goto fail; + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + if (vbasedev->device_dirty_page_tracking == ON_OFF_AUTO_OFF) { + return false; } - } else { - /* - * For incoming CPR, the group is already attached in the kernel. - * If a container with matching fd is found, then update the - * userland group list and return. If not, then after the loop, - * create the container struct and group list. - */ - QLIST_FOREACH(bcontainer, &space->containers, next) { - container = container_of(bcontainer, VFIOContainer, bcontainer); - - if (vfio_cpr_container_match(container, group, fd)) { - return vfio_container_group_add(container, group, errp); - } - } - } - - ret = ioctl(fd, VFIO_GET_API_VERSION); - if (ret != VFIO_API_VERSION) { - error_setg(errp, "supported vfio version: %d, " - "reported version: %d", VFIO_API_VERSION, ret); - goto fail; - } - - container = vfio_create_container(fd, group, errp); - if (!container) { - goto fail; - } - new_container = true; - bcontainer = &container->bcontainer; - - if (!vfio_legacy_cpr_register_container(container, errp)) { - goto fail; - } - - vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - assert(vioc->setup); - - if (!vioc->setup(bcontainer, errp)) { - goto fail; - } - - vfio_address_space_insert(space, bcontainer); - - if (!vfio_container_group_add(container, group, errp)) { - goto fail; - } - group_was_added = true; - - /* - * If CPR, register the listener later, after all state that may - * affect regions and mapping boundaries has been cpr load'ed. 
Later, - * the listener will invoke its callback on each flat section and call - * dma_map to supply the new vaddr, and the calls will match the mappings - * remembered by the kernel. - */ - if (!cpr_is_incoming()) { - if (!vfio_listener_register(bcontainer, errp)) { - goto fail; + if (!vbasedev->dirty_pages_supported) { + return false; } } - bcontainer->initialized = true; - return true; - -fail: - if (new_container) { - vfio_listener_unregister(bcontainer); - } - - if (group_was_added) { - vfio_container_group_del(container, group); - } - if (vioc && vioc->release) { - vioc->release(bcontainer); - } - if (new_container) { - vfio_legacy_cpr_unregister_container(container); - object_unref(container); - } - if (fd >= 0) { - close(fd); - } - vfio_address_space_put(space); - - return false; -} - -static void vfio_container_disconnect(VFIOGroup *group) -{ - VFIOContainer *container = group->container; - VFIOContainerBase *bcontainer = &container->bcontainer; - VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - - QLIST_REMOVE(group, container_next); - group->container = NULL; - cpr_delete_fd("vfio_container_for_group", group->groupid); - - /* - * Explicitly release the listener first before unset container, - * since unset may destroy the backend container if it's the last - * group. 
- */ - if (QLIST_EMPTY(&container->group_list)) { - vfio_listener_unregister(bcontainer); - if (vioc->release) { - vioc->release(bcontainer); - } - } - - if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) { - error_report("vfio: error disconnecting group %d from container", - group->groupid); - } - - if (QLIST_EMPTY(&container->group_list)) { - VFIOAddressSpace *space = bcontainer->space; - - trace_vfio_container_disconnect(container->fd); - vfio_legacy_cpr_unregister_container(container); - close(container->fd); - object_unref(container); - - vfio_address_space_put(space); - } } -static VFIOGroup *vfio_group_get(int groupid, AddressSpace *as, Error **errp) +static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova, + hwaddr size, void *bitmap) { - ERRP_GUARD(); - VFIOGroup *group; - char path[32]; - struct vfio_group_status status = { .argsz = sizeof(status) }; - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == groupid) { - /* Found it. Now is it already in the right context? 
*/ - if (group->container->bcontainer.space->as == as) { - return group; - } else { - error_setg(errp, "group %d used in multiple address spaces", - group->groupid); - return NULL; - } - } - } + uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature) + + sizeof(struct vfio_device_feature_dma_logging_report), + sizeof(uint64_t))] = {}; + struct vfio_device_feature *feature = (struct vfio_device_feature *)buf; + struct vfio_device_feature_dma_logging_report *report = + (struct vfio_device_feature_dma_logging_report *)feature->data; - group = g_malloc0(sizeof(*group)); + report->iova = iova; + report->length = size; + report->page_size = qemu_real_host_page_size(); + report->bitmap = (uintptr_t)bitmap; - snprintf(path, sizeof(path), "/dev/vfio/%d", groupid); - group->fd = cpr_open_fd(path, O_RDWR, "vfio_group", groupid, errp); - if (group->fd < 0) { - goto free_group_exit; - } + feature->argsz = sizeof(buf); + feature->flags = VFIO_DEVICE_FEATURE_GET | + VFIO_DEVICE_FEATURE_DMA_LOGGING_REPORT; - if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) { - error_setg_errno(errp, errno, "failed to get group %d status", groupid); - goto close_fd_exit; - } - - if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) { - error_setg(errp, "group %d is not viable", groupid); - error_append_hint(errp, - "Please ensure all devices within the iommu_group " - "are bound to their vfio bus driver.\n"); - goto close_fd_exit; - } - - group->groupid = groupid; - QLIST_INIT(&group->device_list); - - if (!vfio_container_connect(group, as, errp)) { - error_prepend(errp, "failed to setup container for group %d: ", - groupid); - goto close_fd_exit; - } - - QLIST_INSERT_HEAD(&vfio_group_list, group, next); - - return group; - -close_fd_exit: - cpr_delete_fd("vfio_group", groupid); - close(group->fd); - -free_group_exit: - g_free(group); - - return NULL; -} - -static void vfio_group_put(VFIOGroup *group) -{ - if (!group || !QLIST_EMPTY(&group->device_list)) { - return; - } - - if 
(!group->ram_block_discard_allowed) { - vfio_ram_block_discard_disable(group->container, false); - } - vfio_group_del_kvm_device(group); - vfio_container_disconnect(group); - QLIST_REMOVE(group, next); - trace_vfio_group_put(group->fd); - cpr_delete_fd("vfio_group", group->groupid); - close(group->fd); - g_free(group); -} - -static bool vfio_device_get(VFIOGroup *group, const char *name, - VFIODevice *vbasedev, Error **errp) -{ - g_autofree struct vfio_device_info *info = NULL; - int fd; - - fd = vfio_cpr_group_get_device_fd(group->fd, name); - if (fd < 0) { - error_setg_errno(errp, errno, "error getting device from group %d", - group->groupid); - error_append_hint(errp, - "Verify all devices in group %d are bound to vfio- " - "or pci-stub and not already in use\n", group->groupid); - return false; - } - - info = vfio_get_device_info(fd); - if (!info) { - error_setg_errno(errp, errno, "error getting device info"); - goto fail; - } - - /* - * Set discarding of RAM as not broken for this group if the driver knows - * the device operates compatibly with discarding. Setting must be - * consistent per group, but since compatibility is really only possible - * with mdev currently, we expect singleton groups. 
- */ - if (vbasedev->ram_block_discard_allowed != - group->ram_block_discard_allowed) { - if (!QLIST_EMPTY(&group->device_list)) { - error_setg(errp, "Inconsistent setting of support for discarding " - "RAM (e.g., balloon) within group"); - goto fail; - } - - if (!group->ram_block_discard_allowed) { - group->ram_block_discard_allowed = true; - vfio_ram_block_discard_disable(group->container, false); - } - } - - vfio_device_prepare(vbasedev, &group->container->bcontainer, info); - - vbasedev->fd = fd; - vbasedev->group = group; - QLIST_INSERT_HEAD(&group->device_list, vbasedev, next); - - trace_vfio_device_get(name, info->flags, info->num_regions, info->num_irqs); - - return true; - -fail: - close(fd); - cpr_delete_fd(name, 0); - return false; -} - -static void vfio_device_put(VFIODevice *vbasedev) -{ - if (!vbasedev->group) { - return; - } - QLIST_REMOVE(vbasedev, next); - vbasedev->group = NULL; - trace_vfio_device_put(vbasedev->fd); - cpr_delete_fd(vbasedev->name, 0); - close(vbasedev->fd); + return vbasedev->io_ops->device_feature(vbasedev, feature); } -static int vfio_device_get_groupid(VFIODevice *vbasedev, Error **errp) +static int vfio_container_iommu_query_dirty_bitmap( + const VFIOContainer *bcontainer, VFIOBitmap *vbmap, hwaddr iova, + hwaddr size, Error **errp) { - char *tmp, group_path[PATH_MAX]; - g_autofree char *group_name = NULL; - int ret, groupid; - ssize_t len; - - tmp = g_strdup_printf("%s/iommu_group", vbasedev->sysfsdev); - len = readlink(tmp, group_path, sizeof(group_path)); - g_free(tmp); - - if (len <= 0 || len >= sizeof(group_path)) { - ret = len < 0 ? 
-errno : -ENAMETOOLONG; - error_setg_errno(errp, -ret, "no iommu_group found"); - return ret; - } - - group_path[len] = 0; + VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); - group_name = g_path_get_basename(group_path); - if (sscanf(group_name, "%d", &groupid) != 1) { - error_setg_errno(errp, errno, "failed to read %s", group_path); - return -errno; - } - return groupid; + g_assert(vioc->query_dirty_bitmap); + return vioc->query_dirty_bitmap(bcontainer, vbmap, iova, size, + errp); } -/* - * vfio_device_attach: attach a device to a security context - * @name and @vbasedev->name are likely to be different depending - * on the type of the device, hence the need for passing @name - */ -static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev, - AddressSpace *as, Error **errp) +static int vfio_container_devices_query_dirty_bitmap( + const VFIOContainer *bcontainer, VFIOBitmap *vbmap, hwaddr iova, + hwaddr size, Error **errp) { - int groupid = vfio_device_get_groupid(vbasedev, errp); - VFIODevice *vbasedev_iter; - VFIOGroup *group; - - if (groupid < 0) { - return false; - } - - trace_vfio_device_attach(vbasedev->name, groupid); - - group = vfio_group_get(groupid, as, errp); - if (!group) { - return false; - } - - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) { - error_setg(errp, "device is already attached"); - goto group_put_exit; - } - } - if (!vfio_device_get(group, name, vbasedev, errp)) { - goto group_put_exit; - } + VFIODevice *vbasedev; + int ret; - if (!vfio_device_hiod_create_and_realize(vbasedev, - TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO, - errp)) { - goto device_put_exit; - } + QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { + ret = vfio_device_dma_logging_report(vbasedev, iova, size, + vbmap->bitmap); + if (ret) { + error_setg_errno(errp, -ret, + "%s: Failed to get DMA logging report, iova: " + "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx, + 
vbasedev->name, iova, size); - if (vbasedev->mdev) { - error_setg(&vbasedev->cpr.mdev_blocker, - "CPR does not support vfio mdev %s", vbasedev->name); - if (migrate_add_blocker_modes(&vbasedev->cpr.mdev_blocker, errp, - MIG_MODE_CPR_TRANSFER, -1) < 0) { - goto hiod_unref_exit; + return ret; } } - return true; - -hiod_unref_exit: - object_unref(vbasedev->hiod); -device_put_exit: - vfio_device_put(vbasedev); -group_put_exit: - vfio_group_put(group); - return false; + return 0; } -static void vfio_legacy_detach_device(VFIODevice *vbasedev) +int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer, + uint64_t iova, uint64_t size, + hwaddr translated_addr, Error **errp) { - VFIOGroup *group = vbasedev->group; - - trace_vfio_device_detach(vbasedev->name, group->groupid); - - vfio_device_unprepare(vbasedev); - - migrate_del_blocker(&vbasedev->cpr.mdev_blocker); - object_unref(vbasedev->hiod); - vfio_device_put(vbasedev); - vfio_group_put(group); -} + bool all_device_dirty_tracking = + vfio_container_devices_dirty_tracking_is_supported(bcontainer); + uint64_t dirty_pages; + VFIOBitmap vbmap; + int ret; -static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single) -{ - VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); - VFIOGroup *group; - struct vfio_pci_hot_reset_info *info = NULL; - struct vfio_pci_dependent_device *devices; - struct vfio_pci_hot_reset *reset; - int32_t *fds; - int ret, i, count; - bool multi = false; - - trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi"); - - if (!single) { - vfio_pci_pre_reset(vdev); + if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) { + physical_memory_set_dirty_range(translated_addr, size, + tcg_enabled() ? 
DIRTY_CLIENTS_ALL : + DIRTY_CLIENTS_NOCODE); + return 0; } - vdev->vbasedev.needs_reset = false; - - ret = vfio_pci_get_pci_hot_reset_info(vdev, &info); + ret = vfio_bitmap_alloc(&vbmap, size); if (ret) { - goto out_single; - } - devices = &info->devices[0]; - - trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name); - - /* Verify that we have all the groups required */ - for (i = 0; i < info->count; i++) { - PCIHostDeviceAddress host; - VFIOPCIDevice *tmp; - VFIODevice *vbasedev_iter; - - host.domain = devices[i].segment; - host.bus = devices[i].bus; - host.slot = PCI_SLOT(devices[i].devfn); - host.function = PCI_FUNC(devices[i].devfn); - - trace_vfio_pci_hot_reset_dep_devices(host.domain, - host.bus, host.slot, host.function, devices[i].group_id); - - if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { - continue; - } - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == devices[i].group_id) { - break; - } - } - - if (!group) { - if (!vdev->has_pm_reset) { - error_report("vfio: Cannot reset device %s, " - "depends on group %d which is not owned.", - vdev->vbasedev.name, devices[i].group_id); - } - ret = -EPERM; - goto out; - } - - /* Prep dependent devices for reset and clear our marker. 
*/ - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (!vbasedev_iter->dev->realized || - vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); - if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { - if (single) { - ret = -EINVAL; - goto out_single; - } - vfio_pci_pre_reset(tmp); - tmp->vbasedev.needs_reset = false; - multi = true; - break; - } - } - } - - if (!single && !multi) { - ret = -EINVAL; - goto out_single; - } - - /* Determine how many group fds need to be passed */ - count = 0; - QLIST_FOREACH(group, &vfio_group_list, next) { - for (i = 0; i < info->count; i++) { - if (group->groupid == devices[i].group_id) { - count++; - break; - } - } + error_setg_errno(errp, -ret, + "Failed to allocate dirty tracking bitmap"); + return ret; } - reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds))); - reset->argsz = sizeof(*reset) + (count * sizeof(*fds)); - fds = &reset->group_fds[0]; - - /* Fill in group fds */ - QLIST_FOREACH(group, &vfio_group_list, next) { - for (i = 0; i < info->count; i++) { - if (group->groupid == devices[i].group_id) { - fds[reset->count++] = group->fd; - break; - } - } + if (all_device_dirty_tracking) { + ret = vfio_container_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size, + errp); + } else { + ret = vfio_container_iommu_query_dirty_bitmap(bcontainer, &vbmap, iova, size, + errp); } - /* Bus reset! */ - ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset); - g_free(reset); if (ret) { - ret = -errno; + goto out; } - trace_vfio_pci_hot_reset_result(vdev->vbasedev.name, - ret ? 
strerror(errno) : "Success"); + dirty_pages = physical_memory_set_dirty_lebitmap(vbmap.bitmap, + translated_addr, + vbmap.pages); + trace_vfio_container_query_dirty_bitmap(iova, size, vbmap.size, + translated_addr, dirty_pages); out: - /* Re-enable INTx on affected devices */ - for (i = 0; i < info->count; i++) { - PCIHostDeviceAddress host; - VFIOPCIDevice *tmp; - VFIODevice *vbasedev_iter; - - host.domain = devices[i].segment; - host.bus = devices[i].bus; - host.slot = PCI_SLOT(devices[i].devfn); - host.function = PCI_FUNC(devices[i].devfn); - - if (vfio_pci_host_match(&host, vdev->vbasedev.name)) { - continue; - } - - QLIST_FOREACH(group, &vfio_group_list, next) { - if (group->groupid == devices[i].group_id) { - break; - } - } - - if (!group) { - break; - } - - QLIST_FOREACH(vbasedev_iter, &group->device_list, next) { - if (!vbasedev_iter->dev->realized || - vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) { - continue; - } - tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev); - if (vfio_pci_host_match(&host, tmp->vbasedev.name)) { - vfio_pci_post_reset(tmp); - break; - } - } - } -out_single: - if (!single) { - vfio_pci_post_reset(vdev); - } - g_free(info); + g_free(vbmap.bitmap); return ret; } -static void vfio_iommu_legacy_class_init(ObjectClass *klass, const void *data) +static gpointer copy_iova_range(gconstpointer src, gpointer data) { - VFIOIOMMUClass *vioc = VFIO_IOMMU_CLASS(klass); - - vioc->setup = vfio_legacy_setup; - vioc->dma_map = vfio_legacy_dma_map; - vioc->dma_unmap = vfio_legacy_dma_unmap; - vioc->attach_device = vfio_legacy_attach_device; - vioc->detach_device = vfio_legacy_detach_device; - vioc->set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking; - vioc->query_dirty_bitmap = vfio_legacy_query_dirty_bitmap; - vioc->pci_hot_reset = vfio_legacy_pci_hot_reset; -}; - -static bool hiod_legacy_vfio_realize(HostIOMMUDevice *hiod, void *opaque, - Error **errp) -{ - VFIODevice *vdev = opaque; + Range *source = (Range *)src; + Range *dest 
= g_new(Range, 1); - hiod->name = g_strdup(vdev->name); - hiod->agent = opaque; - - return true; + range_set_bounds(dest, range_lob(source), range_upb(source)); + return dest; } -static int hiod_legacy_vfio_get_cap(HostIOMMUDevice *hiod, int cap, - Error **errp) +GList *vfio_container_get_iova_ranges(const VFIOContainer *bcontainer) { - switch (cap) { - case HOST_IOMMU_DEVICE_CAP_AW_BITS: - return vfio_device_get_aw_bits(hiod->agent); - default: - error_setg(errp, "%s: unsupported capability %x", hiod->name, cap); - return -EINVAL; - } + assert(bcontainer); + return g_list_copy_deep(bcontainer->iova_ranges, copy_iova_range, NULL); } -static GList * -hiod_legacy_vfio_get_iova_ranges(HostIOMMUDevice *hiod) +static void vfio_container_instance_finalize(Object *obj) { - VFIODevice *vdev = hiod->agent; + VFIOContainer *bcontainer = VFIO_IOMMU(obj); + VFIOGuestIOMMU *giommu, *tmp; - g_assert(vdev); - return vfio_container_get_iova_ranges(vdev->bcontainer); -} + QLIST_SAFE_REMOVE(bcontainer, next); -static uint64_t -hiod_legacy_vfio_get_page_size_mask(HostIOMMUDevice *hiod) -{ - VFIODevice *vdev = hiod->agent; + QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) { + memory_region_unregister_iommu_notifier( + MEMORY_REGION(giommu->iommu_mr), &giommu->n); + QLIST_REMOVE(giommu, giommu_next); + g_free(giommu); + } - g_assert(vdev); - return vfio_container_get_page_size_mask(vdev->bcontainer); + g_list_free_full(bcontainer->iova_ranges, g_free); } -static void vfio_iommu_legacy_instance_init(Object *obj) +static void vfio_container_instance_init(Object *obj) { - VFIOContainer *container = VFIO_IOMMU_LEGACY(obj); + VFIOContainer *bcontainer = VFIO_IOMMU(obj); - QLIST_INIT(&container->group_list); + bcontainer->error = NULL; + bcontainer->dirty_pages_supported = false; + bcontainer->dma_max_mappings = 0; + bcontainer->iova_ranges = NULL; + QLIST_INIT(&bcontainer->giommu_list); + QLIST_INIT(&bcontainer->vrdl_list); } -static void 
hiod_legacy_vfio_class_init(ObjectClass *oc, const void *data) -{ - HostIOMMUDeviceClass *hioc = HOST_IOMMU_DEVICE_CLASS(oc); - - hioc->realize = hiod_legacy_vfio_realize; - hioc->get_cap = hiod_legacy_vfio_get_cap; - hioc->get_iova_ranges = hiod_legacy_vfio_get_iova_ranges; - hioc->get_page_size_mask = hiod_legacy_vfio_get_page_size_mask; -}; - static const TypeInfo types[] = { { - .name = TYPE_VFIO_IOMMU_LEGACY, - .parent = TYPE_VFIO_IOMMU, - .instance_init = vfio_iommu_legacy_instance_init, + .name = TYPE_VFIO_IOMMU, + .parent = TYPE_OBJECT, + .instance_init = vfio_container_instance_init, + .instance_finalize = vfio_container_instance_finalize, .instance_size = sizeof(VFIOContainer), - .class_init = vfio_iommu_legacy_class_init, - }, { - .name = TYPE_HOST_IOMMU_DEVICE_LEGACY_VFIO, - .parent = TYPE_HOST_IOMMU_DEVICE, - .class_init = hiod_legacy_vfio_class_init, - } + .class_size = sizeof(VFIOIOMMUClass), + .abstract = true, + }, }; DEFINE_TYPES(types) diff --git a/hw/vfio/cpr-iommufd.c b/hw/vfio/cpr-iommufd.c index 148a06d552ff3..8a4d65de5e96e 100644 --- a/hw/vfio/cpr-iommufd.c +++ b/hw/vfio/cpr-iommufd.c @@ -159,7 +159,8 @@ bool vfio_iommufd_cpr_register_iommufd(IOMMUFDBackend *be, Error **errp) if (!vfio_cpr_supported(be, cpr_blocker)) { return migrate_add_blocker_modes(cpr_blocker, errp, - MIG_MODE_CPR_TRANSFER, -1) == 0; + MIG_MODE_CPR_TRANSFER, + MIG_MODE_CPR_EXEC, -1) == 0; } vmstate_register(NULL, -1, &iommufd_cpr_vmstate, be); @@ -176,7 +177,7 @@ void vfio_iommufd_cpr_unregister_iommufd(IOMMUFDBackend *be) bool vfio_iommufd_cpr_register_container(VFIOIOMMUFDContainer *container, Error **errp) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier, vfio_cpr_reboot_notifier, @@ -189,7 +190,7 @@ bool vfio_iommufd_cpr_register_container(VFIOIOMMUFDContainer *container, void vfio_iommufd_cpr_unregister_container(VFIOIOMMUFDContainer 
*container) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); migration_remove_notifier(&bcontainer->cpr_reboot_notifier); } diff --git a/hw/vfio/cpr-legacy.c b/hw/vfio/cpr-legacy.c index 553b203e9b60b..7184c9399128e 100644 --- a/hw/vfio/cpr-legacy.c +++ b/hw/vfio/cpr-legacy.c @@ -7,7 +7,7 @@ #include #include #include "qemu/osdep.h" -#include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-container-legacy.h" #include "hw/vfio/vfio-device.h" #include "hw/vfio/vfio-listener.h" #include "migration/blocker.h" @@ -17,7 +17,8 @@ #include "qapi/error.h" #include "qemu/error-report.h" -static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp) +static bool vfio_dma_unmap_vaddr_all(VFIOLegacyContainer *container, + Error **errp) { struct vfio_iommu_type1_dma_unmap unmap = { .argsz = sizeof(unmap), @@ -37,12 +38,12 @@ static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp) * Set the new @vaddr for any mappings registered during cpr load. * The incoming state is cleared thereafter. 
*/ -static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, void *vaddr, +static int vfio_legacy_cpr_dma_map(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, void *vaddr, bool readonly, MemoryRegion *mr) { - const VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); + const VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + struct vfio_iommu_type1_dma_map map = { .argsz = sizeof(map), .flags = VFIO_DMA_MAP_FLAG_VADDR, @@ -51,8 +52,6 @@ static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer, .size = size, }; - g_assert(cpr_is_incoming()); - if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) { return -errno; } @@ -63,12 +62,13 @@ static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer, static void vfio_region_remap(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - cpr.remap_listener); - vfio_container_region_add(&container->bcontainer, section, true); + VFIOLegacyContainer *container = container_of(listener, + VFIOLegacyContainer, + cpr.remap_listener); + vfio_container_region_add(VFIO_IOMMU(container), section, true); } -static bool vfio_cpr_supported(VFIOContainer *container, Error **errp) +static bool vfio_cpr_supported(VFIOLegacyContainer *container, Error **errp) { if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) { error_setg(errp, "VFIO container does not support VFIO_UPDATE_VADDR"); @@ -85,7 +85,7 @@ static bool vfio_cpr_supported(VFIOContainer *container, Error **errp) static int vfio_container_pre_save(void *opaque) { - VFIOContainer *container = opaque; + VFIOLegacyContainer *container = opaque; Error *local_err = NULL; if (!vfio_dma_unmap_vaddr_all(container, &local_err)) { @@ -97,8 +97,8 @@ static int vfio_container_pre_save(void *opaque) static int vfio_container_post_load(void *opaque, int version_id) { - VFIOContainer 
*container = opaque; - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOLegacyContainer *container = opaque; + VFIOContainer *bcontainer = VFIO_IOMMU(container); VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer); dma_map_fn saved_dma_map = vioc->dma_map; Error *local_err = NULL; @@ -133,9 +133,9 @@ static const VMStateDescription vfio_container_vmstate = { static int vfio_cpr_fail_notifier(NotifierWithReturn *notifier, MigrationEvent *e, Error **errp) { - VFIOContainer *container = - container_of(notifier, VFIOContainer, cpr.transfer_notifier); - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOLegacyContainer *container = + container_of(notifier, VFIOLegacyContainer, cpr.transfer_notifier); + VFIOContainer *bcontainer = VFIO_IOMMU(container); if (e->type != MIG_EVENT_PRECOPY_FAILED) { return 0; @@ -165,9 +165,10 @@ static int vfio_cpr_fail_notifier(NotifierWithReturn *notifier, return 0; } -bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp) +bool vfio_legacy_cpr_register_container(VFIOLegacyContainer *container, + Error **errp) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); Error **cpr_blocker = &container->cpr.blocker; migration_add_notifier_mode(&bcontainer->cpr_reboot_notifier, @@ -176,22 +177,23 @@ bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp) if (!vfio_cpr_supported(container, cpr_blocker)) { return migrate_add_blocker_modes(cpr_blocker, errp, - MIG_MODE_CPR_TRANSFER, -1) == 0; + MIG_MODE_CPR_TRANSFER, + MIG_MODE_CPR_EXEC, -1) == 0; } vfio_cpr_add_kvm_notifier(); vmstate_register(NULL, -1, &vfio_container_vmstate, container); - migration_add_notifier_mode(&container->cpr.transfer_notifier, - vfio_cpr_fail_notifier, - MIG_MODE_CPR_TRANSFER); + migration_add_notifier_modes(&container->cpr.transfer_notifier, + vfio_cpr_fail_notifier, + MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC, -1); return true; } -void 
vfio_legacy_cpr_unregister_container(VFIOContainer *container) +void vfio_legacy_cpr_unregister_container(VFIOLegacyContainer *container) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); migration_remove_notifier(&bcontainer->cpr_reboot_notifier); migrate_del_blocker(&container->cpr.blocker); @@ -207,7 +209,7 @@ void vfio_legacy_cpr_unregister_container(VFIOContainer *container) * The giommu already exists. Find it and replay it, which calls * vfio_legacy_cpr_dma_map further down the stack. */ -void vfio_cpr_giommu_remap(VFIOContainerBase *bcontainer, +void vfio_cpr_giommu_remap(VFIOContainer *bcontainer, MemoryRegionSection *section) { VFIOGuestIOMMU *giommu = NULL; @@ -224,22 +226,32 @@ void vfio_cpr_giommu_remap(VFIOContainerBase *bcontainer, memory_region_iommu_replay(giommu->iommu_mr, &giommu->n); } +static int vfio_cpr_rdm_remap(MemoryRegionSection *section, void *opaque) +{ + RamDiscardListener *rdl = opaque; + + return rdl->notify_populate(rdl, section); +} + /* * In old QEMU, VFIO_DMA_UNMAP_FLAG_VADDR may fail on some mapping after * succeeding for others, so the latter have lost their vaddr. Call this - * to restore vaddr for a section with a RamDiscardManager. + * to restore vaddr for populated parts in a section with a RamDiscardManager. * - * The ram discard listener already exists. Call its populate function + * The ram discard listener already exists. Call its replay_populated function * directly, which calls vfio_legacy_cpr_dma_map. 
*/ -bool vfio_cpr_ram_discard_register_listener(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) +bool vfio_cpr_ram_discard_replay_populated(VFIOContainer *bcontainer, + MemoryRegionSection *section) { + RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); VFIORamDiscardListener *vrdl = vfio_find_ram_discard_listener(bcontainer, section); g_assert(vrdl); - return vrdl->listener.notify_populate(&vrdl->listener, section) == 0; + return ram_discard_manager_replay_populated(rdm, section, + vfio_cpr_rdm_remap, + &vrdl->listener) == 0; } int vfio_cpr_group_get_device_fd(int d, const char *name) @@ -263,7 +275,7 @@ static bool same_device(int fd1, int fd2) return !fstat(fd1, &st1) && !fstat(fd2, &st2) && st1.st_dev == st2.st_dev; } -bool vfio_cpr_container_match(VFIOContainer *container, VFIOGroup *group, +bool vfio_cpr_container_match(VFIOLegacyContainer *container, VFIOGroup *group, int fd) { if (container->fd == fd) { diff --git a/hw/vfio/cpr.c b/hw/vfio/cpr.c index af0f12a7adf5a..db462aabcbfe9 100644 --- a/hw/vfio/cpr.c +++ b/hw/vfio/cpr.c @@ -56,7 +56,7 @@ static void vfio_cpr_claim_vectors(VFIOPCIDevice *vdev, int nr_vectors, { int i, fd; bool pending = false; - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); vdev->nr_vectors = nr_vectors; vdev->msi_vectors = g_new0(VFIOMSIVector, nr_vectors); @@ -70,7 +70,7 @@ static void vfio_cpr_claim_vectors(VFIOPCIDevice *vdev, int nr_vectors, fd = vfio_cpr_load_vector_fd(vdev, "interrupt", i); if (fd >= 0) { vfio_pci_vector_init(vdev, i); - vfio_pci_msi_set_handler(vdev, i); + vfio_pci_msi_set_handler(vdev, i, true); } if (vfio_cpr_load_vector_fd(vdev, "kvm_interrupt", i) >= 0) { @@ -99,7 +99,7 @@ static void vfio_cpr_claim_vectors(VFIOPCIDevice *vdev, int nr_vectors, static int vfio_cpr_pci_pre_load(void *opaque) { VFIOPCIDevice *vdev = opaque; - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); int size = MIN(pci_config_size(pdev), 
vdev->config_size); int i; @@ -113,9 +113,11 @@ static int vfio_cpr_pci_pre_load(void *opaque) static int vfio_cpr_pci_post_load(void *opaque, int version_id) { VFIOPCIDevice *vdev = opaque; - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); int nr_vectors; + vfio_sub_page_bar_update_mappings(vdev); + if (msix_enabled(pdev)) { vfio_pci_msix_set_notifiers(vdev); nr_vectors = vdev->msix->entries; @@ -171,8 +173,8 @@ const VMStateDescription vfio_cpr_pci_vmstate = { .post_load = vfio_cpr_pci_post_load, .needed = cpr_incoming_needed, .fields = (VMStateField[]) { - VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), - VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, pci_msix_present), + VMSTATE_PCI_DEVICE(parent_obj, VFIOPCIDevice), + VMSTATE_MSIX_TEST(parent_obj, VFIOPCIDevice, pci_msix_present), VMSTATE_VFIO_INTX(intx, VFIOPCIDevice), VMSTATE_END_OF_LIST() } @@ -193,8 +195,100 @@ static int vfio_cpr_kvm_close_notifier(NotifierWithReturn *notifier, void vfio_cpr_add_kvm_notifier(void) { if (!kvm_close_notifier.notify) { - migration_add_notifier_mode(&kvm_close_notifier, - vfio_cpr_kvm_close_notifier, - MIG_MODE_CPR_TRANSFER); + migration_add_notifier_modes(&kvm_close_notifier, + vfio_cpr_kvm_close_notifier, + MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC, + -1); + } +} + +static int set_irqfd_notifier_gsi(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq, bool enable) +{ + if (enable) { + return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, virq); + } else { + return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, virq); + } +} + +static int vfio_cpr_set_msi_virq(VFIOPCIDevice *vdev, Error **errp, bool enable) +{ + const char *op = (enable ? 
"enable" : "disable"); + PCIDevice *pdev = PCI_DEVICE(vdev); + int i, nr_vectors, ret = 0; + + if (msix_enabled(pdev)) { + nr_vectors = vdev->msix->entries; + + } else if (msi_enabled(pdev)) { + nr_vectors = msi_nr_vectors_allocated(pdev); + + } else if (vfio_pci_read_config(pdev, PCI_INTERRUPT_PIN, 1)) { + ret = set_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, + &vdev->intx.unmask, vdev->intx.route.irq, + enable); + if (ret) { + error_setg_errno(errp, -ret, "failed to %s INTx irq %d", + op, vdev->intx.route.irq); + return ret; + } + vfio_pci_intx_set_handler(vdev, enable); + return ret; + + } else { + return 0; + } + + for (i = 0; i < nr_vectors; i++) { + VFIOMSIVector *vector = &vdev->msi_vectors[i]; + if (vector->use) { + ret = set_irqfd_notifier_gsi(kvm_state, &vector->kvm_interrupt, + NULL, vector->virq, enable); + if (ret) { + error_setg_errno(errp, -ret, + "failed to %s msi vector %d virq %d", + op, i, vector->virq); + return ret; + } + vfio_pci_msi_set_handler(vdev, i, enable); + } + } + + return ret; +} + +/* + * When CPR starts, detach IRQs from the VFIO device so future interrupts + * are posted to kvm_interrupt, which is preserved in new QEMU. Interrupts + * that were already posted to the old KVM instance, but not delivered to the + * VCPU, are recovered via KVM_GET_LAPIC and pushed to the new KVM instance + * in new QEMU. + * + * If CPR fails, reattach the IRQs. 
+ */ +static int vfio_cpr_pci_notifier(NotifierWithReturn *notifier, + MigrationEvent *e, Error **errp) +{ + VFIOPCIDevice *vdev = + container_of(notifier, VFIOPCIDevice, cpr.transfer_notifier); + + if (e->type == MIG_EVENT_PRECOPY_SETUP) { + return vfio_cpr_set_msi_virq(vdev, errp, false); + } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { + return vfio_cpr_set_msi_virq(vdev, errp, true); } + return 0; +} + +void vfio_cpr_pci_register_device(VFIOPCIDevice *vdev) +{ + migration_add_notifier_modes(&vdev->cpr.transfer_notifier, + vfio_cpr_pci_notifier, + MIG_MODE_CPR_TRANSFER, MIG_MODE_CPR_EXEC, -1); +} + +void vfio_cpr_pci_unregister_device(VFIOPCIDevice *vdev) +{ + migration_remove_notifier(&vdev->cpr.transfer_notifier); } diff --git a/hw/vfio/device.c b/hw/vfio/device.c index 52a1996dc4e12..8b63e765acbac 100644 --- a/hw/vfio/device.c +++ b/hw/vfio/device.c @@ -129,7 +129,7 @@ static inline const char *action_to_str(int action) static const char *index_to_str(VFIODevice *vbasedev, int index) { - if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { + if (!vfio_pci_from_vfio_device(vbasedev)) { return NULL; } @@ -205,10 +205,19 @@ int vfio_device_get_region_info(VFIODevice *vbasedev, int index, int fd = -1; int ret; - /* check cache */ - if (vbasedev->reginfo[index] != NULL) { - *info = vbasedev->reginfo[index]; - return 0; + /* + * We only set up the region info cache for the initial number of regions. + * + * Since a VFIO device may later increase the number of regions then use + * such regions with an index past ->num_initial_regions, don't attempt to + * use the info cache in those cases. 
+ */ + if (index < vbasedev->num_initial_regions) { + /* check cache */ + if (vbasedev->reginfo[index] != NULL) { + *info = vbasedev->reginfo[index]; + return 0; + } } *info = g_malloc0(argsz); @@ -236,10 +245,12 @@ int vfio_device_get_region_info(VFIODevice *vbasedev, int index, goto retry; } - /* fill cache */ - vbasedev->reginfo[index] = *info; - if (vbasedev->region_fds != NULL) { - vbasedev->region_fds[index] = fd; + if (index < vbasedev->num_initial_regions) { + /* fill cache */ + vbasedev->reginfo[index] = *info; + if (vbasedev->region_fds != NULL) { + vbasedev->region_fds[index] = fd; + } } return 0; @@ -257,7 +268,7 @@ int vfio_device_get_region_info_type(VFIODevice *vbasedev, uint32_t type, { int i; - for (i = 0; i < vbasedev->num_regions; i++) { + for (i = 0; i < vbasedev->num_initial_regions; i++) { struct vfio_info_cap_header *hdr; struct vfio_region_info_cap_type *cap_type; @@ -423,7 +434,7 @@ bool vfio_device_hiod_create_and_realize(VFIODevice *vbasedev, VFIODevice *vfio_get_vfio_device(Object *obj) { if (object_dynamic_cast(obj, TYPE_VFIO_PCI)) { - return &VFIO_PCI_BASE(obj)->vbasedev; + return &VFIO_PCI_DEVICE(obj)->vbasedev; } else { return NULL; } @@ -460,13 +471,13 @@ void vfio_device_detach(VFIODevice *vbasedev) VFIO_IOMMU_GET_CLASS(vbasedev->bcontainer)->detach_device(vbasedev); } -void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer, +void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainer *bcontainer, struct vfio_device_info *info) { int i; vbasedev->num_irqs = info->num_irqs; - vbasedev->num_regions = info->num_regions; + vbasedev->num_initial_regions = info->num_regions; vbasedev->flags = info->flags; vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET); @@ -476,10 +487,10 @@ void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer, QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next); vbasedev->reginfo = g_new0(struct vfio_region_info *, - vbasedev->num_regions); + 
vbasedev->num_initial_regions); if (vbasedev->use_region_fds) { - vbasedev->region_fds = g_new0(int, vbasedev->num_regions); - for (i = 0; i < vbasedev->num_regions; i++) { + vbasedev->region_fds = g_new0(int, vbasedev->num_initial_regions); + for (i = 0; i < vbasedev->num_initial_regions; i++) { vbasedev->region_fds[i] = -1; } } @@ -489,7 +500,7 @@ void vfio_device_unprepare(VFIODevice *vbasedev) { int i; - for (i = 0; i < vbasedev->num_regions; i++) { + for (i = 0; i < vbasedev->num_initial_regions; i++) { g_free(vbasedev->reginfo[i]); if (vbasedev->region_fds != NULL && vbasedev->region_fds[i] != -1) { close(vbasedev->region_fds[i]); diff --git a/hw/vfio/display.c b/hw/vfio/display.c index 9c6f5aa265dd7..faacd9019a558 100644 --- a/hw/vfio/display.c +++ b/hw/vfio/display.c @@ -365,7 +365,7 @@ static bool vfio_display_dmabuf_init(VFIOPCIDevice *vdev, Error **errp) &vfio_display_dmabuf_ops, vdev); if (vdev->enable_ramfb) { - vdev->dpy->ramfb = ramfb_setup(errp); + vdev->dpy->ramfb = ramfb_setup(vdev->use_legacy_x86_rom, errp); if (!vdev->dpy->ramfb) { return false; } @@ -494,7 +494,7 @@ static bool vfio_display_region_init(VFIOPCIDevice *vdev, Error **errp) &vfio_display_region_ops, vdev); if (vdev->enable_ramfb) { - vdev->dpy->ramfb = ramfb_setup(errp); + vdev->dpy->ramfb = ramfb_setup(vdev->use_legacy_x86_rom, errp); if (!vdev->dpy->ramfb) { return false; } diff --git a/hw/vfio/igd.c b/hw/vfio/igd.c index e7a9d1ffc1304..4bfa2e0fcd22a 100644 --- a/hw/vfio/igd.c +++ b/hw/vfio/igd.c @@ -113,6 +113,7 @@ static int igd_gen(VFIOPCIDevice *vdev) #define IGD_BDSM 0x5c /* Base Data of Stolen Memory */ #define IGD_BDSM_GEN11 0xc0 /* Base Data of Stolen Memory of gen 11 and later */ +#define IGD_GMCH_VGA_DISABLE BIT(1) #define IGD_GMCH_GEN6_GMS_SHIFT 3 /* SNB_GMCH in i915 */ #define IGD_GMCH_GEN6_GMS_MASK 0x1f #define IGD_GMCH_GEN8_GMS_SHIFT 8 /* BDW_GMCH in i915 */ @@ -199,7 +200,7 @@ static bool vfio_pci_igd_opregion_detect(VFIOPCIDevice *vdev, } /* Hotplugging is not 
supported for opregion access */ - if (vdev->pdev.qdev.hotplugged) { + if (DEVICE(vdev)->hotplugged) { warn_report("IGD device detected, but OpRegion is not supported " "on hotplugged device."); return false; @@ -259,11 +260,12 @@ static int vfio_pci_igd_copy(VFIOPCIDevice *vdev, PCIDevice *pdev, static int vfio_pci_igd_host_init(VFIOPCIDevice *vdev, struct vfio_region_info *info) { + PCIDevice *pdev = PCI_DEVICE(vdev); PCIBus *bus; PCIDevice *host_bridge; int ret; - bus = pci_device_root_bus(&vdev->pdev); + bus = pci_device_root_bus(pdev); host_bridge = pci_find_device(bus, 0, PCI_DEVFN(0, 0)); if (!host_bridge) { @@ -326,13 +328,14 @@ type_init(vfio_pci_igd_register_types) static int vfio_pci_igd_lpc_init(VFIOPCIDevice *vdev, struct vfio_region_info *info) { + PCIDevice *pdev = PCI_DEVICE(vdev); PCIDevice *lpc_bridge; int ret; - lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev), + lpc_bridge = pci_find_device(pci_device_root_bus(pdev), 0, PCI_DEVFN(0x1f, 0)); if (!lpc_bridge) { - lpc_bridge = pci_create_simple(pci_device_root_bus(&vdev->pdev), + lpc_bridge = pci_create_simple(pci_device_root_bus(pdev), PCI_DEVFN(0x1f, 0), "vfio-pci-igd-lpc-bridge"); } @@ -349,13 +352,14 @@ static bool vfio_pci_igd_setup_lpc_bridge(VFIOPCIDevice *vdev, Error **errp) { struct vfio_region_info *host = NULL; struct vfio_region_info *lpc = NULL; + PCIDevice *pdev = PCI_DEVICE(vdev); PCIDevice *lpc_bridge; int ret; /* * Copying IDs or creating new devices are not supported on hotplug */ - if (vdev->pdev.qdev.hotplugged) { + if (DEVICE(vdev)->hotplugged) { error_setg(errp, "IGD LPC is not supported on hotplugged device"); return false; } @@ -365,7 +369,7 @@ static bool vfio_pci_igd_setup_lpc_bridge(VFIOPCIDevice *vdev, Error **errp) * can stuff host values into, so if there's already one there and it's not * one we can hack on, this quirk is no-go. Sorry Q35. 
*/ - lpc_bridge = pci_find_device(pci_device_root_bus(&vdev->pdev), + lpc_bridge = pci_find_device(pci_device_root_bus(pdev), 0, PCI_DEVFN(0x1f, 0)); if (lpc_bridge && !object_dynamic_cast(OBJECT(lpc_bridge), "vfio-pci-igd-lpc-bridge")) { @@ -459,7 +463,7 @@ void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr) int gen; if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) || - !vfio_is_vga(vdev) || nr != 0) { + !vfio_is_base_display(vdev) || nr != 0) { return; } @@ -509,6 +513,7 @@ void vfio_probe_igd_bar0_quirk(VFIOPCIDevice *vdev, int nr) static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) { struct vfio_region_info *opregion = NULL; + PCIDevice *pdev = PCI_DEVICE(vdev); int ret, gen; uint64_t gms_size = 0; uint64_t *bdsm_size; @@ -517,7 +522,7 @@ static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) Error *err = NULL; if (!vfio_pci_is(vdev, PCI_VENDOR_ID_INTEL, PCI_ANY_ID) || - !vfio_is_vga(vdev)) { + !vfio_is_base_display(vdev)) { return true; } @@ -528,19 +533,22 @@ static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) info_report("OpRegion detected on Intel display %x.", vdev->device_id); gen = igd_gen(vdev); - gmch = vfio_pci_read_config(&vdev->pdev, IGD_GMCH, 4); + gmch = vfio_pci_read_config(pdev, IGD_GMCH, 4); /* * For backward compatibility, enable legacy mode when * - Device geneation is 6 to 9 (including both) + * - IGD exposes itself as VGA controller and claims VGA cycles on host * - Machine type is i440fx (pc_piix) * - IGD device is at guest BDF 00:02.0 * - Not manually disabled by x-igd-legacy-mode=off */ if ((vdev->igd_legacy_mode != ON_OFF_AUTO_OFF) && + vfio_is_vga(vdev) && (gen >= 6 && gen <= 9) && + !(gmch & IGD_GMCH_VGA_DISABLE) && !strcmp(MACHINE_GET_CLASS(qdev_get_machine())->family, "pc_piix") && - (&vdev->pdev == pci_find_device(pci_device_root_bus(&vdev->pdev), + (pdev == pci_find_device(pci_device_root_bus(pdev), 0, PCI_DEVFN(0x2, 0)))) { /* * IGD legacy mode requires: 
@@ -562,20 +570,22 @@ static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) */ ret = vfio_device_get_region_info(&vdev->vbasedev, VFIO_PCI_ROM_REGION_INDEX, &rom); - if ((ret || !rom->size) && !vdev->pdev.romfile) { + if ((ret || !rom->size) && !pdev->romfile) { error_setg(&err, "Device has no ROM"); goto error; } /* - * If IGD VGA Disable is clear (expected) and VGA is not already - * enabled, try to enable it. Probably shouldn't be using legacy mode - * without VGA, but also no point in us enabling VGA if disabled in - * hardware. + * If VGA is not already enabled, try to enable it. We shouldn't be + * using legacy mode without VGA. */ - if (!(gmch & 0x2) && !vdev->vga && !vfio_populate_vga(vdev, &err)) { - error_setg(&err, "Unable to enable VGA access"); - goto error; + if (!vdev->vga) { + if (vfio_populate_vga(vdev, &err)) { + vfio_pci_config_register_vga(vdev); + } else { + error_setg(&err, "Unable to enable VGA access"); + goto error; + } } /* Enable OpRegion and LPC bridge quirk */ @@ -605,8 +615,8 @@ static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) * ASLS (OpRegion address) is read-only, emulated * It contains HPA, guest firmware need to reprogram it with GPA. */ - pci_set_long(vdev->pdev.config + IGD_ASLS, 0); - pci_set_long(vdev->pdev.wmask + IGD_ASLS, ~0); + pci_set_long(pdev->config + IGD_ASLS, 0); + pci_set_long(pdev->wmask + IGD_ASLS, ~0); pci_set_long(vdev->emulated_config_bits + IGD_ASLS, ~0); /* @@ -620,8 +630,8 @@ static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) } /* GMCH is read-only, emulated */ - pci_set_long(vdev->pdev.config + IGD_GMCH, gmch); - pci_set_long(vdev->pdev.wmask + IGD_GMCH, 0); + pci_set_long(pdev->config + IGD_GMCH, gmch); + pci_set_long(pdev->wmask + IGD_GMCH, 0); pci_set_long(vdev->emulated_config_bits + IGD_GMCH, ~0); } @@ -630,12 +640,12 @@ static bool vfio_pci_igd_config_quirk(VFIOPCIDevice *vdev, Error **errp) /* BDSM is read-write, emulated. 
BIOS needs to be able to write it */ if (gen < 11) { - pci_set_long(vdev->pdev.config + IGD_BDSM, 0); - pci_set_long(vdev->pdev.wmask + IGD_BDSM, ~0); + pci_set_long(pdev->config + IGD_BDSM, 0); + pci_set_long(pdev->wmask + IGD_BDSM, ~0); pci_set_long(vdev->emulated_config_bits + IGD_BDSM, ~0); } else { - pci_set_quad(vdev->pdev.config + IGD_BDSM_GEN11, 0); - pci_set_quad(vdev->pdev.wmask + IGD_BDSM_GEN11, ~0); + pci_set_quad(pdev->config + IGD_BDSM_GEN11, 0); + pci_set_quad(pdev->wmask + IGD_BDSM_GEN11, ~0); pci_set_quad(vdev->emulated_config_bits + IGD_BDSM_GEN11, ~0); } } diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c index 48c590b6a9467..bb5775aa711a6 100644 --- a/hw/vfio/iommufd.c +++ b/hw/vfio/iommufd.c @@ -34,52 +34,36 @@ #define TYPE_HOST_IOMMU_DEVICE_IOMMUFD_VFIO \ TYPE_HOST_IOMMU_DEVICE_IOMMUFD "-vfio" -static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly, +static int iommufd_cdev_map(const VFIOContainer *bcontainer, hwaddr iova, + uint64_t size, void *vaddr, bool readonly, MemoryRegion *mr) { - const VFIOIOMMUFDContainer *container = - container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + const VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer); return iommufd_backend_map_dma(container->be, container->ioas_id, iova, size, vaddr, readonly); } -static int iommufd_cdev_map_file(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, +static int iommufd_cdev_map_file(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, int fd, unsigned long start, bool readonly) { - const VFIOIOMMUFDContainer *container = - container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + const VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer); return iommufd_backend_map_file_dma(container->be, container->ioas_id, iova, size, fd, start, readonly); } -static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t 
size, +static int iommufd_cdev_unmap(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, IOMMUTLBEntry *iotlb, bool unmap_all) { - const VFIOIOMMUFDContainer *container = - container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + const VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer); - /* unmap in halves */ if (unmap_all) { - Int128 llsize = int128_rshift(int128_2_64(), 1); - int ret; - - ret = iommufd_backend_unmap_dma(container->be, container->ioas_id, - 0, int128_get64(llsize)); - - if (ret == 0) { - ret = iommufd_backend_unmap_dma(container->be, container->ioas_id, - int128_get64(llsize), - int128_get64(llsize)); - } - - return ret; + size = UINT64_MAX; } /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */ @@ -159,11 +143,10 @@ static bool iommufd_hwpt_dirty_tracking(VFIOIOASHwpt *hwpt) return hwpt && hwpt->hwpt_flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING; } -static int iommufd_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, +static int iommufd_set_dirty_page_tracking(const VFIOContainer *bcontainer, bool start, Error **errp) { - const VFIOIOMMUFDContainer *container = - container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + const VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer); VFIOIOASHwpt *hwpt; QLIST_FOREACH(hwpt, &container->hwpt_list, next) { @@ -190,13 +173,11 @@ static int iommufd_set_dirty_page_tracking(const VFIOContainerBase *bcontainer, return -EINVAL; } -static int iommufd_query_dirty_bitmap(const VFIOContainerBase *bcontainer, +static int iommufd_query_dirty_bitmap(const VFIOContainer *bcontainer, VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp) { - VFIOIOMMUFDContainer *container = container_of(bcontainer, - VFIOIOMMUFDContainer, - bcontainer); + VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer); unsigned long page_size = qemu_real_host_page_size(); VFIOIOASHwpt *hwpt; @@ -324,6 +305,7 @@ static bool iommufd_cdev_autodomains_get(VFIODevice 
*vbasedev, { ERRP_GUARD(); IOMMUFDBackend *iommufd = vbasedev->iommufd; + VFIOContainer *bcontainer = VFIO_IOMMU(container); uint32_t type, flags = 0; uint64_t hw_caps; VFIOIOASHwpt *hwpt; @@ -408,9 +390,9 @@ static bool iommufd_cdev_autodomains_get(VFIODevice *vbasedev, vbasedev->iommu_dirty_tracking = iommufd_hwpt_dirty_tracking(hwpt); QLIST_INSERT_HEAD(&hwpt->device_list, vbasedev, hwpt_next); QLIST_INSERT_HEAD(&container->hwpt_list, hwpt, next); - container->bcontainer.dirty_pages_supported |= + bcontainer->dirty_pages_supported |= vbasedev->iommu_dirty_tracking; - if (container->bcontainer.dirty_pages_supported && + if (bcontainer->dirty_pages_supported && !vbasedev->iommu_dirty_tracking) { warn_report("IOMMU instance for device %s doesn't support dirty tracking", vbasedev->name); @@ -464,7 +446,7 @@ static void iommufd_cdev_detach_container(VFIODevice *vbasedev, static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); if (!QLIST_EMPTY(&bcontainer->device_list)) { return; @@ -486,7 +468,7 @@ static int iommufd_cdev_ram_block_discard_disable(bool state) static bool iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container, uint32_t ioas_id, Error **errp) { - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOContainer *bcontainer = VFIO_IOMMU(container); g_autofree struct iommu_ioas_iova_ranges *info = NULL; struct iommu_iova_range *iova_ranges; int sz, fd = container->be->fd; @@ -528,7 +510,7 @@ static bool iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container, static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, AddressSpace *as, Error **errp) { - VFIOContainerBase *bcontainer; + VFIOContainer *bcontainer; VFIOIOMMUFDContainer *container; VFIOAddressSpace *space; struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) }; @@ -559,16 +541,15 @@ static bool 
iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, /* try to attach to an existing container in this space */ QLIST_FOREACH(bcontainer, &space->containers, next) { - container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer); + container = VFIO_IOMMU_IOMMUFD(bcontainer); if (VFIO_IOMMU_GET_CLASS(bcontainer) != iommufd_vioc || vbasedev->iommufd != container->be) { continue; } - if (!cpr_is_incoming()) { + if (!cpr_is_incoming() || + (vbasedev->cpr.ioas_id == container->ioas_id)) { res = iommufd_cdev_attach_container(vbasedev, container, &err); - } else if (vbasedev->cpr.ioas_id == container->ioas_id) { - res = true; } else { continue; } @@ -607,9 +588,8 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, container->be = vbasedev->iommufd; container->ioas_id = ioas_id; QLIST_INIT(&container->hwpt_list); - vbasedev->cpr.ioas_id = ioas_id; - bcontainer = &container->bcontainer; + bcontainer = VFIO_IOMMU(container); vfio_address_space_insert(space, bcontainer); if (!iommufd_cdev_attach_container(vbasedev, container, errp)) { @@ -641,6 +621,8 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, bcontainer->initialized = true; found_container: + vbasedev->cpr.ioas_id = container->ioas_id; + ret = ioctl(devfd, VFIO_DEVICE_GET_INFO, &dev_info); if (ret) { error_setg_errno(errp, errno, "error getting device info"); @@ -668,7 +650,8 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, vfio_iommufd_cpr_register_device(vbasedev); trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs, - vbasedev->num_regions, vbasedev->flags); + vbasedev->num_initial_regions, + vbasedev->flags); return true; err_listener_register: @@ -687,11 +670,10 @@ static bool iommufd_cdev_attach(const char *name, VFIODevice *vbasedev, static void iommufd_cdev_detach(VFIODevice *vbasedev) { - VFIOContainerBase *bcontainer = vbasedev->bcontainer; + VFIOContainer *bcontainer = vbasedev->bcontainer; 
VFIOAddressSpace *space = bcontainer->space; - VFIOIOMMUFDContainer *container = container_of(bcontainer, - VFIOIOMMUFDContainer, - bcontainer); + VFIOIOMMUFDContainer *container = VFIO_IOMMU_IOMMUFD(bcontainer); + vfio_device_unprepare(vbasedev); if (!vbasedev->ram_block_discard_allowed) { @@ -737,8 +719,8 @@ iommufd_cdev_dep_get_realized_vpdev(struct vfio_pci_dependent_device *dep_dev, } vbasedev_tmp = iommufd_cdev_pci_find_by_devid(dep_dev->devid); - if (!vbasedev_tmp || !vbasedev_tmp->dev->realized || - vbasedev_tmp->type != VFIO_DEVICE_TYPE_PCI) { + if (!vfio_pci_from_vfio_device(vbasedev_tmp) || + !vbasedev_tmp->dev->realized) { return NULL; } diff --git a/hw/vfio/listener.c b/hw/vfio/listener.c index f498e23a93747..2d7d3a464577e 100644 --- a/hw/vfio/listener.c +++ b/hw/vfio/listener.c @@ -25,11 +25,11 @@ #endif #include +#include "exec/target_page.h" #include "hw/vfio/vfio-device.h" #include "hw/vfio/pci.h" #include "system/address-spaces.h" #include "system/memory.h" -#include "system/ram_addr.h" #include "hw/hw.h" #include "qemu/error-report.h" #include "qemu/main-loop.h" @@ -52,7 +52,7 @@ */ -static bool vfio_log_sync_needed(const VFIOContainerBase *bcontainer) +static bool vfio_log_sync_needed(const VFIOContainer *bcontainer) { VFIODevice *vbasedev; @@ -125,7 +125,7 @@ static MemoryRegion *vfio_translate_iotlb(IOMMUTLBEntry *iotlb, hwaddr *xlat_p, static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) { VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n); - VFIOContainerBase *bcontainer = giommu->bcontainer; + VFIOContainer *bcontainer = giommu->bcontainer; hwaddr iova = iotlb->iova + giommu->iommu_offset; MemoryRegion *mr; hwaddr xlat; @@ -202,7 +202,7 @@ static void vfio_ram_discard_notify_discard(RamDiscardListener *rdl, { VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, listener); - VFIOContainerBase *bcontainer = vrdl->bcontainer; + VFIOContainer *bcontainer = vrdl->bcontainer; const hwaddr size = 
int128_get64(section->size); const hwaddr iova = section->offset_within_address_space; int ret; @@ -220,7 +220,7 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, { VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener, listener); - VFIOContainerBase *bcontainer = vrdl->bcontainer; + VFIOContainer *bcontainer = vrdl->bcontainer; const hwaddr end = section->offset_within_region + int128_get64(section->size); hwaddr start, next, iova; @@ -250,8 +250,9 @@ static int vfio_ram_discard_notify_populate(RamDiscardListener *rdl, return 0; } -static void vfio_ram_discard_register_listener(VFIOContainerBase *bcontainer, - MemoryRegionSection *section) +static bool vfio_ram_discard_register_listener(VFIOContainer *bcontainer, + MemoryRegionSection *section, + Error **errp) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); int target_page_size = qemu_target_page_size(); @@ -316,16 +317,18 @@ static void vfio_ram_discard_register_listener(VFIOContainerBase *bcontainer, if (vrdl_mappings + max_memslots - vrdl_count > bcontainer->dma_max_mappings) { - warn_report("%s: possibly running out of DMA mappings. E.g., try" + error_setg(errp, "%s: possibly running out of DMA mappings. E.g., try" " increasing the 'block-size' of virtio-mem devies." 
" Maximum possible DMA mappings: %d, Maximum possible" " memslots: %d", __func__, bcontainer->dma_max_mappings, max_memslots); + return false; } } + return true; } -static void vfio_ram_discard_unregister_listener(VFIOContainerBase *bcontainer, +static void vfio_ram_discard_unregister_listener(VFIOContainer *bcontainer, MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); @@ -393,7 +396,7 @@ static bool vfio_listener_valid_section(MemoryRegionSection *section, return true; } -static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer, +static bool vfio_get_section_iova_range(VFIOContainer *bcontainer, MemoryRegionSection *section, hwaddr *out_iova, hwaddr *out_end, Int128 *out_llend) @@ -420,9 +423,9 @@ static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer, static void vfio_listener_begin(MemoryListener *listener) { - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - void (*listener_begin)(VFIOContainerBase *bcontainer); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); + void (*listener_begin)(VFIOContainer *bcontainer); listener_begin = VFIO_IOMMU_GET_CLASS(bcontainer)->listener_begin; @@ -433,9 +436,9 @@ static void vfio_listener_begin(MemoryListener *listener) static void vfio_listener_commit(MemoryListener *listener) { - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); - void (*listener_commit)(VFIOContainerBase *bcontainer); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); + void (*listener_commit)(VFIOContainer *bcontainer); listener_commit = VFIO_IOMMU_GET_CLASS(bcontainer)->listener_commit; @@ -450,14 +453,14 @@ static void vfio_device_error_append(VFIODevice *vbasedev, Error **errp) * MMIO region mapping failures are not fatal but in this case PCI * peer-to-peer transactions are broken. 
*/ - if (vbasedev && vbasedev->type == VFIO_DEVICE_TYPE_PCI) { + if (vfio_pci_from_vfio_device(vbasedev)) { error_append_hint(errp, "%s: PCI peer-to-peer transactions " "on BARs are not supported.\n", vbasedev->name); } } VFIORamDiscardListener *vfio_find_ram_discard_listener( - VFIOContainerBase *bcontainer, MemoryRegionSection *section) + VFIOContainer *bcontainer, MemoryRegionSection *section) { VFIORamDiscardListener *vrdl = NULL; @@ -479,12 +482,12 @@ VFIORamDiscardListener *vfio_find_ram_discard_listener( static void vfio_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); vfio_container_region_add(bcontainer, section, false); } -void vfio_container_region_add(VFIOContainerBase *bcontainer, +void vfio_container_region_add(VFIOContainer *bcontainer, MemoryRegionSection *section, bool cpr_remap) { @@ -571,9 +574,14 @@ void vfio_container_region_add(VFIOContainerBase *bcontainer, */ if (memory_region_has_ram_discard_manager(section->mr)) { if (!cpr_remap) { - vfio_ram_discard_register_listener(bcontainer, section); - } else if (!vfio_cpr_ram_discard_register_listener(bcontainer, - section)) { + if (!vfio_ram_discard_register_listener(bcontainer, section, &err)) { + goto fail; + } + } else if (!vfio_cpr_ram_discard_replay_populated(bcontainer, + section)) { + error_setg(&err, + "vfio_cpr_ram_discard_register_listener for %s failed", + memory_region_name(section->mr)); goto fail; } return; @@ -648,8 +656,8 @@ void vfio_container_region_add(VFIOContainerBase *bcontainer, static void vfio_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); hwaddr iova, end; Int128 llend, llsize; 
int ret; @@ -707,6 +715,7 @@ static void vfio_listener_region_del(MemoryListener *listener, bool unmap_all = false; if (int128_eq(llsize, int128_2_64())) { + assert(!iova); unmap_all = true; llsize = int128_zero(); } @@ -736,13 +745,13 @@ typedef struct VFIODirtyRanges { } VFIODirtyRanges; typedef struct VFIODirtyRangesListener { - VFIOContainerBase *bcontainer; + VFIOContainer *bcontainer; VFIODirtyRanges ranges; MemoryListener listener; } VFIODirtyRangesListener; static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, - VFIOContainerBase *bcontainer) + VFIOContainer *bcontainer) { VFIOPCIDevice *pcidev; VFIODevice *vbasedev; @@ -751,7 +760,7 @@ static bool vfio_section_is_vfio_pci(MemoryRegionSection *section, owner = memory_region_owner(section->mr); QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) { - if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) { + if (!vfio_pci_from_vfio_device(vbasedev)) { continue; } pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev); @@ -827,7 +836,7 @@ static const MemoryListener vfio_dirty_tracking_listener = { .region_add = vfio_dirty_tracking_update, }; -static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer, +static void vfio_dirty_tracking_init(VFIOContainer *bcontainer, VFIODirtyRanges *ranges) { VFIODirtyRangesListener dirty; @@ -852,7 +861,7 @@ static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer, memory_listener_unregister(&dirty.listener); } -static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer) +static void vfio_devices_dma_logging_stop(VFIOContainer *bcontainer) { uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature), sizeof(uint64_t))] = {}; @@ -881,7 +890,7 @@ static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer) } static struct vfio_device_feature * -vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer, +vfio_device_feature_dma_logging_start_create(VFIOContainer *bcontainer, VFIODirtyRanges 
*tracking) { struct vfio_device_feature *feature; @@ -954,7 +963,7 @@ static void vfio_device_feature_dma_logging_start_destroy( g_free(feature); } -static bool vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer, +static bool vfio_devices_dma_logging_start(VFIOContainer *bcontainer, Error **errp) { struct vfio_device_feature *feature; @@ -998,8 +1007,8 @@ static bool vfio_listener_log_global_start(MemoryListener *listener, Error **errp) { ERRP_GUARD(); - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); bool ret; if (vfio_container_devices_dirty_tracking_is_supported(bcontainer)) { @@ -1016,8 +1025,8 @@ static bool vfio_listener_log_global_start(MemoryListener *listener, static void vfio_listener_log_global_stop(MemoryListener *listener) { - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); Error *local_err = NULL; int ret = 0; @@ -1049,9 +1058,9 @@ static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) vfio_giommu_dirty_notifier *gdn = container_of(n, vfio_giommu_dirty_notifier, n); VFIOGuestIOMMU *giommu = gdn->giommu; - VFIOContainerBase *bcontainer = giommu->bcontainer; + VFIOContainer *bcontainer = giommu->bcontainer; hwaddr iova = iotlb->iova + giommu->iommu_offset; - ram_addr_t translated_addr; + hwaddr translated_addr; Error *local_err = NULL; int ret = -EINVAL; MemoryRegion *mr; @@ -1100,8 +1109,8 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section, { const hwaddr size = int128_get64(section->size); const hwaddr iova = section->offset_within_address_space; - const ram_addr_t ram_addr = memory_region_get_ram_addr(section->mr) + - section->offset_within_region; + const hwaddr translated_addr = memory_region_get_ram_addr(section->mr) + + 
section->offset_within_region; VFIORamDiscardListener *vrdl = opaque; Error *local_err = NULL; int ret; @@ -1110,8 +1119,8 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section, * Sync the whole mapped region (spanning multiple individual mappings) * in one go. */ - ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr, - &local_err); + ret = vfio_container_query_dirty_bitmap(vrdl->bcontainer, iova, size, + translated_addr, &local_err); if (ret) { error_report_err(local_err); } @@ -1119,7 +1128,7 @@ static int vfio_ram_discard_query_dirty_bitmap(MemoryRegionSection *section, } static int -vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer, +vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *bcontainer, MemoryRegionSection *section) { RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr); @@ -1135,7 +1144,7 @@ vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer, &vrdl); } -static int vfio_sync_iommu_dirty_bitmap(VFIOContainerBase *bcontainer, +static int vfio_sync_iommu_dirty_bitmap(VFIOContainer *bcontainer, MemoryRegionSection *section) { VFIOGuestIOMMU *giommu; @@ -1172,10 +1181,10 @@ static int vfio_sync_iommu_dirty_bitmap(VFIOContainerBase *bcontainer, return 0; } -static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer, +static int vfio_sync_dirty_bitmap(VFIOContainer *bcontainer, MemoryRegionSection *section, Error **errp) { - ram_addr_t ram_addr; + hwaddr translated_addr; if (memory_region_is_iommu(section->mr)) { return vfio_sync_iommu_dirty_bitmap(bcontainer, section); @@ -1190,19 +1199,19 @@ static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer, return ret; } - ram_addr = memory_region_get_ram_addr(section->mr) + - section->offset_within_region; + translated_addr = memory_region_get_ram_addr(section->mr) + + section->offset_within_region; return vfio_container_query_dirty_bitmap(bcontainer, 
REAL_HOST_PAGE_ALIGN(section->offset_within_address_space), - int128_get64(section->size), ram_addr, errp); + int128_get64(section->size), translated_addr, errp); } static void vfio_listener_log_sync(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase, - listener); + VFIOContainer *bcontainer = container_of(listener, VFIOContainer, + listener); int ret; Error *local_err = NULL; @@ -1233,7 +1242,7 @@ static const MemoryListener vfio_memory_listener = { .log_sync = vfio_listener_log_sync, }; -bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp) +bool vfio_listener_register(VFIOContainer *bcontainer, Error **errp) { bcontainer->listener = vfio_memory_listener; memory_listener_register(&bcontainer->listener, bcontainer->space->as); @@ -1247,7 +1256,7 @@ bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp) return true; } -void vfio_listener_unregister(VFIOContainerBase *bcontainer) +void vfio_listener_unregister(VFIOContainer *bcontainer) { memory_listener_unregister(&bcontainer->listener); } diff --git a/hw/vfio/meson.build b/hw/vfio/meson.build index bfaf6be805483..82f68698fb848 100644 --- a/hw/vfio/meson.build +++ b/hw/vfio/meson.build @@ -3,8 +3,8 @@ vfio_ss = ss.source_set() vfio_ss.add(files( 'listener.c', - 'container-base.c', 'container.c', + 'container-legacy.c', 'helpers.c', )) vfio_ss.add(when: 'CONFIG_PSERIES', if_true: files('spapr.c')) @@ -13,14 +13,11 @@ vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files( 'pci.c', )) vfio_ss.add(when: 'CONFIG_VFIO_CCW', if_true: files('ccw.c')) -vfio_ss.add(when: 'CONFIG_VFIO_PLATFORM', if_true: files('platform.c')) vfio_ss.add(when: 'CONFIG_VFIO_AP', if_true: files('ap.c')) vfio_ss.add(when: 'CONFIG_VFIO_IGD', if_true: files('igd.c')) specific_ss.add_all(when: 'CONFIG_VFIO', if_true: vfio_ss) -system_ss.add(when: 'CONFIG_VFIO_XGMAC', if_true: files('calxeda-xgmac.c')) -system_ss.add(when: 
'CONFIG_VFIO_AMD_XGBE', if_true: files('amd-xgbe.c')) system_ss.add(when: 'CONFIG_VFIO', if_true: files( 'cpr.c', 'cpr-legacy.c', diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 3f002252acfb7..b5da6afbf5b00 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -113,6 +113,7 @@ static uint64_t vfio_generic_window_quirk_data_read(void *opaque, { VFIOConfigWindowQuirk *window = opaque; VFIOPCIDevice *vdev = window->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); uint64_t data; /* Always read data reg, discard if window enabled */ @@ -120,7 +121,7 @@ static uint64_t vfio_generic_window_quirk_data_read(void *opaque, addr + window->data_offset, size); if (window->window_enabled) { - data = vfio_pci_read_config(&vdev->pdev, window->address_val, size); + data = vfio_pci_read_config(pdev, window->address_val, size); trace_vfio_quirk_generic_window_data_read(vdev->vbasedev.name, memory_region_name(window->data_mem), data); } @@ -133,9 +134,10 @@ static void vfio_generic_window_quirk_data_write(void *opaque, hwaddr addr, { VFIOConfigWindowQuirk *window = opaque; VFIOPCIDevice *vdev = window->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); if (window->window_enabled) { - vfio_pci_write_config(&vdev->pdev, window->address_val, data, size); + vfio_pci_write_config(pdev, window->address_val, data, size); trace_vfio_quirk_generic_window_data_write(vdev->vbasedev.name, memory_region_name(window->data_mem), data); return; @@ -156,6 +158,7 @@ static uint64_t vfio_generic_quirk_mirror_read(void *opaque, { VFIOConfigMirrorQuirk *mirror = opaque; VFIOPCIDevice *vdev = mirror->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); uint64_t data; /* Read and discard in case the hardware cares */ @@ -163,7 +166,7 @@ static uint64_t vfio_generic_quirk_mirror_read(void *opaque, addr + mirror->offset, size); addr += mirror->config_offset; - data = vfio_pci_read_config(&vdev->pdev, addr, size); + data = vfio_pci_read_config(pdev, addr, size); 
trace_vfio_quirk_generic_mirror_read(vdev->vbasedev.name, memory_region_name(mirror->mem), addr, data); @@ -175,9 +178,10 @@ static void vfio_generic_quirk_mirror_write(void *opaque, hwaddr addr, { VFIOConfigMirrorQuirk *mirror = opaque; VFIOPCIDevice *vdev = mirror->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); addr += mirror->config_offset; - vfio_pci_write_config(&vdev->pdev, addr, data, size); + vfio_pci_write_config(pdev, addr, data, size); trace_vfio_quirk_generic_mirror_write(vdev->vbasedev.name, memory_region_name(mirror->mem), addr, data); @@ -211,7 +215,8 @@ static uint64_t vfio_ati_3c3_quirk_read(void *opaque, hwaddr addr, unsigned size) { VFIOPCIDevice *vdev = opaque; - uint64_t data = vfio_pci_read_config(&vdev->pdev, + PCIDevice *pdev = PCI_DEVICE(vdev); + uint64_t data = vfio_pci_read_config(pdev, PCI_BASE_ADDRESS_4 + 1, size); trace_vfio_quirk_ati_3c3_read(vdev->vbasedev.name, data); @@ -563,6 +568,7 @@ static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque, { VFIONvidia3d0Quirk *quirk = opaque; VFIOPCIDevice *vdev = quirk->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); VFIONvidia3d0State old_state = quirk->state; uint64_t data = vfio_vga_read(&vdev->vga->region[QEMU_PCI_VGA_IO_HI], addr + 0x10, size); @@ -573,7 +579,7 @@ static uint64_t vfio_nvidia_3d0_quirk_read(void *opaque, (quirk->offset & ~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) { uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1); - data = vfio_pci_read_config(&vdev->pdev, offset, size); + data = vfio_pci_read_config(pdev, offset, size); trace_vfio_quirk_nvidia_3d0_read(vdev->vbasedev.name, offset, size, data); } @@ -586,6 +592,7 @@ static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr, { VFIONvidia3d0Quirk *quirk = opaque; VFIOPCIDevice *vdev = quirk->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); VFIONvidia3d0State old_state = quirk->state; quirk->state = NONE; @@ -599,7 +606,7 @@ static void vfio_nvidia_3d0_quirk_write(void *opaque, hwaddr addr, if ((quirk->offset & 
~(PCI_CONFIG_SPACE_SIZE - 1)) == 0x1800) { uint8_t offset = quirk->offset & (PCI_CONFIG_SPACE_SIZE - 1); - vfio_pci_write_config(&vdev->pdev, offset, data, size); + vfio_pci_write_config(pdev, offset, data, size); trace_vfio_quirk_nvidia_3d0_write(vdev->vbasedev.name, offset, data, size); return; @@ -815,7 +822,7 @@ static void vfio_nvidia_quirk_mirror_write(void *opaque, hwaddr addr, { VFIOConfigMirrorQuirk *mirror = opaque; VFIOPCIDevice *vdev = mirror->vdev; - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); LastDataSet *last = (LastDataSet *)&mirror->data; vfio_generic_quirk_mirror_write(opaque, addr, data, size); @@ -1005,6 +1012,7 @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, { VFIOrtl8168Quirk *rtl = opaque; VFIOPCIDevice *vdev = rtl->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); rtl->enabled = false; @@ -1013,7 +1021,7 @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, rtl->addr = (uint32_t)data; if (data & 0x80000000U) { /* Do write */ - if (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX) { + if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { hwaddr offset = data & 0xfff; uint64_t val = rtl->data; @@ -1021,7 +1029,7 @@ static void vfio_rtl8168_quirk_address_write(void *opaque, hwaddr addr, (uint16_t)offset, val); /* Write to the proper guest MSI-X table instead */ - memory_region_dispatch_write(&vdev->pdev.msix_table_mmio, + memory_region_dispatch_write(&pdev->msix_table_mmio, offset, val, size_memop(size) | MO_LE, MEMTXATTRS_UNSPECIFIED); @@ -1049,11 +1057,12 @@ static uint64_t vfio_rtl8168_quirk_data_read(void *opaque, { VFIOrtl8168Quirk *rtl = opaque; VFIOPCIDevice *vdev = rtl->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); uint64_t data = vfio_region_read(&vdev->bars[2].region, addr + 0x70, size); - if (rtl->enabled && (vdev->pdev.cap_present & QEMU_PCI_CAP_MSIX)) { + if (rtl->enabled && (pdev->cap_present & QEMU_PCI_CAP_MSIX)) { hwaddr offset = rtl->addr & 0xfff; - 
memory_region_dispatch_read(&vdev->pdev.msix_table_mmio, offset, + memory_region_dispatch_read(&pdev->msix_table_mmio, offset, &data, size_memop(size) | MO_LE, MEMTXATTRS_UNSPECIFIED); trace_vfio_quirk_rtl8168_msix_read(vdev->vbasedev.name, offset, data); @@ -1150,15 +1159,12 @@ void vfio_vga_quirk_exit(VFIOPCIDevice *vdev) void vfio_vga_quirk_finalize(VFIOPCIDevice *vdev) { - int i, j; + int i; for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) { while (!QLIST_EMPTY(&vdev->vga->region[i].quirks)) { VFIOQuirk *quirk = QLIST_FIRST(&vdev->vga->region[i].quirks); QLIST_REMOVE(quirk, next); - for (j = 0; j < quirk->nr_mem; j++) { - object_unparent(OBJECT(&quirk->mem[j])); - } g_free(quirk->mem); g_free(quirk->data); g_free(quirk); @@ -1198,14 +1204,10 @@ void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr) void vfio_bar_quirk_finalize(VFIOPCIDevice *vdev, int nr) { VFIOBAR *bar = &vdev->bars[nr]; - int i; while (!QLIST_EMPTY(&bar->quirks)) { VFIOQuirk *quirk = QLIST_FIRST(&bar->quirks); QLIST_REMOVE(quirk, next); - for (i = 0; i < quirk->nr_mem; i++) { - object_unparent(OBJECT(&quirk->mem[i])); - } g_free(quirk->mem); g_free(quirk->data); g_free(quirk); @@ -1297,7 +1299,7 @@ static void vfio_radeon_set_gfx_only_reset(VFIOPCIDevice *vdev) static int vfio_radeon_reset(VFIOPCIDevice *vdev) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); int i, ret = 0; uint32_t data; @@ -1454,7 +1456,7 @@ static bool is_valid_std_cap_offset(uint8_t pos) static bool vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp) { ERRP_GUARD(); - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); int ret, pos; bool c8_conflict = false, d4_conflict = false; uint8_t tmp; @@ -1547,6 +1549,7 @@ static bool vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp) static bool vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp) { ERRP_GUARD(); + PCIDevice *pdev = PCI_DEVICE(vdev); uint8_t membar_phys[16]; int ret, pos = 0xE8; @@ -1565,7 +1568,7 
@@ static bool vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp) return false; } - ret = pci_add_capability(&vdev->pdev, PCI_CAP_ID_VNDR, pos, + ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, VMD_SHADOW_CAP_LEN, errp); if (ret < 0) { error_prepend(errp, "Failed to add VMD MEMBAR Shadow cap: "); @@ -1574,10 +1577,10 @@ static bool vfio_add_vmd_shadow_cap(VFIOPCIDevice *vdev, Error **errp) memset(vdev->emulated_config_bits + pos, 0xFF, VMD_SHADOW_CAP_LEN); pos += PCI_CAP_FLAGS; - pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_LEN); - pci_set_byte(vdev->pdev.config + pos++, VMD_SHADOW_CAP_VER); - pci_set_long(vdev->pdev.config + pos, 0x53484457); /* SHDW */ - memcpy(vdev->pdev.config + pos + 4, membar_phys, 16); + pci_set_byte(pdev->config + pos++, VMD_SHADOW_CAP_LEN); + pci_set_byte(pdev->config + pos++, VMD_SHADOW_CAP_VER); + pci_set_long(pdev->config + pos, 0x53484457); /* SHDW */ + memcpy(pdev->config + pos + 4, membar_phys, 16); return true; } diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index be05002b9819f..8b8bc5a42186d 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -49,8 +49,6 @@ #include "vfio-migration-internal.h" #include "vfio-helpers.h" -#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" - /* Protected by BQL */ static KVMRouteChange vfio_route_change; @@ -119,6 +117,7 @@ static void vfio_intx_mmap_enable(void *opaque) static void vfio_intx_interrupt(void *opaque) { VFIOPCIDevice *vdev = opaque; + PCIDevice *pdev = PCI_DEVICE(vdev); if (!event_notifier_test_and_clear(&vdev->intx.interrupt)) { return; @@ -127,7 +126,7 @@ static void vfio_intx_interrupt(void *opaque) trace_vfio_intx_interrupt(vdev->vbasedev.name, 'A' + vdev->intx.pin); vdev->intx.pending = true; - pci_irq_assert(&vdev->pdev); + pci_irq_assert(pdev); vfio_mmap_set_enabled(vdev, false); if (vdev->intx.mmap_timeout) { timer_mod(vdev->intx.mmap_timer, @@ -138,6 +137,7 @@ static void vfio_intx_interrupt(void *opaque) void vfio_pci_intx_eoi(VFIODevice *vbasedev) { 
VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); + PCIDevice *pdev = PCI_DEVICE(vdev); if (!vdev->intx.pending) { return; @@ -146,13 +146,14 @@ void vfio_pci_intx_eoi(VFIODevice *vbasedev) trace_vfio_pci_intx_eoi(vbasedev->name); vdev->intx.pending = false; - pci_irq_deassert(&vdev->pdev); + pci_irq_deassert(pdev); vfio_device_irq_unmask(vbasedev, VFIO_PCI_INTX_IRQ_INDEX); } static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) { #ifdef CONFIG_KVM + PCIDevice *pdev = PCI_DEVICE(vdev); int irq_fd = event_notifier_get_fd(&vdev->intx.interrupt); if (vdev->no_kvm_intx || !kvm_irqfds_enabled() || @@ -165,7 +166,7 @@ static bool vfio_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) qemu_set_fd_handler(irq_fd, NULL, NULL, vdev); vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; - pci_irq_deassert(&vdev->pdev); + pci_irq_deassert(pdev); /* Get an eventfd for resample/unmask */ if (!vfio_notifier_init(vdev, &vdev->intx.unmask, "intx-unmask", 0, errp)) { @@ -243,6 +244,8 @@ static bool vfio_cpr_intx_enable_kvm(VFIOPCIDevice *vdev, Error **errp) static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) { #ifdef CONFIG_KVM + PCIDevice *pdev = PCI_DEVICE(vdev); + if (!vdev->intx.kvm_accel) { return; } @@ -253,7 +256,7 @@ static void vfio_intx_disable_kvm(VFIOPCIDevice *vdev) */ vfio_device_irq_mask(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; - pci_irq_deassert(&vdev->pdev); + pci_irq_deassert(pdev); /* Tell KVM to stop listening for an INTx irqfd */ if (kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, &vdev->intx.interrupt, @@ -302,14 +305,14 @@ static void vfio_intx_update(VFIOPCIDevice *vdev, PCIINTxRoute *route) static void vfio_intx_routing_notifier(PCIDevice *pdev) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); PCIINTxRoute route; if (vdev->interrupt != VFIO_INT_INTx) { return; } - route = 
pci_device_route_intx_to_irq(&vdev->pdev, vdev->intx.pin); + route = pci_device_route_intx_to_irq(pdev, vdev->intx.pin); if (pci_intx_route_changed(&vdev->intx.route, &route)) { vfio_intx_update(vdev, &route); @@ -326,7 +329,8 @@ static void vfio_irqchip_change(Notifier *notify, void *data) static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) { - uint8_t pin = vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1); + PCIDevice *pdev = PCI_DEVICE(vdev); + uint8_t pin = vfio_pci_read_config(pdev, PCI_INTERRUPT_PIN, 1); Error *err = NULL; int32_t fd; @@ -344,7 +348,7 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) } vdev->intx.pin = pin - 1; /* Pin A (1) -> irq[0] */ - pci_config_set_interrupt_pin(vdev->pdev.config, pin); + pci_config_set_interrupt_pin(pdev->config, pin); #ifdef CONFIG_KVM /* @@ -352,7 +356,7 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) * where we won't actually use the result anyway. */ if (kvm_irqfds_enabled() && kvm_resamplefds_enabled()) { - vdev->intx.route = pci_device_route_intx_to_irq(&vdev->pdev, + vdev->intx.route = pci_device_route_intx_to_irq(pdev, vdev->intx.pin); } #endif @@ -392,13 +396,14 @@ static bool vfio_intx_enable(VFIOPCIDevice *vdev, Error **errp) static void vfio_intx_disable(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); int fd; timer_del(vdev->intx.mmap_timer); vfio_intx_disable_kvm(vdev); vfio_device_irq_disable(&vdev->vbasedev, VFIO_PCI_INTX_IRQ_INDEX); vdev->intx.pending = false; - pci_irq_deassert(&vdev->pdev); + pci_irq_deassert(pdev); vfio_mmap_set_enabled(vdev, true); fd = event_notifier_get_fd(&vdev->intx.interrupt); @@ -415,6 +420,14 @@ bool vfio_pci_intx_enable(VFIOPCIDevice *vdev, Error **errp) return vfio_intx_enable(vdev, errp); } +void vfio_pci_intx_set_handler(VFIOPCIDevice *vdev, bool enable) +{ + int fd = event_notifier_get_fd(&vdev->intx.interrupt); + IOHandler *handler = (enable ? 
vfio_intx_interrupt : NULL); + + qemu_set_fd_handler(fd, handler, NULL, vdev); +} + /* * MSI/X */ @@ -422,6 +435,7 @@ static void vfio_msi_interrupt(void *opaque) { VFIOMSIVector *vector = opaque; VFIOPCIDevice *vdev = vector->vdev; + PCIDevice *pdev = PCI_DEVICE(vdev); MSIMessage (*get_msg)(PCIDevice *dev, unsigned vector); void (*notify)(PCIDevice *dev, unsigned vector); MSIMessage msg; @@ -436,9 +450,9 @@ static void vfio_msi_interrupt(void *opaque) notify = msix_notify; /* A masked vector firing needs to use the PBA, enable it */ - if (msix_is_masked(&vdev->pdev, nr)) { + if (msix_is_masked(pdev, nr)) { set_bit(nr, vdev->msix->pending); - memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, true); + memory_region_set_enabled(&pdev->msix_pba_mmio, true); trace_vfio_msix_pba_enable(vdev->vbasedev.name); } } else if (vdev->interrupt == VFIO_INT_MSI) { @@ -448,17 +462,18 @@ static void vfio_msi_interrupt(void *opaque) abort(); } - msg = get_msg(&vdev->pdev, nr); + msg = get_msg(pdev, nr); trace_vfio_msi_interrupt(vdev->vbasedev.name, nr, msg.address, msg.data); - notify(&vdev->pdev, nr); + notify(pdev, nr); } -void vfio_pci_msi_set_handler(VFIOPCIDevice *vdev, int nr) +void vfio_pci_msi_set_handler(VFIOPCIDevice *vdev, int nr, bool enable) { VFIOMSIVector *vector = &vdev->msi_vectors[nr]; int fd = event_notifier_get_fd(&vector->interrupt); + IOHandler *handler = (enable ? 
vfio_msi_interrupt : NULL); - qemu_set_fd_handler(fd, vfio_msi_interrupt, NULL, vector); + qemu_set_fd_handler(fd, handler, NULL, vector); } /* @@ -488,6 +503,7 @@ static int vfio_enable_msix_no_vec(VFIOPCIDevice *vdev) static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) { + PCIDevice *pdev = PCI_DEVICE(vdev); struct vfio_irq_set *irq_set; int ret = 0, i, argsz; int32_t *fds; @@ -530,7 +546,7 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) */ if (vdev->msi_vectors[i].use) { if (vdev->msi_vectors[i].virq < 0 || - (msix && msix_is_masked(&vdev->pdev, i))) { + (msix && msix_is_masked(pdev, i))) { fd = event_notifier_get_fd(&vdev->msi_vectors[i].interrupt); } else { fd = event_notifier_get_fd(&vdev->msi_vectors[i].kvm_interrupt); @@ -550,12 +566,14 @@ static int vfio_enable_vectors(VFIOPCIDevice *vdev, bool msix) void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, int vector_n, bool msix) { + PCIDevice *pdev = PCI_DEVICE(vdev); + if ((msix && vdev->no_kvm_msix) || (!msix && vdev->no_kvm_msi)) { return; } vector->virq = kvm_irqchip_add_msi_route(&vfio_route_change, - vector_n, &vdev->pdev); + vector_n, pdev); } static void vfio_connect_kvm_msi_virq(VFIOMSIVector *vector, int nr) @@ -624,7 +642,7 @@ static void set_irq_signalling(VFIODevice *vbasedev, VFIOMSIVector *vector, void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr) { VFIOMSIVector *vector = &vdev->msi_vectors[nr]; - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); Error *local_err = NULL; vector->vdev = vdev; @@ -642,7 +660,7 @@ void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr) static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr, MSIMessage *msg, IOHandler *handler) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIOMSIVector *vector; int ret; bool resizing = !!(vdev->nr_vectors < nr + 1); @@ -713,7 +731,7 @@ static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned 
int nr, clear_bit(nr, vdev->msix->pending); if (find_first_bit(vdev->msix->pending, vdev->nr_vectors) == vdev->nr_vectors) { - memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); + memory_region_set_enabled(&pdev->msix_pba_mmio, false); trace_vfio_msix_pba_disable(vdev->vbasedev.name); } @@ -737,7 +755,7 @@ static int vfio_msix_vector_use(PCIDevice *pdev, static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIOMSIVector *vector = &vdev->msi_vectors[nr]; trace_vfio_msix_vector_release(vdev->vbasedev.name, nr); @@ -764,7 +782,9 @@ static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr) void vfio_pci_msix_set_notifiers(VFIOPCIDevice *vdev) { - msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, + PCIDevice *pdev = PCI_DEVICE(vdev); + + msix_set_vector_notifiers(pdev, vfio_msix_vector_use, vfio_msix_vector_release, NULL); } @@ -791,6 +811,7 @@ void vfio_pci_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev) static void vfio_msix_enable(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); int ret; vfio_disable_interrupts(vdev); @@ -807,7 +828,7 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev) */ vfio_pci_prepare_kvm_msi_virq_batch(vdev); - if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use, + if (msix_set_vector_notifiers(pdev, vfio_msix_vector_use, vfio_msix_vector_release, NULL)) { error_report("vfio: msix_set_vector_notifiers failed"); } @@ -845,11 +866,12 @@ static void vfio_msix_enable(VFIOPCIDevice *vdev) static void vfio_msi_enable(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); int ret, i; vfio_disable_interrupts(vdev); - vdev->nr_vectors = msi_nr_vectors_allocated(&vdev->pdev); + vdev->nr_vectors = msi_nr_vectors_allocated(pdev); retry: /* * Setting vector notifiers needs to enable route for each vector. 
@@ -942,10 +964,11 @@ static void vfio_msi_disable_common(VFIOPCIDevice *vdev) static void vfio_msix_disable(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); Error *err = NULL; int i; - msix_unset_vector_notifiers(&vdev->pdev); + msix_unset_vector_notifiers(pdev); /* * MSI-X will only release vectors if MSI-X is still enabled on the @@ -953,8 +976,8 @@ static void vfio_msix_disable(VFIOPCIDevice *vdev) */ for (i = 0; i < vdev->nr_vectors; i++) { if (vdev->msi_vectors[i].use) { - vfio_msix_vector_release(&vdev->pdev, i); - msix_vector_unuse(&vdev->pdev, i); + vfio_msix_vector_release(pdev, i); + msix_vector_unuse(pdev, i); } } @@ -991,6 +1014,7 @@ static void vfio_msi_disable(VFIOPCIDevice *vdev) static void vfio_update_msi(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); int i; for (i = 0; i < vdev->nr_vectors; i++) { @@ -1001,8 +1025,8 @@ static void vfio_update_msi(VFIOPCIDevice *vdev) continue; } - msg = msi_get_message(&vdev->pdev, i); - vfio_update_kvm_msi_virq(vector, msg, &vdev->pdev); + msg = msi_get_message(pdev, i); + vfio_update_kvm_msi_virq(vector, msg, pdev); } } @@ -1164,13 +1188,14 @@ static const MemoryRegionOps vfio_rom_ops = { static void vfio_pci_size_rom(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); VFIODevice *vbasedev = &vdev->vbasedev; uint32_t orig, size = cpu_to_le32((uint32_t)PCI_ROM_ADDRESS_MASK); char *name; - if (vdev->pdev.romfile || !vdev->pdev.rom_bar) { + if (pdev->romfile || !pdev->rom_bar) { /* Since pci handles romfile, just print a message and return */ - if (vfio_opt_rom_in_denylist(vdev) && vdev->pdev.romfile) { + if (vfio_opt_rom_in_denylist(vdev) && pdev->romfile) { warn_report("Device at %s is known to cause system instability" " issues during option rom execution", vdev->vbasedev.name); @@ -1199,7 +1224,7 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) } if (vfio_opt_rom_in_denylist(vdev)) { - if (vdev->pdev.rom_bar > 0) { + if (pdev->rom_bar > 0) { warn_report("Device at %s is 
known to cause system instability" " issues during option rom execution", vdev->vbasedev.name); @@ -1218,12 +1243,12 @@ static void vfio_pci_size_rom(VFIOPCIDevice *vdev) name = g_strdup_printf("vfio[%s].rom", vdev->vbasedev.name); - memory_region_init_io(&vdev->pdev.rom, OBJECT(vdev), + memory_region_init_io(&pdev->rom, OBJECT(vdev), &vfio_rom_ops, vdev, name, size); g_free(name); - pci_register_bar(&vdev->pdev, PCI_ROM_SLOT, - PCI_BASE_ADDRESS_SPACE_MEMORY, &vdev->pdev.rom); + pci_register_bar(pdev, PCI_ROM_SLOT, + PCI_BASE_ADDRESS_SPACE_MEMORY, &pdev->rom); vdev->rom_read_failed = false; } @@ -1321,7 +1346,7 @@ static const MemoryRegionOps vfio_vga_ops = { */ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIORegion *region = &vdev->bars[bar].region; MemoryRegion *mmap_mr, *region_mr, *base_mr; PCIIORegion *r; @@ -1367,7 +1392,7 @@ static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar) */ uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIODevice *vbasedev = &vdev->vbasedev; uint32_t emu_bits = 0, emu_val = 0, phys_val = 0, val; @@ -1401,7 +1426,7 @@ uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len) void vfio_pci_write_config(PCIDevice *pdev, uint32_t addr, uint32_t val, int len) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIODevice *vbasedev = &vdev->vbasedev; uint32_t val_le = cpu_to_le32(val); int ret; @@ -1496,6 +1521,7 @@ static void vfio_disable_interrupts(VFIOPCIDevice *vdev) static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp) { + PCIDevice *pdev = PCI_DEVICE(vdev); uint16_t ctrl; bool msi_64bit, msi_maskbit; int ret, entries; @@ -1516,7 +1542,7 @@ static bool vfio_msi_setup(VFIOPCIDevice *vdev, int pos, Error **errp) 
trace_vfio_msi_setup(vdev->vbasedev.name, pos); - ret = msi_init(&vdev->pdev, pos, entries, msi_64bit, msi_maskbit, &err); + ret = msi_init(pdev, pos, entries, msi_64bit, msi_maskbit, &err); if (ret < 0) { if (ret == -ENOTSUP) { return true; @@ -1709,6 +1735,7 @@ static bool vfio_pci_relocate_msix(VFIOPCIDevice *vdev, Error **errp) */ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) { + PCIDevice *pdev = PCI_DEVICE(vdev); uint8_t pos; uint16_t ctrl; uint32_t table, pba; @@ -1716,7 +1743,7 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) VFIOMSIXInfo *msix; int ret; - pos = pci_find_capability(&vdev->pdev, PCI_CAP_ID_MSIX); + pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX); if (!pos) { return true; } @@ -1808,12 +1835,13 @@ static bool vfio_msix_early_setup(VFIOPCIDevice *vdev, Error **errp) static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) { + PCIDevice *pdev = PCI_DEVICE(vdev); int ret; Error *err = NULL; vdev->msix->pending = g_new0(unsigned long, BITS_TO_LONGS(vdev->msix->entries)); - ret = msix_init(&vdev->pdev, vdev->msix->entries, + ret = msix_init(pdev, vdev->msix->entries, vdev->bars[vdev->msix->table_bar].mr, vdev->msix->table_bar, vdev->msix->table_offset, vdev->bars[vdev->msix->pba_bar].mr, @@ -1845,7 +1873,7 @@ static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) * vector-use notifier is called, which occurs on unmask, we test whether * PBA emulation is needed and again disable if not. 
*/ - memory_region_set_enabled(&vdev->pdev.msix_pba_mmio, false); + memory_region_set_enabled(&pdev->msix_pba_mmio, false); /* * The emulated machine may provide a paravirt interface for MSIX setup @@ -1857,7 +1885,7 @@ static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) */ if (object_property_get_bool(OBJECT(qdev_get_machine()), "vfio-no-msix-emulation", NULL)) { - memory_region_set_enabled(&vdev->pdev.msix_table_mmio, false); + memory_region_set_enabled(&pdev->msix_table_mmio, false); } return true; @@ -1865,10 +1893,12 @@ static bool vfio_msix_setup(VFIOPCIDevice *vdev, int pos, Error **errp) void vfio_pci_teardown_msi(VFIOPCIDevice *vdev) { - msi_uninit(&vdev->pdev); + PCIDevice *pdev = PCI_DEVICE(vdev); + + msi_uninit(pdev); if (vdev->msix) { - msix_uninit(&vdev->pdev, + msix_uninit(pdev, vdev->bars[vdev->msix->table_bar].mr, vdev->bars[vdev->msix->pba_bar].mr); g_free(vdev->msix->pending); @@ -1929,6 +1959,7 @@ static void vfio_bars_prepare(VFIOPCIDevice *vdev) static void vfio_bar_register(VFIOPCIDevice *vdev, int nr) { + PCIDevice *pdev = PCI_DEVICE(vdev); VFIOBAR *bar = &vdev->bars[nr]; char *name; @@ -1950,7 +1981,7 @@ static void vfio_bar_register(VFIOPCIDevice *vdev, int nr) } } - pci_register_bar(&vdev->pdev, nr, bar->type, bar->mr); + pci_register_bar(pdev, nr, bar->type, bar->mr); } static void vfio_bars_register(VFIOPCIDevice *vdev) @@ -1964,6 +1995,7 @@ static void vfio_bars_register(VFIOPCIDevice *vdev) void vfio_pci_bars_exit(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); int i; for (i = 0; i < PCI_ROM_SLOT; i++) { @@ -1977,7 +2009,7 @@ void vfio_pci_bars_exit(VFIOPCIDevice *vdev) } if (vdev->vga) { - pci_unregister_vga(&vdev->pdev); + pci_unregister_vga(pdev); vfio_vga_quirk_exit(vdev); } } @@ -1993,7 +2025,6 @@ static void vfio_bars_finalize(VFIOPCIDevice *vdev) vfio_region_finalize(&bar->region); if (bar->mr) { assert(bar->size); - object_unparent(OBJECT(bar->mr)); g_free(bar->mr); bar->mr = NULL; } @@ -2001,9 
+2032,6 @@ static void vfio_bars_finalize(VFIOPCIDevice *vdev) if (vdev->vga) { vfio_vga_quirk_finalize(vdev); - for (i = 0; i < ARRAY_SIZE(vdev->vga->region); i++) { - object_unparent(OBJECT(&vdev->vga->region[i].mem)); - } g_free(vdev->vga); } } @@ -2049,8 +2077,10 @@ static void vfio_set_word_bits(uint8_t *buf, uint16_t val, uint16_t mask) static void vfio_add_emulated_word(VFIOPCIDevice *vdev, int pos, uint16_t val, uint16_t mask) { - vfio_set_word_bits(vdev->pdev.config + pos, val, mask); - vfio_set_word_bits(vdev->pdev.wmask + pos, ~mask, mask); + PCIDevice *pdev = PCI_DEVICE(vdev); + + vfio_set_word_bits(pdev->config + pos, val, mask); + vfio_set_word_bits(pdev->wmask + pos, ~mask, mask); vfio_set_word_bits(vdev->emulated_config_bits + pos, mask, mask); } @@ -2062,8 +2092,10 @@ static void vfio_set_long_bits(uint8_t *buf, uint32_t val, uint32_t mask) static void vfio_add_emulated_long(VFIOPCIDevice *vdev, int pos, uint32_t val, uint32_t mask) { - vfio_set_long_bits(vdev->pdev.config + pos, val, mask); - vfio_set_long_bits(vdev->pdev.wmask + pos, ~mask, mask); + PCIDevice *pdev = PCI_DEVICE(vdev); + + vfio_set_long_bits(pdev->config + pos, val, mask); + vfio_set_long_bits(pdev->wmask + pos, ~mask, mask); vfio_set_long_bits(vdev->emulated_config_bits + pos, mask, mask); } @@ -2071,7 +2103,8 @@ static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev) { struct vfio_device_info_cap_pci_atomic_comp *cap; g_autofree struct vfio_device_info *info = NULL; - PCIBus *bus = pci_get_bus(&vdev->pdev); + PCIDevice *pdev = PCI_DEVICE(vdev); + PCIBus *bus = pci_get_bus(pdev); PCIDevice *parent = bus->parent_dev; struct vfio_info_cap_header *hdr; uint32_t mask = 0; @@ -2087,8 +2120,8 @@ static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev) if (pci_bus_is_root(bus) || !parent || !parent->exp.exp_cap || pcie_cap_get_type(parent) != PCI_EXP_TYPE_ROOT_PORT || pcie_cap_get_version(parent) != PCI_EXP_FLAGS_VER2 || - vdev->pdev.devfn || - vdev->pdev.cap_present & 
QEMU_PCI_CAP_MULTIFUNCTION) { + pdev->devfn || + pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { return; } @@ -2132,8 +2165,10 @@ static void vfio_pci_enable_rp_atomics(VFIOPCIDevice *vdev) static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev) { + PCIDevice *pdev = PCI_DEVICE(vdev); + if (vdev->clear_parent_atomics_on_exit) { - PCIDevice *parent = pci_get_bus(&vdev->pdev)->parent_dev; + PCIDevice *parent = pci_get_bus(pdev)->parent_dev; uint8_t *pos = parent->config + parent->exp.exp_cap + PCI_EXP_DEVCAP2; pci_long_test_and_clear_mask(pos, PCI_EXP_DEVCAP2_ATOMIC_COMP32 | @@ -2145,10 +2180,11 @@ static void vfio_pci_disable_rp_atomics(VFIOPCIDevice *vdev) static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, Error **errp) { + PCIDevice *pdev = PCI_DEVICE(vdev); uint16_t flags; uint8_t type; - flags = pci_get_word(vdev->pdev.config + pos + PCI_CAP_FLAGS); + flags = pci_get_word(pdev->config + pos + PCI_CAP_FLAGS); type = (flags & PCI_EXP_FLAGS_TYPE) >> 4; if (type != PCI_EXP_TYPE_ENDPOINT && @@ -2160,8 +2196,8 @@ static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, return false; } - if (!pci_bus_is_express(pci_get_bus(&vdev->pdev))) { - PCIBus *bus = pci_get_bus(&vdev->pdev); + if (!pci_bus_is_express(pci_get_bus(pdev))) { + PCIBus *bus = pci_get_bus(pdev); PCIDevice *bridge; /* @@ -2193,7 +2229,7 @@ static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, return true; } - } else if (pci_bus_is_root(pci_get_bus(&vdev->pdev))) { + } else if (pci_bus_is_root(pci_get_bus(pdev))) { /* * On a Root Complex bus Endpoints become Root Complex Integrated * Endpoints, which changes the type and clears the LNK & LNK2 fields. 
@@ -2261,20 +2297,20 @@ static bool vfio_setup_pcie_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, 1, PCI_EXP_FLAGS_VERS); } - pos = pci_add_capability(&vdev->pdev, PCI_CAP_ID_EXP, pos, size, - errp); + pos = pci_add_capability(pdev, PCI_CAP_ID_EXP, pos, size, errp); if (pos < 0) { return false; } - vdev->pdev.exp.exp_cap = pos; + pdev->exp.exp_cap = pos; return true; } static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos) { - uint32_t cap = pci_get_long(vdev->pdev.config + pos + PCI_EXP_DEVCAP); + PCIDevice *pdev = PCI_DEVICE(vdev); + uint32_t cap = pci_get_long(pdev->config + pos + PCI_EXP_DEVCAP); if (cap & PCI_EXP_DEVCAP_FLR) { trace_vfio_check_pcie_flr(vdev->vbasedev.name); @@ -2284,7 +2320,8 @@ static void vfio_check_pcie_flr(VFIOPCIDevice *vdev, uint8_t pos) static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos) { - uint16_t csr = pci_get_word(vdev->pdev.config + pos + PCI_PM_CTRL); + PCIDevice *pdev = PCI_DEVICE(vdev); + uint16_t csr = pci_get_word(pdev->config + pos + PCI_PM_CTRL); if (!(csr & PCI_PM_CTRL_NO_SOFT_RESET)) { trace_vfio_check_pm_reset(vdev->vbasedev.name); @@ -2294,7 +2331,8 @@ static void vfio_check_pm_reset(VFIOPCIDevice *vdev, uint8_t pos) static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) { - uint8_t cap = pci_get_byte(vdev->pdev.config + pos + PCI_AF_CAP); + PCIDevice *pdev = PCI_DEVICE(vdev); + uint8_t cap = pci_get_byte(pdev->config + pos + PCI_AF_CAP); if ((cap & PCI_AF_CAP_TP) && (cap & PCI_AF_CAP_FLR)) { trace_vfio_check_af_flr(vdev->vbasedev.name); @@ -2305,7 +2343,7 @@ static void vfio_check_af_flr(VFIOPCIDevice *vdev, uint8_t pos) static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos, uint8_t size, Error **errp) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); pos = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, size, errp); if (pos < 0) { @@ -2327,7 +2365,7 @@ static bool vfio_add_vendor_specific_cap(VFIOPCIDevice *vdev, int pos, static bool 
vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) { ERRP_GUARD(); - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); uint8_t cap_id, next, size; bool ret; @@ -2413,17 +2451,18 @@ static bool vfio_add_std_cap(VFIOPCIDevice *vdev, uint8_t pos, Error **errp) static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos) { + PCIDevice *pdev = PCI_DEVICE(vdev); uint32_t ctrl; int i, nbar; - ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL); + ctrl = pci_get_long(pdev->config + pos + PCI_REBAR_CTRL); nbar = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >> PCI_REBAR_CTRL_NBAR_SHIFT; for (i = 0; i < nbar; i++) { uint32_t cap; int size; - ctrl = pci_get_long(vdev->pdev.config + pos + PCI_REBAR_CTRL + (i * 8)); + ctrl = pci_get_long(pdev->config + pos + PCI_REBAR_CTRL + (i * 8)); size = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT; /* The cap register reports sizes 1MB to 128TB, with 4 reserved bits */ @@ -2461,7 +2500,7 @@ static int vfio_setup_rebar_ecap(VFIOPCIDevice *vdev, uint16_t pos) static void vfio_add_ext_cap(VFIOPCIDevice *vdev) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); uint32_t header; uint16_t cap_id, next, size; uint8_t cap_ver; @@ -2555,7 +2594,7 @@ static void vfio_add_ext_cap(VFIOPCIDevice *vdev) bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); if (!(pdev->config[PCI_STATUS] & PCI_STATUS_CAP_LIST) || !pdev->config[PCI_CAPABILITY_LIST]) { @@ -2572,7 +2611,7 @@ bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp) void vfio_pci_pre_reset(VFIOPCIDevice *vdev) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); uint16_t cmd; vfio_disable_interrupts(vdev); @@ -2768,8 +2807,8 @@ static const VMStateDescription vmstate_vfio_pci_config = { .version_id = 1, .minimum_version_id = 1, .fields = (const VMStateField[]) { - VMSTATE_PCI_DEVICE(pdev, VFIOPCIDevice), - 
VMSTATE_MSIX_TEST(pdev, VFIOPCIDevice, vfio_msix_present), + VMSTATE_PCI_DEVICE(parent_obj, VFIOPCIDevice), + VMSTATE_MSIX_TEST(parent_obj, VFIOPCIDevice, vfio_msix_present), VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription * const []) { @@ -2782,23 +2821,26 @@ static int vfio_pci_save_config(VFIODevice *vbasedev, QEMUFile *f, Error **errp) { VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); - return vmstate_save_state_with_err(f, &vmstate_vfio_pci_config, vdev, NULL, - errp); + return vmstate_save_state(f, &vmstate_vfio_pci_config, vdev, NULL, + errp); } static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) { VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev); - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); pcibus_t old_addr[PCI_NUM_REGIONS - 1]; int bar, ret; + Error *local_err = NULL; for (bar = 0; bar < PCI_ROM_SLOT; bar++) { old_addr[bar] = pdev->io_regions[bar].addr; } - ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1); + ret = vmstate_load_state(f, &vmstate_vfio_pci_config, vdev, 1, + &local_err); if (ret) { + error_report_err(local_err); return ret; } @@ -2826,6 +2868,29 @@ static int vfio_pci_load_config(VFIODevice *vbasedev, QEMUFile *f) return ret; } +/* Transform from VFIODevice to VFIOPCIDevice. Return NULL if fails. 
*/ +VFIOPCIDevice *vfio_pci_from_vfio_device(VFIODevice *vbasedev) +{ + if (vbasedev && vbasedev->type == VFIO_DEVICE_TYPE_PCI) { + return container_of(vbasedev, VFIOPCIDevice, vbasedev); + } + return NULL; +} + +void vfio_sub_page_bar_update_mappings(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = PCI_DEVICE(vdev); + int page_size = qemu_real_host_page_size(); + int bar; + + for (bar = 0; bar < PCI_ROM_SLOT; bar++) { + PCIIORegion *r = &pdev->io_regions[bar]; + if (r->addr != PCI_BAR_UNMAPPED && r->size > 0 && r->size < page_size) { + vfio_sub_page_bar_update_mapping(pdev, bar); + } + } +} + static VFIODeviceOps vfio_pci_ops = { .vfio_compute_needs_reset = vfio_pci_compute_needs_reset, .vfio_hot_reset_multi = vfio_pci_hot_reset_multi, @@ -2898,6 +2963,7 @@ bool vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp) bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp) { + PCIDevice *pdev = PCI_DEVICE(vdev); VFIODevice *vbasedev = &vdev->vbasedev; struct vfio_region_info *reg_info = NULL; struct vfio_irq_info irq_info; @@ -2909,9 +2975,9 @@ bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp) return false; } - if (vbasedev->num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { + if (vbasedev->num_initial_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) { error_setg(errp, "unexpected number of io regions %u", - vbasedev->num_regions); + vbasedev->num_initial_regions); return false; } @@ -2949,7 +3015,7 @@ bool vfio_pci_populate_device(VFIOPCIDevice *vdev, Error **errp) vdev->config_size = reg_info->size; if (vdev->config_size == PCI_CONFIG_SPACE_SIZE) { - vdev->pdev.cap_present &= ~QEMU_PCI_CAP_EXPRESS; + pdev->cap_present &= ~QEMU_PCI_CAP_EXPRESS; } vdev->config_offset = reg_info->offset; @@ -2980,6 +3046,7 @@ void vfio_pci_put_device(VFIOPCIDevice *vdev) { vfio_display_finalize(vdev); vfio_bars_finalize(vdev); + vfio_cpr_pci_unregister_device(vdev); g_free(vdev->emulated_config_bits); g_free(vdev->rom); /* @@ -3150,18 +3217,28 @@ static void 
vfio_unregister_req_notifier(VFIOPCIDevice *vdev) vdev->req_enabled = false; } +void vfio_pci_config_register_vga(VFIOPCIDevice *vdev) +{ + PCIDevice *pdev = PCI_DEVICE(vdev); + assert(vdev->vga != NULL); + + pci_register_vga(pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem, + &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, + &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem); +} + bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); VFIODevice *vbasedev = &vdev->vbasedev; uint32_t config_space_size; int ret; - config_space_size = MIN(pci_config_size(&vdev->pdev), vdev->config_size); + config_space_size = MIN(pci_config_size(pdev), vdev->config_size); /* Get a copy of config space */ ret = vfio_pci_config_space_read(vdev, 0, config_space_size, - vdev->pdev.config); + pdev->config); if (ret < (int)config_space_size) { ret = ret < 0 ? -ret : EFAULT; error_setg_errno(errp, ret, "failed to read device config space"); @@ -3246,10 +3323,10 @@ bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp) PCI_HEADER_TYPE_MULTI_FUNCTION; /* Restore or clear multifunction, this is always controlled by QEMU */ - if (vdev->pdev.cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { - vdev->pdev.config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; + if (pdev->cap_present & QEMU_PCI_CAP_MULTIFUNCTION) { + pdev->config[PCI_HEADER_TYPE] |= PCI_HEADER_TYPE_MULTI_FUNCTION; } else { - vdev->pdev.config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; + pdev->config[PCI_HEADER_TYPE] &= ~PCI_HEADER_TYPE_MULTI_FUNCTION; } /* @@ -3257,8 +3334,8 @@ bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp) * BAR, such as might be the case with the option ROM, we can get * confusing, unwritable, residual addresses from the host here. 
*/ - memset(&vdev->pdev.config[PCI_BASE_ADDRESS_0], 0, 24); - memset(&vdev->pdev.config[PCI_ROM_ADDRESS], 0, 4); + memset(&pdev->config[PCI_BASE_ADDRESS_0], 0, 24); + memset(&pdev->config[PCI_ROM_ADDRESS], 0, 4); vfio_pci_size_rom(vdev); @@ -3271,9 +3348,7 @@ bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp) vfio_bars_register(vdev); if (vdev->vga && vfio_is_vga(vdev)) { - pci_register_vga(&vdev->pdev, &vdev->vga->region[QEMU_PCI_VGA_MEM].mem, - &vdev->vga->region[QEMU_PCI_VGA_IO_LO].mem, - &vdev->vga->region[QEMU_PCI_VGA_IO_HI].mem); + vfio_pci_config_register_vga(vdev); } return true; @@ -3281,7 +3356,7 @@ bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp) bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp) { - PCIDevice *pdev = &vdev->pdev; + PCIDevice *pdev = PCI_DEVICE(vdev); /* QEMU emulates all of MSI & MSIX */ if (pdev->cap_present & QEMU_PCI_CAP_MSIX) { @@ -3294,10 +3369,10 @@ bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp) vdev->msi_cap_size); } - if (vfio_pci_read_config(&vdev->pdev, PCI_INTERRUPT_PIN, 1)) { + if (vfio_pci_read_config(pdev, PCI_INTERRUPT_PIN, 1)) { vdev->intx.mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, vfio_intx_mmap_enable, vdev); - pci_device_set_intx_routing_notifier(&vdev->pdev, + pci_device_set_intx_routing_notifier(pdev, vfio_intx_routing_notifier); vdev->irqchip_change_notifier.notify = vfio_irqchip_change; kvm_irqchip_add_change_notifier(&vdev->irqchip_change_notifier); @@ -3309,7 +3384,7 @@ bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp) */ if (!cpr_is_incoming() && !vfio_intx_enable(vdev, errp)) { timer_free(vdev->intx.mmap_timer); - pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); + pci_device_set_intx_routing_notifier(pdev, NULL); kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); return false; } @@ -3320,7 +3395,7 @@ bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp) static void vfio_pci_realize(PCIDevice *pdev, 
Error **errp) { ERRP_GUARD(); - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIODevice *vbasedev = &vdev->vbasedev; int i; char uuid[UUID_STR_LEN]; @@ -3452,6 +3527,7 @@ static void vfio_pci_realize(PCIDevice *pdev, Error **errp) vfio_pci_register_err_notifier(vdev); vfio_pci_register_req_notifier(vdev); vfio_setup_resetfn_quirk(vdev); + vfio_cpr_pci_register_device(vdev); return; @@ -3459,7 +3535,7 @@ static void vfio_pci_realize(PCIDevice *pdev, Error **errp) if (vdev->interrupt == VFIO_INT_INTx) { vfio_intx_disable(vdev); } - pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); + pci_device_set_intx_routing_notifier(pdev, NULL); if (vdev->irqchip_change_notifier.notify) { kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); } @@ -3477,21 +3553,21 @@ static void vfio_pci_realize(PCIDevice *pdev, Error **errp) error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->name); } -static void vfio_instance_finalize(Object *obj) +static void vfio_pci_finalize(Object *obj) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(obj); vfio_pci_put_device(vdev); } static void vfio_exitfn(PCIDevice *pdev) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(pdev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(pdev); VFIODevice *vbasedev = &vdev->vbasedev; vfio_unregister_req_notifier(vdev); vfio_unregister_err_notifier(vdev); - pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); + pci_device_set_intx_routing_notifier(pdev, NULL); if (vdev->irqchip_change_notifier.notify) { kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); } @@ -3510,7 +3586,7 @@ static void vfio_exitfn(PCIDevice *pdev) static void vfio_pci_reset(DeviceState *dev) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(dev); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(dev); /* Do not reset the device during qemu_system_reset prior to cpr load */ if (cpr_is_incoming()) { @@ -3552,10 +3628,10 @@ static void vfio_pci_reset(DeviceState *dev) 
vfio_pci_post_reset(vdev); } -static void vfio_instance_init(Object *obj) +static void vfio_pci_init(Object *obj) { PCIDevice *pci_dev = PCI_DEVICE(obj); - VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(obj); VFIODevice *vbasedev = &vdev->vbasedev; device_add_bootindex_property(obj, &vdev->bootindex, @@ -3583,7 +3659,7 @@ static void vfio_instance_init(Object *obj) pci_dev->cap_present |= QEMU_PCI_SKIP_RESET_ON_CPR; } -static void vfio_pci_base_dev_class_init(ObjectClass *klass, const void *data) +static void vfio_pci_device_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); @@ -3595,12 +3671,12 @@ static void vfio_pci_base_dev_class_init(ObjectClass *klass, const void *data) pdc->config_write = vfio_pci_write_config; } -static const TypeInfo vfio_pci_base_dev_info = { - .name = TYPE_VFIO_PCI_BASE, +static const TypeInfo vfio_pci_device_info = { + .name = TYPE_VFIO_PCI_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(VFIOPCIDevice), .abstract = true, - .class_init = vfio_pci_base_dev_class_init, + .class_init = vfio_pci_device_class_init, .interfaces = (const InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, { INTERFACE_CONVENTIONAL_PCI_DEVICE }, @@ -3610,7 +3686,7 @@ static const TypeInfo vfio_pci_base_dev_info = { static PropertyInfo vfio_pci_migration_multifd_transfer_prop; -static const Property vfio_pci_dev_properties[] = { +static const Property vfio_pci_properties[] = { DEFINE_PROP_PCI_HOST_DEVADDR("host", VFIOPCIDevice, host), DEFINE_PROP_UUID_NODEFAULT("vf-token", VFIOPCIDevice, vf_token), DEFINE_PROP_STRING("sysfsdev", VFIOPCIDevice, vbasedev.sysfsdev), @@ -3685,18 +3761,18 @@ static const Property vfio_pci_dev_properties[] = { #ifdef CONFIG_IOMMUFD static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp) { - VFIOPCIDevice *vdev = VFIO_PCI_BASE(obj); + VFIOPCIDevice *vdev = VFIO_PCI_DEVICE(obj); 
vfio_device_set_fd(&vdev->vbasedev, str, errp); } #endif -static void vfio_pci_dev_class_init(ObjectClass *klass, const void *data) +static void vfio_pci_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *pdc = PCI_DEVICE_CLASS(klass); device_class_set_legacy_reset(dc, vfio_pci_reset); - device_class_set_props(dc, vfio_pci_dev_properties); + device_class_set_props(dc, vfio_pci_properties); #ifdef CONFIG_IOMMUFD object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd); #endif @@ -3839,26 +3915,28 @@ static void vfio_pci_dev_class_init(ObjectClass *klass, const void *data) "multifd channels"); } -static const TypeInfo vfio_pci_dev_info = { +static const TypeInfo vfio_pci_info = { .name = TYPE_VFIO_PCI, - .parent = TYPE_VFIO_PCI_BASE, - .class_init = vfio_pci_dev_class_init, - .instance_init = vfio_instance_init, - .instance_finalize = vfio_instance_finalize, + .parent = TYPE_VFIO_PCI_DEVICE, + .class_init = vfio_pci_class_init, + .instance_init = vfio_pci_init, + .instance_finalize = vfio_pci_finalize, }; -static const Property vfio_pci_dev_nohotplug_properties[] = { +static const Property vfio_pci_nohotplug_properties[] = { DEFINE_PROP_BOOL("ramfb", VFIOPCIDevice, enable_ramfb, false), + DEFINE_PROP_BOOL("use-legacy-x86-rom", VFIOPCIDevice, + use_legacy_x86_rom, false), DEFINE_PROP_ON_OFF_AUTO("x-ramfb-migrate", VFIOPCIDevice, ramfb_migrate, ON_OFF_AUTO_AUTO), }; -static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, +static void vfio_pci_nohotplug_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - device_class_set_props(dc, vfio_pci_dev_nohotplug_properties); + device_class_set_props(dc, vfio_pci_nohotplug_properties); dc->hotpluggable = false; object_class_property_set_description(klass, /* 3.1 */ @@ -3869,13 +3947,16 @@ static void vfio_pci_nohotplug_dev_class_init(ObjectClass *klass, "x-ramfb-migrate", "Override default migration support for ramfb 
support " "(DEBUG)"); + object_class_property_set_description(klass, /* 10.1 */ + "use-legacy-x86-rom", + "Controls loading of a legacy VGA BIOS ROM"); } -static const TypeInfo vfio_pci_nohotplug_dev_info = { +static const TypeInfo vfio_pci_nohotplug_info = { .name = TYPE_VFIO_PCI_NOHOTPLUG, .parent = TYPE_VFIO_PCI, .instance_size = sizeof(VFIOPCIDevice), - .class_init = vfio_pci_nohotplug_dev_class_init, + .class_init = vfio_pci_nohotplug_class_init, }; static void register_vfio_pci_dev_type(void) @@ -3891,9 +3972,9 @@ static void register_vfio_pci_dev_type(void) vfio_pci_migration_multifd_transfer_prop = qdev_prop_on_off_auto; vfio_pci_migration_multifd_transfer_prop.realized_set_allowed = true; - type_register_static(&vfio_pci_base_dev_info); - type_register_static(&vfio_pci_dev_info); - type_register_static(&vfio_pci_nohotplug_dev_info); + type_register_static(&vfio_pci_device_info); + type_register_static(&vfio_pci_info); + type_register_static(&vfio_pci_nohotplug_info); } type_init(register_vfio_pci_dev_type) diff --git a/hw/vfio/pci.h b/hw/vfio/pci.h index 4aa64611171d3..0f78cf9cdbf18 100644 --- a/hw/vfio/pci.h +++ b/hw/vfio/pci.h @@ -14,6 +14,7 @@ #include "system/memory.h" #include "hw/pci/pci_device.h" +#include "hw/vfio/types.h" #include "hw/vfio/vfio-device.h" #include "hw/vfio/vfio-region.h" #include "qemu/event_notifier.h" @@ -119,19 +120,11 @@ typedef struct VFIOMSIXInfo { MemoryRegion *pba_region; } VFIOMSIXInfo; -/* - * TYPE_VFIO_PCI_BASE is an abstract type used to share code - * between VFIO implementations that use a kernel driver - * with those that use user sockets. - */ -#define TYPE_VFIO_PCI_BASE "vfio-pci-base" -OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI_BASE) - -#define TYPE_VFIO_PCI "vfio-pci" -/* TYPE_VFIO_PCI shares struct VFIOPCIDevice. 
*/ +OBJECT_DECLARE_SIMPLE_TYPE(VFIOPCIDevice, VFIO_PCI_DEVICE) struct VFIOPCIDevice { - PCIDevice pdev; + PCIDevice parent_obj; + VFIODevice vbasedev; VFIOINTx intx; unsigned int config_size; @@ -189,12 +182,14 @@ struct VFIOPCIDevice { bool no_kvm_ioeventfd; bool no_vfio_ioeventfd; bool enable_ramfb; + bool use_legacy_x86_rom; OnOffAuto ramfb_migrate; bool defer_kvm_irq_routing; bool clear_parent_atomics_on_exit; bool skip_vsc_check; VFIODisplay *dpy; Notifier irqchip_change_notifier; + VFIOPCICPR cpr; }; /* Use uin32_t for vendor & device so PCI_ANY_ID expands and cannot match hw */ @@ -209,6 +204,11 @@ static inline bool vfio_is_vga(VFIOPCIDevice *vdev) return (vdev->class_code >> 8) == PCI_CLASS_DISPLAY_VGA; } +static inline bool vfio_is_base_display(VFIOPCIDevice *vdev) +{ + return (vdev->class_code >> 16) == PCI_BASE_CLASS_DISPLAY; +} + /* MSI/MSI-X/INTx */ void vfio_pci_vector_init(VFIOPCIDevice *vdev, int nr); void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, @@ -216,8 +216,9 @@ void vfio_pci_add_kvm_msi_virq(VFIOPCIDevice *vdev, VFIOMSIVector *vector, void vfio_pci_prepare_kvm_msi_virq_batch(VFIOPCIDevice *vdev); void vfio_pci_commit_kvm_msi_virq_batch(VFIOPCIDevice *vdev); bool vfio_pci_intx_enable(VFIOPCIDevice *vdev, Error **errp); +void vfio_pci_intx_set_handler(VFIOPCIDevice *vdev, bool enable); void vfio_pci_msix_set_notifiers(VFIOPCIDevice *vdev); -void vfio_pci_msi_set_handler(VFIOPCIDevice *vdev, int nr); +void vfio_pci_msi_set_handler(VFIOPCIDevice *vdev, int nr, bool enable); uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len); void vfio_pci_write_config(PCIDevice *pdev, @@ -226,6 +227,19 @@ void vfio_pci_write_config(PCIDevice *pdev, uint64_t vfio_vga_read(void *opaque, hwaddr addr, unsigned size); void vfio_vga_write(void *opaque, hwaddr addr, uint64_t data, unsigned size); +/** + * vfio_pci_from_vfio_device: Transform from VFIODevice to + * VFIOPCIDevice + * + * This function checks if the given 
@vbasedev is a VFIO PCI device. + * If it is, it returns the containing VFIOPCIDevice. + * + * @vbasedev: The VFIODevice to transform + * + * Return: The VFIOPCIDevice on success, NULL on failure. + */ +VFIOPCIDevice *vfio_pci_from_vfio_device(VFIODevice *vbasedev); +void vfio_sub_page_bar_update_mappings(VFIOPCIDevice *vdev); bool vfio_opt_rom_in_denylist(VFIOPCIDevice *vdev); bool vfio_config_quirk_setup(VFIOPCIDevice *vdev, Error **errp); void vfio_vga_quirk_setup(VFIOPCIDevice *vdev); @@ -259,6 +273,7 @@ extern const VMStateDescription vfio_display_vmstate; void vfio_pci_bars_exit(VFIOPCIDevice *vdev); bool vfio_pci_add_capabilities(VFIOPCIDevice *vdev, Error **errp); +void vfio_pci_config_register_vga(VFIOPCIDevice *vdev); bool vfio_pci_config_setup(VFIOPCIDevice *vdev, Error **errp); bool vfio_pci_interrupt_setup(VFIOPCIDevice *vdev, Error **errp); void vfio_pci_intx_eoi(VFIODevice *vbasedev); diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c deleted file mode 100644 index 5c1795a26fe79..0000000000000 --- a/hw/vfio/platform.c +++ /dev/null @@ -1,716 +0,0 @@ -/* - * vfio based device assignment support - platform devices - * - * Copyright Linaro Limited, 2014 - * - * Authors: - * Kim Phillips - * Eric Auger - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - * Based on vfio based PCI device assignment support: - * Copyright Red Hat, Inc. 
2012 - */ - -#include "qemu/osdep.h" -#include CONFIG_DEVICES /* CONFIG_IOMMUFD */ -#include "qapi/error.h" -#include -#include - -#include "hw/vfio/vfio-platform.h" -#include "system/iommufd.h" -#include "migration/vmstate.h" -#include "qemu/error-report.h" -#include "qemu/lockable.h" -#include "qemu/main-loop.h" -#include "qemu/module.h" -#include "qemu/range.h" -#include "system/memory.h" -#include "system/address-spaces.h" -#include "qemu/queue.h" -#include "hw/sysbus.h" -#include "trace.h" -#include "hw/irq.h" -#include "hw/platform-bus.h" -#include "hw/qdev-properties.h" -#include "system/kvm.h" -#include "hw/vfio/vfio-region.h" - -/* - * Functions used whatever the injection method - */ - -static inline bool vfio_irq_is_automasked(VFIOINTp *intp) -{ - return intp->flags & VFIO_IRQ_INFO_AUTOMASKED; -} - -/** - * vfio_init_intp - allocate, initialize the IRQ struct pointer - * and add it into the list of IRQs - * @vbasedev: the VFIO device handle - * @info: irq info struct retrieved from VFIO driver - * @errp: error object - */ -static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev, - struct vfio_irq_info info, Error **errp) -{ - int ret; - VFIOPlatformDevice *vdev = - container_of(vbasedev, VFIOPlatformDevice, vbasedev); - SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev); - VFIOINTp *intp; - - intp = g_malloc0(sizeof(*intp)); - intp->vdev = vdev; - intp->pin = info.index; - intp->flags = info.flags; - intp->state = VFIO_IRQ_INACTIVE; - intp->kvm_accel = false; - - sysbus_init_irq(sbdev, &intp->qemuirq); - - /* Get an eventfd for trigger */ - intp->interrupt = g_new0(EventNotifier, 1); - ret = event_notifier_init(intp->interrupt, 0); - if (ret) { - g_free(intp->interrupt); - g_free(intp); - error_setg_errno(errp, -ret, - "failed to initialize trigger eventfd notifier"); - return NULL; - } - if (vfio_irq_is_automasked(intp)) { - /* Get an eventfd for resample/unmask */ - intp->unmask = g_new0(EventNotifier, 1); - ret = event_notifier_init(intp->unmask, 0); - if (ret) 
{ - g_free(intp->interrupt); - g_free(intp->unmask); - g_free(intp); - error_setg_errno(errp, -ret, - "failed to initialize resample eventfd notifier"); - return NULL; - } - } - - QLIST_INSERT_HEAD(&vdev->intp_list, intp, next); - return intp; -} - -/** - * vfio_set_trigger_eventfd - set VFIO eventfd handling - * - * @intp: IRQ struct handle - * @handler: handler to be called on eventfd signaling - * - * Setup VFIO signaling and attach an optional user-side handler - * to the eventfd - */ -static int vfio_set_trigger_eventfd(VFIOINTp *intp, - eventfd_user_side_handler_t handler) -{ - VFIODevice *vbasedev = &intp->vdev->vbasedev; - int32_t fd = event_notifier_get_fd(intp->interrupt); - Error *err = NULL; - - qemu_set_fd_handler(fd, (IOHandler *)handler, NULL, intp); - - if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0, - VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) { - error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); - qemu_set_fd_handler(fd, NULL, NULL, NULL); - return -EINVAL; - } - - return 0; -} - -/* - * Functions only used when eventfds are handled on user-side - * ie. 
without irqfd - */ - -/** - * vfio_mmap_set_enabled - enable/disable the fast path mode - * @vdev: the VFIO platform device - * @enabled: the target mmap state - * - * enabled = true ~ fast path = MMIO region is mmaped (no KVM TRAP); - * enabled = false ~ slow path = MMIO region is trapped and region callbacks - * are called; slow path enables to trap the device IRQ status register reset -*/ - -static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled) -{ - int i; - - for (i = 0; i < vdev->vbasedev.num_regions; i++) { - vfio_region_mmaps_set_enabled(vdev->regions[i], enabled); - } -} - -/** - * vfio_intp_mmap_enable - timer function, restores the fast path - * if there is no more active IRQ - * @opaque: actually points to the VFIO platform device - * - * Called on mmap timer timeout, this function checks whether the - * IRQ is still active and if not, restores the fast path. - * by construction a single eventfd is handled at a time. - * if the IRQ is still active, the timer is re-programmed. - */ -static void vfio_intp_mmap_enable(void *opaque) -{ - VFIOINTp *tmp; - VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque; - - QEMU_LOCK_GUARD(&vdev->intp_mutex); - QLIST_FOREACH(tmp, &vdev->intp_list, next) { - if (tmp->state == VFIO_IRQ_ACTIVE) { - trace_vfio_platform_intp_mmap_enable(tmp->pin); - /* re-program the timer to check active status later */ - timer_mod(vdev->mmap_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + - vdev->mmap_timeout); - return; - } - } - vfio_mmap_set_enabled(vdev, true); -} - -/** - * vfio_intp_inject_pending_lockheld - Injects a pending IRQ - * @opaque: opaque pointer, in practice the VFIOINTp handle - * - * The function is called on a previous IRQ completion, from - * vfio_platform_eoi, while the intp_mutex is locked. - * Also in such situation, the slow path already is set and - * the mmap timer was already programmed. 
- */ -static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp) -{ - trace_vfio_platform_intp_inject_pending_lockheld(intp->pin, - event_notifier_get_fd(intp->interrupt)); - - intp->state = VFIO_IRQ_ACTIVE; - - /* trigger the virtual IRQ */ - qemu_set_irq(intp->qemuirq, 1); -} - -/** - * vfio_intp_interrupt - The user-side eventfd handler - * @opaque: opaque pointer which in practice is the VFIOINTp handle - * - * the function is entered in event handler context: - * the vIRQ is injected into the guest if there is no other active - * or pending IRQ. - */ -static void vfio_intp_interrupt(VFIOINTp *intp) -{ - int ret; - VFIOINTp *tmp; - VFIOPlatformDevice *vdev = intp->vdev; - bool delay_handling = false; - - QEMU_LOCK_GUARD(&vdev->intp_mutex); - if (intp->state == VFIO_IRQ_INACTIVE) { - QLIST_FOREACH(tmp, &vdev->intp_list, next) { - if (tmp->state == VFIO_IRQ_ACTIVE || - tmp->state == VFIO_IRQ_PENDING) { - delay_handling = true; - break; - } - } - } - if (delay_handling) { - /* - * the new IRQ gets a pending status and is pushed in - * the pending queue - */ - intp->state = VFIO_IRQ_PENDING; - trace_vfio_intp_interrupt_set_pending(intp->pin); - QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue, - intp, pqnext); - event_notifier_test_and_clear(intp->interrupt); - return; - } - - trace_vfio_platform_intp_interrupt(intp->pin, - event_notifier_get_fd(intp->interrupt)); - - ret = event_notifier_test_and_clear(intp->interrupt); - if (!ret) { - error_report("Error when clearing fd=%d (ret = %d)", - event_notifier_get_fd(intp->interrupt), ret); - } - - intp->state = VFIO_IRQ_ACTIVE; - - /* sets slow path */ - vfio_mmap_set_enabled(vdev, false); - - /* trigger the virtual IRQ */ - qemu_set_irq(intp->qemuirq, 1); - - /* - * Schedule the mmap timer which will restore fastpath when no IRQ - * is active anymore - */ - if (vdev->mmap_timeout) { - timer_mod(vdev->mmap_timer, - qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + - vdev->mmap_timeout); - } -} - -/** - * vfio_platform_eoi - IRQ 
completion routine - * @vbasedev: the VFIO device handle - * - * De-asserts the active virtual IRQ and unmasks the physical IRQ - * (effective for level sensitive IRQ auto-masked by the VFIO driver). - * Then it handles next pending IRQ if any. - * eoi function is called on the first access to any MMIO region - * after an IRQ was triggered, trapped since slow path was set. - * It is assumed this access corresponds to the IRQ status - * register reset. With such a mechanism, a single IRQ can be - * handled at a time since there is no way to know which IRQ - * was completed by the guest (we would need additional details - * about the IRQ status register mask). - */ -static void vfio_platform_eoi(VFIODevice *vbasedev) -{ - VFIOINTp *intp; - VFIOPlatformDevice *vdev = - container_of(vbasedev, VFIOPlatformDevice, vbasedev); - - QEMU_LOCK_GUARD(&vdev->intp_mutex); - QLIST_FOREACH(intp, &vdev->intp_list, next) { - if (intp->state == VFIO_IRQ_ACTIVE) { - trace_vfio_platform_eoi(intp->pin, - event_notifier_get_fd(intp->interrupt)); - intp->state = VFIO_IRQ_INACTIVE; - - /* deassert the virtual IRQ */ - qemu_set_irq(intp->qemuirq, 0); - - if (vfio_irq_is_automasked(intp)) { - /* unmasks the physical level-sensitive IRQ */ - vfio_device_irq_unmask(vbasedev, intp->pin); - } - - /* a single IRQ can be active at a time */ - break; - } - } - /* in case there are pending IRQs, handle the first one */ - if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) { - intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue); - vfio_intp_inject_pending_lockheld(intp); - QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext); - } -} - -/** - * vfio_start_eventfd_injection - starts the virtual IRQ injection using - * user-side handled eventfds - * @sbdev: the sysbus device handle - * @irq: the qemu irq handle - */ - -static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq) -{ - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); - VFIOINTp *intp; - - QLIST_FOREACH(intp, 
&vdev->intp_list, next) { - if (intp->qemuirq == irq) { - break; - } - } - assert(intp); - - if (vfio_set_trigger_eventfd(intp, vfio_intp_interrupt)) { - abort(); - } -} - -/* - * Functions used for irqfd - */ - -/** - * vfio_set_resample_eventfd - sets the resamplefd for an IRQ - * @intp: the IRQ struct handle - * programs the VFIO driver to unmask this IRQ when the - * intp->unmask eventfd is triggered - */ -static int vfio_set_resample_eventfd(VFIOINTp *intp) -{ - int32_t fd = event_notifier_get_fd(intp->unmask); - VFIODevice *vbasedev = &intp->vdev->vbasedev; - Error *err = NULL; - - qemu_set_fd_handler(fd, NULL, NULL, NULL); - if (!vfio_device_irq_set_signaling(vbasedev, intp->pin, 0, - VFIO_IRQ_SET_ACTION_UNMASK, fd, &err)) { - error_reportf_err(err, VFIO_MSG_PREFIX, vbasedev->name); - return -EINVAL; - } - return 0; -} - -/** - * vfio_start_irqfd_injection - starts the virtual IRQ injection using - * irqfd - * - * @sbdev: the sysbus device handle - * @irq: the qemu irq handle - * - * In case the irqfd setup fails, we fallback to userspace handled eventfd - */ -static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) -{ - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); - VFIOINTp *intp; - - if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() || - !vdev->irqfd_allowed) { - goto fail_irqfd; - } - - QLIST_FOREACH(intp, &vdev->intp_list, next) { - if (intp->qemuirq == irq) { - break; - } - } - assert(intp); - - if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt, - intp->unmask, irq) < 0) { - goto fail_irqfd; - } - - if (vfio_set_trigger_eventfd(intp, NULL) < 0) { - goto fail_vfio; - } - if (vfio_irq_is_automasked(intp)) { - if (vfio_set_resample_eventfd(intp) < 0) { - goto fail_vfio; - } - trace_vfio_platform_start_level_irqfd_injection(intp->pin, - event_notifier_get_fd(intp->interrupt), - event_notifier_get_fd(intp->unmask)); - } else { - trace_vfio_platform_start_edge_irqfd_injection(intp->pin, - 
event_notifier_get_fd(intp->interrupt)); - } - - intp->kvm_accel = true; - - return; -fail_vfio: - kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq); - abort(); -fail_irqfd: - vfio_start_eventfd_injection(sbdev, irq); -} - -/* VFIO skeleton */ - -static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev) -{ - vbasedev->needs_reset = true; -} - -/* not implemented yet */ -static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev) -{ - return -1; -} - -/** - * vfio_populate_device - Allocate and populate MMIO region - * and IRQ structs according to driver returned information - * @vbasedev: the VFIO device handle - * @errp: error object - * - */ -static bool vfio_populate_device(VFIODevice *vbasedev, Error **errp) -{ - VFIOINTp *intp, *tmp; - int i, ret = -1; - VFIOPlatformDevice *vdev = - container_of(vbasedev, VFIOPlatformDevice, vbasedev); - - if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) { - error_setg(errp, "this isn't a platform device"); - return false; - } - - vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions); - - for (i = 0; i < vbasedev->num_regions; i++) { - char *name = g_strdup_printf("VFIO %s region %d\n", vbasedev->name, i); - - vdev->regions[i] = g_new0(VFIORegion, 1); - ret = vfio_region_setup(OBJECT(vdev), vbasedev, - vdev->regions[i], i, name); - g_free(name); - if (ret) { - error_setg_errno(errp, -ret, "failed to get region %d info", i); - goto reg_error; - } - } - - vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, - vfio_intp_mmap_enable, vdev); - - QSIMPLEQ_INIT(&vdev->pending_intp_queue); - - for (i = 0; i < vbasedev->num_irqs; i++) { - struct vfio_irq_info irq; - - ret = vfio_device_get_irq_info(vbasedev, i, &irq); - - if (ret) { - error_setg_errno(errp, -ret, "failed to get device irq info"); - goto irq_err; - } else { - trace_vfio_platform_populate_interrupts(irq.index, - irq.count, - irq.flags); - intp = vfio_init_intp(vbasedev, irq, errp); - if (!intp) { - goto irq_err; - } - } - } - return 
true; -irq_err: - timer_del(vdev->mmap_timer); - QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) { - QLIST_REMOVE(intp, next); - g_free(intp); - } -reg_error: - for (i = 0; i < vbasedev->num_regions; i++) { - if (vdev->regions[i]) { - vfio_region_finalize(vdev->regions[i]); - } - g_free(vdev->regions[i]); - } - g_free(vdev->regions); - return false; -} - -/* specialized functions for VFIO Platform devices */ -static VFIODeviceOps vfio_platform_ops = { - .vfio_compute_needs_reset = vfio_platform_compute_needs_reset, - .vfio_hot_reset_multi = vfio_platform_hot_reset_multi, - .vfio_eoi = vfio_platform_eoi, -}; - -/** - * vfio_base_device_init - perform preliminary VFIO setup - * @vbasedev: the VFIO device handle - * @errp: error object - * - * Implement the VFIO command sequence that allows to discover - * assigned device resources: group extraction, device - * fd retrieval, resource query. - * Precondition: the device name must be initialized - */ -static bool vfio_base_device_init(VFIODevice *vbasedev, Error **errp) -{ - /* @fd takes precedence over @sysfsdev which takes precedence over @host */ - if (vbasedev->fd < 0 && vbasedev->sysfsdev) { - vfio_device_free_name(vbasedev); - vbasedev->name = g_path_get_basename(vbasedev->sysfsdev); - } else if (vbasedev->fd < 0) { - if (!vbasedev->name || strchr(vbasedev->name, '/')) { - error_setg(errp, "wrong host device name"); - return false; - } - - vbasedev->sysfsdev = g_strdup_printf("/sys/bus/platform/devices/%s", - vbasedev->name); - } - - if (!vfio_device_get_name(vbasedev, errp)) { - return false; - } - - if (!vfio_device_attach(vbasedev->name, vbasedev, - &address_space_memory, errp)) { - return false; - } - - if (vfio_populate_device(vbasedev, errp)) { - return true; - } - - vfio_device_detach(vbasedev); - return false; -} - -/** - * vfio_platform_realize - the device realize function - * @dev: device state pointer - * @errp: error - * - * initialize the device, its memory regions and IRQ structures - * IRQ 
are started separately - */ -static void vfio_platform_realize(DeviceState *dev, Error **errp) -{ - ERRP_GUARD(); - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); - SysBusDevice *sbdev = SYS_BUS_DEVICE(dev); - VFIODevice *vbasedev = &vdev->vbasedev; - int i; - - warn_report("-device vfio-platform is deprecated"); - qemu_mutex_init(&vdev->intp_mutex); - - trace_vfio_platform_realize(vbasedev->sysfsdev ? - vbasedev->sysfsdev : vbasedev->name, - vdev->compat); - - if (!vfio_base_device_init(vbasedev, errp)) { - goto init_err; - } - - if (!vdev->compat) { - GError *gerr = NULL; - gchar *contents; - gsize length; - char *path; - - path = g_strdup_printf("%s/of_node/compatible", vbasedev->sysfsdev); - if (!g_file_get_contents(path, &contents, &length, &gerr)) { - error_setg(errp, "%s", gerr->message); - g_error_free(gerr); - g_free(path); - return; - } - g_free(path); - vdev->compat = contents; - for (vdev->num_compat = 0; length; vdev->num_compat++) { - size_t skip = strlen(contents) + 1; - contents += skip; - length -= skip; - } - } - - for (i = 0; i < vbasedev->num_regions; i++) { - if (vfio_region_mmap(vdev->regions[i])) { - warn_report("%s mmap unsupported, performance may be slow", - memory_region_name(vdev->regions[i]->mem)); - } - sysbus_init_mmio(sbdev, vdev->regions[i]->mem); - } - return; - -init_err: - if (vdev->vbasedev.name) { - error_prepend(errp, VFIO_MSG_PREFIX, vdev->vbasedev.name); - } else { - error_prepend(errp, "vfio error: "); - } -} - -static const VMStateDescription vfio_platform_vmstate = { - .name = "vfio-platform", - .unmigratable = 1, -}; - -static const Property vfio_platform_dev_properties[] = { - DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name), - DEFINE_PROP_STRING("sysfsdev", VFIOPlatformDevice, vbasedev.sysfsdev), - DEFINE_PROP_BOOL("x-no-mmap", VFIOPlatformDevice, vbasedev.no_mmap, false), - DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice, - mmap_timeout, 1100), - DEFINE_PROP_BOOL("x-irqfd", 
VFIOPlatformDevice, irqfd_allowed, true), -#ifdef CONFIG_IOMMUFD - DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd, - TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *), -#endif -}; - -static void vfio_platform_instance_init(Object *obj) -{ - VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(obj); - VFIODevice *vbasedev = &vdev->vbasedev; - - vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PLATFORM, &vfio_platform_ops, - DEVICE(vdev), false); -} - -#ifdef CONFIG_IOMMUFD -static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp) -{ - vfio_device_set_fd(&VFIO_PLATFORM_DEVICE(obj)->vbasedev, str, errp); -} -#endif - -static void vfio_platform_class_init(ObjectClass *klass, const void *data) -{ - DeviceClass *dc = DEVICE_CLASS(klass); - SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass); - - dc->realize = vfio_platform_realize; - device_class_set_props(dc, vfio_platform_dev_properties); -#ifdef CONFIG_IOMMUFD - object_class_property_add_str(klass, "fd", NULL, vfio_platform_set_fd); -#endif - dc->vmsd = &vfio_platform_vmstate; - dc->desc = "VFIO-based platform device assignment"; - sbc->connect_irq_notifier = vfio_start_irqfd_injection; - set_bit(DEVICE_CATEGORY_MISC, dc->categories); - - object_class_property_set_description(klass, /* 2.4 */ - "host", - "Host device name of assigned device"); - object_class_property_set_description(klass, /* 2.4 and 2.5 */ - "x-no-mmap", - "Disable MMAP for device. 
Allows to trace MMIO " - "accesses (DEBUG)"); - object_class_property_set_description(klass, /* 2.4 */ - "mmap-timeout-ms", - "When EOI is not provided by KVM/QEMU, wait time " - "(milliseconds) to re-enable device direct access " - "after level interrupt (DEBUG)"); - object_class_property_set_description(klass, /* 2.4 */ - "x-irqfd", - "Allow disabling irqfd support (DEBUG)"); - object_class_property_set_description(klass, /* 2.6 */ - "sysfsdev", - "Host sysfs path of assigned device"); -#ifdef CONFIG_IOMMUFD - object_class_property_set_description(klass, /* 9.0 */ - "iommufd", - "Set host IOMMUFD backend device"); -#endif -} - -static const TypeInfo vfio_platform_dev_info = { - .name = TYPE_VFIO_PLATFORM, - .parent = TYPE_DYNAMIC_SYS_BUS_DEVICE, - .instance_size = sizeof(VFIOPlatformDevice), - .instance_init = vfio_platform_instance_init, - .class_init = vfio_platform_class_init, - .class_size = sizeof(VFIOPlatformDeviceClass), -}; - -static void register_vfio_platform_dev_type(void) -{ - type_register_static(&vfio_platform_dev_info); -} - -type_init(register_vfio_platform_dev_type) diff --git a/hw/vfio/region.c b/hw/vfio/region.c index d04c57db630f3..b165ab0b9378c 100644 --- a/hw/vfio/region.c +++ b/hw/vfio/region.c @@ -365,12 +365,9 @@ void vfio_region_finalize(VFIORegion *region) for (i = 0; i < region->nr_mmaps; i++) { if (region->mmaps[i].mmap) { munmap(region->mmaps[i].mmap, region->mmaps[i].size); - object_unparent(OBJECT(®ion->mmaps[i].mem)); } } - object_unparent(OBJECT(region->mem)); - g_free(region->mem); g_free(region->mmaps); diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c index 564b70ef97fb8..0f23681a3f936 100644 --- a/hw/vfio/spapr.c +++ b/hw/vfio/spapr.c @@ -15,9 +15,8 @@ #include "system/hostmem.h" #include "system/address-spaces.h" -#include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-container-legacy.h" #include "hw/hw.h" -#include "system/ram_addr.h" #include "qemu/error-report.h" #include "qapi/error.h" #include "trace.h" @@ -30,12 
+29,13 @@ typedef struct VFIOHostDMAWindow { QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next; } VFIOHostDMAWindow; -typedef struct VFIOSpaprContainer { - VFIOContainer container; +struct VFIOSpaprContainer { + VFIOLegacyContainer parent_obj; + MemoryListener prereg_listener; QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; unsigned int levels; -} VFIOSpaprContainer; +}; OBJECT_DECLARE_SIMPLE_TYPE(VFIOSpaprContainer, VFIO_IOMMU_SPAPR); @@ -61,8 +61,8 @@ static void vfio_prereg_listener_region_add(MemoryListener *listener, { VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer, prereg_listener); - VFIOContainer *container = &scontainer->container; - VFIOContainerBase *bcontainer = &container->bcontainer; + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(scontainer); + VFIOContainer *bcontainer = VFIO_IOMMU(container); const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; @@ -121,7 +121,7 @@ static void vfio_prereg_listener_region_del(MemoryListener *listener, { VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer, prereg_listener); - VFIOContainer *container = &scontainer->container; + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(scontainer); const hwaddr gpa = section->offset_within_address_space; hwaddr end; int ret; @@ -218,7 +218,7 @@ static VFIOHostDMAWindow *vfio_find_hostwin(VFIOSpaprContainer *container, return hostwin_found ? 
hostwin : NULL; } -static int vfio_spapr_remove_window(VFIOContainer *container, +static int vfio_spapr_remove_window(VFIOLegacyContainer *container, hwaddr offset_within_address_space) { struct vfio_iommu_spapr_tce_remove remove = { @@ -239,14 +239,13 @@ static int vfio_spapr_remove_window(VFIOContainer *container, return 0; } -static bool vfio_spapr_create_window(VFIOContainer *container, +static bool vfio_spapr_create_window(VFIOLegacyContainer *container, MemoryRegionSection *section, hwaddr *pgsize, Error **errp) { int ret = 0; - VFIOContainerBase *bcontainer = &container->bcontainer; - VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, - container); + VFIOContainer *bcontainer = VFIO_IOMMU(container); + VFIOSpaprContainer *scontainer = VFIO_IOMMU_SPAPR(bcontainer); IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr); uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask; unsigned entries, bits_total, bits_per_level, max_levels, ddw_levels; @@ -348,14 +347,12 @@ static bool vfio_spapr_create_window(VFIOContainer *container, } static bool -vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer, +vfio_spapr_container_add_section_window(VFIOContainer *bcontainer, MemoryRegionSection *section, Error **errp) { - VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, - container); + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + VFIOSpaprContainer *scontainer = VFIO_IOMMU_SPAPR(container); VFIOHostDMAWindow *hostwin; hwaddr pgsize = 0; int ret; @@ -440,13 +437,11 @@ vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer, } static void -vfio_spapr_container_del_section_window(VFIOContainerBase *bcontainer, +vfio_spapr_container_del_section_window(VFIOContainer *bcontainer, MemoryRegionSection *section) { - VFIOContainer *container = container_of(bcontainer, 
VFIOContainer, - bcontainer); - VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, - container); + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + VFIOSpaprContainer *scontainer = VFIO_IOMMU_SPAPR(container); if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) { return; @@ -463,12 +458,10 @@ vfio_spapr_container_del_section_window(VFIOContainerBase *bcontainer, } } -static void vfio_spapr_container_release(VFIOContainerBase *bcontainer) +static void vfio_spapr_container_release(VFIOContainer *bcontainer) { - VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, - container); + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + VFIOSpaprContainer *scontainer = VFIO_IOMMU_SPAPR(container); VFIOHostDMAWindow *hostwin, *next; if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) { @@ -481,13 +474,11 @@ static void vfio_spapr_container_release(VFIOContainerBase *bcontainer) } } -static bool vfio_spapr_container_setup(VFIOContainerBase *bcontainer, +static bool vfio_spapr_container_setup(VFIOContainer *bcontainer, Error **errp) { - VFIOContainer *container = container_of(bcontainer, VFIOContainer, - bcontainer); - VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer, - container); + VFIOLegacyContainer *container = VFIO_IOMMU_LEGACY(bcontainer); + VFIOSpaprContainer *scontainer = VFIO_IOMMU_SPAPR(container); struct vfio_iommu_spapr_tce_info info; bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU; int ret, fd = container->fd; diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index fc6ed230d0c89..1e895448cd9b4 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -104,15 +104,14 @@ vfio_device_dirty_tracking_update(uint64_t start, uint64_t end, uint64_t min, ui vfio_device_dirty_tracking_start(int nr_ranges, uint64_t min32, uint64_t max32, uint64_t min64, 
uint64_t max64, uint64_t minpci, uint64_t maxpci) "nr_ranges %d 32:[0x%"PRIx64" - 0x%"PRIx64"], 64:[0x%"PRIx64" - 0x%"PRIx64"], pci64:[0x%"PRIx64" - 0x%"PRIx64"]" vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64 -# container-base.c -vfio_container_query_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64 - # container.c +vfio_container_query_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t translated_addr, uint64_t dirty_pages) "iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" gpa=0x%"PRIx64" dirty_pages=%"PRIu64 + +# container-legacy.c vfio_container_disconnect(int fd) "close container->fd=%d" vfio_group_put(int fd) "close group->fd=%d" vfio_device_get(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u" vfio_device_put(int fd) "close vdev->fd=%d" -vfio_legacy_dma_unmap_overflow_workaround(void) "" # region.c vfio_region_write(const char *name, int index, uint64_t addr, uint64_t data, unsigned size) " (%s:region%d+0x%"PRIx64", 0x%"PRIx64 ", %d)" @@ -127,17 +126,6 @@ vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Re vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" -# platform.c -vfio_platform_realize(char *name, char *compat) "vfio device %s, compat = %s" -vfio_platform_eoi(int pin, int fd) "EOI IRQ pin %d (fd=%d)" -vfio_platform_intp_mmap_enable(int pin) "IRQ #%d still active, stay in slow path" -vfio_platform_intp_interrupt(int pin, int fd) "Inject IRQ #%d (fd = %d)" -vfio_platform_intp_inject_pending_lockheld(int pin, int fd) "Inject 
pending IRQ #%d (fd = %d)" -vfio_platform_populate_interrupts(int pin, int count, int flags) "- IRQ index %d: count %d, flags=0x%x" -vfio_intp_interrupt_set_pending(int index) "irq %d is set PENDING" -vfio_platform_start_level_irqfd_injection(int index, int fd, int resamplefd) "IRQ index=%d, fd = %d, resamplefd = %d" -vfio_platform_start_edge_irqfd_injection(int index, int fd) "IRQ index=%d, fd = %d" - # spapr.c vfio_prereg_listener_region_add_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64 vfio_prereg_listener_region_del_skip(uint64_t start, uint64_t end) "0x%"PRIx64" - 0x%"PRIx64 diff --git a/hw/vfio/types.h b/hw/vfio/types.h new file mode 100644 index 0000000000000..5482d908088af --- /dev/null +++ b/hw/vfio/types.h @@ -0,0 +1,23 @@ +/* + * VFIO types definition + * + * Copyright Red Hat, Inc. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef HW_VFIO_VFIO_TYPES_H +#define HW_VFIO_VFIO_TYPES_H + +/* + * TYPE_VFIO_PCI_DEVICE is an abstract type used to share code + * between VFIO implementations that use a kernel driver + * with those that use user sockets. + */ +#define TYPE_VFIO_PCI_DEVICE "vfio-pci-device" + +#define TYPE_VFIO_PCI "vfio-pci" +/* TYPE_VFIO_PCI shares struct VFIOPCIDevice. 
*/ + +#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug" + +#endif /* HW_VFIO_VFIO_TYPES_H */ diff --git a/hw/vfio/vfio-iommufd.h b/hw/vfio/vfio-iommufd.h index 07ea0f4304964..6b28e1ff7bbb1 100644 --- a/hw/vfio/vfio-iommufd.h +++ b/hw/vfio/vfio-iommufd.h @@ -9,7 +9,7 @@ #ifndef HW_VFIO_VFIO_IOMMUFD_H #define HW_VFIO_VFIO_IOMMUFD_H -#include "hw/vfio/vfio-container-base.h" +#include "hw/vfio/vfio-container.h" typedef struct VFIODevice VFIODevice; @@ -22,12 +22,13 @@ typedef struct VFIOIOASHwpt { typedef struct IOMMUFDBackend IOMMUFDBackend; -typedef struct VFIOIOMMUFDContainer { - VFIOContainerBase bcontainer; +struct VFIOIOMMUFDContainer { + VFIOContainer parent_obj; + IOMMUFDBackend *be; uint32_t ioas_id; QLIST_HEAD(, VFIOIOASHwpt) hwpt_list; -} VFIOIOMMUFDContainer; +}; OBJECT_DECLARE_SIMPLE_TYPE(VFIOIOMMUFDContainer, VFIO_IOMMU_IOMMUFD); diff --git a/hw/vfio/vfio-listener.h b/hw/vfio/vfio-listener.h index eb69ddd374fa2..a90674ca96e8c 100644 --- a/hw/vfio/vfio-listener.h +++ b/hw/vfio/vfio-listener.h @@ -9,7 +9,7 @@ #ifndef HW_VFIO_VFIO_LISTENER_H #define HW_VFIO_VFIO_LISTENER_H -bool vfio_listener_register(VFIOContainerBase *bcontainer, Error **errp); -void vfio_listener_unregister(VFIOContainerBase *bcontainer); +bool vfio_listener_register(VFIOContainer *bcontainer, Error **errp); +void vfio_listener_unregister(VFIOContainer *bcontainer); #endif /* HW_VFIO_VFIO_LISTENER_H */ diff --git a/hw/vfio/vfio-migration-internal.h b/hw/vfio/vfio-migration-internal.h index 54141e27e6b2f..814fbd9ebaef3 100644 --- a/hw/vfio/vfio-migration-internal.h +++ b/hw/vfio/vfio-migration-internal.h @@ -13,7 +13,6 @@ #include #endif -#include "qemu/typedefs.h" #include "qemu/notify.h" /* diff --git a/include/hw/vfio/vfio-region.h b/hw/vfio/vfio-region.h similarity index 100% rename from include/hw/vfio/vfio-region.h rename to hw/vfio/vfio-region.h diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index 7648a2d68da97..10f5c53ac0937 100644 --- a/hw/virtio/Kconfig +++ 
b/hw/virtio/Kconfig @@ -126,3 +126,8 @@ config VHOST_USER_SCMI bool default y depends on VIRTIO && VHOST_USER && ARM + +config VHOST_USER_TEST + bool + default y + depends on VIRTIO && VHOST_USER diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index 3ea7b3cec832f..affd66887db50 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -22,7 +22,7 @@ if have_vhost system_virtio_ss.add(files('vhost-user-base.c')) # MMIO Stubs - system_virtio_ss.add(files('vhost-user-device.c')) + system_virtio_ss.add(when: 'CONFIG_VHOST_USER_TEST', if_true: files('vhost-user-test-device.c')) system_virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c')) system_virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) system_virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) @@ -30,7 +30,8 @@ if have_vhost system_virtio_ss.add(when: 'CONFIG_VHOST_USER_INPUT', if_true: files('vhost-user-input.c')) # PCI Stubs - system_virtio_ss.add(when: 'CONFIG_VIRTIO_PCI', if_true: files('vhost-user-device-pci.c')) + system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_TEST'], + if_true: files('vhost-user-test-device-pci.c')) system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c')) system_virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], @@ -56,7 +57,7 @@ specific_virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-bal specific_virtio_ss.add(when: 'CONFIG_VHOST_USER_FS', if_true: files('vhost-user-fs.c')) specific_virtio_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem.c')) specific_virtio_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock.c')) -specific_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) +system_virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c')) system_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: 
files('virtio-nsm.c')) system_virtio_ss.add(when: 'CONFIG_VIRTIO_NSM', if_true: [files('cbor-helpers.c'), libcbor]) system_virtio_ss.add(when: 'CONFIG_VHOST_USER_SCMI', if_true: files('vhost-user-scmi.c')) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 76f0d458b29c0..658cc365e7010 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -75,7 +75,6 @@ virtqueue_flush(void *vq, unsigned int count) "vq %p count %u" virtqueue_pop(void *vq, void *elem, unsigned int in_num, unsigned int out_num) "vq %p elem %p in_num %u out_num %u" virtio_queue_notify(void *vdev, int n, void *vq) "vdev %p n %d vq %p" virtio_notify_irqfd_deferred_fn(void *vdev, void *vq) "vdev %p vq %p" -virtio_notify_irqfd(void *vdev, void *vq) "vdev %p vq %p" virtio_notify(void *vdev, void *vq) "vdev %p vq %p" virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u" diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c index d1da40afc8016..4a7b970976165 100644 --- a/hw/virtio/vdpa-dev.c +++ b/hw/virtio/vdpa-dev.c @@ -338,6 +338,12 @@ static int vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status) return 0; } +static struct vhost_dev *vhost_vdpa_device_get_vhost(VirtIODevice *vdev) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + return &s->dev; +} + static const Property vhost_vdpa_device_properties[] = { DEFINE_PROP_STRING("vhostdev", VhostVdpaDevice, vhostdev), DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0), @@ -369,6 +375,7 @@ static void vhost_vdpa_device_class_init(ObjectClass *klass, const void *data) vdc->set_config = vhost_vdpa_device_set_config; vdc->get_features = vhost_vdpa_device_get_features; vdc->set_status = vhost_vdpa_device_set_status; + vdc->get_vhost = vhost_vdpa_device_get_vhost; } static void vhost_vdpa_device_instance_init(Object *obj) diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index 833804dd40f2e..4367db0d9511e 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ 
-20,6 +20,11 @@ #include #include +struct vhost_features { + uint64_t count; + uint64_t features[VIRTIO_FEATURES_NU64S]; +}; + static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request, void *arg) { @@ -182,12 +187,6 @@ static int vhost_kernel_get_vring_worker(struct vhost_dev *dev, return vhost_kernel_call(dev, VHOST_GET_VRING_WORKER, worker); } -static int vhost_kernel_set_features(struct vhost_dev *dev, - uint64_t features) -{ - return vhost_kernel_call(dev, VHOST_SET_FEATURES, &features); -} - static int vhost_kernel_set_backend_cap(struct vhost_dev *dev) { uint64_t features; @@ -210,10 +209,51 @@ static int vhost_kernel_set_backend_cap(struct vhost_dev *dev) return 0; } -static int vhost_kernel_get_features(struct vhost_dev *dev, - uint64_t *features) +static int vhost_kernel_set_features(struct vhost_dev *dev, + const uint64_t *features) { - return vhost_kernel_call(dev, VHOST_GET_FEATURES, features); + struct vhost_features farray; + bool extended_in_use; + int r; + + farray.count = VIRTIO_FEATURES_NU64S; + virtio_features_copy(farray.features, features); + extended_in_use = virtio_features_use_ex(farray.features); + + /* + * Can't check for ENOTTY: for unknown ioctls the kernel interprets + * the argument as a virtio queue id and most likely errors out validating + * such id, instead of reporting an unknown operation. 
+ */ + r = vhost_kernel_call(dev, VHOST_SET_FEATURES_ARRAY, &farray); + if (!r) { + return 0; + } + + if (extended_in_use) { + error_report("Trying to set extended features without kernel support"); + return -EINVAL; + } + return vhost_kernel_call(dev, VHOST_SET_FEATURES, &farray.features[0]); +} + +static int vhost_kernel_get_features(struct vhost_dev *dev, uint64_t *features) +{ + struct vhost_features farray; + int r; + + farray.count = VIRTIO_FEATURES_NU64S; + r = vhost_kernel_call(dev, VHOST_GET_FEATURES_ARRAY, &farray); + if (r) { + memset(&farray, 0, sizeof(farray)); + r = vhost_kernel_call(dev, VHOST_GET_FEATURES, &farray.features[0]); + } + if (r) { + return r; + } + + virtio_features_copy(features, farray.features); + return 0; } static int vhost_kernel_set_owner(struct vhost_dev *dev) @@ -341,8 +381,8 @@ const VhostOps kernel_ops = { .vhost_attach_vring_worker = vhost_kernel_attach_vring_worker, .vhost_new_worker = vhost_kernel_new_worker, .vhost_free_worker = vhost_kernel_free_worker, - .vhost_set_features = vhost_kernel_set_features, - .vhost_get_features = vhost_kernel_get_features, + .vhost_set_features_ex = vhost_kernel_set_features, + .vhost_get_features_ex = vhost_kernel_get_features, .vhost_set_backend_cap = vhost_kernel_set_backend_cap, .vhost_set_owner = vhost_kernel_set_owner, .vhost_get_vq_index = vhost_kernel_get_vq_index, diff --git a/hw/virtio/vhost-user-device-pci.c b/hw/virtio/vhost-user-test-device-pci.c similarity index 77% rename from hw/virtio/vhost-user-device-pci.c rename to hw/virtio/vhost-user-test-device-pci.c index f10bac874e784..b4ed0efb50f28 100644 --- a/hw/virtio/vhost-user-device-pci.c +++ b/hw/virtio/vhost-user-test-device-pci.c @@ -18,13 +18,13 @@ struct VHostUserDevicePCI { VHostUserBase vub; }; -#define TYPE_VHOST_USER_DEVICE_PCI "vhost-user-device-pci-base" +#define TYPE_VHOST_USER_TEST_DEVICE_PCI "vhost-user-test-device-pci-base" -OBJECT_DECLARE_SIMPLE_TYPE(VHostUserDevicePCI, VHOST_USER_DEVICE_PCI) 
+OBJECT_DECLARE_SIMPLE_TYPE(VHostUserDevicePCI, VHOST_USER_TEST_DEVICE_PCI) static void vhost_user_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { - VHostUserDevicePCI *dev = VHOST_USER_DEVICE_PCI(vpci_dev); + VHostUserDevicePCI *dev = VHOST_USER_TEST_DEVICE_PCI(vpci_dev); DeviceState *vdev = DEVICE(&dev->vub); vpci_dev->nvectors = 1; @@ -38,9 +38,6 @@ static void vhost_user_device_pci_class_init(ObjectClass *klass, VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); - /* Reason: stop users confusing themselves */ - dc->user_creatable = false; - k->realize = vhost_user_device_pci_realize; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; @@ -51,15 +48,15 @@ static void vhost_user_device_pci_class_init(ObjectClass *klass, static void vhost_user_device_pci_instance_init(Object *obj) { - VHostUserDevicePCI *dev = VHOST_USER_DEVICE_PCI(obj); + VHostUserDevicePCI *dev = VHOST_USER_TEST_DEVICE_PCI(obj); virtio_instance_init_common(obj, &dev->vub, sizeof(dev->vub), - TYPE_VHOST_USER_DEVICE); + TYPE_VHOST_USER_TEST_DEVICE); } static const VirtioPCIDeviceTypeInfo vhost_user_device_pci_info = { - .base_name = TYPE_VHOST_USER_DEVICE_PCI, - .non_transitional_name = "vhost-user-device-pci", + .base_name = TYPE_VHOST_USER_TEST_DEVICE_PCI, + .non_transitional_name = "vhost-user-test-device-pci", .instance_size = sizeof(VHostUserDevicePCI), .instance_init = vhost_user_device_pci_instance_init, .class_init = vhost_user_device_pci_class_init, diff --git a/hw/virtio/vhost-user-device.c b/hw/virtio/vhost-user-test-device.c similarity index 87% rename from hw/virtio/vhost-user-device.c rename to hw/virtio/vhost-user-test-device.c index 3939bdf755222..1b98ea3e4883d 100644 --- a/hw/virtio/vhost-user-device.c +++ b/hw/virtio/vhost-user-test-device.c @@ -1,5 +1,5 @@ /* - * Generic vhost-user-device implementation for any vhost-user-backend + * Generic vhost-user-test-device 
implementation for any vhost-user-backend * * This is a concrete implementation of vhost-user-base which can be * configured via properties. It is useful for development and @@ -25,7 +25,7 @@ */ static const VMStateDescription vud_vmstate = { - .name = "vhost-user-device", + .name = "vhost-user-test-device", .unmigratable = 1, }; @@ -41,16 +41,13 @@ static void vud_class_init(ObjectClass *klass, const void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - /* Reason: stop inexperienced users confusing themselves */ - dc->user_creatable = false; - device_class_set_props(dc, vud_properties); dc->vmsd = &vud_vmstate; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); } static const TypeInfo vud_info = { - .name = TYPE_VHOST_USER_DEVICE, + .name = TYPE_VHOST_USER_TEST_DEVICE, .parent = TYPE_VHOST_USER_BASE, .class_init = vud_class_init, }; diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 1e1d6b0d6e023..36c9c2e04d618 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -2039,7 +2039,10 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) error_setg(errp, "%s: Failed to get ufd", __func__); return -EIO; } - qemu_socket_set_nonblock(ufd); + if (!qemu_set_blocking(ufd, false, errp)) { + close(ufd); + return -EINVAL; + } /* register ufd with userfault thread */ u->postcopy_fd.fd = ufd; diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index 6e4088831fc50..107d88babea83 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ -147,9 +147,7 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) return; } - if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { - error_setg_errno(errp, errno, - "vhost-vsock: unable to set non-blocking mode"); + if (!qemu_set_blocking(vhostfd, false, errp)) { return; } } else { @@ -160,9 +158,7 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) return; } - if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { - 
error_setg_errno(errp, errno, - "Failed to set FD nonblocking"); + if (!qemu_set_blocking(vhostfd, false, errp)) { return; } } diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index c30ea1156e811..266a11514a13e 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -27,6 +27,7 @@ #include "migration/blocker.h" #include "migration/qemu-file-types.h" #include "system/dma.h" +#include "system/memory.h" #include "trace.h" /* enabled until disconnected backend stabilizes */ @@ -455,7 +456,8 @@ static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, hwaddr *plen, bool is_write) { if (!vhost_dev_has_iommu(dev)) { - return cpu_physical_memory_map(addr, plen, is_write); + return address_space_map(dev->vdev->dma_as, addr, plen, is_write, + MEMTXATTRS_UNSPECIFIED); } else { return (void *)(uintptr_t)addr; } @@ -466,7 +468,8 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer, hwaddr access_len) { if (!vhost_dev_has_iommu(dev)) { - cpu_physical_memory_unmap(buffer, len, is_write, access_len); + address_space_unmap(dev->vdev->dma_as, buffer, len, is_write, + access_len); } } @@ -972,20 +975,34 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev, static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log) { - uint64_t features = dev->acked_features; + uint64_t features[VIRTIO_FEATURES_NU64S]; int r; + + virtio_features_copy(features, dev->acked_features_ex); if (enable_log) { - features |= 0x1ULL << VHOST_F_LOG_ALL; + virtio_add_feature_ex(features, VHOST_F_LOG_ALL); } if (!vhost_dev_has_iommu(dev)) { - features &= ~(0x1ULL << VIRTIO_F_IOMMU_PLATFORM); + virtio_clear_feature_ex(features, VIRTIO_F_IOMMU_PLATFORM); } if (dev->vhost_ops->vhost_force_iommu) { if (dev->vhost_ops->vhost_force_iommu(dev) == true) { - features |= 0x1ULL << VIRTIO_F_IOMMU_PLATFORM; + virtio_add_feature_ex(features, VIRTIO_F_IOMMU_PLATFORM); } } - r = dev->vhost_ops->vhost_set_features(dev, features); + + if (virtio_features_use_ex(features) && + 
!dev->vhost_ops->vhost_set_features_ex) { + r = -EINVAL; + VHOST_OPS_DEBUG(r, "extended features without device support"); + goto out; + } + + if (dev->vhost_ops->vhost_set_features_ex) { + r = dev->vhost_ops->vhost_set_features_ex(dev, features); + } else { + r = dev->vhost_ops->vhost_set_features(dev, features[0]); + } if (r < 0) { VHOST_OPS_DEBUG(r, "vhost_set_features failed"); goto out; @@ -1110,7 +1127,8 @@ static bool vhost_log_global_start(MemoryListener *listener, Error **errp) r = vhost_migration_log(listener, true); if (r < 0) { - abort(); + error_setg_errno(errp, -r, "vhost: Failed to start logging"); + return false; } return true; } @@ -1121,7 +1139,8 @@ static void vhost_log_global_stop(MemoryListener *listener) r = vhost_migration_log(listener, false); if (r < 0) { - abort(); + /* Not fatal, so report it, but take no further action */ + warn_report("vhost: Failed to stop logging"); } } @@ -1506,12 +1525,27 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) } } +static int vhost_dev_get_features(struct vhost_dev *hdev, + uint64_t *features) +{ + uint64_t features64; + int r; + + if (hdev->vhost_ops->vhost_get_features_ex) { + return hdev->vhost_ops->vhost_get_features_ex(hdev, features); + } + + r = hdev->vhost_ops->vhost_get_features(hdev, &features64); + virtio_features_from_u64(features, features64); + return r; +} + int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, uint32_t busyloop_timeout, Error **errp) { + uint64_t features[VIRTIO_FEATURES_NU64S]; unsigned int used, reserved, limit; - uint64_t features; int i, r, n_initialized_vqs = 0; hdev->vdev = NULL; @@ -1531,7 +1565,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, goto fail; } - r = hdev->vhost_ops->vhost_get_features(hdev, &features); + r = vhost_dev_get_features(hdev, features); if (r < 0) { error_setg_errno(errp, -r, "vhost_get_features failed"); goto fail; @@ -1569,7 +1603,7 @@ int vhost_dev_init(struct vhost_dev *hdev, 
void *opaque, } } - hdev->features = features; + virtio_features_copy(hdev->features_ex, features); hdev->memory_listener = (MemoryListener) { .name = "vhost", @@ -1592,7 +1626,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, }; if (hdev->migration_blocker == NULL) { - if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) { + if (!virtio_has_feature_ex(hdev->features_ex, VHOST_F_LOG_ALL)) { error_setg(&hdev->migration_blocker, "Migration disabled: vhost lacks VHOST_F_LOG_ALL feature."); } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_alloc_check()) { @@ -1815,7 +1849,7 @@ void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask) int r; EventNotifier *notifier = &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; - EventNotifier *config_notifier = &vdev->config_notifier; + EventNotifier *config_notifier = virtio_config_get_guest_notifier(vdev); assert(hdev->vhost_ops); if ((hdev->started == false) || @@ -1846,39 +1880,40 @@ static void vhost_stop_config_intr(struct vhost_dev *dev) static void vhost_start_config_intr(struct vhost_dev *dev) { int r; + EventNotifier *config_notifier = + virtio_config_get_guest_notifier(dev->vdev); assert(dev->vhost_ops); - int fd = event_notifier_get_fd(&dev->vdev->config_notifier); + int fd = event_notifier_get_fd(config_notifier); if (dev->vhost_ops->vhost_set_config_call) { r = dev->vhost_ops->vhost_set_config_call(dev, fd); if (!r) { - event_notifier_set(&dev->vdev->config_notifier); + event_notifier_set(config_notifier); } } } -uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, - uint64_t features) +void vhost_get_features_ex(struct vhost_dev *hdev, + const int *feature_bits, + uint64_t *features) { const int *bit = feature_bits; + while (*bit != VHOST_INVALID_FEATURE_BIT) { - uint64_t bit_mask = (1ULL << *bit); - if (!(hdev->features & bit_mask)) { - features &= ~bit_mask; + if (!virtio_has_feature_ex(hdev->features_ex, *bit)) { + 
virtio_clear_feature_ex(features, *bit); } bit++; } - return features; } -void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, - uint64_t features) +void vhost_ack_features_ex(struct vhost_dev *hdev, const int *feature_bits, + const uint64_t *features) { const int *bit = feature_bits; while (*bit != VHOST_INVALID_FEATURE_BIT) { - uint64_t bit_mask = (1ULL << *bit); - if (features & bit_mask) { - hdev->acked_features |= bit_mask; + if (virtio_has_feature_ex(features, *bit)) { + virtio_add_feature_ex(hdev->acked_features_ex, *bit); } bit++; } @@ -2137,12 +2172,13 @@ static int do_vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, { int i; int rc = 0; + EventNotifier *config_notifier = virtio_config_get_guest_notifier(vdev); /* should only be called after backend is connected */ assert(hdev->vhost_ops); event_notifier_test_and_clear( &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); - event_notifier_test_and_clear(&vdev->config_notifier); + event_notifier_test_and_clear(config_notifier); event_notifier_cleanup( &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index db787d00b312e..02cdd807d77e2 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -23,6 +23,7 @@ #include "hw/qdev-properties.h" #include "hw/boards.h" #include "system/balloon.h" +#include "system/ramblock.h" #include "hw/virtio/virtio-balloon.h" #include "system/address-spaces.h" #include "qapi/error.h" diff --git a/hw/virtio/virtio-bus.c b/hw/virtio/virtio-bus.c index 11adfbf3abacd..cef944e01588f 100644 --- a/hw/virtio/virtio-bus.c +++ b/hw/virtio/virtio-bus.c @@ -62,9 +62,14 @@ void virtio_bus_device_plugged(VirtIODevice *vdev, Error **errp) } /* Get the features of the plugged device. 
*/ - assert(vdc->get_features != NULL); - vdev->host_features = vdc->get_features(vdev, vdev->host_features, - &local_err); + if (vdc->get_features_ex) { + vdc->get_features_ex(vdev, vdev->host_features_ex, &local_err); + } else { + assert(vdc->get_features != NULL); + virtio_features_from_u64(vdev->host_features_ex, + vdc->get_features(vdev, vdev->host_features, + &local_err)); + } if (local_err) { error_propagate(errp, local_err); return; diff --git a/hw/virtio/virtio-hmp-cmds.c b/hw/virtio/virtio-hmp-cmds.c index 7d8677bcf0dc7..1daae482d3715 100644 --- a/hw/virtio/virtio-hmp-cmds.c +++ b/hw/virtio/virtio-hmp-cmds.c @@ -74,7 +74,8 @@ static void hmp_virtio_dump_features(Monitor *mon, } if (features->has_unknown_dev_features) { - monitor_printf(mon, " unknown-features(0x%016"PRIx64")\n", + monitor_printf(mon, " unknown-features(0x%016"PRIx64"%016"PRIx64")\n", + features->unknown_dev_features2, features->unknown_dev_features); } } diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index c46f6f9c3e2f8..ae7c13e33cf39 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -15,8 +15,10 @@ #include "qemu/cutils.h" #include "qemu/error-report.h" #include "qemu/units.h" +#include "qemu/target-info-qapi.h" #include "system/numa.h" #include "system/system.h" +#include "system/ramblock.h" #include "system/reset.h" #include "system/runstate.h" #include "hw/virtio/virtio.h" @@ -24,7 +26,6 @@ #include "hw/virtio/virtio-mem.h" #include "qapi/error.h" #include "qapi/visitor.h" -#include "system/ram_addr.h" #include "migration/misc.h" #include "hw/boards.h" #include "hw/qdev-properties.h" @@ -33,13 +34,21 @@ static const VMStateDescription vmstate_virtio_mem_device_early; -/* - * We only had legacy x86 guests that did not support - * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE. Other targets don't have legacy guests. 
- */ -#if defined(TARGET_X86_64) || defined(TARGET_I386) -#define VIRTIO_MEM_HAS_LEGACY_GUESTS -#endif +static bool virtio_mem_has_legacy_guests(void) +{ + /* + * We only had legacy x86 guests that did not support + * VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE. Other targets don't have + * legacy guests. + */ + switch (target_arch()) { + case SYS_EMU_TARGET_I386: + case SYS_EMU_TARGET_X86_64: + return true; + default: + return false; + } +} /* * Let's not allow blocks smaller than 1 MiB, for example, to keep the tracking @@ -143,7 +152,6 @@ static uint64_t virtio_mem_default_block_size(RAMBlock *rb) return MAX(page_size, VIRTIO_MEM_MIN_BLOCK_SIZE); } -#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) static bool virtio_mem_has_shared_zeropage(RAMBlock *rb) { /* @@ -154,7 +162,6 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb) return !qemu_ram_is_shared(rb) && qemu_ram_get_fd(rb) < 0 && qemu_ram_pagesize(rb) == qemu_real_host_page_size(); } -#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */ /* * Size the usable region bigger than the requested size if possible. Esp. @@ -170,13 +177,20 @@ static bool virtio_mem_has_shared_zeropage(RAMBlock *rb) * necessary (as the section size can change). But it's more likely that the * section size will rather get smaller and not bigger over time. 
*/ -#if defined(TARGET_X86_64) || defined(TARGET_I386) || defined(TARGET_S390X) -#define VIRTIO_MEM_USABLE_EXTENT (2 * (128 * MiB)) -#elif defined(TARGET_ARM) -#define VIRTIO_MEM_USABLE_EXTENT (2 * (512 * MiB)) -#else -#error VIRTIO_MEM_USABLE_EXTENT not defined -#endif +static uint64_t virtio_mem_usable_extent_size(void) +{ + switch (target_arch()) { + case SYS_EMU_TARGET_I386: + case SYS_EMU_TARGET_X86_64: + case SYS_EMU_TARGET_S390X: + return 2 * 128 * MiB; + case SYS_EMU_TARGET_AARCH64: + case SYS_EMU_TARGET_ARM: + return 2 * 512 * MiB; + default: + g_assert_not_reached(); + } +} static bool virtio_mem_is_busy(void) { @@ -699,7 +713,7 @@ static void virtio_mem_resize_usable_region(VirtIOMEM *vmem, bool can_shrink) { uint64_t newsize = MIN(memory_region_size(&vmem->memdev->mr), - requested_size + VIRTIO_MEM_USABLE_EXTENT); + requested_size + virtio_mem_usable_extent_size()); /* The usable region size always has to be multiples of the block size. */ newsize = QEMU_ALIGN_UP(newsize, vmem->block_size); @@ -975,28 +989,28 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) rb = vmem->memdev->mr.ram_block; page_size = qemu_ram_pagesize(rb); -#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) - switch (vmem->unplugged_inaccessible) { - case ON_OFF_AUTO_AUTO: - if (virtio_mem_has_shared_zeropage(rb)) { - vmem->unplugged_inaccessible = ON_OFF_AUTO_OFF; - } else { - vmem->unplugged_inaccessible = ON_OFF_AUTO_ON; - } - break; - case ON_OFF_AUTO_OFF: - if (!virtio_mem_has_shared_zeropage(rb)) { - warn_report("'%s' property set to 'off' with a memdev that does" - " not support the shared zeropage.", - VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP); + if (virtio_mem_has_legacy_guests()) { + switch (vmem->unplugged_inaccessible) { + case ON_OFF_AUTO_AUTO: + if (virtio_mem_has_shared_zeropage(rb)) { + vmem->unplugged_inaccessible = ON_OFF_AUTO_OFF; + } else { + vmem->unplugged_inaccessible = ON_OFF_AUTO_ON; + } + break; + case ON_OFF_AUTO_OFF: + if 
(!virtio_mem_has_shared_zeropage(rb)) { + warn_report("'%s' property set to 'off' with a memdev that does" + " not support the shared zeropage.", + VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP); + } + break; + default: + break; } - break; - default: - break; + } else { + vmem->unplugged_inaccessible = ON_OFF_AUTO_ON; } -#else /* VIRTIO_MEM_HAS_LEGACY_GUESTS */ - vmem->unplugged_inaccessible = ON_OFF_AUTO_ON; -#endif /* VIRTIO_MEM_HAS_LEGACY_GUESTS */ if (vmem->dynamic_memslots && vmem->unplugged_inaccessible != ON_OFF_AUTO_ON) { @@ -1693,16 +1707,17 @@ static const Property virtio_mem_properties[] = { DEFINE_PROP_BOOL(VIRTIO_MEM_PREALLOC_PROP, VirtIOMEM, prealloc, false), DEFINE_PROP_LINK(VIRTIO_MEM_MEMDEV_PROP, VirtIOMEM, memdev, TYPE_MEMORY_BACKEND, HostMemoryBackend *), -#if defined(VIRTIO_MEM_HAS_LEGACY_GUESTS) - DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM, - unplugged_inaccessible, ON_OFF_AUTO_ON), -#endif DEFINE_PROP_BOOL(VIRTIO_MEM_EARLY_MIGRATION_PROP, VirtIOMEM, early_migration, true), DEFINE_PROP_BOOL(VIRTIO_MEM_DYNAMIC_MEMSLOTS_PROP, VirtIOMEM, dynamic_memslots, false), }; +static const Property virtio_mem_legacy_guests_properties[] = { + DEFINE_PROP_ON_OFF_AUTO(VIRTIO_MEM_UNPLUGGED_INACCESSIBLE_PROP, VirtIOMEM, + unplugged_inaccessible, ON_OFF_AUTO_ON), +}; + static uint64_t virtio_mem_rdm_get_min_granularity(const RamDiscardManager *rdm, const MemoryRegion *mr) { @@ -1854,6 +1869,9 @@ static void virtio_mem_class_init(ObjectClass *klass, const void *data) RamDiscardManagerClass *rdmc = RAM_DISCARD_MANAGER_CLASS(klass); device_class_set_props(dc, virtio_mem_properties); + if (virtio_mem_has_legacy_guests()) { + device_class_set_props(dc, virtio_mem_legacy_guests_properties); + } dc->vmsd = &vmstate_virtio_mem; set_bit(DEVICE_CATEGORY_MISC, dc->categories); diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 532c67107ba1d..fb58c36452730 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -34,6 
+34,7 @@ #include "qemu/error-report.h" #include "qemu/log.h" #include "trace.h" +#include "qapi/error.h" static bool virtio_mmio_ioeventfd_enabled(DeviceState *d) { @@ -612,14 +613,14 @@ static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f) { VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); - vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL); + vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL, &error_fatal); } static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f) { VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); - return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1); + return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1, &error_fatal); } static bool virtio_mmio_has_extra_state(DeviceState *opaque) diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 767216d795998..937e22f08a200 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -34,6 +34,7 @@ #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/loader.h" +#include "system/accel-irq.h" #include "system/kvm.h" #include "hw/virtio/virtio-pci.h" #include "qemu/range.h" @@ -109,6 +110,29 @@ static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { } }; +static bool virtio_pci_modern_state_features128_needed(void *opaque) +{ + VirtIOPCIProxy *proxy = opaque; + uint32_t features = 0; + int i; + + for (i = 2; i < ARRAY_SIZE(proxy->guest_features); ++i) { + features |= proxy->guest_features[i]; + } + return features; +} + +static const VMStateDescription vmstate_virtio_pci_modern_state_features128 = { + .name = "virtio_pci/modern_state/features128", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_pci_modern_state_features128_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT32_SUB_ARRAY(guest_features, VirtIOPCIProxy, 2, 2), + VMSTATE_END_OF_LIST() + } +}; + static bool virtio_pci_modern_state_needed(void *opaque) { VirtIOPCIProxy *proxy = opaque; @@ -116,6 +140,12 @@ static 
bool virtio_pci_modern_state_needed(void *opaque) return virtio_pci_modern(proxy); } +/* + * Avoid silently breaking migration should the feature space increase + * even more in the (far away) future + */ +QEMU_BUILD_BUG_ON(VIRTIO_FEATURES_NU32S != 4); + static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { .name = "virtio_pci/modern_state", .version_id = 1, @@ -124,11 +154,15 @@ static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { .fields = (const VMStateField[]) { VMSTATE_UINT32(dfselect, VirtIOPCIProxy), VMSTATE_UINT32(gfselect, VirtIOPCIProxy), - VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), + VMSTATE_UINT32_SUB_ARRAY(guest_features, VirtIOPCIProxy, 0, 2), VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0, vmstate_virtio_pci_modern_queue_state, VirtIOPCIQueue), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * const []) { + &vmstate_virtio_pci_modern_state_features128, + NULL } }; @@ -154,14 +188,14 @@ static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); + vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL, &error_fatal); } static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); - return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); + return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1, &error_fatal); } static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) @@ -825,11 +859,11 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, if (irqfd->users == 0) { KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); - ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev); + ret = accel_irqchip_add_msi_route(&c, vector, &proxy->pci_dev); if (ret < 0) { return ret; } - kvm_irqchip_commit_route_changes(&c); + accel_irqchip_commit_route_changes(&c); 
irqfd->virq = ret; } irqfd->users++; @@ -841,7 +875,7 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, { VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; if (--irqfd->users == 0) { - kvm_irqchip_release_virq(kvm_state, irqfd->virq); + accel_irqchip_release_virq(irqfd->virq); } } @@ -850,7 +884,7 @@ static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, unsigned int vector) { VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; - return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); + return accel_irqchip_add_irqfd_notifier_gsi(n, NULL, irqfd->virq); } static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, @@ -860,7 +894,7 @@ static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; int ret; - ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); + ret = accel_irqchip_remove_irqfd_notifier_gsi(n, irqfd->virq); assert(ret == 0); } static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, @@ -995,12 +1029,12 @@ static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy, if (proxy->vector_irqfd) { irqfd = &proxy->vector_irqfd[vector]; if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { - ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, - &proxy->pci_dev); + ret = accel_irqchip_update_msi_route(irqfd->virq, msg, + &proxy->pci_dev); if (ret < 0) { return ret; } - kvm_irqchip_commit_routes(kvm_state); + accel_irqchip_commit_routes(); } } @@ -1229,7 +1263,7 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); int r, n; bool with_irqfd = msix_enabled(&proxy->pci_dev) && - kvm_msi_via_irqfd_enabled(); + accel_msi_via_irqfd_enabled() ; nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); @@ -1433,7 +1467,7 @@ static void virtio_pci_set_vector(VirtIODevice *vdev, uint16_t new_vector) { bool kvm_irqfd = (vdev->status & 
VIRTIO_CONFIG_S_DRIVER_OK) && - msix_enabled(&proxy->pci_dev) && kvm_msi_via_irqfd_enabled(); + msix_enabled(&proxy->pci_dev) && accel_msi_via_irqfd_enabled(); if (new_vector == old_vector) { return; @@ -1477,6 +1511,19 @@ int virtio_pci_add_shm_cap(VirtIOPCIProxy *proxy, return virtio_pci_add_mem_cap(proxy, &cap.cap); } +static int virtio_pci_select_max(const VirtIODevice *vdev) +{ + int i; + + for (i = VIRTIO_FEATURES_NU64S - 1; i > 0; i--) { + if (vdev->host_features_ex[i]) { + return (i + 1) * 2; + } + } + + return 2; +} + static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, unsigned size) { @@ -1494,18 +1541,21 @@ static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, val = proxy->dfselect; break; case VIRTIO_PCI_COMMON_DF: - if (proxy->dfselect <= 1) { + if (proxy->dfselect < virtio_pci_select_max(vdev)) { VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); - val = (vdev->host_features & ~vdc->legacy_features) >> - (32 * proxy->dfselect); + val = vdev->host_features_ex[proxy->dfselect >> 1] >> + (32 * (proxy->dfselect & 1)); + if (proxy->dfselect <= 1) { + val &= (~vdc->legacy_features) >> (32 * proxy->dfselect); + } } break; case VIRTIO_PCI_COMMON_GFSELECT: val = proxy->gfselect; break; case VIRTIO_PCI_COMMON_GF: - if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { + if (proxy->gfselect < virtio_pci_select_max(vdev)) { val = proxy->guest_features[proxy->gfselect]; } break; @@ -1588,11 +1638,18 @@ static void virtio_pci_common_write(void *opaque, hwaddr addr, proxy->gfselect = val; break; case VIRTIO_PCI_COMMON_GF: - if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { + if (proxy->gfselect < virtio_pci_select_max(vdev)) { + uint64_t features[VIRTIO_FEATURES_NU64S]; + int i; + proxy->guest_features[proxy->gfselect] = val; - virtio_set_features(vdev, - (((uint64_t)proxy->guest_features[1]) << 32) | - proxy->guest_features[0]); + virtio_features_clear(features); + for (i = 0; i < ARRAY_SIZE(proxy->guest_features); ++i) 
{ + uint64_t cur = proxy->guest_features[i]; + + features[i >> 1] |= cur << ((i & 1) * 32); + } + virtio_set_features_ex(vdev, features); } break; case VIRTIO_PCI_COMMON_MSIX: @@ -2311,6 +2368,8 @@ static void virtio_pci_reset(DeviceState *qdev) virtio_bus_reset(bus); msix_unuse_all_vectors(&proxy->pci_dev); + memset(proxy->guest_features, 0, sizeof(proxy->guest_features)); + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { proxy->vqs[i].enabled = 0; proxy->vqs[i].reset = 0; diff --git a/hw/virtio/virtio-qmp.c b/hw/virtio/virtio-qmp.c index 3b6377cf0d24c..b338344c6cca1 100644 --- a/hw/virtio/virtio-qmp.c +++ b/hw/virtio/virtio-qmp.c @@ -325,6 +325,20 @@ static const qmp_virtio_feature_map_t virtio_net_feature_map[] = { FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features " "negotiation supported"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO, \ + "VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO: Driver can receive GSO over " + "UDP tunnel packets"), + FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM, \ + "VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO: Driver can receive GSO over " + "UDP tunnel packets requiring checksum offload for the outer " + "header"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO, \ + "VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO: Device can receive GSO over " + "UDP tunnel packets"), + FEATURE_ENTRY(VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM, \ + "VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM: Device can receive GSO over " + "UDP tunnel packets requiring checksum offload for the outer " + "header"), { -1, "" } }; #endif @@ -510,6 +524,24 @@ static const qmp_virtio_feature_map_t virtio_gpio_feature_map[] = { list; \ }) +#define CONVERT_FEATURES_EX(type, map, bitmap) \ + ({ \ + type *list = NULL; \ + type *node; \ + for (i = 0; map[i].virtio_bit != -1; i++) { \ + bit = map[i].virtio_bit; \ + if (!virtio_has_feature_ex(bitmap, bit)) { \ + continue; \ + } \ + node = g_new0(type, 1); \ + node->value = g_strdup(map[i].feature_desc); \ + 
node->next = list; \ + list = node; \ + virtio_clear_feature_ex(bitmap, bit); \ + } \ + list; \ + }) + VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap) { VirtioDeviceStatus *status; @@ -545,109 +577,112 @@ VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap) return vhu_protocols; } -VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap) +VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, + const uint64_t *bmap) { + uint64_t bitmap[VIRTIO_FEATURES_NU64S]; VirtioDeviceFeatures *features; uint64_t bit; int i; + virtio_features_copy(bitmap, bmap); features = g_new0(VirtioDeviceFeatures, 1); features->has_dev_features = true; /* transport features */ - features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0, - bitmap); + features->transports = CONVERT_FEATURES_EX(strList, virtio_transport_map, + bitmap); /* device features */ switch (device_id) { #ifdef CONFIG_VIRTIO_SERIAL case VIRTIO_ID_CONSOLE: features->dev_features = - CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_serial_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_BLK case VIRTIO_ID_BLOCK: features->dev_features = - CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_blk_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_GPU case VIRTIO_ID_GPU: features->dev_features = - CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_gpu_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_NET case VIRTIO_ID_NET: features->dev_features = - CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_net_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_SCSI case VIRTIO_ID_SCSI: features->dev_features = - CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_scsi_feature_map, bitmap); break; #endif 
#ifdef CONFIG_VIRTIO_BALLOON case VIRTIO_ID_BALLOON: features->dev_features = - CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_balloon_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_IOMMU case VIRTIO_ID_IOMMU: features->dev_features = - CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_iommu_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_INPUT case VIRTIO_ID_INPUT: features->dev_features = - CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_input_feature_map, bitmap); break; #endif #ifdef CONFIG_VHOST_USER_FS case VIRTIO_ID_FS: features->dev_features = - CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_fs_feature_map, bitmap); break; #endif #ifdef CONFIG_VHOST_VSOCK case VIRTIO_ID_VSOCK: features->dev_features = - CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_vsock_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_CRYPTO case VIRTIO_ID_CRYPTO: features->dev_features = - CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_crypto_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_MEM case VIRTIO_ID_MEM: features->dev_features = - CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_mem_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_I2C_ADAPTER case VIRTIO_ID_I2C_ADAPTER: features->dev_features = - CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_i2c_feature_map, bitmap); break; #endif #ifdef CONFIG_VIRTIO_RNG case VIRTIO_ID_RNG: features->dev_features = - CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_rng_feature_map, bitmap); break; #endif #ifdef 
CONFIG_VHOST_USER_GPIO case VIRTIO_ID_GPIO: features->dev_features = - CONVERT_FEATURES(strList, virtio_gpio_feature_map, 0, bitmap); + CONVERT_FEATURES_EX(strList, virtio_gpio_feature_map, bitmap); break; #endif /* No features */ @@ -680,10 +715,9 @@ VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap) g_assert_not_reached(); } - features->has_unknown_dev_features = bitmap != 0; - if (features->has_unknown_dev_features) { - features->unknown_dev_features = bitmap; - } + features->has_unknown_dev_features = !virtio_features_empty(bitmap); + features->unknown_dev_features = bitmap[0]; + features->unknown_dev_features2 = bitmap[1]; return features; } @@ -743,11 +777,11 @@ VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) status->device_id = vdev->device_id; status->vhost_started = vdev->vhost_started; status->guest_features = qmp_decode_features(vdev->device_id, - vdev->guest_features); + vdev->guest_features_ex); status->host_features = qmp_decode_features(vdev->device_id, - vdev->host_features); + vdev->host_features_ex); status->backend_features = qmp_decode_features(vdev->device_id, - vdev->backend_features); + vdev->backend_features_ex); switch (vdev->device_endian) { case VIRTIO_DEVICE_ENDIAN_LITTLE: @@ -785,11 +819,12 @@ VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp) status->vhost_dev->nvqs = hdev->nvqs; status->vhost_dev->vq_index = hdev->vq_index; status->vhost_dev->features = - qmp_decode_features(vdev->device_id, hdev->features); + qmp_decode_features(vdev->device_id, hdev->features_ex); status->vhost_dev->acked_features = - qmp_decode_features(vdev->device_id, hdev->acked_features); + qmp_decode_features(vdev->device_id, hdev->acked_features_ex); status->vhost_dev->backend_features = - qmp_decode_features(vdev->device_id, hdev->backend_features); + qmp_decode_features(vdev->device_id, hdev->backend_features_ex); + status->vhost_dev->protocol_features = 
qmp_decode_protocols(hdev->protocol_features); status->vhost_dev->max_queues = hdev->max_queues; diff --git a/hw/virtio/virtio-qmp.h b/hw/virtio/virtio-qmp.h index 245a446a5675f..e0a1e49035e4b 100644 --- a/hw/virtio/virtio-qmp.h +++ b/hw/virtio/virtio-qmp.h @@ -18,6 +18,7 @@ VirtIODevice *qmp_find_virtio_device(const char *path); VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap); VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap); -VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, uint64_t bitmap); +VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id, + const uint64_t *bitmap); #endif diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index 2ab1d20769495..153ee0a0cf1dd 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -31,6 +31,8 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-access.h" #include "system/dma.h" +#include "system/iothread.h" +#include "system/memory.h" #include "system/runstate.h" #include "virtio-qmp.h" @@ -256,7 +258,10 @@ void virtio_init_region_cache(VirtIODevice *vdev, int n) len = address_space_cache_init(&new->desc, vdev->dma_as, addr, size, packed); if (len < size) { - virtio_error(vdev, "Cannot map desc"); + virtio_error(vdev, + "Failed to map descriptor ring for device %s: " + "invalid guest physical address or corrupted queue setup", + qdev_get_printable_name(DEVICE(vdev))); goto err_desc; } @@ -264,7 +269,10 @@ void virtio_init_region_cache(VirtIODevice *vdev, int n) len = address_space_cache_init(&new->used, vdev->dma_as, vq->vring.used, size, true); if (len < size) { - virtio_error(vdev, "Cannot map used"); + virtio_error(vdev, + "Failed to map used ring for device %s: " + "possible guest misconfiguration or insufficient memory", + qdev_get_printable_name(DEVICE(vdev))); goto err_used; } @@ -272,7 +280,10 @@ void virtio_init_region_cache(VirtIODevice *vdev, int n) len = address_space_cache_init(&new->avail, vdev->dma_as, vq->vring.avail, size, false); if (len < size) { - 
virtio_error(vdev, "Cannot map avail"); + virtio_error(vdev, + "Failed to map avalaible ring for device %s: " + "possible queue misconfiguration or overlapping memory region", + qdev_get_printable_name(DEVICE(vdev))); goto err_avail; } @@ -938,18 +949,18 @@ static void virtqueue_packed_fill(VirtQueue *vq, const VirtQueueElement *elem, static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem, unsigned int len) { - unsigned int i, steps, max_steps; + unsigned int i, steps, max_steps, ndescs; i = vq->used_idx % vq->vring.num; steps = 0; /* - * We shouldn't need to increase 'i' by more than the distance - * between used_idx and last_avail_idx. + * We shouldn't need to increase 'i' by more than or equal to + * the distance between used_idx and last_avail_idx (max_steps). */ max_steps = (vq->last_avail_idx - vq->used_idx) % vq->vring.num; /* Search for element in vq->used_elems */ - while (steps <= max_steps) { + while (steps < max_steps) { /* Found element, set length and mark as filled */ if (vq->used_elems[i].index == elem->index) { vq->used_elems[i].len = len; @@ -957,8 +968,18 @@ static void virtqueue_ordered_fill(VirtQueue *vq, const VirtQueueElement *elem, break; } - i += vq->used_elems[i].ndescs; - steps += vq->used_elems[i].ndescs; + ndescs = vq->used_elems[i].ndescs; + + /* Defensive sanity check */ + if (unlikely(ndescs == 0 || ndescs > vq->vring.num)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: %s invalid ndescs %u at position %u\n", + __func__, vq->vdev->name, ndescs, i); + return; + } + + i += ndescs; + steps += ndescs; if (i >= vq->vring.num) { i -= vq->vring.num; @@ -1612,7 +1633,8 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg, * virtqueue_unmap_sg() can't be used). Assumes buffers weren't written to * yet. 
*/ -static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num, +static void virtqueue_undo_map_desc(AddressSpace *as, + unsigned int out_num, unsigned int in_num, struct iovec *iov) { unsigned int i; @@ -1620,7 +1642,7 @@ static void virtqueue_undo_map_desc(unsigned int out_num, unsigned int in_num, for (i = 0; i < out_num + in_num; i++) { int is_write = i >= out_num; - cpu_physical_memory_unmap(iov->iov_base, iov->iov_len, is_write, 0); + address_space_unmap(as, iov->iov_base, iov->iov_len, is_write, 0); iov++; } } @@ -1822,7 +1844,7 @@ static void *virtqueue_split_pop(VirtQueue *vq, size_t sz) return elem; err_undo_map: - virtqueue_undo_map_desc(out_num, in_num, iov); + virtqueue_undo_map_desc(vdev->dma_as, out_num, in_num, iov); goto done; } @@ -1972,7 +1994,7 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) return elem; err_undo_map: - virtqueue_undo_map_desc(out_num, in_num, iov); + virtqueue_undo_map_desc(vdev->dma_as, out_num, in_num, iov); goto done; } @@ -2644,16 +2666,8 @@ static void virtio_notify_irqfd_deferred_fn(void *opaque) event_notifier_set(notifier); } -void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) +static void virtio_irq(VirtQueue *vq) { - WITH_RCU_READ_LOCK_GUARD() { - if (!virtio_should_notify(vdev, vq)) { - return; - } - } - - trace_virtio_notify_irqfd(vdev, vq); - /* * virtio spec 1.0 says ISR bit 0 should be ignored with MSI, but * windows drivers included in virtio-win 1.8.0 (circa 2015) are @@ -2670,13 +2684,18 @@ void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq) * to an atomic operation. */ virtio_set_isr(vq->vdev, 0x1); - defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier); -} -static void virtio_irq(VirtQueue *vq) -{ - virtio_set_isr(vq->vdev, 0x1); - virtio_notify_vector(vq->vdev, vq->vector); + /* + * The interrupt code path requires the Big QEMU Lock (BQL), so use the + * notifier instead when in an IOThread. 
This assumes that device models + * have already called ->set_guest_notifiers() sometime before calling this + * function. + */ + if (qemu_in_iothread()) { + defer_call(virtio_notify_irqfd_deferred_fn, &vq->guest_notifier); + } else { + virtio_notify_vector(vq->vdev, vq->vector); + } } void virtio_notify(VirtIODevice *vdev, VirtQueue *vq) @@ -2698,7 +2717,12 @@ void virtio_notify_config(VirtIODevice *vdev) virtio_set_isr(vdev, 0x3); vdev->generation++; - virtio_notify_vector(vdev, vdev->config_vector); + + if (qemu_in_iothread()) { + defer_call(virtio_notify_irqfd_deferred_fn, &vdev->config_notifier); + } else { + virtio_notify_vector(vdev, vdev->config_vector); + } } static bool virtio_device_endian_needed(void *opaque) @@ -2954,6 +2978,30 @@ static const VMStateDescription vmstate_virtio_disabled = { } }; +static bool virtio_128bit_features_needed(void *opaque) +{ + VirtIODevice *vdev = opaque; + + return virtio_features_use_ex(vdev->host_features_ex); +} + +static const VMStateDescription vmstate_virtio_128bit_features = { + .name = "virtio/128bit_features", + .version_id = 1, + .minimum_version_id = 1, + .needed = &virtio_128bit_features_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(guest_features_ex[1], VirtIODevice), + VMSTATE_END_OF_LIST() + } +}; + +/* + * Avoid silently breaking migration should the feature space increase + * even more in the (far away) future + */ +QEMU_BUILD_BUG_ON(VIRTIO_FEATURES_NU64S != 2); + static const VMStateDescription vmstate_virtio = { .name = "virtio", .version_id = 1, @@ -2963,6 +3011,7 @@ static const VMStateDescription vmstate_virtio = { }, .subsections = (const VMStateDescription * const []) { &vmstate_virtio_device_endian, + &vmstate_virtio_128bit_features, &vmstate_virtio_64bit_features, &vmstate_virtio_virtqueues, &vmstate_virtio_ringsize, @@ -2982,6 +3031,7 @@ int virtio_save(VirtIODevice *vdev, QEMUFile *f) VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); uint32_t guest_features_lo = 
(vdev->guest_features & 0xffffffff); int i; + Error *local_err = NULL; if (k->save_config) { k->save_config(qbus->parent, f); @@ -3025,14 +3075,15 @@ int virtio_save(VirtIODevice *vdev, QEMUFile *f) } if (vdc->vmsd) { - int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL); + int ret = vmstate_save_state(f, vdc->vmsd, vdev, NULL, &local_err); if (ret) { + error_report_err(local_err); return ret; } } /* Subsections */ - return vmstate_save_state(f, &vmstate_virtio, vdev, NULL); + return vmstate_save_state(f, &vmstate_virtio, vdev, NULL, &error_fatal); } /* A wrapper for use as a VMState .put function */ @@ -3059,23 +3110,30 @@ const VMStateInfo virtio_vmstate_info = { .put = virtio_device_put, }; -static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) +static int virtio_set_features_nocheck(VirtIODevice *vdev, const uint64_t *val) { VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); - bool bad = (val & ~(vdev->host_features)) != 0; + uint64_t tmp[VIRTIO_FEATURES_NU64S]; + bool bad; + + bad = virtio_features_andnot(tmp, val, vdev->host_features_ex); + virtio_features_and(tmp, val, vdev->host_features_ex); - val &= vdev->host_features; - if (k->set_features) { - k->set_features(vdev, val); + if (k->set_features_ex) { + k->set_features_ex(vdev, val); + } else if (k->set_features) { + bad = bad || virtio_features_use_ex(tmp); + k->set_features(vdev, tmp[0]); } - vdev->guest_features = val; + + virtio_features_copy(vdev->guest_features_ex, tmp); return bad ? 
-1 : 0; } typedef struct VirtioSetFeaturesNocheckData { Coroutine *co; VirtIODevice *vdev; - uint64_t val; + uint64_t val[VIRTIO_FEATURES_NU64S]; int ret; } VirtioSetFeaturesNocheckData; @@ -3088,14 +3146,15 @@ static void virtio_set_features_nocheck_bh(void *opaque) } static int coroutine_mixed_fn -virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val) +virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, + const uint64_t *val) { if (qemu_in_coroutine()) { VirtioSetFeaturesNocheckData data = { .co = qemu_coroutine_self(), .vdev = vdev, - .val = val, }; + virtio_features_copy(data.val, val); aio_bh_schedule_oneshot(qemu_get_current_aio_context(), virtio_set_features_nocheck_bh, &data); qemu_coroutine_yield(); @@ -3106,6 +3165,14 @@ virtio_set_features_nocheck_maybe_co(VirtIODevice *vdev, uint64_t val) } int virtio_set_features(VirtIODevice *vdev, uint64_t val) +{ + uint64_t features[VIRTIO_FEATURES_NU64S]; + + virtio_features_from_u64(features, val); + return virtio_set_features_ex(vdev, features); +} + +int virtio_set_features_ex(VirtIODevice *vdev, const uint64_t *features) { int ret; /* @@ -3116,13 +3183,13 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val) return -EINVAL; } - if (val & (1ull << VIRTIO_F_BAD_FEATURE)) { + if (features[0] & (1ull << VIRTIO_F_BAD_FEATURE)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n", __func__, vdev->name); } - ret = virtio_set_features_nocheck(vdev, val); + ret = virtio_set_features_nocheck(vdev, features); if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) { /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. 
*/ int i; @@ -3145,6 +3212,7 @@ void virtio_reset(void *opaque) { VirtIODevice *vdev = opaque; VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + uint64_t features[VIRTIO_FEATURES_NU64S]; int i; virtio_set_status(vdev, 0); @@ -3171,7 +3239,8 @@ void virtio_reset(void *opaque) vdev->start_on_kick = false; vdev->started = false; vdev->broken = false; - virtio_set_features_nocheck(vdev, 0); + virtio_features_clear(features); + virtio_set_features_nocheck(vdev, features); vdev->queue_sel = 0; vdev->status = 0; vdev->disabled = false; @@ -3225,6 +3294,7 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + Error *local_err = NULL; /* * We poison the endianness to ensure it does not get used before @@ -3254,7 +3324,7 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) * Note: devices should always test host features in future - don't create * new dependencies like this. */ - vdev->guest_features = features; + virtio_features_from_u64(vdev->guest_features_ex, features); config_len = qemu_get_be32(f); @@ -3317,15 +3387,17 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) } if (vdc->vmsd) { - ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id); + ret = vmstate_load_state(f, vdc->vmsd, vdev, version_id, &local_err); if (ret) { + error_report_err(local_err); return ret; } } /* Subsections */ - ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1); + ret = vmstate_load_state(f, &vmstate_virtio, vdev, 1, &local_err); if (ret) { + error_report_err(local_err); return ret; } @@ -3333,26 +3405,17 @@ virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id) vdev->device_endian = virtio_default_endian(); } - if (virtio_64bit_features_needed(vdev)) { - /* - * Subsection load filled vdev->guest_features. 
Run them - * through virtio_set_features to sanity-check them against - * host_features. - */ - uint64_t features64 = vdev->guest_features; - if (virtio_set_features_nocheck_maybe_co(vdev, features64) < 0) { - error_report("Features 0x%" PRIx64 " unsupported. " - "Allowed features: 0x%" PRIx64, - features64, vdev->host_features); - return -1; - } - } else { - if (virtio_set_features_nocheck_maybe_co(vdev, features) < 0) { - error_report("Features 0x%x unsupported. " - "Allowed features: 0x%" PRIx64, - features, vdev->host_features); - return -1; - } + /* + * guest_features_ex is fully initialized with u32 features and upper + * bits have been filled as needed by the later load. + */ + if (virtio_set_features_nocheck_maybe_co(vdev, + vdev->guest_features_ex) < 0) { + error_report("Features 0x" VIRTIO_FEATURES_FMT " unsupported. " + "Allowed features: 0x" VIRTIO_FEATURES_FMT, + VIRTIO_FEATURES_PR(vdev->guest_features_ex), + VIRTIO_FEATURES_PR(vdev->host_features_ex)); + return -1; } if (!virtio_device_started(vdev, vdev->status) && diff --git a/hw/vmapple/vmapple.c b/hw/vmapple/vmapple.c index 16e6110b68f8e..1e4365f32c953 100644 --- a/hw/vmapple/vmapple.c +++ b/hw/vmapple/vmapple.c @@ -51,6 +51,8 @@ #include "system/reset.h" #include "system/runstate.h" #include "system/system.h" +#include "target/arm/gtimer.h" +#include "target/arm/cpu.h" struct VMAppleMachineState { MachineState parent; diff --git a/hw/watchdog/wdt_i6300esb.c b/hw/watchdog/wdt_i6300esb.c index bb8a2766b69b6..3aa01b8d68fe9 100644 --- a/hw/watchdog/wdt_i6300esb.c +++ b/hw/watchdog/wdt_i6300esb.c @@ -55,7 +55,7 @@ /* Config register bits */ #define ESB_WDT_REBOOT (0x01 << 5) /* Enable reboot on timeout */ #define ESB_WDT_FREQ (0x01 << 2) /* Decrement frequency */ -#define ESB_WDT_INTTYPE (0x11 << 0) /* Interrupt type on timer1 timeout */ +#define ESB_WDT_INTTYPE (0x03 << 0) /* Interrupt type on timer1 timeout */ /* Reload register bits */ #define ESB_WDT_RELOAD (0x01 << 8) /* prevent timeout */ diff 
--git a/hw/xen/xen-hvm-common.c b/hw/xen/xen-hvm-common.c index 78e0bc8f644ea..52e2cce397a42 100644 --- a/hw/xen/xen-hvm-common.c +++ b/hw/xen/xen-hvm-common.c @@ -12,6 +12,7 @@ #include "hw/xen/xen-bus.h" #include "hw/boards.h" #include "hw/xen/arch_hvm.h" +#include "system/memory.h" #include "system/runstate.h" #include "system/system.h" #include "system/xen.h" @@ -279,8 +280,8 @@ static void do_outp(uint32_t addr, * memory, as part of the implementation of an ioreq. * * Equivalent to - * cpu_physical_memory_rw(addr + (req->df ? -1 : +1) * req->size * i, - * val, req->size, 0/1) + * address_space_rw(as, addr + (req->df ? -1 : +1) * req->size * i, + * attrs, val, req->size, 0/1) * except without the integer overflow problems. */ static void rw_phys_req_item(hwaddr addr, @@ -295,7 +296,8 @@ static void rw_phys_req_item(hwaddr addr, } else { addr += offset; } - cpu_physical_memory_rw(addr, val, req->size, rw); + address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, + val, req->size, rw); } static inline void read_phys_req_item(hwaddr addr, diff --git a/hw/xen/xen_pt.c b/hw/xen/xen_pt.c index 9d16644d82e8c..006b5b55f249d 100644 --- a/hw/xen/xen_pt.c +++ b/hw/xen/xen_pt.c @@ -54,6 +54,7 @@ #include "qemu/osdep.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include #include "hw/pci/pci.h" diff --git a/hw/xen/xen_pt_msi.c b/hw/xen/xen_pt_msi.c index 09cca4eecb1c0..df15ccf0d030e 100644 --- a/hw/xen/xen_pt_msi.c +++ b/hw/xen/xen_pt_msi.c @@ -138,6 +138,7 @@ static int msi_msix_setup(XenPCIPassthroughState *s, rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN, ppirq, PCI_DEVFN(s->real_device.dev, s->real_device.func), + ((uint32_t)s->real_device.domain << 16) | s->real_device.bus, msix_entry, table_base); if (rc) { @@ -637,14 +638,5 @@ void xen_pt_msix_unmap(XenPCIPassthroughState *s) void xen_pt_msix_delete(XenPCIPassthroughState *s) { - XenPTMSIX *msix = s->msix; - - if (!msix) { - return; - } - - 
object_unparent(OBJECT(&msix->mmio)); - - g_free(s->msix); - s->msix = NULL; + g_clear_pointer(&s->msix, g_free); } diff --git a/hw/xtensa/xtfpga.c b/hw/xtensa/xtfpga.c index 6efffae466b8d..55de1a7a073c1 100644 --- a/hw/xtensa/xtfpga.c +++ b/hw/xtensa/xtfpga.c @@ -268,7 +268,7 @@ static void xtfpga_init(const XtfpgaBoardDesc *board, MachineState *machine) /* Need MMU initialized prior to ELF loading, * so that ELF gets loaded into virtual addresses */ - cpu_reset(CPU(cpu)); + reset_mmu(cenv); } if (smp_cpus > 1) { extints = xtensa_mx_pic_get_extints(mx_pic); diff --git a/include/accel/tcg/cpu-ldst-common.h b/include/accel/tcg/cpu-ldst-common.h index 8bf17c2fab0f7..17a3250dedaa4 100644 --- a/include/accel/tcg/cpu-ldst-common.h +++ b/include/accel/tcg/cpu-ldst-common.h @@ -100,9 +100,6 @@ GEN_ATOMIC_HELPER_ALL(umax_fetch) GEN_ATOMIC_HELPER_ALL(xchg) -#undef GEN_ATOMIC_HELPER_ALL -#undef GEN_ATOMIC_HELPER - Int128 cpu_atomic_cmpxchgo_le_mmu(CPUArchState *env, vaddr addr, Int128 cmpv, Int128 newv, MemOpIdx oi, uintptr_t retaddr); @@ -110,6 +107,16 @@ Int128 cpu_atomic_cmpxchgo_be_mmu(CPUArchState *env, vaddr addr, Int128 cmpv, Int128 newv, MemOpIdx oi, uintptr_t retaddr); +GEN_ATOMIC_HELPER(xchg, Int128, o_le) +GEN_ATOMIC_HELPER(xchg, Int128, o_be) +GEN_ATOMIC_HELPER(fetch_and, Int128, o_le) +GEN_ATOMIC_HELPER(fetch_and, Int128, o_be) +GEN_ATOMIC_HELPER(fetch_or, Int128, o_le) +GEN_ATOMIC_HELPER(fetch_or, Int128, o_be) + +#undef GEN_ATOMIC_HELPER_ALL +#undef GEN_ATOMIC_HELPER + uint8_t cpu_ldb_code_mmu(CPUArchState *env, vaddr addr, MemOpIdx oi, uintptr_t ra); uint16_t cpu_ldw_code_mmu(CPUArchState *env, vaddr addr, diff --git a/include/chardev/char-fd.h b/include/chardev/char-fd.h index 9de0e440de5b2..6fe43062ca954 100644 --- a/include/chardev/char-fd.h +++ b/include/chardev/char-fd.h @@ -41,7 +41,7 @@ typedef struct FDChardev FDChardev; DECLARE_INSTANCE_CHECKER(FDChardev, FD_CHARDEV, TYPE_CHARDEV_FD) -void qemu_chr_open_fd(Chardev *chr, int fd_in, int fd_out); +bool 
qemu_chr_open_fd(Chardev *chr, int fd_in, int fd_out, Error **errp); int qmp_chardev_open_file_source(char *src, int flags, Error **errp); #endif /* CHAR_FD_H */ diff --git a/include/crypto/hmac.h b/include/crypto/hmac.h index da8a1e3ceb9dd..af3d5f8feb212 100644 --- a/include/crypto/hmac.h +++ b/include/crypto/hmac.h @@ -90,6 +90,12 @@ G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoHmac, qcrypto_hmac_free) * The memory referenced in @result must be released with a call * to g_free() when no longer required by the caller. * + * If @result_len is set to a NULL pointer, no result will be returned, and + * the hmac object can be used for further invocations of qcrypto_hmac_bytes() + * or qcrypto_hmac_bytesv() until a non-NULL pointer is provided. This allows + * to build the hmac across memory regions that are not available at the same + * time. + * * Returns: * 0 on success, -1 on error */ @@ -123,6 +129,12 @@ int qcrypto_hmac_bytesv(QCryptoHmac *hmac, * The memory referenced in @result must be released with a call * to g_free() when no longer required by the caller. * + * If @result_len is set to a NULL pointer, no result will be returned, and + * the hmac object can be used for further invocations of qcrypto_hmac_bytes() + * or qcrypto_hmac_bytesv() until a non-NULL pointer is provided. This allows + * to build the hmac across memory regions that are not available at the same + * time. 
+ * * Returns: * 0 on success, -1 on error */ diff --git a/include/crypto/tlssession.h b/include/crypto/tlssession.h index d77ae0d423709..2e9fe11cf6e20 100644 --- a/include/crypto/tlssession.h +++ b/include/crypto/tlssession.h @@ -110,6 +110,7 @@ typedef struct QCryptoTLSSession QCryptoTLSSession; #define QCRYPTO_TLS_SESSION_ERR_BLOCK -2 +#define QCRYPTO_TLS_SESSION_PREMATURE_TERMINATION -3 /** * qcrypto_tls_session_new: @@ -165,6 +166,20 @@ void qcrypto_tls_session_free(QCryptoTLSSession *sess); G_DEFINE_AUTOPTR_CLEANUP_FUNC(QCryptoTLSSession, qcrypto_tls_session_free) +/** + * qcrypto_tls_session_require_thread_safety: + * @sess: the TLS session object + * + * Mark that this TLS session will require thread safety + * for concurrent I/O in both directions. This must be + * called before the handshake is performed. + * + * This will activate a workaround for GNUTLS thread + * safety issues, where appropriate for the negotiated + * TLS session parameters. + */ +void qcrypto_tls_session_require_thread_safety(QCryptoTLSSession *sess); + /** * qcrypto_tls_session_check_credentials: * @sess: the TLS session object @@ -245,7 +260,6 @@ ssize_t qcrypto_tls_session_write(QCryptoTLSSession *sess, * @sess: the TLS session object * @buf: to fill with plain text received * @len: the length of @buf - * @gracefulTermination: treat premature termination as graceful EOF * @errp: pointer to hold returned error object * * Receive up to @len bytes of data from the remote peer @@ -253,22 +267,18 @@ ssize_t qcrypto_tls_session_write(QCryptoTLSSession *sess, * qcrypto_tls_session_set_callbacks(), decrypt it and * store it in @buf. * - * If @gracefulTermination is true, then a premature termination - * of the TLS session will be treated as indicating EOF, as - * opposed to an error. 
- * * It is an error to call this before * qcrypto_tls_session_handshake() returns * QCRYPTO_TLS_HANDSHAKE_COMPLETE * * Returns: the number of bytes received, * or QCRYPTO_TLS_SESSION_ERR_BLOCK if the receive would block, - * or -1 on error. + * or QCRYPTO_TLS_SESSION_PREMATURE_TERMINATION if a premature termination + * is detected, or -1 on error. */ ssize_t qcrypto_tls_session_read(QCryptoTLSSession *sess, char *buf, size_t len, - bool gracefulTermination, Error **errp); /** diff --git a/include/elf.h b/include/elf.h index e7259ec366fca..bbfac055de44e 100644 --- a/include/elf.h +++ b/include/elf.h @@ -56,6 +56,13 @@ typedef int64_t Elf64_Sxword; #define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32r6 code. */ #define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64r6 code. */ +/* MIPS Architectural Extensions. */ +#define EF_MIPS_ARCH_ASE 0x0f000000 + +#define EF_MIPS_ARCH_ASE_MICROMIPS 0x02000000 +#define EF_MIPS_ARCH_ASE_M16 0x04000000 +#define EF_MIPS_ARCH_ASE_MDMX 0x08000000 + /* The ABI of a file. */ #define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */ #define EF_MIPS_ABI_O64 0x00002000 /* O32 extended for 64 bit. */ diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 9b658a3f48f78..67e15c8e5078f 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -123,34 +123,22 @@ size_t qemu_ram_pagesize_largest(void); void cpu_address_space_init(CPUState *cpu, int asidx, const char *prefix, MemoryRegion *mr); /** - * cpu_address_space_destroy: - * @cpu: CPU for which address space needs to be destroyed - * @asidx: integer index of this address space + * cpu_destroy_address_spaces: + * @cpu: CPU for which address spaces need to be destroyed * - * Note that with KVM only one address space is supported. + * Destroy all address spaces associated with this CPU; this + * is called as part of unrealizing the CPU. 
*/ -void cpu_address_space_destroy(CPUState *cpu, int asidx); +void cpu_destroy_address_spaces(CPUState *cpu); -void cpu_physical_memory_rw(hwaddr addr, void *buf, - hwaddr len, bool is_write); -static inline void cpu_physical_memory_read(hwaddr addr, - void *buf, hwaddr len) -{ - cpu_physical_memory_rw(addr, buf, len, false); -} -static inline void cpu_physical_memory_write(hwaddr addr, - const void *buf, hwaddr len) -{ - cpu_physical_memory_rw(addr, (void *)buf, len, true); -} +void cpu_physical_memory_read(hwaddr addr, void *buf, hwaddr len); +void cpu_physical_memory_write(hwaddr addr, const void *buf, hwaddr len); void *cpu_physical_memory_map(hwaddr addr, hwaddr *plen, bool is_write); void cpu_physical_memory_unmap(void *buffer, hwaddr len, bool is_write, hwaddr access_len); -bool cpu_physical_memory_is_io(hwaddr phys_addr); - /* Coalesced MMIO regions are areas where write operations can be reordered. * This usually implies that write operations are side-effect free. This allows * batching which can make a major impact on performance when using @@ -158,14 +146,9 @@ bool cpu_physical_memory_is_io(hwaddr phys_addr); */ void qemu_flush_coalesced_mmio_buffer(void); -void cpu_flush_icache_range(hwaddr start, hwaddr len); - typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque); int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque); -int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length); -int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, - size_t length); /* Returns: 0 on success, -1 on error */ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, @@ -221,9 +204,9 @@ static inline bool cpu_loop_exit_requested(CPUState *cpu) G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu); G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc); +G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); #endif /* CONFIG_TCG */ G_NORETURN void cpu_loop_exit(CPUState *cpu); -G_NORETURN void 
cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc); /* accel/tcg/cpu-exec.c */ int cpu_exec(CPUState *cpu); diff --git a/include/exec/cputlb.h b/include/exec/cputlb.h index 03ed7e216590a..9bec0e78909aa 100644 --- a/include/exec/cputlb.h +++ b/include/exec/cputlb.h @@ -150,7 +150,7 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu); * MMU indexes. */ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, - uint16_t idxmap); + MMUIdxMap idxmap); /** * tlb_flush_page_by_mmuidx_all_cpus_synced: @@ -165,7 +165,7 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, * translations using the flushed TLBs. */ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, - uint16_t idxmap); + MMUIdxMap idxmap); /** * tlb_flush_by_mmuidx: @@ -176,7 +176,7 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, * Flush all entries from the TLB of the specified CPU, for the specified * MMU indexes. */ -void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap); +void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap); /** * tlb_flush_by_mmuidx_all_cpus_synced: @@ -189,7 +189,7 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap); * When this function returns, no CPUs will subsequently perform * translations using the flushed TLBs. */ -void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap); +void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, MMUIdxMap idxmap); /** * tlb_flush_page_bits_by_mmuidx @@ -201,11 +201,11 @@ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap); * Similar to tlb_flush_page_mask, but with a bitmap of indexes. */ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, - uint16_t idxmap, unsigned bits); + MMUIdxMap idxmap, unsigned bits); /* Similarly, with broadcast and syncing. 
*/ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, - uint16_t idxmap, + MMUIdxMap idxmap, unsigned bits); /** @@ -220,14 +220,14 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, * comparing only the low @bits worth of each virtual page. */ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, - vaddr len, uint16_t idxmap, + vaddr len, MMUIdxMap idxmap, unsigned bits); /* Similarly, with broadcast and syncing. */ void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, vaddr len, - uint16_t idxmap, + MMUIdxMap idxmap, unsigned bits); #else static inline void tlb_flush_page(CPUState *cpu, vaddr addr) @@ -243,42 +243,42 @@ static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu) { } static inline void tlb_flush_page_by_mmuidx(CPUState *cpu, - vaddr addr, uint16_t idxmap) + vaddr addr, MMUIdxMap idxmap) { } -static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) +static inline void tlb_flush_by_mmuidx(CPUState *cpu, MMUIdxMap idxmap) { } static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, - uint16_t idxmap) + MMUIdxMap idxmap) { } static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, - uint16_t idxmap) + MMUIdxMap idxmap) { } static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, - uint16_t idxmap, + MMUIdxMap idxmap, unsigned bits) { } static inline void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, - uint16_t idxmap, unsigned bits) + MMUIdxMap idxmap, unsigned bits) { } static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, - vaddr len, uint16_t idxmap, + vaddr len, MMUIdxMap idxmap, unsigned bits) { } static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, vaddr len, - uint16_t idxmap, + MMUIdxMap idxmap, unsigned bits) { } diff --git a/include/exec/gdbstub.h b/include/exec/gdbstub.h index 
a16c0051ce0a9..bd7182c4d324f 100644 --- a/include/exec/gdbstub.h +++ b/include/exec/gdbstub.h @@ -55,9 +55,6 @@ void gdb_unregister_coprocessor_all(CPUState *cpu); * system emulation you can use a full chardev spec for your gdbserver * port. * - * The error handle should be either &error_fatal (for start-up) or - * &error_warn (for QMP/HMP initiated sessions). - * * Returns true when server successfully started. */ bool gdbserver_start(const char *port_or_device, Error **errp); diff --git a/include/exec/memopidx.h b/include/exec/memopidx.h index eb7f1591a3764..66d9c58b3afcd 100644 --- a/include/exec/memopidx.h +++ b/include/exec/memopidx.h @@ -25,9 +25,10 @@ typedef uint32_t MemOpIdx; static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx) { #ifdef CONFIG_DEBUG_TCG - assert(idx <= 15); + assert(idx <= 31); + assert(clz32(op) >= 5); #endif - return (op << 4) | idx; + return (op << 5) | idx; } /** @@ -38,7 +39,7 @@ static inline MemOpIdx make_memop_idx(MemOp op, unsigned idx) */ static inline MemOp get_memop(MemOpIdx oi) { - return oi >> 4; + return oi >> 5; } /** @@ -49,7 +50,7 @@ static inline MemOp get_memop(MemOpIdx oi) */ static inline unsigned get_mmuidx(MemOpIdx oi) { - return oi & 15; + return oi & 31; } #endif diff --git a/include/exec/page-protection.h b/include/exec/page-protection.h index c43231af8b543..c50ce57d150f3 100644 --- a/include/exec/page-protection.h +++ b/include/exec/page-protection.h @@ -23,19 +23,20 @@ * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */ #define PAGE_WRITE_INV 0x0020 -/* For use with page_set_flags: page is being replaced; target_data cleared. */ -#define PAGE_RESET 0x0040 +/* + * For linux-user, indicates that the page is mapped with the same semantics + * in both guest and host. + */ +#define PAGE_PASSTHROUGH 0x40 /* For linux-user, indicates that the page is MAP_ANON. */ #define PAGE_ANON 0x0080 - +/* + * For linux-user, indicates that the page should not be + * included in a core dump. 
+ */ +#define PAGE_DONTDUMP 0x0100 /* Target-specific bits that will be used via page_get_flags(). */ #define PAGE_TARGET_1 0x0200 #define PAGE_TARGET_2 0x0400 -/* - * For linux-user, indicates that the page is mapped with the same semantics - * in both guest and host. - */ -#define PAGE_PASSTHROUGH 0x0800 - #endif diff --git a/include/exec/target_page.h b/include/exec/target_page.h index ca0ebbc8bbd2c..813591c9b51cf 100644 --- a/include/exec/target_page.h +++ b/include/exec/target_page.h @@ -62,6 +62,15 @@ static inline int qemu_target_page_bits(void) return TARGET_PAGE_BITS; } -size_t qemu_target_pages_to_MiB(size_t pages); +/* Convert target pages to MiB (2**20). */ +static inline size_t qemu_target_pages_to_MiB(size_t pages) +{ + int page_bits = TARGET_PAGE_BITS; + + /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */ + g_assert(page_bits < 20); + + return pages >> (20 - page_bits); +} #endif diff --git a/include/exec/tb-flush.h b/include/exec/tb-flush.h index 142c240d94c31..e971d4ba6d69b 100644 --- a/include/exec/tb-flush.h +++ b/include/exec/tb-flush.h @@ -9,19 +9,29 @@ #define _TB_FLUSH_H_ /** - * tb_flush() - flush all translation blocks - * @cs: CPUState (must be valid, but treated as anonymous pointer) + * tb_flush__exclusive_or_serial() * - * Used to flush all the translation blocks in the system. Sometimes - * it is simpler to flush everything than work out which individual - * translations are now invalid and ensure they are not called - * anymore. + * Used to flush all the translation blocks in the system. Mostly this is + * used to empty the code generation buffer after it is full. Sometimes it + * is used when it is simpler to flush everything than work out which + * individual translations are now invalid. * - * tb_flush() takes care of running the flush in an exclusive context - * if it is not already running in one. This means no guest code will - * run until this complete. 
+ * Must be called from an exclusive or serial context, e.g. start_exclusive, + * vm_stop, or when there is only one vcpu. Note that start_exclusive cannot + * be called from within the cpu run loop, so this cannot be called from + * within target code. */ -void tb_flush(CPUState *cs); +void tb_flush__exclusive_or_serial(void); + +/** + * queue_tb_flush() - add flush to the cpu work queue + * @cs: CPUState + * + * Flush all translation blocks the next time @cs processes the work queue. + * This should generally be followed by cpu_loop_exit(), so that the work + * queue is processed promptly. + */ +void queue_tb_flush(CPUState *cs); void tcg_flush_jmp_cache(CPUState *cs); diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h index 68d9d15f50aa7..8294f8f0ccca3 100644 --- a/include/hw/acpi/acpi_dev_interface.h +++ b/include/hw/acpi/acpi_dev_interface.h @@ -13,6 +13,7 @@ typedef enum { ACPI_NVDIMM_HOTPLUG_STATUS = 16, ACPI_VMGENID_CHANGE_STATUS = 32, ACPI_POWER_DOWN_STATUS = 64, + ACPI_GENERIC_ERROR = 128, } AcpiEventStatusBits; #define TYPE_ACPI_DEVICE_IF "acpi-device-interface" diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index c18f68134246a..f38e12971932c 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -252,6 +252,7 @@ struct CrsRangeSet { /* Consumer/Producer */ #define AML_SERIAL_BUS_FLAG_CONSUME_ONLY (1 << 1) +#define ACPI_APEI_ERROR_DEVICE "GEDD" /** * init_aml_allocator: * @@ -382,6 +383,7 @@ Aml *aml_dma(AmlDmaType typ, AmlDmaBusMaster bm, AmlTransferSize sz, uint8_t channel); Aml *aml_sleep(uint64_t msec); Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source); +Aml *aml_error_device(void); /* Block AML object primitives */ Aml *aml_scope(const char *name_format, ...) 
G_GNUC_PRINTF(1, 2); diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h index 2c5b055327a43..130c014d3f0eb 100644 --- a/include/hw/acpi/generic_event_device.h +++ b/include/hw/acpi/generic_event_device.h @@ -103,6 +103,7 @@ OBJECT_DECLARE_TYPE(AcpiGedState, AcpiGedClass, ACPI_GED) #define ACPI_GED_NVDIMM_HOTPLUG_EVT 0x4 #define ACPI_GED_CPU_HOTPLUG_EVT 0x8 #define ACPI_GED_PCI_HOTPLUG_EVT 0x10 +#define ACPI_GED_ERROR_EVT 0x20 typedef struct GEDState { MemoryRegion evt; diff --git a/include/hw/acpi/ghes.h b/include/hw/acpi/ghes.h index 578a582203cea..df2ecbf6e4a9e 100644 --- a/include/hw/acpi/ghes.h +++ b/include/hw/acpi/ghes.h @@ -24,6 +24,9 @@ #include "hw/acpi/bios-linker-loader.h" #include "qapi/error.h" +#include "qemu/notify.h" + +extern NotifierList acpi_generic_error_notifiers; /* * Values for Hardware Error Notification Type field @@ -57,30 +60,54 @@ enum AcpiGhesNotifyType { ACPI_GHES_NOTIFY_RESERVED = 12 }; -enum { - ACPI_HEST_SRC_ID_SEA = 0, - /* future ids go here */ - - ACPI_GHES_ERROR_SOURCE_COUNT +/* + * ID numbers used to fill HEST source ID field + */ +enum AcpiGhesSourceID { + ACPI_HEST_SRC_ID_SYNC, + ACPI_HEST_SRC_ID_QMP, /* Use it only for QMP injected errors */ }; +typedef struct AcpiNotificationSourceId { + enum AcpiGhesSourceID source_id; + enum AcpiGhesNotifyType notify; +} AcpiNotificationSourceId; + +/* + * AcpiGhesState stores GPA values that will be used to fill HEST entries. + * + * When use_hest_addr is false, the GPA of the etc/hardware_errors firmware + * is stored at hw_error_le. This is the default on QEMU 9.x. + * + * When use_hest_addr is true, the GPA of the HEST table is stored at + * hest_addr_le. This is the default for QEMU 10.x and above. + * + * Whe both GPA values are equal to zero means that GHES is not present. 
+ */ typedef struct AcpiGhesState { + uint64_t hest_addr_le; uint64_t hw_error_le; - bool present; /* True if GHES is present at all on this board */ + bool use_hest_addr; /* True if HEST address is present */ } AcpiGhesState; -void acpi_build_hest(GArray *table_data, GArray *hardware_errors, +void acpi_build_hest(AcpiGhesState *ags, GArray *table_data, + GArray *hardware_errors, BIOSLinker *linker, + const AcpiNotificationSourceId * const notif_source, + int num_sources, const char *oem_id, const char *oem_table_id); void acpi_ghes_add_fw_cfg(AcpiGhesState *vms, FWCfgState *s, GArray *hardware_errors); -int acpi_ghes_memory_errors(uint16_t source_id, uint64_t error_physical_addr); +int acpi_ghes_memory_errors(AcpiGhesState *ags, uint16_t source_id, + uint64_t error_physical_addr); +void ghes_record_cper_errors(AcpiGhesState *ags, const void *cper, size_t len, + uint16_t source_id, Error **errp); /** - * acpi_ghes_present: Report whether ACPI GHES table is present + * acpi_ghes_get_state: Get a pointer for ACPI ghes state * - * Returns: true if the system has an ACPI GHES table and it is - * safe to call acpi_ghes_memory_errors() to record a memory error. + * Returns: a pointer to ghes state if the system has an ACPI GHES table, + * NULL, otherwise. */ -bool acpi_ghes_present(void); +AcpiGhesState *acpi_ghes_get_state(void); #endif diff --git a/include/hw/arm/aspeed_coprocessor.h b/include/hw/arm/aspeed_coprocessor.h new file mode 100644 index 0000000000000..4a50f688ecdcd --- /dev/null +++ b/include/hw/arm/aspeed_coprocessor.h @@ -0,0 +1,63 @@ +/* + * ASPEED Coprocessor + * + * Copyright (C) 2025 ASPEED Technology Inc. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ASPEED_COPROCESSOR_H +#define ASPEED_COPROCESSOR_H + +#include "qom/object.h" +#include "hw/arm/aspeed_soc.h" + +struct AspeedCoprocessorState { + DeviceState parent; + + MemoryRegion *memory; + MemoryRegion sdram; + MemoryRegion *sram; + MemoryRegion sram_alias; + MemoryRegion uart_alias; + MemoryRegion scu_alias; + Clock *sysclk; + + AspeedSCUState *scu; + AspeedSCUState scuio; + AspeedTimerCtrlState timerctrl; + SerialMM *uart; + int uart_dev; +}; + +#define TYPE_ASPEED_COPROCESSOR "aspeed-coprocessor" +OBJECT_DECLARE_TYPE(AspeedCoprocessorState, AspeedCoprocessorClass, + ASPEED_COPROCESSOR) + +struct AspeedCoprocessorClass { + DeviceClass parent_class; + + /** valid_cpu_types: NULL terminated array of a single CPU type. */ + const char * const *valid_cpu_types; + const hwaddr *memmap; + const int *irqmap; +}; + +struct Aspeed27x0CoprocessorState { + AspeedCoprocessorState parent; + AspeedINTCState intc[2]; + UnimplementedDeviceState ipc[2]; + UnimplementedDeviceState scuio; + + ARMv7MState armv7m; +}; + +#define TYPE_ASPEED27X0SSP_COPROCESSOR "aspeed27x0ssp-coprocessor" +OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0CoprocessorState, + ASPEED27X0SSP_COPROCESSOR) + +#define TYPE_ASPEED27X0TSP_COPROCESSOR "aspeed27x0tsp-coprocessor" +DECLARE_OBJ_CHECKERS(Aspeed27x0CoprocessorState, AspeedCoprocessorClass, + ASPEED27X0TSP_COPROCESSOR, TYPE_ASPEED27X0TSP_COPROCESSOR) + +#endif /* ASPEED_COPROCESSOR_H */ diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index 217ef0eafd6c7..4b8e599f1a53b 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -37,11 +37,14 @@ #include "qom/object.h" #include "hw/misc/aspeed_lpc.h" #include "hw/misc/unimp.h" +#include "hw/pci-host/aspeed_pcie.h" #include "hw/misc/aspeed_peci.h" #include "hw/fsi/aspeed_apb2opb.h" #include "hw/char/serial-mm.h" #include "hw/intc/arm_gicv3.h" +#define VBOOTROM_FILE_NAME "ast27x0_bootrom.bin" + #define 
ASPEED_SPIS_NUM 3 #define ASPEED_EHCIS_NUM 4 #define ASPEED_WDTS_NUM 8 @@ -49,6 +52,7 @@ #define ASPEED_MACS_NUM 4 #define ASPEED_UARTS_NUM 13 #define ASPEED_JTAG_NUM 2 +#define ASPEED_PCIE_NUM 3 struct AspeedSoCState { DeviceState parent; @@ -60,6 +64,7 @@ struct AspeedSoCState { MemoryRegion spi_boot_container; MemoryRegion spi_boot; MemoryRegion vbootrom; + MemoryRegion pcie_mmio_alias[ASPEED_PCIE_NUM]; AddressSpace dram_as; AspeedRtcState rtc; AspeedTimerCtrlState timerctrl; @@ -87,6 +92,8 @@ struct AspeedSoCState { AspeedSDHCIState sdhci; AspeedSDHCIState emmc; AspeedLPCState lpc; + AspeedPCIECfgState pcie[ASPEED_PCIE_NUM]; + AspeedPCIEPhyState pcie_phy[ASPEED_PCIE_NUM]; AspeedPECIState peci; SerialMM uart[ASPEED_UARTS_NUM]; Clock *sysclk; @@ -146,30 +153,6 @@ struct Aspeed10x0SoCState { ARMv7MState armv7m; }; -struct Aspeed27x0SSPSoCState { - AspeedSoCState parent; - AspeedINTCState intc[2]; - UnimplementedDeviceState ipc[2]; - UnimplementedDeviceState scuio; - - ARMv7MState armv7m; -}; - -#define TYPE_ASPEED27X0SSP_SOC "aspeed27x0ssp-soc" -OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0SSPSoCState, ASPEED27X0SSP_SOC) - -struct Aspeed27x0TSPSoCState { - AspeedSoCState parent; - AspeedINTCState intc[2]; - UnimplementedDeviceState ipc[2]; - UnimplementedDeviceState scuio; - - ARMv7MState armv7m; -}; - -#define TYPE_ASPEED27X0TSP_SOC "aspeed27x0tsp-soc" -OBJECT_DECLARE_SIMPLE_TYPE(Aspeed27x0TSPSoCState, ASPEED27X0TSP_SOC) - #define TYPE_ASPEED10X0_SOC "aspeed10x0-soc" OBJECT_DECLARE_SIMPLE_TYPE(Aspeed10x0SoCState, ASPEED10X0_SOC) @@ -181,6 +164,7 @@ struct AspeedSoCClass { uint32_t silicon_rev; uint64_t sram_size; uint64_t secsram_size; + int pcie_num; int spis_num; int ehcis_num; int wdts_num; @@ -190,12 +174,9 @@ struct AspeedSoCClass { const int *irqmap; const hwaddr *memmap; uint32_t num_cpus; - qemu_irq (*get_irq)(AspeedSoCState *s, int dev); bool (*boot_from_emmc)(AspeedSoCState *s); }; -const char *aspeed_soc_cpu_type(AspeedSoCClass *sc); - enum { 
ASPEED_DEV_VBOOTROM, ASPEED_DEV_SPI_BOOT, @@ -254,6 +235,15 @@ enum { ASPEED_DEV_LPC, ASPEED_DEV_IBT, ASPEED_DEV_I2C, + ASPEED_DEV_PCIE0, + ASPEED_DEV_PCIE1, + ASPEED_DEV_PCIE2, + ASPEED_DEV_PCIE_PHY0, + ASPEED_DEV_PCIE_PHY1, + ASPEED_DEV_PCIE_PHY2, + ASPEED_DEV_PCIE_MMIO0, + ASPEED_DEV_PCIE_MMIO1, + ASPEED_DEV_PCIE_MMIO2, ASPEED_DEV_PECI, ASPEED_DEV_ETH1, ASPEED_DEV_ETH2, @@ -287,30 +277,39 @@ enum { ASPEED_DEV_IPC1, }; -qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int dev); -bool aspeed_soc_uart_realize(AspeedSoCState *s, Error **errp); -void aspeed_soc_uart_set_chr(AspeedSoCState *s, int dev, Chardev *chr); +const char *aspeed_soc_cpu_type(const char * const *valid_cpu_types); +bool aspeed_soc_uart_realize(MemoryRegion *memory, SerialMM *smm, + const hwaddr addr, Error **errp); +void aspeed_soc_uart_set_chr(SerialMM *uart, int dev, int uarts_base, + int uarts_num, Chardev *chr); bool aspeed_soc_dram_init(AspeedSoCState *s, Error **errp); -void aspeed_mmio_map(AspeedSoCState *s, SysBusDevice *dev, int n, hwaddr addr); -void aspeed_mmio_map_unimplemented(AspeedSoCState *s, SysBusDevice *dev, +void aspeed_mmio_map(MemoryRegion *memory, SysBusDevice *dev, int n, + hwaddr addr); +void aspeed_mmio_map_unimplemented(MemoryRegion *memory, SysBusDevice *dev, const char *name, hwaddr addr, uint64_t size); void aspeed_board_init_flashes(AspeedSMCState *s, const char *flashtype, unsigned int count, int unit0); +void aspeed_write_boot_rom(BlockBackend *blk, hwaddr addr, size_t rom_size, + Error **errp); +void aspeed_install_boot_rom(AspeedSoCState *soc, BlockBackend *blk, + MemoryRegion *boot_rom, uint64_t rom_size); +void aspeed_load_vbootrom(AspeedSoCState *soc, const char *bios_name, + Error **errp); static inline int aspeed_uart_index(int uart_dev) { return uart_dev - ASPEED_DEV_UART0; } -static inline int aspeed_uart_first(AspeedSoCClass *sc) +static inline int aspeed_uart_first(int uarts_base) { - return aspeed_uart_index(sc->uarts_base); + return 
aspeed_uart_index(uarts_base); } -static inline int aspeed_uart_last(AspeedSoCClass *sc) +static inline int aspeed_uart_last(int uarts_base, int uarts_num) { - return aspeed_uart_first(sc) + sc->uarts_num - 1; + return aspeed_uart_first(uarts_base) + uarts_num - 1; } #endif /* ASPEED_SOC_H */ diff --git a/include/hw/arm/sharpsl.h b/include/hw/arm/sharpsl.h deleted file mode 100644 index 1e3992fcd0014..0000000000000 --- a/include/hw/arm/sharpsl.h +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Common declarations for the Zaurii. - * - * This file is licensed under the GNU GPL. - */ - -#ifndef QEMU_SHARPSL_H -#define QEMU_SHARPSL_H - -#include "exec/hwaddr.h" - -/* zaurus.c */ - -#define SL_PXA_PARAM_BASE 0xa0000a00 -void sl_bootparam_write(hwaddr ptr); - -#endif diff --git a/include/hw/arm/smmu-common.h b/include/hw/arm/smmu-common.h index e5e2d09294d73..80d0fecfde8ad 100644 --- a/include/hw/arm/smmu-common.h +++ b/include/hw/arm/smmu-common.h @@ -161,6 +161,7 @@ struct SMMUState { QLIST_HEAD(, SMMUDevice) devices_with_notifiers; uint8_t bus_num; PCIBus *primary_bus; + bool smmu_per_bus; /* SMMU is specific to the primary_bus */ }; struct SMMUBaseClass { diff --git a/include/hw/arm/stm32f205_soc.h b/include/hw/arm/stm32f205_soc.h index 4f4c8bbebc1e4..46eda3403a939 100644 --- a/include/hw/arm/stm32f205_soc.h +++ b/include/hw/arm/stm32f205_soc.h @@ -59,7 +59,7 @@ struct STM32F205State { STM32F2XXADCState adc[STM_NUM_ADCS]; STM32F2XXSPIState spi[STM_NUM_SPIS]; - OrIRQState *adc_irqs; + OrIRQState adc_irqs; MemoryRegion sram; MemoryRegion flash; diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index 365a28b082cae..04a09af354061 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -33,6 +33,7 @@ #include "exec/hwaddr.h" #include "qemu/notify.h" #include "hw/boards.h" +#include "hw/acpi/ghes.h" #include "hw/arm/boot.h" #include "hw/arm/bsa.h" #include "hw/block/flash.h" @@ -174,11 +175,13 @@ struct VirtMachineState { DeviceState *gic; DeviceState *acpi_dev; 
Notifier powerdown_notifier; + Notifier generic_error_notifier; PCIBus *bus; char *oem_id; char *oem_table_id; bool ns_el2_virt_timer_irq; CXLState cxl_devices_state; + bool legacy_smmuv3_present; }; #define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM) diff --git a/include/hw/arm/xlnx-versal-version.h b/include/hw/arm/xlnx-versal-version.h new file mode 100644 index 0000000000000..5b6b6e57a573a --- /dev/null +++ b/include/hw/arm/xlnx-versal-version.h @@ -0,0 +1,16 @@ +/* + * AMD Versal versions + * + * Copyright (c) 2025 Advanced Micro Devices, Inc. + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_ARM_XLNX_VERSAL_VERSION_H +#define HW_ARM_XLNX_VERSAL_VERSION_H + +typedef enum VersalVersion { + VERSAL_VER_VERSAL, + VERSAL_VER_VERSAL2, +} VersalVersion; + +#endif diff --git a/include/hw/arm/xlnx-versal.h b/include/hw/arm/xlnx-versal.h index 05ed641b6b6c8..e1fb1f4cf5b3d 100644 --- a/include/hw/arm/xlnx-versal.h +++ b/include/hw/arm/xlnx-versal.h @@ -1,7 +1,8 @@ /* - * Model of the Xilinx Versal + * AMD/Xilinx Versal family SoC model. * * Copyright (c) 2018 Xilinx Inc. + * Copyright (c) 2025 Advanced Micro Devices, Inc. * Written by Edgar E. 
Iglesias * * This program is free software; you can redistribute it and/or modify @@ -13,326 +14,77 @@ #define XLNX_VERSAL_H #include "hw/sysbus.h" -#include "hw/cpu/cluster.h" -#include "hw/or-irq.h" -#include "hw/sd/sdhci.h" -#include "hw/intc/arm_gicv3.h" -#include "hw/char/pl011.h" -#include "hw/dma/xlnx-zdma.h" -#include "hw/net/cadence_gem.h" -#include "hw/rtc/xlnx-zynqmp-rtc.h" #include "qom/object.h" -#include "hw/usb/xlnx-usb-subsystem.h" -#include "hw/misc/xlnx-versal-xramc.h" -#include "hw/nvram/xlnx-bbram.h" -#include "hw/nvram/xlnx-versal-efuse.h" -#include "hw/ssi/xlnx-versal-ospi.h" -#include "hw/dma/xlnx_csu_dma.h" -#include "hw/misc/xlnx-versal-crl.h" -#include "hw/misc/xlnx-versal-pmc-iou-slcr.h" -#include "hw/misc/xlnx-versal-trng.h" -#include "hw/net/xlnx-versal-canfd.h" -#include "hw/misc/xlnx-versal-cfu.h" -#include "hw/misc/xlnx-versal-cframe-reg.h" -#include "target/arm/cpu.h" +#include "net/can_emu.h" +#include "hw/arm/xlnx-versal-version.h" -#define TYPE_XLNX_VERSAL "xlnx-versal" -OBJECT_DECLARE_SIMPLE_TYPE(Versal, XLNX_VERSAL) +#define TYPE_XLNX_VERSAL_BASE "xlnx-versal-base" +OBJECT_DECLARE_TYPE(Versal, VersalClass, XLNX_VERSAL_BASE) -#define XLNX_VERSAL_NR_ACPUS 2 -#define XLNX_VERSAL_NR_RCPUS 2 -#define XLNX_VERSAL_NR_UARTS 2 -#define XLNX_VERSAL_NR_GEMS 2 -#define XLNX_VERSAL_NR_ADMAS 8 -#define XLNX_VERSAL_NR_SDS 2 -#define XLNX_VERSAL_NR_XRAM 4 -#define XLNX_VERSAL_NR_IRQS 192 -#define XLNX_VERSAL_NR_CANFD 2 -#define XLNX_VERSAL_CANFD_REF_CLK (24 * 1000 * 1000) -#define XLNX_VERSAL_NR_CFRAME 15 +#define TYPE_XLNX_VERSAL "xlnx-versal" +#define TYPE_XLNX_VERSAL2 "xlnx-versal2" struct Versal { /*< private >*/ SysBusDevice parent_obj; /*< public >*/ - struct { - struct { - MemoryRegion mr; - CPUClusterState cluster; - ARMCPU cpu[XLNX_VERSAL_NR_ACPUS]; - GICv3State gic; - } apu; - } fpd; - + GArray *intc; MemoryRegion mr_ps; struct { - /* 4 ranges to access DDR. 
*/ - MemoryRegion mr_ddr_ranges[4]; - } noc; - - struct { - MemoryRegion mr_ocm; - - struct { - PL011State uart[XLNX_VERSAL_NR_UARTS]; - CadenceGEMState gem[XLNX_VERSAL_NR_GEMS]; - OrIRQState gem_irq_orgate[XLNX_VERSAL_NR_GEMS]; - XlnxZDMA adma[XLNX_VERSAL_NR_ADMAS]; - VersalUsb2 usb; - CanBusState *canbus[XLNX_VERSAL_NR_CANFD]; - XlnxVersalCANFDState canfd[XLNX_VERSAL_NR_CANFD]; - } iou; - - /* Real-time Processing Unit. */ - struct { - MemoryRegion mr; - MemoryRegion mr_ps_alias; - - CPUClusterState cluster; - ARMCPU cpu[XLNX_VERSAL_NR_RCPUS]; - } rpu; - - struct { - OrIRQState irq_orgate; - XlnxXramCtrl ctrl[XLNX_VERSAL_NR_XRAM]; - } xram; - - XlnxVersalCRL crl; - } lpd; - - /* The Platform Management Controller subsystem. */ - struct { - struct { - SDHCIState sd[XLNX_VERSAL_NR_SDS]; - XlnxVersalPmcIouSlcr slcr; - - struct { - XlnxVersalOspi ospi; - XlnxCSUDMA dma_src; - XlnxCSUDMA dma_dst; - MemoryRegion linear_mr; - OrIRQState irq_orgate; - } ospi; - } iou; - - XlnxZynqMPRTC rtc; - XlnxVersalTRng trng; - XlnxBBRam bbram; - XlnxEFuse efuse; - XlnxVersalEFuseCtrl efuse_ctrl; - XlnxVersalEFuseCache efuse_cache; - XlnxVersalCFUAPB cfu_apb; - XlnxVersalCFUFDRO cfu_fdro; - XlnxVersalCFUSFR cfu_sfr; - XlnxVersalCFrameReg cframe[XLNX_VERSAL_NR_CFRAME]; - XlnxVersalCFrameBcastReg cframe_bcast; - - OrIRQState apb_irq_orgate; - } pmc; + uint32_t clk_25mhz; + uint32_t clk_125mhz; + uint32_t gic; + } phandle; struct { MemoryRegion *mr_ddr; + CanBusState **canbus; + void *fdt; } cfg; }; -/* Memory-map and IRQ definitions. Copied a subset from - * auto-generated files. 
*/ - -#define VERSAL_GIC_MAINT_IRQ 9 -#define VERSAL_TIMER_VIRT_IRQ 11 -#define VERSAL_TIMER_S_EL1_IRQ 13 -#define VERSAL_TIMER_NS_EL1_IRQ 14 -#define VERSAL_TIMER_NS_EL2_IRQ 10 - -#define VERSAL_CRL_IRQ 10 -#define VERSAL_UART0_IRQ_0 18 -#define VERSAL_UART1_IRQ_0 19 -#define VERSAL_CANFD0_IRQ_0 20 -#define VERSAL_CANFD1_IRQ_0 21 -#define VERSAL_USB0_IRQ_0 22 -#define VERSAL_GEM0_IRQ_0 56 -#define VERSAL_GEM0_WAKE_IRQ_0 57 -#define VERSAL_GEM1_IRQ_0 58 -#define VERSAL_GEM1_WAKE_IRQ_0 59 -#define VERSAL_ADMA_IRQ_0 60 -#define VERSAL_XRAM_IRQ_0 79 -#define VERSAL_CFU_IRQ_0 120 -#define VERSAL_PMC_APB_IRQ 121 -#define VERSAL_OSPI_IRQ 124 -#define VERSAL_SD0_IRQ_0 126 -#define VERSAL_EFUSE_IRQ 139 -#define VERSAL_TRNG_IRQ 141 -#define VERSAL_RTC_ALARM_IRQ 142 -#define VERSAL_RTC_SECONDS_IRQ 143 - -/* Architecturally reserved IRQs suitable for virtualization. */ -#define VERSAL_RSVD_IRQ_FIRST 111 -#define VERSAL_RSVD_IRQ_LAST 118 - -#define MM_TOP_RSVD 0xa0000000U -#define MM_TOP_RSVD_SIZE 0x4000000 -#define MM_GIC_APU_DIST_MAIN 0xf9000000U -#define MM_GIC_APU_DIST_MAIN_SIZE 0x10000 -#define MM_GIC_APU_REDIST_0 0xf9080000U -#define MM_GIC_APU_REDIST_0_SIZE 0x80000 - -#define MM_UART0 0xff000000U -#define MM_UART0_SIZE 0x10000 -#define MM_UART1 0xff010000U -#define MM_UART1_SIZE 0x10000 - -#define MM_CANFD0 0xff060000U -#define MM_CANFD0_SIZE 0x10000 -#define MM_CANFD1 0xff070000U -#define MM_CANFD1_SIZE 0x10000 - -#define MM_GEM0 0xff0c0000U -#define MM_GEM0_SIZE 0x10000 -#define MM_GEM1 0xff0d0000U -#define MM_GEM1_SIZE 0x10000 +struct VersalClass { + SysBusDeviceClass parent; -#define MM_ADMA_CH0 0xffa80000U -#define MM_ADMA_CH0_SIZE 0x10000 - -#define MM_OCM 0xfffc0000U -#define MM_OCM_SIZE 0x40000 - -#define MM_XRAM 0xfe800000 -#define MM_XRAMC 0xff8e0000 -#define MM_XRAMC_SIZE 0x10000 - -#define MM_USB2_CTRL_REGS 0xFF9D0000 -#define MM_USB2_CTRL_REGS_SIZE 0x10000 - -#define MM_USB_0 0xFE200000 -#define MM_USB_0_SIZE 0x10000 - -#define MM_TOP_DDR 0x0 -#define 
MM_TOP_DDR_SIZE 0x80000000U -#define MM_TOP_DDR_2 0x800000000ULL -#define MM_TOP_DDR_2_SIZE 0x800000000ULL -#define MM_TOP_DDR_3 0xc000000000ULL -#define MM_TOP_DDR_3_SIZE 0x4000000000ULL -#define MM_TOP_DDR_4 0x10000000000ULL -#define MM_TOP_DDR_4_SIZE 0xb780000000ULL - -#define MM_PSM_START 0xffc80000U -#define MM_PSM_END 0xffcf0000U + VersalVersion version; +}; -#define MM_CRL 0xff5e0000U -#define MM_CRL_SIZE 0x300000 -#define MM_IOU_SCNTR 0xff130000U -#define MM_IOU_SCNTR_SIZE 0x10000 -#define MM_IOU_SCNTRS 0xff140000U -#define MM_IOU_SCNTRS_SIZE 0x10000 -#define MM_FPD_CRF 0xfd1a0000U -#define MM_FPD_CRF_SIZE 0x140000 -#define MM_FPD_FPD_APU 0xfd5c0000 -#define MM_FPD_FPD_APU_SIZE 0x100 +static inline void versal_set_fdt(Versal *s, void *fdt) +{ + g_assert(!qdev_is_realized(DEVICE(s))); + s->cfg.fdt = fdt; +} -#define MM_PMC_PMC_IOU_SLCR 0xf1060000 -#define MM_PMC_PMC_IOU_SLCR_SIZE 0x10000 +void versal_fdt_add_memory_nodes(Versal *s, uint64_t ram_size); -#define MM_PMC_OSPI 0xf1010000 -#define MM_PMC_OSPI_SIZE 0x10000 +DeviceState *versal_get_boot_cpu(Versal *s); +void versal_sdhci_plug_card(Versal *s, int sd_idx, BlockBackend *blk); +void versal_efuse_attach_drive(Versal *s, BlockBackend *blk); +void versal_bbram_attach_drive(Versal *s, BlockBackend *blk); +void versal_ospi_create_flash(Versal *s, int flash_idx, const char *flash_mdl, + BlockBackend *blk); -#define MM_PMC_OSPI_DAC 0xc0000000 -#define MM_PMC_OSPI_DAC_SIZE 0x20000000 +qemu_irq versal_get_reserved_irq(Versal *s, int idx, int *dtb_idx); +hwaddr versal_get_reserved_mmio_addr(Versal *s); -#define MM_PMC_OSPI_DMA_DST 0xf1011800 -#define MM_PMC_OSPI_DMA_SRC 0xf1011000 +int versal_get_num_cpu(VersalVersion version); +int versal_get_num_can(VersalVersion version); +int versal_get_num_sdhci(VersalVersion version); -#define MM_PMC_SD0 0xf1040000U -#define MM_PMC_SD0_SIZE 0x10000 -#define MM_PMC_BBRAM_CTRL 0xf11f0000 -#define MM_PMC_BBRAM_CTRL_SIZE 0x00050 -#define MM_PMC_EFUSE_CTRL 0xf1240000 -#define 
MM_PMC_EFUSE_CTRL_SIZE 0x00104 -#define MM_PMC_EFUSE_CACHE 0xf1250000 -#define MM_PMC_EFUSE_CACHE_SIZE 0x00C00 +static inline const char *versal_get_class(VersalVersion version) +{ + switch (version) { + case VERSAL_VER_VERSAL: + return TYPE_XLNX_VERSAL; -#define MM_PMC_CFU_APB 0xf12b0000 -#define MM_PMC_CFU_APB_SIZE 0x10000 -#define MM_PMC_CFU_STREAM 0xf12c0000 -#define MM_PMC_CFU_STREAM_SIZE 0x1000 -#define MM_PMC_CFU_SFR 0xf12c1000 -#define MM_PMC_CFU_SFR_SIZE 0x1000 -#define MM_PMC_CFU_FDRO 0xf12c2000 -#define MM_PMC_CFU_FDRO_SIZE 0x1000 -#define MM_PMC_CFU_STREAM_2 0xf1f80000 -#define MM_PMC_CFU_STREAM_2_SIZE 0x40000 + case VERSAL_VER_VERSAL2: + return TYPE_XLNX_VERSAL2; -#define MM_PMC_CFRAME0_REG 0xf12d0000 -#define MM_PMC_CFRAME0_REG_SIZE 0x1000 -#define MM_PMC_CFRAME0_FDRI 0xf12d1000 -#define MM_PMC_CFRAME0_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME1_REG 0xf12d2000 -#define MM_PMC_CFRAME1_REG_SIZE 0x1000 -#define MM_PMC_CFRAME1_FDRI 0xf12d3000 -#define MM_PMC_CFRAME1_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME2_REG 0xf12d4000 -#define MM_PMC_CFRAME2_REG_SIZE 0x1000 -#define MM_PMC_CFRAME2_FDRI 0xf12d5000 -#define MM_PMC_CFRAME2_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME3_REG 0xf12d6000 -#define MM_PMC_CFRAME3_REG_SIZE 0x1000 -#define MM_PMC_CFRAME3_FDRI 0xf12d7000 -#define MM_PMC_CFRAME3_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME4_REG 0xf12d8000 -#define MM_PMC_CFRAME4_REG_SIZE 0x1000 -#define MM_PMC_CFRAME4_FDRI 0xf12d9000 -#define MM_PMC_CFRAME4_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME5_REG 0xf12da000 -#define MM_PMC_CFRAME5_REG_SIZE 0x1000 -#define MM_PMC_CFRAME5_FDRI 0xf12db000 -#define MM_PMC_CFRAME5_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME6_REG 0xf12dc000 -#define MM_PMC_CFRAME6_REG_SIZE 0x1000 -#define MM_PMC_CFRAME6_FDRI 0xf12dd000 -#define MM_PMC_CFRAME6_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME7_REG 0xf12de000 -#define MM_PMC_CFRAME7_REG_SIZE 0x1000 -#define MM_PMC_CFRAME7_FDRI 0xf12df000 -#define MM_PMC_CFRAME7_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME8_REG 
0xf12e0000 -#define MM_PMC_CFRAME8_REG_SIZE 0x1000 -#define MM_PMC_CFRAME8_FDRI 0xf12e1000 -#define MM_PMC_CFRAME8_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME9_REG 0xf12e2000 -#define MM_PMC_CFRAME9_REG_SIZE 0x1000 -#define MM_PMC_CFRAME9_FDRI 0xf12e3000 -#define MM_PMC_CFRAME9_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME10_REG 0xf12e4000 -#define MM_PMC_CFRAME10_REG_SIZE 0x1000 -#define MM_PMC_CFRAME10_FDRI 0xf12e5000 -#define MM_PMC_CFRAME10_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME11_REG 0xf12e6000 -#define MM_PMC_CFRAME11_REG_SIZE 0x1000 -#define MM_PMC_CFRAME11_FDRI 0xf12e7000 -#define MM_PMC_CFRAME11_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME12_REG 0xf12e8000 -#define MM_PMC_CFRAME12_REG_SIZE 0x1000 -#define MM_PMC_CFRAME12_FDRI 0xf12e9000 -#define MM_PMC_CFRAME12_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME13_REG 0xf12ea000 -#define MM_PMC_CFRAME13_REG_SIZE 0x1000 -#define MM_PMC_CFRAME13_FDRI 0xf12eb000 -#define MM_PMC_CFRAME13_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME14_REG 0xf12ec000 -#define MM_PMC_CFRAME14_REG_SIZE 0x1000 -#define MM_PMC_CFRAME14_FDRI 0xf12ed000 -#define MM_PMC_CFRAME14_FDRI_SIZE 0x1000 -#define MM_PMC_CFRAME_BCAST_REG 0xf12ee000 -#define MM_PMC_CFRAME_BCAST_REG_SIZE 0x1000 -#define MM_PMC_CFRAME_BCAST_FDRI 0xf12ef000 -#define MM_PMC_CFRAME_BCAST_FDRI_SIZE 0x1000 + default: + g_assert_not_reached(); + } +} -#define MM_PMC_CRP 0xf1260000U -#define MM_PMC_CRP_SIZE 0x10000 -#define MM_PMC_RTC 0xf12a0000 -#define MM_PMC_RTC_SIZE 0x10000 -#define MM_PMC_TRNG 0xf1230000 -#define MM_PMC_TRNG_SIZE 0x10000 #endif diff --git a/include/hw/arm/xlnx-zynqmp.h b/include/hw/arm/xlnx-zynqmp.h index c137ac59e85fa..a3117bd6c50b3 100644 --- a/include/hw/arm/xlnx-zynqmp.h +++ b/include/hw/arm/xlnx-zynqmp.h @@ -42,6 +42,7 @@ #include "hw/misc/xlnx-zynqmp-crf.h" #include "hw/timer/cadence_ttc.h" #include "hw/usb/hcd-dwc3.h" +#include "hw/core/split-irq.h" #define TYPE_XLNX_ZYNQMP "xlnx-zynqmp" OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP) @@ -67,6 +68,7 @@ 
OBJECT_DECLARE_SIMPLE_TYPE(XlnxZynqMPState, XLNX_ZYNQMP) #define XLNX_ZYNQMP_OCM_RAM_SIZE 0x10000 #define XLNX_ZYNQMP_GIC_REGIONS 6 +#define XLNX_ZYNQMP_GIC_NUM_SPI_INTR 160 /* * ZynqMP maps the ARM GIC regions (GICC, GICD ...) at consecutive 64k offsets @@ -105,6 +107,9 @@ struct XlnxZynqMPState { GICState gic; MemoryRegion gic_mr[XLNX_ZYNQMP_GIC_REGIONS][XLNX_ZYNQMP_GIC_ALIASES]; + GICState rpu_gic; + SplitIRQ splitter[XLNX_ZYNQMP_GIC_NUM_SPI_INTR]; + MemoryRegion ocm_ram[XLNX_ZYNQMP_NUM_OCM_BANKS]; MemoryRegion *ddr_ram; diff --git a/include/hw/audio/soundhw.h b/include/hw/audio/soundhw.h index 474c5ff94e584..83b30110834fb 100644 --- a/include/hw/audio/soundhw.h +++ b/include/hw/audio/soundhw.h @@ -7,7 +7,7 @@ void deprecated_register_soundhw(const char *name, const char *descr, int isa, const char *typename); void soundhw_init(void); -void show_valid_soundhw(void); +void audio_print_available_models(void); void select_soundhw(const char *name, const char *audiodev); #endif diff --git a/include/hw/boards.h b/include/hw/boards.h index f94713e6e29a6..a48ed4f86a353 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -25,6 +25,11 @@ OBJECT_DECLARE_TYPE(MachineState, MachineClass, MACHINE) extern MachineState *current_machine; +/** + * machine_default_cpu_type: Return the machine default CPU type. + * @ms: Machine state + */ +const char *machine_default_cpu_type(const MachineState *ms); /** * machine_class_default_cpu_type: Return the machine default CPU type. * @mc: Machine class @@ -250,15 +255,12 @@ typedef struct { * It also will be used as a way to option into "-m" option support. * If it's not set by board, '-m' will be ignored and generic code will * not create default RAM MemoryRegion. - * @fixup_ram_size: - * Amends user provided ram size (with -m option) using machine - * specific algorithm. To be used by old machine types for compat - * purposes only. - * Applies only to default memory backend, i.e., explicit memory backend - * wasn't used. 
* @smbios_memory_device_size: * Default size of memory device, * SMBIOS 3.1.0 "7.18 Memory Device (Type 17)" + * @get_valid_cpu_types: + * Returns a list of valid CPU types for this board. May be NULL + * if not needed. */ struct MachineClass { /*< private >*/ @@ -305,6 +307,8 @@ struct MachineClass { bool ignore_memory_transaction_failures; int numa_mem_align_shift; const char * const *valid_cpu_types; + GPtrArray *(*get_valid_cpu_types)(const MachineState *ms); + const char *(*get_default_cpu_type)(const MachineState *ms); strList *allowed_dynamic_sysbus_devices; bool auto_enable_numa_with_memhp; bool auto_enable_numa_with_memdev; @@ -325,7 +329,6 @@ struct MachineClass { unsigned cpu_index); const CPUArchIdList *(*possible_cpu_arch_ids)(MachineState *machine); int64_t (*get_default_cpu_node_id)(const MachineState *ms, int idx); - ram_addr_t (*fixup_ram_size)(ram_addr_t size); uint64_t smbios_memory_device_size; bool (*create_default_memdev)(MachineState *ms, const char *path, Error **errp); @@ -503,6 +506,39 @@ struct MachineState { * DEFINE_VIRT_MACHINE_IMPL(false, major, minor, micro, _, tag) */ +#define DEFINE_MACHINE_EXTENDED(namestr, PARENT_NAME, InstanceName, \ + machine_initfn, ABSTRACT, ifaces...) 
\ + static void machine_initfn##_class_init(ObjectClass *oc, const void *data) \ + { \ + MachineClass *mc = MACHINE_CLASS(oc); \ + machine_initfn(mc); \ + } \ + static const TypeInfo machine_initfn##_typeinfo = { \ + .name = MACHINE_TYPE_NAME(namestr), \ + .parent = TYPE_##PARENT_NAME, \ + .class_init = machine_initfn##_class_init, \ + .instance_size = sizeof(InstanceName), \ + .abstract = ABSTRACT, \ + .interfaces = ifaces, \ + }; \ + static void machine_initfn##_register_types(void) \ + { \ + type_register_static(&machine_initfn##_typeinfo); \ + } \ + type_init(machine_initfn##_register_types) + +#define DEFINE_MACHINE(namestr, machine_initfn) \ + DEFINE_MACHINE_EXTENDED(namestr, MACHINE, MachineState, machine_initfn, \ + false, NULL) + +#define DEFINE_MACHINE_WITH_INTERFACE_ARRAY(namestr, machine_initfn, ifaces...)\ + DEFINE_MACHINE_EXTENDED(namestr, MACHINE, MachineState, machine_initfn, \ + false, ifaces) + +#define DEFINE_MACHINE_WITH_INTERFACES(namestr, machine_initfn, ...) \ + DEFINE_MACHINE_WITH_INTERFACE_ARRAY(namestr, machine_initfn, \ + (const InterfaceInfo[]) { __VA_ARGS__ }) + /* * Helper for dispatching different macros based on how * many __VA_ARGS__ are passed. 
Supports 1 to 5 variadic @@ -762,22 +798,8 @@ struct MachineState { } \ } while (0) -#define DEFINE_MACHINE(namestr, machine_initfn) \ - static void machine_initfn##_class_init(ObjectClass *oc, const void *data) \ - { \ - MachineClass *mc = MACHINE_CLASS(oc); \ - machine_initfn(mc); \ - } \ - static const TypeInfo machine_initfn##_typeinfo = { \ - .name = MACHINE_TYPE_NAME(namestr), \ - .parent = TYPE_MACHINE, \ - .class_init = machine_initfn##_class_init, \ - }; \ - static void machine_initfn##_register_types(void) \ - { \ - type_register_static(&machine_initfn##_typeinfo); \ - } \ - type_init(machine_initfn##_register_types) +extern GlobalProperty hw_compat_10_1[]; +extern const size_t hw_compat_10_1_len; extern GlobalProperty hw_compat_10_0[]; extern const size_t hw_compat_10_0_len; diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index 5eaf41a566f37..e79e8e0a8ee7b 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ -169,7 +169,7 @@ struct CPUClass { vaddr (*gdb_adjust_breakpoint)(CPUState *cpu, vaddr addr); const char *gdb_core_xml_file; - const gchar * (*gdb_arch_name)(CPUState *cpu); + const char * (*gdb_arch_name)(CPUState *cpu); const char * (*gdb_get_core_xml_file)(CPUState *cpu); void (*disas_set_info)(CPUState *cpu, disassemble_info *info); @@ -198,10 +198,11 @@ struct CPUClass { }; /* - * Fix the number of mmu modes to 16, which is also the maximum - * supported by the softmmu tlb api. + * Fix the number of mmu modes across all targets. + * Current maximum is target/arm/. */ -#define NB_MMU_MODES 16 +#define NB_MMU_MODES 22 +typedef uint32_t MMUIdxMap; /* Use a fully associative victim tlb of 8 entries. */ #define CPU_VTLB_SIZE 8 @@ -306,7 +307,7 @@ typedef struct CPUTLBCommon { * mmu_idx N since the last time that mmu_idx was flushed. * Protected by tlb_c.lock. */ - uint16_t dirty; + MMUIdxMap dirty; /* * Statistics. These are not lock protected, but are read and * written atomically. 
This allows the monitor to print a snapshot @@ -422,7 +423,17 @@ struct qemu_work_item; * valid under cpu_list_lock. * @created: Indicates whether the CPU thread has been successfully created. * @halt_cond: condition variable sleeping threads can wait on. + * @exit_request: Another thread requests the CPU to call qemu_process_cpu_events(). + * Should be read only by CPU thread with load-acquire, to synchronize with + * other threads' store-release operation. + * + * In some cases, accelerator-specific code will write exit_request from + * within the same thread, to "bump" the effect of qemu_cpu_kick() to + * the one provided by cpu_exit(), especially when processing interrupt + * flags. In this case, the write and read happen in the same thread + * and the write therefore can use qemu_atomic_set(). * @interrupt_request: Indicates a pending interrupt request. + * Only used by system emulation. * @halted: Nonzero if the CPU is in suspended state. * @stop: Indicates a pending stop request. * @stopped: Indicates the CPU has been artificially stopped. @@ -494,7 +505,6 @@ struct CPUState { bool exit_request; int exclusive_context_count; uint32_t cflags_next_tb; - /* updates protected by BQL */ uint32_t interrupt_request; int singlestep_enabled; int64_t icount_budget; @@ -506,7 +516,6 @@ struct CPUState { QSIMPLEQ_HEAD(, qemu_work_item) work_list; struct CPUAddressSpace *cpu_ases; - int cpu_ases_count; int num_ases; AddressSpace *as; MemoryRegion *memory; @@ -593,6 +602,22 @@ static inline CPUArchState *cpu_env(CPUState *cpu) return (CPUArchState *)(cpu + 1); } +#ifdef CONFIG_TCG +/* + * Invert the index order of the CPUTLBDescFast array so that lower + * mmu_idx have offsets from env with smaller magnitude. 
+ */ +static inline int mmuidx_to_fast_index(int mmu_idx) +{ + return NB_MMU_MODES - 1 - mmu_idx; +} + +static inline CPUTLBDescFast *cpu_tlb_fast(CPUState *cpu, int mmu_idx) +{ + return &cpu->neg.tlb.f[mmuidx_to_fast_index(mmu_idx)]; +} +#endif + typedef QTAILQ_HEAD(CPUTailQ, CPUState) CPUTailQ; extern CPUTailQ cpus_queue; @@ -829,7 +854,8 @@ bool qemu_cpu_is_self(CPUState *cpu); * qemu_cpu_kick: * @cpu: The vCPU to kick. * - * Kicks @cpu's thread. + * Kicks @cpu's thread to exit the accelerator. For accelerators that + * can do that, the target vCPU thread will try not to take the BQL. */ void qemu_cpu_kick(CPUState *cpu); @@ -942,6 +968,28 @@ CPUState *cpu_by_arch_id(int64_t id); void cpu_interrupt(CPUState *cpu, int mask); +/** + * cpu_test_interrupt: + * @cpu: The CPU to check interrupt(s) on. + * @mask: The interrupts to check. + * + * Checks if any of interrupts in @mask are pending on @cpu. + */ +static inline bool cpu_test_interrupt(CPUState *cpu, int mask) +{ + return qatomic_load_acquire(&cpu->interrupt_request) & mask; +} + +/** + * cpu_set_interrupt: + * @cpu: The CPU to set pending interrupt(s) on. + * @mask: The interrupts to set. + * + * Sets interrupts in @mask as pending on @cpu. Unlike @cpu_interrupt, + * this does not kick the vCPU. + */ +void cpu_set_interrupt(CPUState *cpu, int mask); + /** * cpu_set_pc: * @cpu: The CPU to set the program counter for. @@ -1113,6 +1161,15 @@ AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx); G_NORETURN void cpu_abort(CPUState *cpu, const char *fmt, ...) G_GNUC_PRINTF(2, 3); +/** + * qemu_process_cpu_events: + * @cpu: CPU that left the execution loop + * + * Perform accelerator-independent work after the CPU has left + * the inner execution loop. 
+ */ +void qemu_process_cpu_events(CPUState *cpu); + /* $(top_srcdir)/cpu.c */ void cpu_class_init_props(DeviceClass *dc); void cpu_exec_class_post_init(CPUClass *cc); diff --git a/include/hw/display/bcm2835_fb.h b/include/hw/display/bcm2835_fb.h index 49541bf08f450..acc9230b6a857 100644 --- a/include/hw/display/bcm2835_fb.h +++ b/include/hw/display/bcm2835_fb.h @@ -13,7 +13,6 @@ #define BCM2835_FB_H #include "hw/sysbus.h" -#include "ui/console.h" #include "qom/object.h" #define UPPER_RAM_BASE 0x40000000 diff --git a/include/hw/display/ramfb.h b/include/hw/display/ramfb.h index a7e00191445e1..172aa6dc89e80 100644 --- a/include/hw/display/ramfb.h +++ b/include/hw/display/ramfb.h @@ -6,7 +6,7 @@ /* ramfb.c */ typedef struct RAMFBState RAMFBState; void ramfb_display_update(QemuConsole *con, RAMFBState *s); -RAMFBState *ramfb_setup(Error **errp); +RAMFBState *ramfb_setup(bool romfile, Error **errp); extern const VMStateDescription ramfb_vmstate; diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h index f066ab7262912..3ea732f4e6a60 100644 --- a/include/hw/firmware/smbios.h +++ b/include/hw/firmware/smbios.h @@ -22,7 +22,7 @@ extern GArray *usr_blobs_sizes; typedef struct { const char *vendor, *version, *date; - bool have_major_minor, uefi; + bool have_major_minor, uefi, vm; uint8_t major, minor; } smbios_type0_t; extern smbios_type0_t smbios_type0; diff --git a/include/hw/hyperv/hvgdk.h b/include/hw/hyperv/hvgdk.h new file mode 100644 index 0000000000000..71161f477c49b --- /dev/null +++ b/include/hw/hyperv/hvgdk.h @@ -0,0 +1,20 @@ +/* + * Type definitions for the mshv guest interface. + * + * Copyright Microsoft, Corp. 
2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_HYPERV_HVGDK_H +#define HW_HYPERV_HVGDK_H + +#define HVGDK_H_VERSION (25125) + +enum hv_unimplemented_msr_action { + HV_UNIMPLEMENTED_MSR_ACTION_FAULT = 0, + HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO = 1, + HV_UNIMPLEMENTED_MSR_ACTION_COUNT = 2, +}; + +#endif /* HW_HYPERV_HVGDK_H */ diff --git a/include/hw/hyperv/hvgdk_mini.h b/include/hw/hyperv/hvgdk_mini.h new file mode 100644 index 0000000000000..d89315f5452fd --- /dev/null +++ b/include/hw/hyperv/hvgdk_mini.h @@ -0,0 +1,817 @@ +/* + * Userspace interfaces for /dev/mshv* devices and derived fds + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_HYPERV_HVGDK_MINI_H +#define HW_HYPERV_HVGDK_MINI_H + +#define MSHV_IOCTL 0xB8 + +typedef enum hv_register_name { + /* Pending Interruption Register */ + HV_REGISTER_PENDING_INTERRUPTION = 0x00010002, + + /* X64 User-Mode Registers */ + HV_X64_REGISTER_RAX = 0x00020000, + HV_X64_REGISTER_RCX = 0x00020001, + HV_X64_REGISTER_RDX = 0x00020002, + HV_X64_REGISTER_RBX = 0x00020003, + HV_X64_REGISTER_RSP = 0x00020004, + HV_X64_REGISTER_RBP = 0x00020005, + HV_X64_REGISTER_RSI = 0x00020006, + HV_X64_REGISTER_RDI = 0x00020007, + HV_X64_REGISTER_R8 = 0x00020008, + HV_X64_REGISTER_R9 = 0x00020009, + HV_X64_REGISTER_R10 = 0x0002000A, + HV_X64_REGISTER_R11 = 0x0002000B, + HV_X64_REGISTER_R12 = 0x0002000C, + HV_X64_REGISTER_R13 = 0x0002000D, + HV_X64_REGISTER_R14 = 0x0002000E, + HV_X64_REGISTER_R15 = 0x0002000F, + HV_X64_REGISTER_RIP = 0x00020010, + HV_X64_REGISTER_RFLAGS = 0x00020011, + + /* X64 Floating Point and Vector Registers */ + HV_X64_REGISTER_XMM0 = 0x00030000, + HV_X64_REGISTER_XMM1 = 0x00030001, + HV_X64_REGISTER_XMM2 = 0x00030002, + HV_X64_REGISTER_XMM3 = 0x00030003, + HV_X64_REGISTER_XMM4 = 0x00030004, + HV_X64_REGISTER_XMM5 = 0x00030005, + HV_X64_REGISTER_XMM6 = 0x00030006, + HV_X64_REGISTER_XMM7 = 0x00030007, + HV_X64_REGISTER_XMM8 = 0x00030008, + HV_X64_REGISTER_XMM9 = 
0x00030009, + HV_X64_REGISTER_XMM10 = 0x0003000A, + HV_X64_REGISTER_XMM11 = 0x0003000B, + HV_X64_REGISTER_XMM12 = 0x0003000C, + HV_X64_REGISTER_XMM13 = 0x0003000D, + HV_X64_REGISTER_XMM14 = 0x0003000E, + HV_X64_REGISTER_XMM15 = 0x0003000F, + HV_X64_REGISTER_FP_MMX0 = 0x00030010, + HV_X64_REGISTER_FP_MMX1 = 0x00030011, + HV_X64_REGISTER_FP_MMX2 = 0x00030012, + HV_X64_REGISTER_FP_MMX3 = 0x00030013, + HV_X64_REGISTER_FP_MMX4 = 0x00030014, + HV_X64_REGISTER_FP_MMX5 = 0x00030015, + HV_X64_REGISTER_FP_MMX6 = 0x00030016, + HV_X64_REGISTER_FP_MMX7 = 0x00030017, + HV_X64_REGISTER_FP_CONTROL_STATUS = 0x00030018, + HV_X64_REGISTER_XMM_CONTROL_STATUS = 0x00030019, + + /* X64 Control Registers */ + HV_X64_REGISTER_CR0 = 0x00040000, + HV_X64_REGISTER_CR2 = 0x00040001, + HV_X64_REGISTER_CR3 = 0x00040002, + HV_X64_REGISTER_CR4 = 0x00040003, + HV_X64_REGISTER_CR8 = 0x00040004, + HV_X64_REGISTER_XFEM = 0x00040005, + + /* X64 Segment Registers */ + HV_X64_REGISTER_ES = 0x00060000, + HV_X64_REGISTER_CS = 0x00060001, + HV_X64_REGISTER_SS = 0x00060002, + HV_X64_REGISTER_DS = 0x00060003, + HV_X64_REGISTER_FS = 0x00060004, + HV_X64_REGISTER_GS = 0x00060005, + HV_X64_REGISTER_LDTR = 0x00060006, + HV_X64_REGISTER_TR = 0x00060007, + + /* X64 Table Registers */ + HV_X64_REGISTER_IDTR = 0x00070000, + HV_X64_REGISTER_GDTR = 0x00070001, + + /* X64 Virtualized MSRs */ + HV_X64_REGISTER_TSC = 0x00080000, + HV_X64_REGISTER_EFER = 0x00080001, + HV_X64_REGISTER_KERNEL_GS_BASE = 0x00080002, + HV_X64_REGISTER_APIC_BASE = 0x00080003, + HV_X64_REGISTER_PAT = 0x00080004, + HV_X64_REGISTER_SYSENTER_CS = 0x00080005, + HV_X64_REGISTER_SYSENTER_EIP = 0x00080006, + HV_X64_REGISTER_SYSENTER_ESP = 0x00080007, + HV_X64_REGISTER_STAR = 0x00080008, + HV_X64_REGISTER_LSTAR = 0x00080009, + HV_X64_REGISTER_CSTAR = 0x0008000A, + HV_X64_REGISTER_SFMASK = 0x0008000B, + HV_X64_REGISTER_INITIAL_APIC_ID = 0x0008000C, + + /* X64 Cache control MSRs */ + HV_X64_REGISTER_MSR_MTRR_CAP = 0x0008000D, + 
HV_X64_REGISTER_MSR_MTRR_DEF_TYPE = 0x0008000E, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0 = 0x00080010, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1 = 0x00080011, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2 = 0x00080012, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3 = 0x00080013, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4 = 0x00080014, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5 = 0x00080015, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6 = 0x00080016, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7 = 0x00080017, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE8 = 0x00080018, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASE9 = 0x00080019, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASEA = 0x0008001A, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASEB = 0x0008001B, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASEC = 0x0008001C, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASED = 0x0008001D, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASEE = 0x0008001E, + HV_X64_REGISTER_MSR_MTRR_PHYS_BASEF = 0x0008001F, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0 = 0x00080040, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1 = 0x00080041, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2 = 0x00080042, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3 = 0x00080043, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4 = 0x00080044, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5 = 0x00080045, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6 = 0x00080046, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7 = 0x00080047, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK8 = 0x00080048, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASK9 = 0x00080049, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASKA = 0x0008004A, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASKB = 0x0008004B, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASKC = 0x0008004C, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASKD = 0x0008004D, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASKE = 0x0008004E, + HV_X64_REGISTER_MSR_MTRR_PHYS_MASKF = 0x0008004F, + HV_X64_REGISTER_MSR_MTRR_FIX64K00000 = 0x00080070, + HV_X64_REGISTER_MSR_MTRR_FIX16K80000 = 0x00080071, + HV_X64_REGISTER_MSR_MTRR_FIX16KA0000 = 0x00080072, + HV_X64_REGISTER_MSR_MTRR_FIX4KC0000 = 0x00080073, + HV_X64_REGISTER_MSR_MTRR_FIX4KC8000 = 0x00080074, + 
HV_X64_REGISTER_MSR_MTRR_FIX4KD0000 = 0x00080075, + HV_X64_REGISTER_MSR_MTRR_FIX4KD8000 = 0x00080076, + HV_X64_REGISTER_MSR_MTRR_FIX4KE0000 = 0x00080077, + HV_X64_REGISTER_MSR_MTRR_FIX4KE8000 = 0x00080078, + HV_X64_REGISTER_MSR_MTRR_FIX4KF0000 = 0x00080079, + HV_X64_REGISTER_MSR_MTRR_FIX4KF8000 = 0x0008007A, + + HV_X64_REGISTER_TSC_AUX = 0x0008007B, + HV_X64_REGISTER_BNDCFGS = 0x0008007C, + HV_X64_REGISTER_DEBUG_CTL = 0x0008007D, + + /* Available */ + + HV_X64_REGISTER_SPEC_CTRL = 0x00080084, + HV_X64_REGISTER_TSC_ADJUST = 0x00080096, + + /* Other MSRs */ + HV_X64_REGISTER_MSR_IA32_MISC_ENABLE = 0x000800A0, + + /* Misc */ + HV_REGISTER_GUEST_OS_ID = 0x00090002, + HV_REGISTER_REFERENCE_TSC = 0x00090017, + + /* Hypervisor-defined Registers (Synic) */ + HV_REGISTER_SINT0 = 0x000A0000, + HV_REGISTER_SINT1 = 0x000A0001, + HV_REGISTER_SINT2 = 0x000A0002, + HV_REGISTER_SINT3 = 0x000A0003, + HV_REGISTER_SINT4 = 0x000A0004, + HV_REGISTER_SINT5 = 0x000A0005, + HV_REGISTER_SINT6 = 0x000A0006, + HV_REGISTER_SINT7 = 0x000A0007, + HV_REGISTER_SINT8 = 0x000A0008, + HV_REGISTER_SINT9 = 0x000A0009, + HV_REGISTER_SINT10 = 0x000A000A, + HV_REGISTER_SINT11 = 0x000A000B, + HV_REGISTER_SINT12 = 0x000A000C, + HV_REGISTER_SINT13 = 0x000A000D, + HV_REGISTER_SINT14 = 0x000A000E, + HV_REGISTER_SINT15 = 0x000A000F, + HV_REGISTER_SCONTROL = 0x000A0010, + HV_REGISTER_SVERSION = 0x000A0011, + HV_REGISTER_SIEFP = 0x000A0012, + HV_REGISTER_SIMP = 0x000A0013, + HV_REGISTER_EOM = 0x000A0014, + HV_REGISTER_SIRBP = 0x000A0015, +} hv_register_name; + +enum hv_intercept_type { + HV_INTERCEPT_TYPE_X64_IO_PORT = 0X00000000, + HV_INTERCEPT_TYPE_X64_MSR = 0X00000001, + HV_INTERCEPT_TYPE_X64_CPUID = 0X00000002, + HV_INTERCEPT_TYPE_EXCEPTION = 0X00000003, + + /* Used to be HV_INTERCEPT_TYPE_REGISTER */ + HV_INTERCEPT_TYPE_RESERVED0 = 0X00000004, + HV_INTERCEPT_TYPE_MMIO = 0X00000005, + HV_INTERCEPT_TYPE_X64_GLOBAL_CPUID = 0X00000006, + HV_INTERCEPT_TYPE_X64_APIC_SMI = 0X00000007, + HV_INTERCEPT_TYPE_HYPERCALL 
= 0X00000008, + + HV_INTERCEPT_TYPE_X64_APIC_INIT_SIPI = 0X00000009, + HV_INTERCEPT_MC_UPDATE_PATCH_LEVEL_MSR_READ = 0X0000000A, + + HV_INTERCEPT_TYPE_X64_APIC_WRITE = 0X0000000B, + HV_INTERCEPT_TYPE_X64_MSR_INDEX = 0X0000000C, + HV_INTERCEPT_TYPE_MAX, + HV_INTERCEPT_TYPE_INVALID = 0XFFFFFFFF, +}; + +struct hv_u128 { + uint64_t low_part; + uint64_t high_part; +}; + +union hv_x64_xmm_control_status_register { + struct hv_u128 as_uint128; + struct { + union { + /* long mode */ + uint64_t last_fp_rdp; + /* 32 bit mode */ + struct { + uint32_t last_fp_dp; + uint16_t last_fp_ds; + uint16_t padding; + }; + }; + uint32_t xmm_status_control; + uint32_t xmm_status_control_mask; + }; +}; + +union hv_x64_fp_register { + struct hv_u128 as_uint128; + struct { + uint64_t mantissa; + uint64_t biased_exponent:15; + uint64_t sign:1; + uint64_t reserved:48; + }; +}; + +union hv_x64_pending_exception_event { + uint64_t as_uint64[2]; + struct { + uint32_t event_pending:1; + uint32_t event_type:3; + uint32_t reserved0:4; + uint32_t deliver_error_code:1; + uint32_t reserved1:7; + uint32_t vector:16; + uint32_t error_code; + uint64_t exception_parameter; + }; +}; + +union hv_x64_pending_virtualization_fault_event { + uint64_t as_uint64[2]; + struct { + uint32_t event_pending:1; + uint32_t event_type:3; + uint32_t reserved0:4; + uint32_t reserved1:8; + uint32_t parameter0:16; + uint32_t code; + uint64_t parameter1; + }; +}; + +union hv_x64_pending_interruption_register { + uint64_t as_uint64; + struct { + uint32_t interruption_pending:1; + uint32_t interruption_type:3; + uint32_t deliver_error_code:1; + uint32_t instruction_length:4; + uint32_t nested_event:1; + uint32_t reserved:6; + uint32_t interruption_vector:16; + uint32_t error_code; + }; +}; + +union hv_x64_register_sev_control { + uint64_t as_uint64; + struct { + uint64_t enable_encrypted_state:1; + uint64_t reserved_z:11; + uint64_t vmsa_gpa_page_number:52; + }; +}; + +union hv_x64_msr_npiep_config_contents { + uint64_t 
as_uint64; + struct { + /* + * These bits enable instruction execution prevention for + * specific instructions. + */ + uint64_t prevents_gdt:1; + uint64_t prevents_idt:1; + uint64_t prevents_ldt:1; + uint64_t prevents_tr:1; + + /* The reserved bits must always be 0. */ + uint64_t reserved:60; + }; +}; + +typedef struct hv_x64_segment_register { + uint64_t base; + uint32_t limit; + uint16_t selector; + union { + struct { + uint16_t segment_type:4; + uint16_t non_system_segment:1; + uint16_t descriptor_privilege_level:2; + uint16_t present:1; + uint16_t reserved:4; + uint16_t available:1; + uint16_t _long:1; + uint16_t _default:1; + uint16_t granularity:1; + }; + uint16_t attributes; + }; +} hv_x64_segment_register; + +typedef struct hv_x64_table_register { + uint16_t pad[3]; + uint16_t limit; + uint64_t base; +} hv_x64_table_register; + +union hv_x64_fp_control_status_register { + struct hv_u128 as_uint128; + struct { + uint16_t fp_control; + uint16_t fp_status; + uint8_t fp_tag; + uint8_t reserved; + uint16_t last_fp_op; + union { + /* long mode */ + uint64_t last_fp_rip; + /* 32 bit mode */ + struct { + uint32_t last_fp_eip; + uint16_t last_fp_cs; + uint16_t padding; + }; + }; + }; +}; + +/* General Hypervisor Register Content Definitions */ + +union hv_explicit_suspend_register { + uint64_t as_uint64; + struct { + uint64_t suspended:1; + uint64_t reserved:63; + }; +}; + +union hv_internal_activity_register { + uint64_t as_uint64; + + struct { + uint64_t startup_suspend:1; + uint64_t halt_suspend:1; + uint64_t idle_suspend:1; + uint64_t rsvd_z:61; + }; +}; + +union hv_x64_interrupt_state_register { + uint64_t as_uint64; + struct { + uint64_t interrupt_shadow:1; + uint64_t nmi_masked:1; + uint64_t reserved:62; + }; +}; + +union hv_intercept_suspend_register { + uint64_t as_uint64; + struct { + uint64_t suspended:1; + uint64_t reserved:63; + }; +}; + +typedef union hv_register_value { + struct hv_u128 reg128; + uint64_t reg64; + uint32_t reg32; + uint16_t reg16; + 
uint8_t reg8; + union hv_x64_fp_register fp; + union hv_x64_fp_control_status_register fp_control_status; + union hv_x64_xmm_control_status_register xmm_control_status; + struct hv_x64_segment_register segment; + struct hv_x64_table_register table; + union hv_explicit_suspend_register explicit_suspend; + union hv_intercept_suspend_register intercept_suspend; + union hv_internal_activity_register internal_activity; + union hv_x64_interrupt_state_register interrupt_state; + union hv_x64_pending_interruption_register pending_interruption; + union hv_x64_msr_npiep_config_contents npiep_config; + union hv_x64_pending_exception_event pending_exception_event; + union hv_x64_pending_virtualization_fault_event + pending_virtualization_fault_event; + union hv_x64_register_sev_control sev_control; +} hv_register_value; + +typedef struct hv_register_assoc { + uint32_t name; /* enum hv_register_name */ + uint32_t reserved1; + uint64_t reserved2; + union hv_register_value value; +} hv_register_assoc; + +union hv_input_vtl { + uint8_t as_uint8; + struct { + uint8_t target_vtl:4; + uint8_t use_target_vtl:1; + uint8_t reserved_z:3; + }; +}; + +typedef struct hv_input_get_vp_registers { + uint64_t partition_id; + uint32_t vp_index; + union hv_input_vtl input_vtl; + uint8_t rsvd_z8; + uint16_t rsvd_z16; + uint32_t names[]; +} hv_input_get_vp_registers; + +typedef struct hv_input_set_vp_registers { + uint64_t partition_id; + uint32_t vp_index; + union hv_input_vtl input_vtl; + uint8_t rsvd_z8; + uint16_t rsvd_z16; + struct hv_register_assoc elements[]; +} hv_input_set_vp_registers; + +#define MSHV_VP_MAX_REGISTERS 128 + +struct mshv_vp_registers { + int count; /* at most MSHV_VP_MAX_REGISTERS */ + struct hv_register_assoc *regs; +}; + +union hv_interrupt_control { + uint64_t as_uint64; + struct { + uint32_t interrupt_type; /* enum hv_interrupt type */ + uint32_t level_triggered:1; + uint32_t logical_dest_mode:1; + uint32_t rsvd:30; + }; +}; + +struct hv_input_assert_virtual_interrupt 
{ + uint64_t partition_id; + union hv_interrupt_control control; + uint64_t dest_addr; /* cpu's apic id */ + uint32_t vector; + uint8_t target_vtl; + uint8_t rsvd_z0; + uint16_t rsvd_z1; +}; + +/* /dev/mshv */ +#define MSHV_CREATE_PARTITION _IOW(MSHV_IOCTL, 0x00, struct mshv_create_partition) +#define MSHV_CREATE_VP _IOW(MSHV_IOCTL, 0x01, struct mshv_create_vp) + +/* Partition fds created with MSHV_CREATE_PARTITION */ +#define MSHV_INITIALIZE_PARTITION _IO(MSHV_IOCTL, 0x00) +#define MSHV_SET_GUEST_MEMORY _IOW(MSHV_IOCTL, 0x02, struct mshv_user_mem_region) +#define MSHV_IRQFD _IOW(MSHV_IOCTL, 0x03, struct mshv_user_irqfd) +#define MSHV_IOEVENTFD _IOW(MSHV_IOCTL, 0x04, struct mshv_user_ioeventfd) +#define MSHV_SET_MSI_ROUTING _IOW(MSHV_IOCTL, 0x05, struct mshv_user_irq_table) + +/* + ******************************** + * VP APIs for child partitions * + ******************************** + */ + +struct hv_local_interrupt_controller_state { + /* HV_X64_INTERRUPT_CONTROLLER_STATE */ + uint32_t apic_id; + uint32_t apic_version; + uint32_t apic_ldr; + uint32_t apic_dfr; + uint32_t apic_spurious; + uint32_t apic_isr[8]; + uint32_t apic_tmr[8]; + uint32_t apic_irr[8]; + uint32_t apic_esr; + uint32_t apic_icr_high; + uint32_t apic_icr_low; + uint32_t apic_lvt_timer; + uint32_t apic_lvt_thermal; + uint32_t apic_lvt_perfmon; + uint32_t apic_lvt_lint0; + uint32_t apic_lvt_lint1; + uint32_t apic_lvt_error; + uint32_t apic_lvt_cmci; + uint32_t apic_error_status; + uint32_t apic_initial_count; + uint32_t apic_counter_value; + uint32_t apic_divide_configuration; + uint32_t apic_remote_read; +}; + +/* Generic hypercall */ +#define MSHV_ROOT_HVCALL _IOWR(MSHV_IOCTL, 0x07, struct mshv_root_hvcall) + +/* From hvgdk_mini.h */ + +#define HV_X64_MSR_GUEST_OS_ID 0x40000000 +#define HV_X64_MSR_SINT0 0x40000090 +#define HV_X64_MSR_SINT1 0x40000091 +#define HV_X64_MSR_SINT2 0x40000092 +#define HV_X64_MSR_SINT3 0x40000093 +#define HV_X64_MSR_SINT4 0x40000094 +#define HV_X64_MSR_SINT5 0x40000095 
+#define HV_X64_MSR_SINT6 0x40000096 +#define HV_X64_MSR_SINT7 0x40000097 +#define HV_X64_MSR_SINT8 0x40000098 +#define HV_X64_MSR_SINT9 0x40000099 +#define HV_X64_MSR_SINT10 0x4000009A +#define HV_X64_MSR_SINT11 0x4000009B +#define HV_X64_MSR_SINT12 0x4000009C +#define HV_X64_MSR_SINT13 0x4000009D +#define HV_X64_MSR_SINT14 0x4000009E +#define HV_X64_MSR_SINT15 0x4000009F +#define HV_X64_MSR_SCONTROL 0x40000080 +#define HV_X64_MSR_SIEFP 0x40000082 +#define HV_X64_MSR_SIMP 0x40000083 +#define HV_X64_MSR_REFERENCE_TSC 0x40000021 +#define HV_X64_MSR_EOM 0x40000084 + +/* Define port identifier type. */ +union hv_port_id { + uint32_t asuint32_t; + struct { + uint32_t id:24; + uint32_t reserved:8; + }; +}; + +#define HV_MESSAGE_SIZE (256) +#define HV_MESSAGE_PAYLOAD_BYTE_COUNT (240) +#define HV_MESSAGE_PAYLOAD_QWORD_COUNT (30) + +/* Define hypervisor message types. */ +enum hv_message_type { + HVMSG_NONE = 0x00000000, + + /* Memory access messages. */ + HVMSG_UNMAPPED_GPA = 0x80000000, + HVMSG_GPA_INTERCEPT = 0x80000001, + HVMSG_UNACCEPTED_GPA = 0x80000003, + HVMSG_GPA_ATTRIBUTE_INTERCEPT = 0x80000004, + + /* Timer notification messages. */ + HVMSG_TIMER_EXPIRED = 0x80000010, + + /* Error messages. */ + HVMSG_INVALID_VP_REGISTER_VALUE = 0x80000020, + HVMSG_UNRECOVERABLE_EXCEPTION = 0x80000021, + HVMSG_UNSUPPORTED_FEATURE = 0x80000022, + + /* + * Opaque intercept message. The original intercept message is only + * accessible from the mapped intercept message page. + */ + HVMSG_OPAQUE_INTERCEPT = 0x8000003F, + + /* Trace buffer complete messages. 
*/ + HVMSG_EVENTLOG_BUFFERCOMPLETE = 0x80000040, + + /* Hypercall intercept */ + HVMSG_HYPERCALL_INTERCEPT = 0x80000050, + + /* SynIC intercepts */ + HVMSG_SYNIC_EVENT_INTERCEPT = 0x80000060, + HVMSG_SYNIC_SINT_INTERCEPT = 0x80000061, + HVMSG_SYNIC_SINT_DELIVERABLE = 0x80000062, + + /* Async call completion intercept */ + HVMSG_ASYNC_CALL_COMPLETION = 0x80000070, + + /* Root scheduler messages */ + HVMSG_SCHEDULER_VP_SIGNAL_BITSE = 0x80000100, + HVMSG_SCHEDULER_VP_SIGNAL_PAIR = 0x80000101, + + /* Platform-specific processor intercept messages. */ + HVMSG_X64_IO_PORT_INTERCEPT = 0x80010000, + HVMSG_X64_MSR_INTERCEPT = 0x80010001, + HVMSG_X64_CPUID_INTERCEPT = 0x80010002, + HVMSG_X64_EXCEPTION_INTERCEPT = 0x80010003, + HVMSG_X64_APIC_EOI = 0x80010004, + HVMSG_X64_LEGACY_FP_ERROR = 0x80010005, + HVMSG_X64_IOMMU_PRQ = 0x80010006, + HVMSG_X64_HALT = 0x80010007, + HVMSG_X64_INTERRUPTION_DELIVERABLE = 0x80010008, + HVMSG_X64_SIPI_INTERCEPT = 0x80010009, + HVMSG_X64_SEV_VMGEXIT_INTERCEPT = 0x80010013, +}; + +union hv_x64_vp_execution_state { + uint16_t as_uint16; + struct { + uint16_t cpl:2; + uint16_t cr0_pe:1; + uint16_t cr0_am:1; + uint16_t efer_lma:1; + uint16_t debug_active:1; + uint16_t interruption_pending:1; + uint16_t vtl:4; + uint16_t enclave_mode:1; + uint16_t interrupt_shadow:1; + uint16_t virtualization_fault_active:1; + uint16_t reserved:2; + }; +}; + +/* From openvmm::hvdef */ +enum hv_x64_intercept_access_type { + HV_X64_INTERCEPT_ACCESS_TYPE_READ = 0, + HV_X64_INTERCEPT_ACCESS_TYPE_WRITE = 1, + HV_X64_INTERCEPT_ACCESS_TYPE_EXECUTE = 2, +}; + +struct hv_x64_intercept_message_header { + uint32_t vp_index; + uint8_t instruction_length:4; + uint8_t cr8:4; /* Only set for exo partitions */ + uint8_t intercept_access_type; + union hv_x64_vp_execution_state execution_state; + struct hv_x64_segment_register cs_segment; + uint64_t rip; + uint64_t rflags; +}; + +union hv_x64_io_port_access_info { + uint8_t as_uint8; + struct { + uint8_t access_size:3; + uint8_t 
string_op:1; + uint8_t rep_prefix:1; + uint8_t reserved:3; + }; +}; + +typedef struct hv_x64_io_port_intercept_message { + struct hv_x64_intercept_message_header header; + uint16_t port_number; + union hv_x64_io_port_access_info access_info; + uint8_t instruction_byte_count; + uint32_t reserved; + uint64_t rax; + uint8_t instruction_bytes[16]; + struct hv_x64_segment_register ds_segment; + struct hv_x64_segment_register es_segment; + uint64_t rcx; + uint64_t rsi; + uint64_t rdi; +} hv_x64_io_port_intercept_message; + +union hv_x64_memory_access_info { + uint8_t as_uint8; + struct { + uint8_t gva_valid:1; + uint8_t gva_gpa_valid:1; + uint8_t hypercall_output_pending:1; + uint8_t tlb_locked_no_overlay:1; + uint8_t reserved:4; + }; +}; + +struct hv_x64_memory_intercept_message { + struct hv_x64_intercept_message_header header; + uint32_t cache_type; /* enum hv_cache_type */ + uint8_t instruction_byte_count; + union hv_x64_memory_access_info memory_access_info; + uint8_t tpr_priority; + uint8_t reserved1; + uint64_t guest_virtual_address; + uint64_t guest_physical_address; + uint8_t instruction_bytes[16]; +}; + +union hv_message_flags { + uint8_t asu8; + struct { + uint8_t msg_pending:1; + uint8_t reserved:7; + }; +}; + +struct hv_message_header { + uint32_t message_type; + uint8_t payload_size; + union hv_message_flags message_flags; + uint8_t reserved[2]; + union { + uint64_t sender; + union hv_port_id port; + }; +}; + +struct hv_message { + struct hv_message_header header; + union { + uint64_t payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT]; + } u; +}; + +/* From github.com/rust-vmm/mshv-bindings/src/x86_64/regs.rs */ + +struct hv_cpuid_entry { + uint32_t function; + uint32_t index; + uint32_t flags; + uint32_t eax; + uint32_t ebx; + uint32_t ecx; + uint32_t edx; + uint32_t padding[3]; +}; + +struct hv_cpuid { + uint32_t nent; + uint32_t padding; + struct hv_cpuid_entry entries[0]; +}; + +#define IA32_MSR_TSC 0x00000010 +#define IA32_MSR_EFER 0xC0000080 +#define 
IA32_MSR_KERNEL_GS_BASE 0xC0000102 +#define IA32_MSR_APIC_BASE 0x0000001B +#define IA32_MSR_PAT 0x0277 +#define IA32_MSR_SYSENTER_CS 0x00000174 +#define IA32_MSR_SYSENTER_ESP 0x00000175 +#define IA32_MSR_SYSENTER_EIP 0x00000176 +#define IA32_MSR_STAR 0xC0000081 +#define IA32_MSR_LSTAR 0xC0000082 +#define IA32_MSR_CSTAR 0xC0000083 +#define IA32_MSR_SFMASK 0xC0000084 + +#define IA32_MSR_MTRR_CAP 0x00FE +#define IA32_MSR_MTRR_DEF_TYPE 0x02FF +#define IA32_MSR_MTRR_PHYSBASE0 0x0200 +#define IA32_MSR_MTRR_PHYSMASK0 0x0201 +#define IA32_MSR_MTRR_PHYSBASE1 0x0202 +#define IA32_MSR_MTRR_PHYSMASK1 0x0203 +#define IA32_MSR_MTRR_PHYSBASE2 0x0204 +#define IA32_MSR_MTRR_PHYSMASK2 0x0205 +#define IA32_MSR_MTRR_PHYSBASE3 0x0206 +#define IA32_MSR_MTRR_PHYSMASK3 0x0207 +#define IA32_MSR_MTRR_PHYSBASE4 0x0208 +#define IA32_MSR_MTRR_PHYSMASK4 0x0209 +#define IA32_MSR_MTRR_PHYSBASE5 0x020A +#define IA32_MSR_MTRR_PHYSMASK5 0x020B +#define IA32_MSR_MTRR_PHYSBASE6 0x020C +#define IA32_MSR_MTRR_PHYSMASK6 0x020D +#define IA32_MSR_MTRR_PHYSBASE7 0x020E +#define IA32_MSR_MTRR_PHYSMASK7 0x020F + +#define IA32_MSR_MTRR_FIX64K_00000 0x0250 +#define IA32_MSR_MTRR_FIX16K_80000 0x0258 +#define IA32_MSR_MTRR_FIX16K_A0000 0x0259 +#define IA32_MSR_MTRR_FIX4K_C0000 0x0268 +#define IA32_MSR_MTRR_FIX4K_C8000 0x0269 +#define IA32_MSR_MTRR_FIX4K_D0000 0x026A +#define IA32_MSR_MTRR_FIX4K_D8000 0x026B +#define IA32_MSR_MTRR_FIX4K_E0000 0x026C +#define IA32_MSR_MTRR_FIX4K_E8000 0x026D +#define IA32_MSR_MTRR_FIX4K_F0000 0x026E +#define IA32_MSR_MTRR_FIX4K_F8000 0x026F + +#define IA32_MSR_TSC_AUX 0xC0000103 +#define IA32_MSR_BNDCFGS 0x00000d90 +#define IA32_MSR_DEBUG_CTL 0x1D9 +#define IA32_MSR_SPEC_CTRL 0x00000048 +#define IA32_MSR_TSC_ADJUST 0x0000003b + +#define IA32_MSR_MISC_ENABLE 0x000001a0 + +#define HV_TRANSLATE_GVA_VALIDATE_READ (0x0001) +#define HV_TRANSLATE_GVA_VALIDATE_WRITE (0x0002) +#define HV_TRANSLATE_GVA_VALIDATE_EXECUTE (0x0004) + +#define HV_HYP_PAGE_SHIFT 12 +#define HV_HYP_PAGE_SIZE 
BIT(HV_HYP_PAGE_SHIFT) +#define HV_HYP_PAGE_MASK (~(HV_HYP_PAGE_SIZE - 1)) + +#define HVCALL_GET_PARTITION_PROPERTY 0x0044 +#define HVCALL_SET_PARTITION_PROPERTY 0x0045 +#define HVCALL_GET_VP_REGISTERS 0x0050 +#define HVCALL_SET_VP_REGISTERS 0x0051 +#define HVCALL_TRANSLATE_VIRTUAL_ADDRESS 0x0052 +#define HVCALL_REGISTER_INTERCEPT_RESULT 0x0091 +#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094 + +#endif /* HW_HYPERV_HVGDK_MINI_H */ diff --git a/include/hw/hyperv/hvhdk.h b/include/hw/hyperv/hvhdk.h new file mode 100644 index 0000000000000..866c8211bfeff --- /dev/null +++ b/include/hw/hyperv/hvhdk.h @@ -0,0 +1,249 @@ +/* + * Type definitions for the mshv host. + * + * Copyright Microsoft, Corp. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_HYPERV_HVHDK_H +#define HW_HYPERV_HVHDK_H + +#define HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS 1 + +struct hv_input_set_partition_property { + uint64_t partition_id; + uint32_t property_code; /* enum hv_partition_property_code */ + uint32_t padding; + uint64_t property_value; +}; + +union hv_partition_synthetic_processor_features { + uint64_t as_uint64[HV_PARTITION_SYNTHETIC_PROCESSOR_FEATURES_BANKS]; + + struct { + /* + * Report a hypervisor is present. CPUID leaves + * 0x40000000 and 0x40000001 are supported. + */ + uint64_t hypervisor_present:1; + + /* + * Features associated with HV#1: + */ + + /* Report support for Hv1 (CPUID leaves 0x40000000 - 0x40000006). */ + uint64_t hv1:1; + + /* + * Access to HV_X64_MSR_VP_RUNTIME. + * Corresponds to access_vp_run_time_reg privilege. + */ + uint64_t access_vp_run_time_reg:1; + + /* + * Access to HV_X64_MSR_TIME_REF_COUNT. + * Corresponds to access_partition_reference_counter privilege. + */ + uint64_t access_partition_reference_counter:1; + + /* + * Access to SINT-related registers (HV_X64_MSR_SCONTROL through + * HV_X64_MSR_EOM and HV_X64_MSR_SINT0 through HV_X64_MSR_SINT15). + * Corresponds to access_synic_regs privilege. 
+ */ + uint64_t access_synic_regs:1; + + /* + * Access to synthetic timers and associated MSRs + * (HV_X64_MSR_STIMER0_CONFIG through HV_X64_MSR_STIMER3_COUNT). + * Corresponds to access_synthetic_timer_regs privilege. + */ + uint64_t access_synthetic_timer_regs:1; + + /* + * Access to APIC MSRs (HV_X64_MSR_EOI, HV_X64_MSR_ICR and + * HV_X64_MSR_TPR) as well as the VP assist page. + * Corresponds to access_intr_ctrl_regs privilege. + */ + uint64_t access_intr_ctrl_regs:1; + + /* + * Access to registers associated with hypercalls + * (HV_X64_MSR_GUEST_OS_ID and HV_X64_MSR_HYPERCALL). + * Corresponds to access_hypercall_msrs privilege. + */ + uint64_t access_hypercall_regs:1; + + /* VP index can be queried. corresponds to access_vp_index privilege. */ + uint64_t access_vp_index:1; + + /* + * Access to the reference TSC. Corresponds to + * access_partition_reference_tsc privilege. + */ + uint64_t access_partition_reference_tsc:1; + + /* + * Partition has access to the guest idle reg. Corresponds to + * access_guest_idle_reg privilege. + */ + uint64_t access_guest_idle_reg:1; + + /* + * Partition has access to frequency regs. corresponds to + * access_frequency_regs privilege. + */ + uint64_t access_frequency_regs:1; + + uint64_t reserved_z12:1; /* Reserved for access_reenlightenment_controls */ + uint64_t reserved_z13:1; /* Reserved for access_root_scheduler_reg */ + uint64_t reserved_z14:1; /* Reserved for access_tsc_invariant_controls */ + + /* + * Extended GVA ranges for HvCallFlushVirtualAddressList hypercall. + * Corresponds to privilege. + */ + uint64_t enable_extended_gva_ranges_for_flush_virtual_address_list:1; + + uint64_t reserved_z16:1; /* Reserved for access_vsm. */ + uint64_t reserved_z17:1; /* Reserved for access_vp_registers. */ + + /* Use fast hypercall output. Corresponds to privilege. */ + uint64_t fast_hypercall_output:1; + + uint64_t reserved_z19:1; /* Reserved for enable_extended_hypercalls. 
*/ + + /* + * HvStartVirtualProcessor can be used to start virtual processors. + * Corresponds to privilege. + */ + uint64_t start_virtual_processor:1; + + uint64_t reserved_z21:1; /* Reserved for Isolation. */ + + /* Synthetic timers in direct mode. */ + uint64_t direct_synthetic_timers:1; + + uint64_t reserved_z23:1; /* Reserved for synthetic time unhalted timer */ + + /* Use extended processor masks. */ + uint64_t extended_processor_masks:1; + + /* + * HvCallFlushVirtualAddressSpace / HvCallFlushVirtualAddressList are + * supported. + */ + uint64_t tb_flush_hypercalls:1; + + /* HvCallSendSyntheticClusterIpi is supported. */ + uint64_t synthetic_cluster_ipi:1; + + /* HvCallNotifyLongSpinWait is supported. */ + uint64_t notify_long_spin_wait:1; + + /* HvCallQueryNumaDistance is supported. */ + uint64_t query_numa_distance:1; + + /* HvCallSignalEvent is supported. Corresponds to privilege. */ + uint64_t signal_events:1; + + /* HvCallRetargetDeviceInterrupt is supported. */ + uint64_t retarget_device_interrupt:1; + + /* HvCallRestorePartitionTime is supported. */ + uint64_t restore_time:1; + + /* EnlightenedVmcs nested enlightenment is supported. */ + uint64_t enlightened_vmcs:1; + + uint64_t reserved:30; + }; +}; + +enum hv_translate_gva_result_code { + HV_TRANSLATE_GVA_SUCCESS = 0, + + /* Translation failures. */ + HV_TRANSLATE_GVA_PAGE_NOT_PRESENT = 1, + HV_TRANSLATE_GVA_PRIVILEGE_VIOLATION = 2, + HV_TRANSLATE_GVA_INVALIDE_PAGE_TABLE_FLAGS = 3, + + /* GPA access failures. 
*/ + HV_TRANSLATE_GVA_GPA_UNMAPPED = 4, + HV_TRANSLATE_GVA_GPA_NO_READ_ACCESS = 5, + HV_TRANSLATE_GVA_GPA_NO_WRITE_ACCESS = 6, + HV_TRANSLATE_GVA_GPA_ILLEGAL_OVERLAY_ACCESS = 7, + + /* + * Intercept for memory access by either + * - a higher VTL + * - a nested hypervisor (due to a violation of the nested page table) + */ + HV_TRANSLATE_GVA_INTERCEPT = 8, + + HV_TRANSLATE_GVA_GPA_UNACCEPTED = 9, +}; + +union hv_translate_gva_result { + uint64_t as_uint64; + struct { + uint32_t result_code; /* enum hv_translate_hva_result_code */ + uint32_t cache_type:8; + uint32_t overlay_page:1; + uint32_t reserved:23; + }; +}; + +typedef struct hv_input_translate_virtual_address { + uint64_t partition_id; + uint32_t vp_index; + uint32_t padding; + uint64_t control_flags; + uint64_t gva_page; +} hv_input_translate_virtual_address; + +typedef struct hv_output_translate_virtual_address { + union hv_translate_gva_result translation_result; + uint64_t gpa_page; +} hv_output_translate_virtual_address; + +typedef struct hv_register_x64_cpuid_result_parameters { + struct { + uint32_t eax; + uint32_t ecx; + uint8_t subleaf_specific; + uint8_t always_override; + uint16_t padding; + } input; + struct { + uint32_t eax; + uint32_t eax_mask; + uint32_t ebx; + uint32_t ebx_mask; + uint32_t ecx; + uint32_t ecx_mask; + uint32_t edx; + uint32_t edx_mask; + } result; +} hv_register_x64_cpuid_result_parameters; + +typedef struct hv_register_x64_msr_result_parameters { + uint32_t msr_index; + uint32_t access_type; + uint32_t action; /* enum hv_unimplemented_msr_action */ +} hv_register_x64_msr_result_parameters; + +union hv_register_intercept_result_parameters { + struct hv_register_x64_cpuid_result_parameters cpuid; + struct hv_register_x64_msr_result_parameters msr; +}; + +typedef struct hv_input_register_intercept_result { + uint64_t partition_id; + uint32_t vp_index; + uint32_t intercept_type; /* enum hv_intercept_type */ + union hv_register_intercept_result_parameters parameters; +} 
hv_input_register_intercept_result; + +#endif /* HW_HYPERV_HVHDK_H */ diff --git a/include/hw/hyperv/hvhdk_mini.h b/include/hw/hyperv/hvhdk_mini.h new file mode 100644 index 0000000000000..9c2f3cf5aeec8 --- /dev/null +++ b/include/hw/hyperv/hvhdk_mini.h @@ -0,0 +1,102 @@ +/* + * Type definitions for the mshv host interface. + * + * Copyright Microsoft, Corp. 2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_HYPERV_HVHDK_MINI_H +#define HW_HYPERV_HVHDK_MINI_H + +#define HVHVK_MINI_VERSION (25294) + +/* Each generic set contains 64 elements */ +#define HV_GENERIC_SET_SHIFT (6) +#define HV_GENERIC_SET_MASK (63) + +enum hv_generic_set_format { + HV_GENERIC_SET_SPARSE_4K, + HV_GENERIC_SET_ALL, +}; + +enum hv_partition_property_code { + /* Privilege properties */ + HV_PARTITION_PROPERTY_PRIVILEGE_FLAGS = 0x00010000, + HV_PARTITION_PROPERTY_SYNTHETIC_PROC_FEATURES = 0x00010001, + + /* Scheduling properties */ + HV_PARTITION_PROPERTY_SUSPEND = 0x00020000, + HV_PARTITION_PROPERTY_CPU_RESERVE = 0x00020001, + HV_PARTITION_PROPERTY_CPU_CAP = 0x00020002, + HV_PARTITION_PROPERTY_CPU_WEIGHT = 0x00020003, + HV_PARTITION_PROPERTY_CPU_GROUP_ID = 0x00020004, + + /* Time properties */ + HV_PARTITION_PROPERTY_TIME_FREEZE = 0x00030003, + HV_PARTITION_PROPERTY_REFERENCE_TIME = 0x00030005, + + /* Debugging properties */ + HV_PARTITION_PROPERTY_DEBUG_CHANNEL_ID = 0x00040000, + + /* Resource properties */ + HV_PARTITION_PROPERTY_VIRTUAL_TLB_PAGE_COUNT = 0x00050000, + HV_PARTITION_PROPERTY_VSM_CONFIG = 0x00050001, + HV_PARTITION_PROPERTY_ZERO_MEMORY_ON_RESET = 0x00050002, + HV_PARTITION_PROPERTY_PROCESSORS_PER_SOCKET = 0x00050003, + HV_PARTITION_PROPERTY_NESTED_TLB_SIZE = 0x00050004, + HV_PARTITION_PROPERTY_GPA_PAGE_ACCESS_TRACKING = 0x00050005, + HV_PARTITION_PROPERTY_VSM_PERMISSIONS_DIRTY_SINCE_LAST_QUERY = 0x00050006, + HV_PARTITION_PROPERTY_SGX_LAUNCH_CONTROL_CONFIG = 0x00050007, + HV_PARTITION_PROPERTY_DEFAULT_SGX_LAUNCH_CONTROL0 = 0x00050008, + 
HV_PARTITION_PROPERTY_DEFAULT_SGX_LAUNCH_CONTROL1 = 0x00050009, + HV_PARTITION_PROPERTY_DEFAULT_SGX_LAUNCH_CONTROL2 = 0x0005000a, + HV_PARTITION_PROPERTY_DEFAULT_SGX_LAUNCH_CONTROL3 = 0x0005000b, + HV_PARTITION_PROPERTY_ISOLATION_STATE = 0x0005000c, + HV_PARTITION_PROPERTY_ISOLATION_CONTROL = 0x0005000d, + HV_PARTITION_PROPERTY_ALLOCATION_ID = 0x0005000e, + HV_PARTITION_PROPERTY_MONITORING_ID = 0x0005000f, + HV_PARTITION_PROPERTY_IMPLEMENTED_PHYSICAL_ADDRESS_BITS = 0x00050010, + HV_PARTITION_PROPERTY_NON_ARCHITECTURAL_CORE_SHARING = 0x00050011, + HV_PARTITION_PROPERTY_HYPERCALL_DOORBELL_PAGE = 0x00050012, + HV_PARTITION_PROPERTY_ISOLATION_POLICY = 0x00050014, + HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION = 0x00050017, + HV_PARTITION_PROPERTY_SEV_VMGEXIT_OFFLOADS = 0x00050022, + + /* Compatibility properties */ + HV_PARTITION_PROPERTY_PROCESSOR_VENDOR = 0x00060000, + HV_PARTITION_PROPERTY_PROCESSOR_FEATURES_DEPRECATED = 0x00060001, + HV_PARTITION_PROPERTY_PROCESSOR_XSAVE_FEATURES = 0x00060002, + HV_PARTITION_PROPERTY_PROCESSOR_CL_FLUSH_SIZE = 0x00060003, + HV_PARTITION_PROPERTY_ENLIGHTENMENT_MODIFICATIONS = 0x00060004, + HV_PARTITION_PROPERTY_COMPATIBILITY_VERSION = 0x00060005, + HV_PARTITION_PROPERTY_PHYSICAL_ADDRESS_WIDTH = 0x00060006, + HV_PARTITION_PROPERTY_XSAVE_STATES = 0x00060007, + HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE = 0x00060008, + HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY = 0x00060009, + HV_PARTITION_PROPERTY_PROCESSOR_FEATURES0 = 0x0006000a, + HV_PARTITION_PROPERTY_PROCESSOR_FEATURES1 = 0x0006000b, + + /* Guest software properties */ + HV_PARTITION_PROPERTY_GUEST_OS_ID = 0x00070000, + + /* Nested virtualization properties */ + HV_PARTITION_PROPERTY_PROCESSOR_VIRTUALIZATION_FEATURES = 0x00080000, +}; + +/* HV Map GPA (Guest Physical Address) Flags */ +#define HV_MAP_GPA_PERMISSIONS_NONE 0x0 +#define HV_MAP_GPA_READABLE 0x1 +#define HV_MAP_GPA_WRITABLE 0x2 +#define HV_MAP_GPA_KERNEL_EXECUTABLE 0x4 +#define HV_MAP_GPA_USER_EXECUTABLE 0x8 
+#define HV_MAP_GPA_EXECUTABLE 0xC +#define HV_MAP_GPA_PERMISSIONS_MASK 0xF +#define HV_MAP_GPA_ADJUSTABLE 0x8000 +#define HV_MAP_GPA_NO_ACCESS 0x10000 +#define HV_MAP_GPA_NOT_CACHED 0x200000 +#define HV_MAP_GPA_LARGE_PAGE 0x80000000 + +#define HV_PFN_RNG_PAGEBITS 24 /* HV_SPA_PAGE_RANGE_ADDITIONAL_PAGES_BITS */ + +#endif /* HW_HYPERV_HVHDK_MINI_H */ diff --git a/include/hw/i386/apic.h b/include/hw/i386/apic.h index eb606d60760f4..6a0933f401cb7 100644 --- a/include/hw/i386/apic.h +++ b/include/hw/i386/apic.h @@ -1,28 +1,29 @@ #ifndef APIC_H #define APIC_H +typedef struct APICCommonState APICCommonState; /* apic.c */ void apic_set_max_apic_id(uint32_t max_apic_id); -int apic_accept_pic_intr(DeviceState *s); -void apic_deliver_pic_intr(DeviceState *s, int level); -void apic_deliver_nmi(DeviceState *d); -int apic_get_interrupt(DeviceState *s); -int cpu_set_apic_base(DeviceState *s, uint64_t val); -uint64_t cpu_get_apic_base(DeviceState *s); -bool cpu_is_apic_enabled(DeviceState *s); -void cpu_set_apic_tpr(DeviceState *s, uint8_t val); -uint8_t cpu_get_apic_tpr(DeviceState *s); -void apic_init_reset(DeviceState *s); -void apic_sipi(DeviceState *s); -void apic_poll_irq(DeviceState *d); -void apic_designate_bsp(DeviceState *d, bool bsp); -int apic_get_highest_priority_irr(DeviceState *dev); -int apic_msr_read(int index, uint64_t *val); -int apic_msr_write(int index, uint64_t val); -bool is_x2apic_mode(DeviceState *d); +int apic_accept_pic_intr(APICCommonState *s); +void apic_deliver_pic_intr(APICCommonState *s, int level); +void apic_deliver_nmi(APICCommonState *s); +int apic_get_interrupt(APICCommonState *s); +int cpu_set_apic_base(APICCommonState *s, uint64_t val); +uint64_t cpu_get_apic_base(APICCommonState *s); +bool cpu_is_apic_enabled(APICCommonState *s); +void cpu_set_apic_tpr(APICCommonState *s, uint8_t val); +uint8_t cpu_get_apic_tpr(APICCommonState *s); +void apic_init_reset(APICCommonState *s); +void apic_sipi(APICCommonState *s); +void 
apic_poll_irq(APICCommonState *s); +void apic_designate_bsp(APICCommonState *s, bool bsp); +int apic_get_highest_priority_irr(APICCommonState *s); +int apic_msr_read(APICCommonState *s, int index, uint64_t *val); +int apic_msr_write(APICCommonState *s, int index, uint64_t val); +bool is_x2apic_mode(APICCommonState *s); /* pc.c */ -DeviceState *cpu_get_current_apic(void); +APICCommonState *cpu_get_current_apic(void); #endif diff --git a/include/hw/i386/apic_internal.h b/include/hw/i386/apic_internal.h index 429278da61876..4a62fdceb4ea1 100644 --- a/include/hw/i386/apic_internal.h +++ b/include/hw/i386/apic_internal.h @@ -22,6 +22,7 @@ #define QEMU_APIC_INTERNAL_H #include "cpu.h" +#include "hw/i386/apic.h" #include "system/memory.h" #include "qemu/timer.h" #include "target/i386/cpu-qom.h" @@ -125,8 +126,6 @@ #define VAPIC_ENABLE_BIT 0 #define VAPIC_ENABLE_MASK (1 << VAPIC_ENABLE_BIT) -typedef struct APICCommonState APICCommonState; - #define TYPE_APIC_COMMON "apic-common" typedef struct APICCommonClass APICCommonClass; DECLARE_OBJ_CHECKERS(APICCommonState, APICCommonClass, @@ -203,8 +202,8 @@ typedef struct VAPICState { extern bool apic_report_tpr_access; bool apic_next_timer(APICCommonState *s, int64_t current_time); -void apic_enable_tpr_access_reporting(DeviceState *d, bool enable); -void apic_enable_vapic(DeviceState *d, hwaddr paddr); +void apic_enable_tpr_access_reporting(APICCommonState *s, bool enable); +void apic_enable_vapic(APICCommonState *s, hwaddr paddr); void vapic_report_tpr_access(DeviceState *dev, CPUState *cpu, target_ulong ip, TPRAccess access); diff --git a/include/hw/i386/intel_iommu.h b/include/hw/i386/intel_iommu.h index e95477e8554c1..47730ac3c761c 100644 --- a/include/hw/i386/intel_iommu.h +++ b/include/hw/i386/intel_iommu.h @@ -110,6 +110,7 @@ struct VTDAddressSpace { QLIST_ENTRY(VTDAddressSpace) next; /* Superset of notifier flags that this address space has */ IOMMUNotifierFlag notifier_flags; + IOMMUPRINotifier *pri_notifier; /* * 
@iova_tree traces mapped IOVA ranges. * diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h index 79b72c54dd3f8..e83157ab358f0 100644 --- a/include/hw/i386/pc.h +++ b/include/hw/i386/pc.h @@ -214,6 +214,9 @@ void pc_system_parse_ovmf_flash(uint8_t *flash_ptr, size_t flash_size); /* sgx.c */ void pc_machine_init_sgx_epc(PCMachineState *pcms); +extern GlobalProperty pc_compat_10_1[]; +extern const size_t pc_compat_10_1_len; + extern GlobalProperty pc_compat_10_0[]; extern const size_t pc_compat_10_0_len; diff --git a/include/hw/i386/x86-iommu.h b/include/hw/i386/x86-iommu.h index bfd21649d0838..e89f55a5c215c 100644 --- a/include/hw/i386/x86-iommu.h +++ b/include/hw/i386/x86-iommu.h @@ -64,6 +64,7 @@ struct X86IOMMUState { OnOffAuto intr_supported; /* Whether vIOMMU supports IR */ bool dt_supported; /* Whether vIOMMU supports DT */ bool pt_supported; /* Whether vIOMMU supports pass-through */ + bool dma_translation; /* Whether vIOMMU supports DMA translation */ QLIST_HEAD(, IEC_Notifier) iec_notifiers; /* IEC notify list */ }; diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index c18503869f9a8..38aa1961c5096 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -27,6 +27,7 @@ #include "hw/sysbus.h" #include "hw/intc/arm_gic_common.h" #include "qom/object.h" +#include "qemu/notify.h" /* * Maximum number of possible interrupts, determined by the GIC architecture. 
@@ -228,6 +229,7 @@ struct GICv3State { uint32_t *redist_region_count; /* redistributor count within each region */ uint32_t nb_redist_regions; /* number of redist regions */ + uint32_t first_cpu_idx; uint32_t num_cpu; uint32_t num_irq; uint32_t revision; @@ -271,6 +273,8 @@ struct GICv3State { GICv3CPUState *cpu; /* List of all ITSes connected to this GIC */ GPtrArray *itslist; + + NotifierWithReturn cpr_notifier; }; #define GICV3_BITMAP_ACCESSORS(BMP) \ diff --git a/include/hw/intc/loongarch_dintc.h b/include/hw/intc/loongarch_dintc.h new file mode 100644 index 0000000000000..0b0b5347b2c8f --- /dev/null +++ b/include/hw/intc/loongarch_dintc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch direct interrupt controller definitions + * + * Copyright (C) 2025 Loongson Technology Corporation Limited + */ + +#include "qom/object.h" +#include "hw/sysbus.h" +#include "hw/loongarch/virt.h" + + +#define NR_VECTORS 256 + +#define TYPE_LOONGARCH_DINTC "loongarch_dintc" +OBJECT_DECLARE_TYPE(LoongArchDINTCState, LoongArchDINTCClass, LOONGARCH_DINTC) + +typedef struct DINTCCore { + CPUState *cpu; + qemu_irq parent_irq; + uint64_t arch_id; +} DINTCCore; + +struct LoongArchDINTCState { + SysBusDevice parent_obj; + MemoryRegion dintc_mmio; + DINTCCore *cpu; + uint32_t num_cpu; +}; + +struct LoongArchDINTCClass { + SysBusDeviceClass parent_class; + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; +}; diff --git a/include/hw/intc/loongarch_pic_common.h b/include/hw/intc/loongarch_pic_common.h index f774c975d4055..675ba96e64eca 100644 --- a/include/hw/intc/loongarch_pic_common.h +++ b/include/hw/intc/loongarch_pic_common.h @@ -7,7 +7,7 @@ #ifndef HW_LOONGARCH_PIC_COMMON_H #define HW_LOONGARCH_PIC_COMMON_H -#include "hw/pci-host/ls7a.h" +#include "hw/loongarch/virt.h" #include "hw/sysbus.h" #define PCH_PIC_INT_ID 0x00 diff --git a/include/hw/intc/riscv_aclint.h b/include/hw/intc/riscv_aclint.h index 693415eb6defe..4b7406eec005a 
100644 --- a/include/hw/intc/riscv_aclint.h +++ b/include/hw/intc/riscv_aclint.h @@ -80,4 +80,8 @@ enum { RISCV_ACLINT_SWI_SIZE = 0x4000 }; +#define VMSTATE_TIMER_PTR_VARRAY(_f, _s, _f_n) \ +VMSTATE_VARRAY_OF_POINTER_UINT32(_f, _s, _f_n, 0, vmstate_info_timer, \ + QEMUTimer *) + #endif diff --git a/include/hw/irq.h b/include/hw/irq.h index b3012237acde9..291fdd67df465 100644 --- a/include/hw/irq.h +++ b/include/hw/irq.h @@ -36,11 +36,32 @@ static inline void qemu_irq_pulse(qemu_irq irq) /* * Init a single IRQ. The irq is assigned with a handler, an opaque data - * and the interrupt number. + * and the interrupt number. The caller must free this with qemu_free_irq(). + * If you are using this inside a device's init or realize method, then + * qemu_init_irq_child() is probably a better choice to avoid the need + * to manually clean up the IRQ. */ void qemu_init_irq(IRQState *irq, qemu_irq_handler handler, void *opaque, int n); +/** + * qemu_init_irq_child: Initialize IRQ and make it a QOM child + * @parent: QOM object which owns this IRQ + * @propname: child property name + * @irq: pointer to IRQState to initialize + * @handler: handler function for incoming interrupts + * @opaque: opaque data to pass to @handler + * @n: interrupt number to pass to @handler + * + * Init a single IRQ and make the IRQ object a child of @parent with + * the child-property name @propname. The IRQ object will thus be + * automatically freed when @parent is destroyed. + */ +void qemu_init_irq_child(Object *parent, const char *propname, + IRQState *irq, qemu_irq_handler handler, + void *opaque, int n); + + /** * qemu_init_irqs: Initialize an array of IRQs. 
* diff --git a/include/hw/loongarch/virt.h b/include/hw/loongarch/virt.h index 602feab0f0dc7..27b1755802092 100644 --- a/include/hw/loongarch/virt.h +++ b/include/hw/loongarch/virt.h @@ -13,28 +13,84 @@ #include "hw/block/flash.h" #include "hw/loongarch/boot.h" +/* IOCSR region */ +#define VERSION_REG 0x0 +#define FEATURE_REG 0x8 +#define IOCSRF_TEMP 0 +#define IOCSRF_NODECNT 1 +#define IOCSRF_MSI 2 +#define IOCSRF_EXTIOI 3 +#define IOCSRF_CSRIPI 4 +#define IOCSRF_FREQCSR 5 +#define IOCSRF_FREQSCALE 6 +#define IOCSRF_DVFSV1 7 +#define IOCSRF_GMOD 9 +#define IOCSRF_VM 11 +#define IOCSRF_DMSI 15 +#define VENDOR_REG 0x10 +#define CPUNAME_REG 0x20 +#define MISC_FUNC_REG 0x420 +#define IOCSRM_EXTIOI_EN 48 +#define IOCSRM_EXTIOI_INT_ENCODE 49 +#define IOCSRM_DMSI_EN 51 + #define LOONGARCH_MAX_CPUS 256 -#define VIRT_FWCFG_BASE 0x1e020000UL +/* MMIO memory region */ +#define VIRT_PCH_REG_BASE 0x10000000UL +#define VIRT_PCH_REG_SIZE 0x400 +#define VIRT_RTC_REG_BASE 0x100d0100UL +#define VIRT_RTC_LEN 0x100 +#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000UL +#define VIRT_PLATFORM_BUS_SIZE 0x02000000 +#define VIRT_PCI_IO_BASE 0x18004000UL +#define VIRT_PCI_IO_OFFSET 0x4000 +#define VIRT_PCI_IO_SIZE 0xC000 #define VIRT_BIOS_BASE 0x1c000000UL -#define VIRT_BIOS_SIZE (16 * MiB) +#define VIRT_BIOS_SIZE 0x01000000UL #define VIRT_FLASH_SECTOR_SIZE (256 * KiB) #define VIRT_FLASH0_BASE VIRT_BIOS_BASE #define VIRT_FLASH0_SIZE VIRT_BIOS_SIZE #define VIRT_FLASH1_BASE 0x1d000000UL -#define VIRT_FLASH1_SIZE (16 * MiB) +#define VIRT_FLASH1_SIZE 0x01000000UL +#define VIRT_FWCFG_BASE 0x1e020000UL +#define VIRT_UART_BASE 0x1fe001e0UL +#define VIRT_UART_SIZE 0x100 +#define VIRT_PCI_CFG_BASE 0x20000000UL +#define VIRT_PCI_CFG_SIZE 0x08000000UL +#define VIRT_DINTC_BASE 0x2FE00000UL +#define VIRT_DINTC_SIZE 0x00100000UL +#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL +#define VIRT_PCH_MSI_SIZE 0x8 +#define VIRT_PCI_MEM_BASE 0x40000000UL +#define VIRT_PCI_MEM_SIZE 0x40000000UL #define VIRT_LOWMEM_BASE 
0 #define VIRT_LOWMEM_SIZE 0x10000000 +#define FDT_BASE 0x100000 #define VIRT_HIGHMEM_BASE 0x80000000 #define VIRT_GED_EVT_ADDR 0x100e0000 -#define VIRT_GED_MEM_ADDR (VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN) -#define VIRT_GED_REG_ADDR (VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN) -#define VIRT_GED_CPUHP_ADDR (VIRT_GED_REG_ADDR + ACPI_GED_REG_COUNT) +#define VIRT_GED_MEM_ADDR QEMU_ALIGN_UP(VIRT_GED_EVT_ADDR + ACPI_GED_EVT_SEL_LEN, 4) +#define VIRT_GED_REG_ADDR QEMU_ALIGN_UP(VIRT_GED_MEM_ADDR + MEMORY_HOTPLUG_IO_LEN, 4) +#define VIRT_GED_CPUHP_ADDR QEMU_ALIGN_UP(VIRT_GED_REG_ADDR + ACPI_GED_REG_COUNT, 4) -#define COMMAND_LINE_SIZE 512 +/* + * GSI_BASE is hard-coded with 64 in linux kernel, else kernel fails to boot + * 0 - 15 GSI for ISA devices even if there is no ISA devices + * 16 - 63 GSI for CPU devices such as timers/perf monitor etc + * 64 - GSI for external devices + */ +#define VIRT_PCH_PIC_IRQ_NUM 32 +#define VIRT_GSI_BASE 64 +#define VIRT_DEVICE_IRQS 16 +#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2) +#define VIRT_UART_COUNT 4 +#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 6) +#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 7) +#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 8) +#define VIRT_PLATFORM_BUS_NUM_IRQS 2 -#define FDT_BASE 0x100000 +#define COMMAND_LINE_SIZE 512 struct LoongArchVirtMachineState { /*< private >*/ @@ -50,6 +106,7 @@ struct LoongArchVirtMachineState { Notifier powerdown_notifier; OnOffAuto acpi; OnOffAuto veiointc; + OnOffAuto dmsi; char *oem_id; char *oem_table_id; DeviceState *acpi_ged; @@ -65,6 +122,9 @@ struct LoongArchVirtMachineState { DeviceState *extioi; struct memmap_entry *memmap_table; unsigned int memmap_entries; + uint64_t misc_feature; + uint64_t misc_status; + DeviceState *dintc; }; #define TYPE_LOONGARCH_VIRT_MACHINE MACHINE_TYPE_NAME("virt") @@ -72,6 +132,15 @@ OBJECT_DECLARE_SIMPLE_TYPE(LoongArchVirtMachineState, LOONGARCH_VIRT_MACHINE) void virt_acpi_setup(LoongArchVirtMachineState *lvms); void virt_fdt_setup(LoongArchVirtMachineState 
*lvms); +static inline bool virt_has_dmsi(LoongArchVirtMachineState *lvms) +{ + if (!(lvms->misc_feature & BIT(IOCSRF_DMSI))) { + return false; + } + + return true; +} + static inline bool virt_is_veiointc_enabled(LoongArchVirtMachineState *lvms) { if (lvms->veiointc == ON_OFF_AUTO_OFF) { diff --git a/include/hw/misc/aspeed_sbc.h b/include/hw/misc/aspeed_sbc.h index 405e6782b97a0..7d640a022e4d2 100644 --- a/include/hw/misc/aspeed_sbc.h +++ b/include/hw/misc/aspeed_sbc.h @@ -10,9 +10,11 @@ #define ASPEED_SBC_H #include "hw/sysbus.h" +#include "hw/nvram/aspeed_otp.h" #define TYPE_ASPEED_SBC "aspeed.sbc" #define TYPE_ASPEED_AST2600_SBC TYPE_ASPEED_SBC "-ast2600" +#define TYPE_ASPEED_AST10X0_SBC TYPE_ASPEED_SBC "-ast10x0" OBJECT_DECLARE_TYPE(AspeedSBCState, AspeedSBCClass, ASPEED_SBC) #define ASPEED_SBC_NR_REGS (0x93c >> 2) @@ -36,10 +38,14 @@ struct AspeedSBCState { MemoryRegion iomem; uint32_t regs[ASPEED_SBC_NR_REGS]; + + AspeedOTPState otp; }; struct AspeedSBCClass { SysBusDeviceClass parent_class; + + bool has_otp; }; #endif /* ASPEED_SBC_H */ diff --git a/include/hw/misc/xlnx-versal-crl.h b/include/hw/misc/xlnx-versal-crl.h index dba6d3585d12b..49ed500acde7b 100644 --- a/include/hw/misc/xlnx-versal-crl.h +++ b/include/hw/misc/xlnx-versal-crl.h @@ -2,6 +2,7 @@ * QEMU model of the Clock-Reset-LPD (CRL). * * Copyright (c) 2022 Xilinx Inc. + * Copyright (c) 2025 Advanced Micro Devices, Inc. * SPDX-License-Identifier: GPL-2.0-or-later * * Written by Edgar E. 
Iglesias @@ -12,9 +13,16 @@ #include "hw/sysbus.h" #include "hw/register.h" #include "target/arm/cpu-qom.h" +#include "hw/arm/xlnx-versal-version.h" +#define TYPE_XLNX_VERSAL_CRL_BASE "xlnx-versal-crl-base" #define TYPE_XLNX_VERSAL_CRL "xlnx-versal-crl" +#define TYPE_XLNX_VERSAL2_CRL "xlnx-versal2-crl" + +OBJECT_DECLARE_TYPE(XlnxVersalCRLBase, XlnxVersalCRLBaseClass, + XLNX_VERSAL_CRL_BASE) OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersalCRL, XLNX_VERSAL_CRL) +OBJECT_DECLARE_SIMPLE_TYPE(XlnxVersal2CRL, XLNX_VERSAL2_CRL) REG32(ERR_CTRL, 0x0) FIELD(ERR_CTRL, SLVERR_ENABLE, 0, 1) @@ -214,22 +222,369 @@ REG32(PSM_RST_MODE, 0x370) #define CRL_R_MAX (R_PSM_RST_MODE + 1) -#define RPU_MAX_CPU 2 +REG32(VERSAL2_ERR_CTRL, 0x0) +REG32(VERSAL2_WPROT, 0x1c) + FIELD(VERSAL2_WPROT, ACTIVE, 0, 1) +REG32(VERSAL2_RPLL_CTRL, 0x40) + FIELD(VERSAL2_RPLL_CTRL, POST_SRC, 24, 3) + FIELD(VERSAL2_RPLL_CTRL, PRE_SRC, 20, 3) + FIELD(VERSAL2_RPLL_CTRL, CLKOUTDIV, 16, 2) + FIELD(VERSAL2_RPLL_CTRL, FBDIV, 8, 8) + FIELD(VERSAL2_RPLL_CTRL, BYPASS, 3, 1) + FIELD(VERSAL2_RPLL_CTRL, RESET, 0, 1) +REG32(VERSAL2_RPLL_CFG, 0x44) + FIELD(VERSAL2_RPLL_CFG, LOCK_DLY, 25, 7) + FIELD(VERSAL2_RPLL_CFG, LOCK_CNT, 13, 10) + FIELD(VERSAL2_RPLL_CFG, LFHF, 10, 2) + FIELD(VERSAL2_RPLL_CFG, CP, 5, 4) + FIELD(VERSAL2_RPLL_CFG, RES, 0, 4) +REG32(VERSAL2_FLXPLL_CTRL, 0x50) + FIELD(VERSAL2_FLXPLL_CTRL, POST_SRC, 24, 3) + FIELD(VERSAL2_FLXPLL_CTRL, PRE_SRC, 20, 3) + FIELD(VERSAL2_FLXPLL_CTRL, CLKOUTDIV, 16, 2) + FIELD(VERSAL2_FLXPLL_CTRL, FBDIV, 8, 8) + FIELD(VERSAL2_FLXPLL_CTRL, BYPASS, 3, 1) + FIELD(VERSAL2_FLXPLL_CTRL, RESET, 0, 1) +REG32(VERSAL2_FLXPLL_CFG, 0x54) + FIELD(VERSAL2_FLXPLL_CFG, LOCK_DLY, 25, 7) + FIELD(VERSAL2_FLXPLL_CFG, LOCK_CNT, 13, 10) + FIELD(VERSAL2_FLXPLL_CFG, LFHF, 10, 2) + FIELD(VERSAL2_FLXPLL_CFG, CP, 5, 4) + FIELD(VERSAL2_FLXPLL_CFG, RES, 0, 4) +REG32(VERSAL2_PLL_STATUS, 0x60) + FIELD(VERSAL2_PLL_STATUS, FLXPLL_STABLE, 3, 1) + FIELD(VERSAL2_PLL_STATUS, RPLL_STABLE, 2, 1) + FIELD(VERSAL2_PLL_STATUS, 
FLXPLL_LOCK, 1, 1) + FIELD(VERSAL2_PLL_STATUS, RPLL_LOCK, 0, 1) +REG32(VERSAL2_RPLL_TO_XPD_CTRL, 0x100) + FIELD(VERSAL2_RPLL_TO_XPD_CTRL, DIVISOR0, 8, 10) +REG32(VERSAL2_LPX_TOP_SWITCH_CTRL, 0x104) + FIELD(VERSAL2_LPX_TOP_SWITCH_CTRL, CLKACT_ADMA, 26, 1) + FIELD(VERSAL2_LPX_TOP_SWITCH_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_LPX_TOP_SWITCH_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_LPX_TOP_SWITCH_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_LPX_LSBUS_CLK_CTRL, 0x108) + FIELD(VERSAL2_LPX_LSBUS_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_LPX_LSBUS_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_LPX_LSBUS_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_RPU_CLK_CTRL, 0x10c) + FIELD(VERSAL2_RPU_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_RPU_CLK_CTRL, CLKACT_CLUSTERE, 24, 1) + FIELD(VERSAL2_RPU_CLK_CTRL, CLKACT_CLUSTERD, 23, 1) + FIELD(VERSAL2_RPU_CLK_CTRL, CLKACT_CLUSTERC, 22, 1) + FIELD(VERSAL2_RPU_CLK_CTRL, CLKACT_CLUSTERB, 21, 1) + FIELD(VERSAL2_RPU_CLK_CTRL, CLKACT_CLUSTERA, 20, 1) + FIELD(VERSAL2_RPU_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_RPU_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_OCM_CLK_CTRL, 0x120) + FIELD(VERSAL2_OCM_CLK_CTRL, CLKACT_OCM3, 24, 1) + FIELD(VERSAL2_OCM_CLK_CTRL, CLKACT_OCM2, 23, 1) + FIELD(VERSAL2_OCM_CLK_CTRL, CLKACT_OCM1, 22, 1) + FIELD(VERSAL2_OCM_CLK_CTRL, CLKACT_OCM0, 21, 1) +REG32(VERSAL2_IOU_SWITCH_CLK_CTRL, 0x124) + FIELD(VERSAL2_IOU_SWITCH_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_IOU_SWITCH_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_IOU_SWITCH_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_GEM0_REF_CTRL, 0x128) + FIELD(VERSAL2_GEM0_REF_CTRL, CLKACT_RX, 27, 1) + FIELD(VERSAL2_GEM0_REF_CTRL, CLKACT_TX, 26, 1) + FIELD(VERSAL2_GEM0_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_GEM0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_GEM0_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_GEM1_REF_CTRL, 0x12c) + FIELD(VERSAL2_GEM1_REF_CTRL, CLKACT_RX, 27, 1) + FIELD(VERSAL2_GEM1_REF_CTRL, CLKACT_TX, 26, 1) + FIELD(VERSAL2_GEM1_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_GEM1_REF_CTRL, DIVISOR0, 8, 10) + 
FIELD(VERSAL2_GEM1_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_GEM_TSU_REF_CLK_CTRL, 0x130) + FIELD(VERSAL2_GEM_TSU_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_GEM_TSU_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_GEM_TSU_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_USB0_BUS_REF_CLK_CTRL, 0x134) + FIELD(VERSAL2_USB0_BUS_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_USB0_BUS_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_USB0_BUS_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_USB1_BUS_REF_CLK_CTRL, 0x138) + FIELD(VERSAL2_USB1_BUS_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_USB1_BUS_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_USB1_BUS_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_UART0_REF_CLK_CTRL, 0x13c) + FIELD(VERSAL2_UART0_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_UART0_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_UART0_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_UART1_REF_CLK_CTRL, 0x140) + FIELD(VERSAL2_UART1_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_UART1_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_UART1_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_SPI0_REF_CLK_CTRL, 0x144) + FIELD(VERSAL2_SPI0_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_SPI0_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_SPI0_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_SPI1_REF_CLK_CTRL, 0x148) + FIELD(VERSAL2_SPI1_REF_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_SPI1_REF_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_SPI1_REF_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_CAN0_REF_2X_CTRL, 0x14c) + FIELD(VERSAL2_CAN0_REF_2X_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_CAN0_REF_2X_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_CAN0_REF_2X_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_CAN1_REF_2X_CTRL, 0x150) + FIELD(VERSAL2_CAN1_REF_2X_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_CAN1_REF_2X_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_CAN1_REF_2X_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_CAN2_REF_2X_CTRL, 0x154) + FIELD(VERSAL2_CAN2_REF_2X_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_CAN2_REF_2X_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_CAN2_REF_2X_CTRL, SRCSEL, 0, 
3) +REG32(VERSAL2_CAN3_REF_2X_CTRL, 0x158) + FIELD(VERSAL2_CAN3_REF_2X_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_CAN3_REF_2X_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_CAN3_REF_2X_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C0_REF_CTRL, 0x15c) + FIELD(VERSAL2_I3C0_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C0_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C0_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C1_REF_CTRL, 0x160) + FIELD(VERSAL2_I3C1_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C1_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C1_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C2_REF_CTRL, 0x164) + FIELD(VERSAL2_I3C2_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C2_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C2_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C3_REF_CTRL, 0x168) + FIELD(VERSAL2_I3C3_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C3_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C3_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C4_REF_CTRL, 0x16c) + FIELD(VERSAL2_I3C4_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C4_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C4_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C5_REF_CTRL, 0x170) + FIELD(VERSAL2_I3C5_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C5_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C5_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C6_REF_CTRL, 0x174) + FIELD(VERSAL2_I3C6_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C6_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C6_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_I3C7_REF_CTRL, 0x178) + FIELD(VERSAL2_I3C7_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_I3C7_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_I3C7_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_DBG_LPX_CTRL, 0x17c) + FIELD(VERSAL2_DBG_LPX_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_DBG_LPX_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_DBG_LPX_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_TIMESTAMP_REF_CTRL, 0x180) + FIELD(VERSAL2_TIMESTAMP_REF_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_TIMESTAMP_REF_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_TIMESTAMP_REF_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_SAFETY_CHK, 
0x184) +REG32(VERSAL2_ASU_CLK_CTRL, 0x188) + FIELD(VERSAL2_ASU_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_ASU_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_DBG_TSTMP_CLK_CTRL, 0x18c) + FIELD(VERSAL2_DBG_TSTMP_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_DBG_TSTMP_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_DBG_TSTMP_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_MMI_TOPSW_CLK_CTRL, 0x190) + FIELD(VERSAL2_MMI_TOPSW_CLK_CTRL, CLKACT, 25, 1) + FIELD(VERSAL2_MMI_TOPSW_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_MMI_TOPSW_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_WWDT_PLL_CLK_CTRL, 0x194) + FIELD(VERSAL2_WWDT_PLL_CLK_CTRL, DIVISOR0, 8, 10) + FIELD(VERSAL2_WWDT_PLL_CLK_CTRL, SRCSEL, 0, 3) +REG32(VERSAL2_RCLK_CTRL, 0x1a0) + FIELD(VERSAL2_RCLK_CTRL, CLKACT, 8, 6) + FIELD(VERSAL2_RCLK_CTRL, SELECT, 0, 6) +REG32(VERSAL2_RST_RPU_A, 0x310) + FIELD(VERSAL2_RST_RPU_A, TOPRESET, 16, 1) + FIELD(VERSAL2_RST_RPU_A, CORE1_POR, 9, 1) + FIELD(VERSAL2_RST_RPU_A, CORE0_POR, 8, 1) + FIELD(VERSAL2_RST_RPU_A, CORE1_RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_A, CORE0_RESET, 0, 1) +REG32(VERSAL2_RST_RPU_B, 0x314) + FIELD(VERSAL2_RST_RPU_B, TOPRESET, 16, 1) + FIELD(VERSAL2_RST_RPU_B, CORE1_POR, 9, 1) + FIELD(VERSAL2_RST_RPU_B, CORE0_POR, 8, 1) + FIELD(VERSAL2_RST_RPU_B, CORE1_RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_B, CORE0_RESET, 0, 1) +REG32(VERSAL2_RST_RPU_C, 0x318) + FIELD(VERSAL2_RST_RPU_C, TOPRESET, 16, 1) + FIELD(VERSAL2_RST_RPU_C, CORE1_POR, 9, 1) + FIELD(VERSAL2_RST_RPU_C, CORE0_POR, 8, 1) + FIELD(VERSAL2_RST_RPU_C, CORE1_RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_C, CORE0_RESET, 0, 1) +REG32(VERSAL2_RST_RPU_D, 0x31c) + FIELD(VERSAL2_RST_RPU_D, TOPRESET, 16, 1) + FIELD(VERSAL2_RST_RPU_D, CORE1_POR, 9, 1) + FIELD(VERSAL2_RST_RPU_D, CORE0_POR, 8, 1) + FIELD(VERSAL2_RST_RPU_D, CORE1_RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_D, CORE0_RESET, 0, 1) +REG32(VERSAL2_RST_RPU_E, 0x320) + FIELD(VERSAL2_RST_RPU_E, TOPRESET, 16, 1) + FIELD(VERSAL2_RST_RPU_E, CORE1_POR, 9, 1) + FIELD(VERSAL2_RST_RPU_E, CORE0_POR, 8, 1) + 
FIELD(VERSAL2_RST_RPU_E, CORE1_RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_E, CORE0_RESET, 0, 1) +REG32(VERSAL2_RST_RPU_GD_0, 0x324) + FIELD(VERSAL2_RST_RPU_GD_0, RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_GD_0, TOP_RESET, 0, 1) +REG32(VERSAL2_RST_RPU_GD_1, 0x328) + FIELD(VERSAL2_RST_RPU_GD_1, RESET, 1, 1) + FIELD(VERSAL2_RST_RPU_GD_1, TOP_RESET, 0, 1) +REG32(VERSAL2_RST_ASU_GD, 0x32c) + FIELD(VERSAL2_RST_ASU_GD, RESET, 1, 1) + FIELD(VERSAL2_RST_ASU_GD, TOP_RESET, 0, 1) +REG32(VERSAL2_RST_ADMA, 0x334) + FIELD(VERSAL2_RST_ADMA, RESET, 0, 1) +REG32(VERSAL2_RST_SDMA, 0x338) + FIELD(VERSAL2_RST_SDMA, RESET, 0, 1) +REG32(VERSAL2_RST_GEM0, 0x33c) + FIELD(VERSAL2_RST_GEM0, RESET, 0, 1) +REG32(VERSAL2_RST_GEM1, 0x340) + FIELD(VERSAL2_RST_GEM1, RESET, 0, 1) +REG32(VERSAL2_RST_USB0, 0x348) + FIELD(VERSAL2_RST_USB0, RESET, 0, 1) +REG32(VERSAL2_RST_USB1, 0x34c) + FIELD(VERSAL2_RST_USB1, RESET, 0, 1) +REG32(VERSAL2_RST_UART0, 0x350) + FIELD(VERSAL2_RST_UART0, RESET, 0, 1) +REG32(VERSAL2_RST_UART1, 0x354) + FIELD(VERSAL2_RST_UART1, RESET, 0, 1) +REG32(VERSAL2_RST_SPI0, 0x358) + FIELD(VERSAL2_RST_SPI0, RESET, 0, 1) +REG32(VERSAL2_RST_SPI1, 0x35c) + FIELD(VERSAL2_RST_SPI1, RESET, 0, 1) +REG32(VERSAL2_RST_CAN0, 0x360) + FIELD(VERSAL2_RST_CAN0, RESET, 0, 1) +REG32(VERSAL2_RST_CAN1, 0x364) + FIELD(VERSAL2_RST_CAN1, RESET, 0, 1) +REG32(VERSAL2_RST_CAN2, 0x368) + FIELD(VERSAL2_RST_CAN2, RESET, 0, 1) +REG32(VERSAL2_RST_CAN3, 0x36c) + FIELD(VERSAL2_RST_CAN3, RESET, 0, 1) +REG32(VERSAL2_RST_I3C0, 0x374) + FIELD(VERSAL2_RST_I3C0, RESET, 0, 1) +REG32(VERSAL2_RST_I3C1, 0x378) + FIELD(VERSAL2_RST_I3C1, RESET, 0, 1) +REG32(VERSAL2_RST_I3C2, 0x37c) + FIELD(VERSAL2_RST_I3C2, RESET, 0, 1) +REG32(VERSAL2_RST_I3C3, 0x380) + FIELD(VERSAL2_RST_I3C3, RESET, 0, 1) +REG32(VERSAL2_RST_I3C4, 0x384) + FIELD(VERSAL2_RST_I3C4, RESET, 0, 1) +REG32(VERSAL2_RST_I3C5, 0x388) + FIELD(VERSAL2_RST_I3C5, RESET, 0, 1) +REG32(VERSAL2_RST_I3C6, 0x38c) + FIELD(VERSAL2_RST_I3C6, RESET, 0, 1) +REG32(VERSAL2_RST_I3C7, 0x390) + 
FIELD(VERSAL2_RST_I3C7, RESET, 0, 1) +REG32(VERSAL2_RST_DBG_LPX, 0x398) + FIELD(VERSAL2_RST_DBG_LPX, RESET_HSDP, 1, 1) + FIELD(VERSAL2_RST_DBG_LPX, RESET, 0, 1) +REG32(VERSAL2_RST_GPIO, 0x39c) + FIELD(VERSAL2_RST_GPIO, RESET, 0, 1) +REG32(VERSAL2_RST_TTC, 0x3a0) + FIELD(VERSAL2_RST_TTC, TTC7_RESET, 7, 1) + FIELD(VERSAL2_RST_TTC, TTC6_RESET, 6, 1) + FIELD(VERSAL2_RST_TTC, TTC5_RESET, 5, 1) + FIELD(VERSAL2_RST_TTC, TTC4_RESET, 4, 1) + FIELD(VERSAL2_RST_TTC, TTC3_RESET, 3, 1) + FIELD(VERSAL2_RST_TTC, TTC2_RESET, 2, 1) + FIELD(VERSAL2_RST_TTC, TTC1_RESET, 1, 1) + FIELD(VERSAL2_RST_TTC, TTC0_RESET, 0, 1) +REG32(VERSAL2_RST_TIMESTAMP, 0x3a4) + FIELD(VERSAL2_RST_TIMESTAMP, RESET, 0, 1) +REG32(VERSAL2_RST_SWDT0, 0x3a8) + FIELD(VERSAL2_RST_SWDT0, RESET, 0, 1) +REG32(VERSAL2_RST_SWDT1, 0x3ac) + FIELD(VERSAL2_RST_SWDT1, RESET, 0, 1) +REG32(VERSAL2_RST_SWDT2, 0x3b0) + FIELD(VERSAL2_RST_SWDT2, RESET, 0, 1) +REG32(VERSAL2_RST_SWDT3, 0x3b4) + FIELD(VERSAL2_RST_SWDT3, RESET, 0, 1) +REG32(VERSAL2_RST_SWDT4, 0x3b8) + FIELD(VERSAL2_RST_SWDT4, RESET, 0, 1) +REG32(VERSAL2_RST_IPI, 0x3bc) + FIELD(VERSAL2_RST_IPI, RESET, 0, 1) +REG32(VERSAL2_RST_SYSMON, 0x3c0) + FIELD(VERSAL2_RST_SYSMON, CFG_RST, 0, 1) +REG32(VERSAL2_ASU_MB_RST_MODE, 0x3c4) + FIELD(VERSAL2_ASU_MB_RST_MODE, WAKEUP, 2, 1) + FIELD(VERSAL2_ASU_MB_RST_MODE, RST_MODE, 0, 2) +REG32(VERSAL2_FPX_TOPSW_MUX_CTRL, 0x3c8) + FIELD(VERSAL2_FPX_TOPSW_MUX_CTRL, SELECT, 0, 1) +REG32(VERSAL2_RST_FPX, 0x3d0) + FIELD(VERSAL2_RST_FPX, SRST, 1, 1) + FIELD(VERSAL2_RST_FPX, POR, 0, 1) +REG32(VERSAL2_RST_MMI, 0x3d4) + FIELD(VERSAL2_RST_MMI, POR, 0, 1) +REG32(VERSAL2_RST_OCM, 0x3d8) + FIELD(VERSAL2_RST_OCM, RESET_OCM3, 3, 1) + FIELD(VERSAL2_RST_OCM, RESET_OCM2, 2, 1) + FIELD(VERSAL2_RST_OCM, RESET_OCM1, 1, 1) + FIELD(VERSAL2_RST_OCM, RESET_OCM0, 0, 1) -struct XlnxVersalCRL { +#define VERSAL2_CRL_R_MAX (R_VERSAL2_RST_OCM + 1) + +struct XlnxVersalCRLBase { SysBusDevice parent_obj; + + uint32_t *regs; +}; + +struct XlnxVersalCRLBaseClass { + 
SysBusDeviceClass parent_class; + + DeviceState ** (*decode_periph_rst)(XlnxVersalCRLBase *s, hwaddr, size_t *); +}; + +struct XlnxVersalCRL { + XlnxVersalCRLBase parent_obj; qemu_irq irq; struct { - ARMCPU *cpu_r5[RPU_MAX_CPU]; + DeviceState *rpu[2]; DeviceState *adma[8]; DeviceState *uart[2]; DeviceState *gem[2]; - DeviceState *usb; + DeviceState *usb[1]; } cfg; - RegisterInfoArray *reg_array; uint32_t regs[CRL_R_MAX]; RegisterInfo regs_info[CRL_R_MAX]; }; + +struct XlnxVersal2CRL { + XlnxVersalCRLBase parent_obj; + + struct { + DeviceState *rpu[10]; + DeviceState *adma[8]; + DeviceState *sdma[8]; + DeviceState *uart[2]; + DeviceState *gem[2]; + DeviceState *usb[2]; + DeviceState *can[4]; + } cfg; + + RegisterInfo regs_info[VERSAL2_CRL_R_MAX]; + uint32_t regs[VERSAL2_CRL_R_MAX]; +}; + +static inline const char *xlnx_versal_crl_class_name(VersalVersion ver) +{ + switch (ver) { + case VERSAL_VER_VERSAL: + return TYPE_XLNX_VERSAL_CRL; + case VERSAL_VER_VERSAL2: + return TYPE_XLNX_VERSAL2_CRL; + default: + g_assert_not_reached(); + } +} + #endif diff --git a/include/hw/misc/xlnx-versal-xramc.h b/include/hw/misc/xlnx-versal-xramc.h index d3d1862676fc1..35e4e8b91ddf0 100644 --- a/include/hw/misc/xlnx-versal-xramc.h +++ b/include/hw/misc/xlnx-versal-xramc.h @@ -90,7 +90,6 @@ typedef struct XlnxXramCtrl { unsigned int encoded_size; } cfg; - RegisterInfoArray *reg_array; uint32_t regs[XRAM_CTRL_R_MAX]; RegisterInfo regs_info[XRAM_CTRL_R_MAX]; } XlnxXramCtrl; diff --git a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h index c3bf3c1583bb2..fbfe34aa7e5ea 100644 --- a/include/hw/misc/xlnx-zynqmp-apu-ctrl.h +++ b/include/hw/misc/xlnx-zynqmp-apu-ctrl.h @@ -85,7 +85,6 @@ struct XlnxZynqMPAPUCtrl { uint8_t cpu_pwrdwn_req; uint8_t cpu_in_wfi; - RegisterInfoArray *reg_array; uint32_t regs[APU_R_MAX]; RegisterInfo regs_info[APU_R_MAX]; }; diff --git a/include/hw/misc/xlnx-zynqmp-crf.h b/include/hw/misc/xlnx-zynqmp-crf.h index 
02ef0bdeeee56..c746ae10397a8 100644 --- a/include/hw/misc/xlnx-zynqmp-crf.h +++ b/include/hw/misc/xlnx-zynqmp-crf.h @@ -203,7 +203,6 @@ struct XlnxZynqMPCRF { MemoryRegion iomem; qemu_irq irq_ir; - RegisterInfoArray *reg_array; uint32_t regs[CRF_R_MAX]; RegisterInfo regs_info[CRF_R_MAX]; }; diff --git a/include/hw/net/xlnx-versal-canfd.h b/include/hw/net/xlnx-versal-canfd.h index ad3104dd13f19..396f90d6dc100 100644 --- a/include/hw/net/xlnx-versal-canfd.h +++ b/include/hw/net/xlnx-versal-canfd.h @@ -54,14 +54,6 @@ typedef struct XlnxVersalCANFDState { qemu_irq irq_addr_err; RegisterInfo reg_info[XLNX_VERSAL_CANFD_R_MAX]; - RegisterAccessInfo *tx_regs; - RegisterAccessInfo *rx0_regs; - RegisterAccessInfo *rx1_regs; - RegisterAccessInfo *af_regs; - RegisterAccessInfo *txe_regs; - RegisterAccessInfo *rx_mailbox_regs; - RegisterAccessInfo *af_mask_regs_mailbox; - uint32_t regs[XLNX_VERSAL_CANFD_R_MAX]; ptimer_state *canfd_timer; diff --git a/include/hw/nvram/aspeed_otp.h b/include/hw/nvram/aspeed_otp.h new file mode 100644 index 0000000000000..3752353860796 --- /dev/null +++ b/include/hw/nvram/aspeed_otp.h @@ -0,0 +1,33 @@ +/* + * ASPEED OTP (One-Time Programmable) memory + * + * Copyright (C) 2025 Aspeed + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef ASPEED_OTP_H +#define ASPEED_OTP_H + +#include "system/memory.h" +#include "hw/block/block.h" +#include "system/address-spaces.h" + +#define TYPE_ASPEED_OTP "aspeed-otp" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedOTPState, ASPEED_OTP) + +typedef struct AspeedOTPState { + DeviceState parent_obj; + + BlockBackend *blk; + + uint64_t size; + + AddressSpace as; + + MemoryRegion mmio; + + uint8_t *storage; +} AspeedOTPState; + +#endif /* ASPEED_OTP_H */ diff --git a/include/hw/nvram/xlnx-bbram.h b/include/hw/nvram/xlnx-bbram.h index 58acbe9f51b8c..af90900bfc64d 100644 --- a/include/hw/nvram/xlnx-bbram.h +++ b/include/hw/nvram/xlnx-bbram.h @@ -47,7 +47,6 @@ struct XlnxBBRam { bool bbram8_wo; bool blk_ro; - 
RegisterInfoArray *reg_array; uint32_t regs[RMAX_XLNX_BBRAM]; RegisterInfo regs_info[RMAX_XLNX_BBRAM]; }; diff --git a/include/hw/pci-host/aspeed_pcie.h b/include/hw/pci-host/aspeed_pcie.h new file mode 100644 index 0000000000000..be53ea96b9078 --- /dev/null +++ b/include/hw/pci-host/aspeed_pcie.h @@ -0,0 +1,137 @@ +/* + * ASPEED PCIe Host Controller + * + * Copyright (C) 2025 ASPEED Technology Inc. + * Copyright (c) 2022 Cédric Le Goater + * + * Authors: + * Cédric Le Goater + * Jamin Lin + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + * Based on previous work from Cédric Le Goater. + * Modifications extend support for the ASPEED AST2600 and AST2700 platforms. + */ + +#ifndef ASPEED_PCIE_H +#define ASPEED_PCIE_H + +#include "hw/sysbus.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci/pcie_port.h" +#include "qom/object.h" + +typedef struct AspeedPCIECfgTxDesc { + uint32_t desc0; + uint32_t desc1; + uint32_t desc2; + uint32_t desc3; + uint32_t wdata; + uint32_t rdata_reg; +} AspeedPCIECfgTxDesc; + +typedef struct AspeedPCIERcRegs { + uint32_t int_en_reg; + uint32_t int_sts_reg; + uint32_t msi_sts0_reg; + uint32_t msi_sts1_reg; +} AspeedPCIERcRegs; + +typedef struct AspeedPCIERegMap { + AspeedPCIERcRegs rc; +} AspeedPCIERegMap; + +#define TYPE_ASPEED_PCIE_ROOT_PORT "aspeed.pcie-root-port" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedPCIERootPortState, ASPEED_PCIE_ROOT_PORT) + +typedef struct AspeedPCIERootPortState { + PCIESlot parent_obj; +} AspeedPCIERootPortState; + +#define TYPE_ASPEED_PCIE_ROOT_DEVICE "aspeed.pcie-root-device" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedPCIERootDeviceState, ASPEED_PCIE_ROOT_DEVICE); + +struct AspeedPCIERootDeviceState { + PCIBridge parent_obj; +}; + +#define TYPE_ASPEED_PCIE_RC "aspeed.pcie-rc" +OBJECT_DECLARE_SIMPLE_TYPE(AspeedPCIERcState, ASPEED_PCIE_RC); + +struct AspeedPCIERcState { + PCIExpressHost parent_obj; + + MemoryRegion iommu_root; + AddressSpace iommu_as; + MemoryRegion dram_alias; + 
MemoryRegion *dram_mr; + MemoryRegion mmio_window; + MemoryRegion msi_window; + MemoryRegion io_window; + MemoryRegion mmio; + MemoryRegion io; + + uint64_t dram_base; + uint32_t msi_addr; + uint32_t rp_addr; + uint32_t bus_nr; + char name[16]; + bool has_rd; + qemu_irq irq; + + AspeedPCIERootDeviceState root_device; + AspeedPCIERootPortState root_port; +}; + +/* Bridge between AHB bus and PCIe RC. */ +#define TYPE_ASPEED_PCIE_CFG "aspeed.pcie-cfg" +#define TYPE_ASPEED_2700_PCIE_CFG TYPE_ASPEED_PCIE_CFG "-ast2700" +OBJECT_DECLARE_TYPE(AspeedPCIECfgState, AspeedPCIECfgClass, ASPEED_PCIE_CFG); + +struct AspeedPCIECfgState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + uint32_t *regs; + uint32_t id; + + const AspeedPCIERcRegs *rc_regs; + AspeedPCIERcState rc; + uint32_t tlpn_fifo[3]; + uint32_t tlpn_idx; +}; + +struct AspeedPCIECfgClass { + SysBusDeviceClass parent_class; + + const AspeedPCIERegMap *reg_map; + const MemoryRegionOps *reg_ops; + + uint32_t rc_msi_addr; + uint32_t rc_rp_addr; + uint64_t rc_bus_nr; + uint64_t nr_regs; + bool rc_has_rd; +}; + +#define TYPE_ASPEED_PCIE_PHY "aspeed.pcie-phy" +#define TYPE_ASPEED_2700_PCIE_PHY TYPE_ASPEED_PCIE_PHY "-ast2700" +OBJECT_DECLARE_TYPE(AspeedPCIEPhyState, AspeedPCIEPhyClass, ASPEED_PCIE_PHY); + +struct AspeedPCIEPhyState { + SysBusDevice parent_obj; + + MemoryRegion mmio; + uint32_t *regs; + uint32_t id; +}; + +struct AspeedPCIEPhyClass { + SysBusDeviceClass parent_class; + + uint64_t nr_regs; +}; + +#endif /* ASPEED_PCIE_H */ diff --git a/include/hw/pci-host/ls7a.h b/include/hw/pci-host/ls7a.h index 79d4ea8501274..33e7942de9f7b 100644 --- a/include/hw/pci-host/ls7a.h +++ b/include/hw/pci-host/ls7a.h @@ -13,41 +13,4 @@ #include "qemu/range.h" #include "qom/object.h" -#define VIRT_PCI_MEM_BASE 0x40000000UL -#define VIRT_PCI_MEM_SIZE 0x40000000UL -#define VIRT_PCI_IO_OFFSET 0x4000 -#define VIRT_PCI_CFG_BASE 0x20000000 -#define VIRT_PCI_CFG_SIZE 0x08000000 -#define VIRT_PCI_IO_BASE 0x18004000UL -#define 
VIRT_PCI_IO_SIZE 0xC000 - -#define VIRT_PCH_REG_BASE 0x10000000UL -#define VIRT_IOAPIC_REG_BASE (VIRT_PCH_REG_BASE) -#define VIRT_PCH_MSI_ADDR_LOW 0x2FF00000UL -#define VIRT_PCH_REG_SIZE 0x400 -#define VIRT_PCH_MSI_SIZE 0x8 - -/* - * GSI_BASE is hard-coded with 64 in linux kernel, else kernel fails to boot - * 0 - 15 GSI for ISA devices even if there is no ISA devices - * 16 - 63 GSI for CPU devices such as timers/perf monitor etc - * 64 - GSI for external devices - */ -#define VIRT_PCH_PIC_IRQ_NUM 32 -#define VIRT_GSI_BASE 64 -#define VIRT_DEVICE_IRQS 16 -#define VIRT_UART_COUNT 4 -#define VIRT_UART_IRQ (VIRT_GSI_BASE + 2) -#define VIRT_UART_BASE 0x1fe001e0 -#define VIRT_UART_SIZE 0x100 -#define VIRT_RTC_IRQ (VIRT_GSI_BASE + 6) -#define VIRT_MISC_REG_BASE (VIRT_PCH_REG_BASE + 0x00080000) -#define VIRT_RTC_REG_BASE (VIRT_MISC_REG_BASE + 0x00050100) -#define VIRT_RTC_LEN 0x100 -#define VIRT_SCI_IRQ (VIRT_GSI_BASE + 7) - -#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000 -#define VIRT_PLATFORM_BUS_SIZE 0x2000000 -#define VIRT_PLATFORM_BUS_NUM_IRQS 2 -#define VIRT_PLATFORM_BUS_IRQ (VIRT_GSI_BASE + 8) #endif diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index 6b7d3ac8a3611..6bccb25ac2f51 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -773,6 +773,8 @@ int pci_iommu_unregister_iotlb_notifier(PCIDevice *dev, uint32_t pasid, */ void pci_setup_iommu(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque); +void pci_setup_iommu_per_bus(PCIBus *bus, const PCIIOMMUOps *ops, void *opaque); + pcibus_t pci_bar_address(PCIDevice *d, int reg, uint8_t type, pcibus_t size); diff --git a/include/hw/pci/pci_bridge.h b/include/hw/pci/pci_bridge.h index 8cdacbc4e16d4..a055fd8d32189 100644 --- a/include/hw/pci/pci_bridge.h +++ b/include/hw/pci/pci_bridge.h @@ -104,6 +104,7 @@ typedef struct PXBPCIEDev { PXBDev parent_obj; } PXBPCIEDev; +#define TYPE_PXB_PCIE_BUS "pxb-pcie-bus" #define TYPE_PXB_CXL_BUS "pxb-cxl-bus" #define TYPE_PXB_DEV "pxb" 
OBJECT_DECLARE_SIMPLE_TYPE(PXBDev, PXB_DEV) diff --git a/include/hw/pci/pci_bus.h b/include/hw/pci/pci_bus.h index 226131254621f..c73844678886e 100644 --- a/include/hw/pci/pci_bus.h +++ b/include/hw/pci/pci_bus.h @@ -35,6 +35,7 @@ struct PCIBus { enum PCIBusFlags flags; const PCIIOMMUOps *iommu_ops; void *iommu_opaque; + bool iommu_per_bus; uint8_t devfn_min; uint32_t slot_reserved_mask; pci_set_irq_fn set_irq; diff --git a/include/hw/pci/pci_ids.h b/include/hw/pci/pci_ids.h index 33e2898be9576..16034aaa2c7e4 100644 --- a/include/hw/pci/pci_ids.h +++ b/include/hw/pci/pci_ids.h @@ -291,4 +291,6 @@ #define PCI_VENDOR_ID_NVIDIA 0x10de +#define PCI_VENDOR_ID_ASPEED 0x1A03 + #endif diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index ff6ce08e135a4..42cebcd0338cd 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -158,6 +158,7 @@ void pcie_pasid_init(PCIDevice *dev, uint16_t offset, uint8_t pasid_width, void pcie_pri_init(PCIDevice *dev, uint16_t offset, uint32_t outstanding_pr_cap, bool prg_response_pasid_req); +uint32_t pcie_pri_get_req_alloc(const PCIDevice *dev); bool pcie_pri_enabled(const PCIDevice *dev); bool pcie_pasid_enabled(const PCIDevice *dev); bool pcie_ats_enabled(const PCIDevice *dev); diff --git a/include/hw/pci/pcie_sriov.h b/include/hw/pci/pcie_sriov.h index aeaa38cf3456d..b0ea6a62c7496 100644 --- a/include/hw/pci/pcie_sriov.h +++ b/include/hw/pci/pcie_sriov.h @@ -37,10 +37,6 @@ void pcie_sriov_pf_exit(PCIDevice *dev); void pcie_sriov_pf_init_vf_bar(PCIDevice *dev, int region_num, uint8_t type, dma_addr_t size); -/* Instantiate a bar for a VF */ -void pcie_sriov_vf_register_bar(PCIDevice *dev, int region_num, - MemoryRegion *memory); - /** * pcie_sriov_pf_init_from_user_created_vfs() - Initialize PF with user-created * VFs, adding ARI to PF diff --git a/include/hw/ppc/pnv.h b/include/hw/ppc/pnv.h index d8fca079f2feb..cbdddfc73cd42 100644 --- a/include/hw/ppc/pnv.h +++ b/include/hw/ppc/pnv.h @@ -33,6 +33,7 @@ typedef struct 
PnvChip PnvChip; typedef struct Pnv8Chip Pnv8Chip; typedef struct Pnv9Chip Pnv9Chip; typedef struct Pnv10Chip Pnv10Chip; +typedef struct Pnv10Chip Pnv11Chip; #define PNV_CHIP_TYPE_SUFFIX "-" TYPE_PNV_CHIP #define PNV_CHIP_TYPE_NAME(cpu_model) cpu_model PNV_CHIP_TYPE_SUFFIX @@ -57,6 +58,10 @@ DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER9, DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER10, TYPE_PNV_CHIP_POWER10) +#define TYPE_PNV_CHIP_POWER11 PNV_CHIP_TYPE_NAME("power11_v2.0") +DECLARE_INSTANCE_CHECKER(PnvChip, PNV_CHIP_POWER11, + TYPE_PNV_CHIP_POWER11) + PnvCore *pnv_chip_find_core(PnvChip *chip, uint32_t core_id); PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir); @@ -252,4 +257,37 @@ void pnv_bmc_set_pnor(IPMIBmc *bmc, PnvPnor *pnor); #define PNV10_HOMER_BASE(chip) \ (0x300ffd800000ll + ((uint64_t)(chip)->chip_id) * PNV_HOMER_SIZE) +/* Power11 */ +#define PNV11_XSCOM_SIZE PNV10_XSCOM_SIZE +#define PNV11_XSCOM_BASE(chip) PNV10_XSCOM_BASE(chip) + +#define PNV11_LPCM_SIZE PNV10_LPCM_SIZE +#define PNV11_LPCM_BASE(chip) PNV10_LPCM_BASE(chip) + +#define PNV11_PSIHB_ESB_SIZE PNV10_PSIHB_ESB_SIZE +#define PNV11_PSIHB_ESB_BASE(chip) PNV10_PSIHB_ESB_BASE(chip) + +#define PNV11_PSIHB_SIZE PNV10_PSIHB_SIZE +#define PNV11_PSIHB_BASE(chip) PNV10_PSIHB_BASE(chip) + +#define PNV11_XIVE2_IC_SIZE PNV10_XIVE2_IC_SIZE +#define PNV11_XIVE2_IC_BASE(chip) PNV10_XIVE2_IC_BASE(chip) + +#define PNV11_XIVE2_TM_SIZE PNV10_XIVE2_TM_SIZE +#define PNV11_XIVE2_TM_BASE(chip) PNV10_XIVE2_TM_BASE(chip) + +#define PNV11_XIVE2_NVC_SIZE PNV10_XIVE2_NVC_SIZE +#define PNV11_XIVE2_NVC_BASE(chip) PNV10_XIVE2_NVC_BASE(chip) + +#define PNV11_XIVE2_NVPG_SIZE PNV10_XIVE2_NVPG_SIZE +#define PNV11_XIVE2_NVPG_BASE(chip) PNV10_XIVE2_NVPG_BASE(chip) + +#define PNV11_XIVE2_ESB_SIZE PNV10_XIVE2_ESB_SIZE +#define PNV11_XIVE2_ESB_BASE(chip) PNV10_XIVE2_ESB_BASE(chip) + +#define PNV11_XIVE2_END_SIZE PNV10_XIVE2_END_SIZE +#define PNV11_XIVE2_END_BASE(chip) PNV10_XIVE2_END_BASE(chip) + +#define 
PNV11_OCC_SENSOR_BASE(chip) PNV10_OCC_SENSOR_BASE(chip) + #endif /* PPC_PNV_H */ diff --git a/include/hw/ppc/pnv_chip.h b/include/hw/ppc/pnv_chip.h index 24ce37a9c8e47..a5b8c49680d37 100644 --- a/include/hw/ppc/pnv_chip.h +++ b/include/hw/ppc/pnv_chip.h @@ -141,6 +141,13 @@ struct Pnv10Chip { #define PNV10_PIR2CHIP(pir) (((pir) >> 8) & 0x7f) #define PNV10_PIR2THREAD(pir) (((pir) & 0x7f)) +#define TYPE_PNV11_CHIP "pnv11-chip" +DECLARE_INSTANCE_CHECKER(Pnv11Chip, PNV11_CHIP, + TYPE_PNV11_CHIP) + +/* Power11 core is same as Power10 */ +typedef struct Pnv10Chip Pnv11Chip; + struct PnvChipClass { /*< private >*/ SysBusDeviceClass parent_class; @@ -163,6 +170,7 @@ struct PnvChipClass { void (*intc_reset)(PnvChip *chip, PowerPCCPU *cpu); void (*intc_destroy)(PnvChip *chip, PowerPCCPU *cpu); void (*intc_print_info)(PnvChip *chip, PowerPCCPU *cpu, GString *buf); + void* (*intc_get)(PnvChip *chip); ISABus *(*isa_create)(PnvChip *chip, Error **errp); void (*dt_populate)(PnvChip *chip, void *fdt); void (*pic_print_info)(PnvChip *chip, GString *buf); diff --git a/include/hw/ppc/pnv_chiptod.h b/include/hw/ppc/pnv_chiptod.h index fde569bcbfa9c..466b06560a28f 100644 --- a/include/hw/ppc/pnv_chiptod.h +++ b/include/hw/ppc/pnv_chiptod.h @@ -17,6 +17,8 @@ OBJECT_DECLARE_TYPE(PnvChipTOD, PnvChipTODClass, PNV_CHIPTOD) DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV9_CHIPTOD, TYPE_PNV9_CHIPTOD) #define TYPE_PNV10_CHIPTOD TYPE_PNV_CHIPTOD "-POWER10" DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV10_CHIPTOD, TYPE_PNV10_CHIPTOD) +#define TYPE_PNV11_CHIPTOD TYPE_PNV_CHIPTOD "-POWER11" +DECLARE_INSTANCE_CHECKER(PnvChipTOD, PNV11_CHIPTOD, TYPE_PNV11_CHIPTOD) enum tod_state { tod_error = 0, diff --git a/include/hw/ppc/pnv_xscom.h b/include/hw/ppc/pnv_xscom.h index b14549db7033a..610b075a27c3f 100644 --- a/include/hw/ppc/pnv_xscom.h +++ b/include/hw/ppc/pnv_xscom.h @@ -207,6 +207,55 @@ struct PnvXScomInterfaceClass { #define PNV10_XSCOM_PIB_SPIC_BASE 0xc0000 #define PNV10_XSCOM_PIB_SPIC_SIZE 0x20 +/* + * 
Power11 core is same as Power10 + */ +#define PNV11_XSCOM_EC_BASE(core) PNV10_XSCOM_EC_BASE(core) + +#define PNV11_XSCOM_ADU_BASE PNV10_XSCOM_ADU_BASE +#define PNV11_XSCOM_ADU_SIZE PNV10_XSCOM_ADU_SIZE + +#define PNV11_XSCOM_QME_BASE(core) PNV10_XSCOM_QME_BASE(core) + +#define PNV11_XSCOM_EQ_BASE(core) PNV10_XSCOM_EQ_BASE(core) + +#define PNV11_XSCOM_PSIHB_BASE PNV10_XSCOM_PSIHB_BASE +#define PNV11_XSCOM_PSIHB_SIZE PNV10_XSCOM_PSIHB_SIZE + +#define PNV11_XSCOM_I2CM_BASE PNV10_XSCOM_I2CM_BASE +#define PNV11_XSCOM_I2CM_SIZE PNV10_XSCOM_I2CM_SIZE + +#define PNV11_XSCOM_CHIPTOD_BASE PNV10_XSCOM_CHIPTOD_BASE +#define PNV11_XSCOM_CHIPTOD_SIZE PNV10_XSCOM_CHIPTOD_SIZE + +#define PNV11_XSCOM_OCC_BASE PNV10_XSCOM_OCC_BASE +#define PNV11_XSCOM_OCC_SIZE PNV10_XSCOM_OCC_SIZE + +#define PNV11_XSCOM_SBE_CTRL_BASE PNV10_XSCOM_SBE_CTRL_BASE +#define PNV11_XSCOM_SBE_CTRL_SIZE PNV10_XSCOM_SBE_CTRL_SIZE + +#define PNV11_XSCOM_SBE_MBOX_BASE PNV10_XSCOM_SBE_MBOX_BASE +#define PNV11_XSCOM_SBE_MBOX_SIZE PNV10_XSCOM_SBE_MBOX_SIZE + +#define PNV11_XSCOM_PBA_BASE PNV10_XSCOM_PBA_BASE +#define PNV11_XSCOM_PBA_SIZE PNV10_XSCOM_PBA_SIZE + +#define PNV11_XSCOM_XIVE2_BASE PNV10_XSCOM_XIVE2_BASE +#define PNV11_XSCOM_XIVE2_SIZE PNV10_XSCOM_XIVE2_SIZE + +#define PNV11_XSCOM_N1_CHIPLET_CTRL_REGS_BASE \ + PNV10_XSCOM_N1_CHIPLET_CTRL_REGS_BASE +#define PNV11_XSCOM_CHIPLET_CTRL_REGS_SIZE PNV10_XSCOM_CHIPLET_CTRL_REGS_SIZE + +#define PNV11_XSCOM_N1_PB_SCOM_EQ_BASE PNV10_XSCOM_N1_PB_SCOM_EQ_BASE +#define PNV11_XSCOM_N1_PB_SCOM_EQ_SIZE PNV10_XSCOM_N1_PB_SCOM_EQ_SIZE + +#define PNV11_XSCOM_N1_PB_SCOM_ES_BASE PNV10_XSCOM_N1_PB_SCOM_ES_BASE +#define PNV11_XSCOM_N1_PB_SCOM_ES_SIZE PNV10_XSCOM_N1_PB_SCOM_ES_SIZE + +#define PNV11_XSCOM_PIB_SPIC_BASE PNV10_XSCOM_PIB_SPIC_BASE +#define PNV11_XSCOM_PIB_SPIC_SIZE PNV10_XSCOM_PIB_SPIC_SIZE + void pnv_xscom_init(PnvChip *chip, uint64_t size, hwaddr addr); int pnv_dt_xscom(PnvChip *chip, void *fdt, int root_offset, uint64_t xscom_base, uint64_t xscom_size, diff --git 
a/include/hw/ppc/ppc.h b/include/hw/ppc/ppc.h index 8a14d623f8caa..cb51d704c6d3b 100644 --- a/include/hw/ppc/ppc.h +++ b/include/hw/ppc/ppc.h @@ -52,6 +52,7 @@ struct ppc_tb_t { #define PPC_DECR_UNDERFLOW_LEVEL (1 << 4) /* Decr interrupt active when * the most significant bit is 1. */ +#define PPC_TIMER_PPE (1 << 5) /* Enable PPE support */ uint64_t cpu_ppc_get_tb(ppc_tb_t *tb_env, uint64_t vmclk, int64_t tb_offset); void cpu_ppc_tb_init(CPUPPCState *env, uint32_t freq); diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h index 538f438681727..b7ca8544e4314 100644 --- a/include/hw/ppc/xive.h +++ b/include/hw/ppc/xive.h @@ -365,6 +365,11 @@ static inline uint32_t xive_tctx_word2(uint8_t *ring) return *((uint32_t *) &ring[TM_WORD2]); } +bool xive_ring_valid(XiveTCTX *tctx, uint8_t ring); +bool xive_nsr_indicates_exception(uint8_t ring, uint8_t nsr); +bool xive_nsr_indicates_group_exception(uint8_t ring, uint8_t nsr); +uint8_t xive_nsr_exception_ring(uint8_t ring, uint8_t nsr); + /* * XIVE Router */ @@ -421,6 +426,7 @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas); typedef struct XiveTCTXMatch { XiveTCTX *tctx; + int count; uint8_t ring; bool precluded; } XiveTCTXMatch; @@ -436,10 +442,10 @@ DECLARE_CLASS_CHECKERS(XivePresenterClass, XIVE_PRESENTER, struct XivePresenterClass { InterfaceClass parent; - int (*match_nvt)(XivePresenter *xptr, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match); + bool (*match_nvt)(XivePresenter *xptr, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match); bool (*in_kernel)(const XivePresenter *xptr); uint32_t (*get_config)(XivePresenter *xptr); int (*broadcast)(XivePresenter *xptr, @@ -451,12 +457,14 @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, uint8_t format, uint8_t nvt_blk, uint32_t nvt_idx, bool 
cam_ignore, uint32_t logic_serv); -bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, bool *precluded); +bool xive_presenter_match(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match); uint32_t xive_get_vpgroup_size(uint32_t nvp_index); +uint8_t xive_get_group_level(bool crowd, bool ignore, + uint32_t nvp_blk, uint32_t nvp_index); /* * XIVE Fabric (Interface between Interrupt Controller and Machine) @@ -471,10 +479,10 @@ DECLARE_CLASS_CHECKERS(XiveFabricClass, XIVE_FABRIC, struct XiveFabricClass { InterfaceClass parent; - int (*match_nvt)(XiveFabric *xfb, uint8_t format, - uint8_t nvt_blk, uint32_t nvt_idx, - bool crowd, bool cam_ignore, uint8_t priority, - uint32_t logic_serv, XiveTCTXMatch *match); + bool (*match_nvt)(XiveFabric *xfb, uint8_t format, + uint8_t nvt_blk, uint32_t nvt_idx, + bool crowd, bool cam_ignore, uint8_t priority, + uint32_t logic_serv, XiveTCTXMatch *match); int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, bool crowd, bool cam_ignore, uint8_t priority); }; @@ -532,7 +540,7 @@ static inline uint8_t xive_ipb_to_pipr(uint8_t ibp) } /* - * XIVE Thread Interrupt Management Aera (TIMA) + * XIVE Thread Interrupt Management Area (TIMA) * * This region gives access to the registers of the thread interrupt * management context. It is four page wide, each page providing a @@ -544,6 +552,30 @@ static inline uint8_t xive_ipb_to_pipr(uint8_t ibp) #define XIVE_TM_OS_PAGE 0x2 #define XIVE_TM_USER_PAGE 0x3 +/* + * The TCTX (TIMA) has 4 rings (phys, pool, os, user), but only signals + * (raises an interrupt on) the CPU from 3 of them. Phys and pool both + * cause a hypervisor privileged interrupt so interrupts presented on + * those rings signal using the phys ring. 
This helper returns the signal + * regs from the given ring. + */ +static inline uint8_t *xive_tctx_signal_regs(XiveTCTX *tctx, uint8_t ring) +{ + /* + * This is a good point to add invariants to ensure nothing has tried to + * signal using the POOL ring. + */ + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_NSR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_PIPR] == 0); + g_assert(tctx->regs[TM_QW2_HV_POOL + TM_CPPR] == 0); + + if (ring == TM_QW2_HV_POOL) { + /* POOL and PHYS rings share the signal regs (PIPR, NSR, CPPR) */ + ring = TM_QW3_HV_PHYS; + } + return &tctx->regs[ring]; +} + void xive_tctx_tm_write(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); uint64_t xive_tctx_tm_read(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, @@ -553,10 +585,12 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf); Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp); void xive_tctx_reset(XiveTCTX *tctx); void xive_tctx_destroy(XiveTCTX *tctx); -void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, - uint8_t group_level); +void xive_tctx_pipr_set(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level); +void xive_tctx_pipr_present(XiveTCTX *tctx, uint8_t ring, uint8_t priority, + uint8_t group_level); void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring); -void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level); +uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring); /* * KVM XIVE device helpers diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h index 8cdf8191742e9..f4437e2c79a72 100644 --- a/include/hw/ppc/xive2.h +++ b/include/hw/ppc/xive2.h @@ -29,9 +29,11 @@ OBJECT_DECLARE_TYPE(Xive2Router, Xive2RouterClass, XIVE2_ROUTER); * Configuration flags */ -#define XIVE2_GEN1_TIMA_OS 0x00000001 -#define XIVE2_VP_SAVE_RESTORE 0x00000002 -#define XIVE2_THREADID_8BITS 0x00000004 +#define XIVE2_GEN1_TIMA_OS 0x00000001 +#define XIVE2_VP_SAVE_RESTORE 
0x00000002 +#define XIVE2_THREADID_8BITS 0x00000004 +#define XIVE2_EN_VP_GRP_PRIORITY 0x00000008 +#define XIVE2_VP_INT_PRIO 0x00000030 typedef struct Xive2RouterClass { SysBusDeviceClass parent; @@ -80,6 +82,7 @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, uint32_t xive2_router_get_config(Xive2Router *xrtr); void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); +void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked); /* * XIVE2 Presenter (POWER10) @@ -127,6 +130,8 @@ void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, @@ -137,7 +142,16 @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority); void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); +void xive2_tm_push_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +uint64_t xive2_tm_pull_pool_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size); +void xive2_tm_push_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); +uint64_t xive2_tm_pull_phys_ctx(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, unsigned size); void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, uint64_t value, unsigned size); - +void xive2_tm_ack_os_el(XivePresenter *xptr, XiveTCTX *tctx, + hwaddr offset, uint64_t value, unsigned size); #endif /* PPC_XIVE2_H */ 
diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h index b11395c563504..2a3e60abadbf9 100644 --- a/include/hw/ppc/xive2_regs.h +++ b/include/hw/ppc/xive2_regs.h @@ -39,15 +39,18 @@ typedef struct Xive2Eas { uint64_t w; -#define EAS2_VALID PPC_BIT(0) -#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ -#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ -#define EAS2_MASKED PPC_BIT(32) /* Masked */ -#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ +#define EAS2_VALID PPC_BIT(0) +#define EAS2_QOS PPC_BITMASK(1, 2) /* Quality of Service(unimp) */ +#define EAS2_RESUME PPC_BIT(3) /* END Resume(unimp) */ +#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ +#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ +#define EAS2_MASKED PPC_BIT(32) /* Masked */ +#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ } Xive2Eas; #define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID) #define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED) +#define xive2_eas_is_resume(eas) (be64_to_cpu((eas)->w) & EAS2_RESUME) void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf); @@ -87,6 +90,7 @@ typedef struct Xive2End { #define END2_W2_EQ_ADDR_HI PPC_BITMASK32(8, 31) uint32_t w3; #define END2_W3_EQ_ADDR_LO PPC_BITMASK32(0, 24) +#define END2_W3_CL PPC_BIT32(27) #define END2_W3_QSIZE PPC_BITMASK32(28, 31) uint32_t w4; #define END2_W4_END_BLOCK PPC_BITMASK32(4, 7) @@ -154,6 +158,7 @@ typedef struct Xive2Nvp { #define NVP2_W0_L PPC_BIT32(8) #define NVP2_W0_G PPC_BIT32(9) #define NVP2_W0_T PPC_BIT32(10) +#define NVP2_W0_P PPC_BIT32(11) #define NVP2_W0_ESC_END PPC_BIT32(25) /* 'N' bit 0:ESB 1:END */ #define NVP2_W0_PGOFIRST PPC_BITMASK32(26, 31) uint32_t w1; @@ -205,9 +210,9 @@ static inline uint32_t xive2_nvp_idx(uint32_t cam_line) return cam_line & ((1 << XIVE2_NVP_SHIFT) - 1); } -static inline uint32_t xive2_nvp_blk(uint32_t cam_line)
+static inline uint8_t xive2_nvp_blk(uint32_t cam_line) { - return (cam_line >> XIVE2_NVP_SHIFT) & 0xf; + return (uint8_t)((cam_line >> XIVE2_NVP_SHIFT) & 0xf); } void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx, GString *buf); @@ -220,6 +225,9 @@ typedef struct Xive2Nvgc { #define NVGC2_W0_VALID PPC_BIT32(0) #define NVGC2_W0_PGONEXT PPC_BITMASK32(26, 31) uint32_t w1; +#define NVGC2_W1_PSIZE PPC_BITMASK32(0, 1) +#define NVGC2_W1_END_BLK PPC_BITMASK32(4, 7) +#define NVGC2_W1_END_IDX PPC_BITMASK32(8, 31) uint32_t w2; uint32_t w3; uint32_t w4; diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 530f3da70218d..a7bfb10dc70c7 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -1064,6 +1064,7 @@ bool qdev_set_parent_bus(DeviceState *dev, BusState *bus, Error **errp); extern bool qdev_hot_removed; char *qdev_get_dev_path(DeviceState *dev); +const char *qdev_get_printable_name(DeviceState *dev); void qbus_set_hotplug_handler(BusState *bus, Object *handler); void qbus_set_bus_hotplug_handler(BusState *bus); diff --git a/include/hw/register.h b/include/hw/register.h index a913c52aee5be..7b0f4c8b7a663 100644 --- a/include/hw/register.h +++ b/include/hw/register.h @@ -75,10 +75,6 @@ struct RegisterAccessInfo { */ struct RegisterInfo { - /* */ - DeviceState parent_obj; - - /* */ void *data; int data_size; @@ -87,9 +83,8 @@ struct RegisterInfo { void *opaque; }; -#define TYPE_REGISTER "qemu-register" -DECLARE_INSTANCE_CHECKER(RegisterInfo, REGISTER, - TYPE_REGISTER) +#define TYPE_REGISTER_ARRAY "qemu-register-array" +OBJECT_DECLARE_SIMPLE_TYPE(RegisterInfoArray, REGISTER_ARRAY) /** * This structure is used to group all of the individual registers which are @@ -103,6 +98,8 @@ DECLARE_INSTANCE_CHECKER(RegisterInfo, REGISTER, */ struct RegisterInfoArray { + Object parent_obj; + MemoryRegion mem; int num_elements; @@ -212,18 +209,4 @@ RegisterInfoArray *register_init_block64(DeviceState *owner, bool debug_enabled, uint64_t memory_size); 
-/** - * This function should be called to cleanup the registers that were initialized - * when calling register_init_block32(). This function should only be called - * from the device's instance_finalize function. - * - * Any memory operations that the device performed that require cleanup (such - * as creating subregions) need to be called before calling this function. - * - * @r_array: A structure containing all of the registers, as returned by - * register_init_block32() - */ - -void register_finalize_block(RegisterInfoArray *r_array); - #endif diff --git a/include/hw/s390x/s390-pci-kvm.h b/include/hw/s390x/s390-pci-kvm.h index 933814a4025bb..c33f2833a3cc3 100644 --- a/include/hw/s390x/s390-pci-kvm.h +++ b/include/hw/s390x/s390-pci-kvm.h @@ -14,12 +14,19 @@ #include "hw/s390x/s390-pci-bus.h" #include "hw/s390x/s390-pci-inst.h" +#include "system/kvm.h" #ifdef CONFIG_KVM +static inline void s390_pcihost_kvm_realize(void) +{ + kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); +} + bool s390_pci_kvm_interp_allowed(void); int s390_pci_kvm_aif_enable(S390PCIBusDevice *pbdev, ZpciFib *fib, bool assist); int s390_pci_kvm_aif_disable(S390PCIBusDevice *pbdev); #else +static inline void s390_pcihost_kvm_realize(void) {} static inline bool s390_pci_kvm_interp_allowed(void) { return false; diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h index d32f6180e0d3e..33f01f85bb168 100644 --- a/include/hw/s390x/sclp.h +++ b/include/hw/s390x/sclp.h @@ -197,12 +197,9 @@ OBJECT_DECLARE_TYPE(SCLPDevice, SCLPDeviceClass, struct SCLPEventFacility; struct SCLPDevice { - /* private */ DeviceState parent_obj; - struct SCLPEventFacility *event_facility; - int increment_size; - /* public */ + struct SCLPEventFacility *event_facility; }; struct SCLPDeviceClass { diff --git a/include/hw/sd/sd.h b/include/hw/sd/sd.h index d6bad175131c2..91b5c40a5f893 100644 --- a/include/hw/sd/sd.h +++ b/include/hw/sd/sd.h @@ -56,7 +56,6 @@ #define AKE_SEQ_ERROR (1 << 3) enum 
SDPhySpecificationVersion { - SD_PHY_SPECv1_10_VERS = 1, SD_PHY_SPECv2_00_VERS = 2, SD_PHY_SPECv3_01_VERS = 3, }; @@ -96,7 +95,17 @@ struct SDCardClass { DeviceClass parent_class; /*< public >*/ - int (*do_command)(SDState *sd, SDRequest *req, uint8_t *response); + /** + * Process a SD command request. + * @sd: card + * @req: command request + * @resp: buffer to receive the command response + * @respsz: size of @resp buffer + * + * Return: size of the response + */ + size_t (*do_command)(SDState *sd, SDRequest *req, + uint8_t *resp, size_t respsz); /** * Write a byte to a SD card. * @sd: card @@ -153,7 +162,16 @@ struct SDBusClass { void sdbus_set_voltage(SDBus *sdbus, uint16_t millivolts); uint8_t sdbus_get_dat_lines(SDBus *sdbus); bool sdbus_get_cmd_line(SDBus *sdbus); -int sdbus_do_command(SDBus *sd, SDRequest *req, uint8_t *response); +/** + * sdbus_do_command: Process a SD command request + * @sd: card + * @req: command request + * @resp: buffer to receive the command response + * @respsz: size of @resp buffer + * + * Return: size of the response + */ +size_t sdbus_do_command(SDBus *sd, SDRequest *req, uint8_t *resp, size_t respsz); /** * Write a byte to a SD bus. * @sd: bus diff --git a/include/hw/southbridge/ich9.h b/include/hw/southbridge/ich9.h index 1e231e89c9260..2c35dd04848ec 100644 --- a/include/hw/southbridge/ich9.h +++ b/include/hw/southbridge/ich9.h @@ -95,7 +95,7 @@ struct ICH9LPCState { #define ICH9_CC_OIC 0x31FF #define ICH9_CC_OIC_AEN 0x1 #define ICH9_CC_GCS 0x3410 -#define ICH9_CC_GCS_DEFAULT 0x00000020 +#define ICH9_CC_GCS_DEFAULT 0x00000000 #define ICH9_CC_GCS_NO_REBOOT (1 << 5) /* D28:F[0-5] */ diff --git a/include/hw/ssi/ssi.h b/include/hw/ssi/ssi.h index 3cdcbd5390428..2ad8033d8f5b7 100644 --- a/include/hw/ssi/ssi.h +++ b/include/hw/ssi/ssi.h @@ -38,6 +38,7 @@ struct SSIPeripheralClass { /* if you have standard or no CS behaviour, just override transfer. * This is called when the device cs is active (true by default). 
+ * See ssi_transfer(). */ uint32_t (*transfer)(SSIPeripheral *dev, uint32_t val); /* called when the CS line changes. Optional, devices only need to implement @@ -52,6 +53,7 @@ struct SSIPeripheralClass { * of the CS behaviour at the device level. transfer, set_cs, and * cs_polarity are unused if this is overwritten. Transfer_raw will * always be called for the device for every txrx access to the parent bus + * See ssi_transfer(). */ uint32_t (*transfer_raw)(SSIPeripheral *dev, uint32_t val); }; @@ -110,6 +112,18 @@ bool ssi_realize_and_unref(DeviceState *dev, SSIBus *bus, Error **errp); /* Master interface. */ SSIBus *ssi_create_bus(DeviceState *parent, const char *name); +/** + * Transfer a word on a SSI bus + * @bus: SSI bus + * @val: word to transmit + * + * At the same time, read a word and write the @val one on the SSI bus. + * + * SSI words might vary between 8 and 32 bits. The same number of bits + * written is received. + * + * Return: word value received + */ uint32_t ssi_transfer(SSIBus *bus, uint32_t val); DeviceState *ssi_get_cs(SSIBus *bus, uint8_t cs_index); diff --git a/include/hw/timer/i8254.h b/include/hw/timer/i8254.h index 8402caad30733..f7148d92865f8 100644 --- a/include/hw/timer/i8254.h +++ b/include/hw/timer/i8254.h @@ -75,7 +75,7 @@ static inline ISADevice *kvm_pit_init(ISABus *bus, int base) return d; } -void pit_set_gate(ISADevice *dev, int channel, int val); -void pit_get_channel_info(ISADevice *dev, int channel, PITChannelInfo *info); +void pit_set_gate(PITCommonState *pit, int channel, int val); +void pit_get_channel_info(PITCommonState *pit, int channel, PITChannelInfo *info); #endif /* HW_I8254_H */ diff --git a/include/hw/vfio/vfio-amd-xgbe.h b/include/hw/vfio/vfio-amd-xgbe.h deleted file mode 100644 index a894546c02d14..0000000000000 --- a/include/hw/vfio/vfio-amd-xgbe.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * VFIO AMD XGBE device - * - * Copyright Linaro Limited, 2015 - * - * Authors: - * Eric Auger - * - * This work is licensed 
under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. - * - */ - -#ifndef HW_VFIO_VFIO_AMD_XGBE_H -#define HW_VFIO_VFIO_AMD_XGBE_H - -#include "hw/vfio/vfio-platform.h" -#include "qom/object.h" - -#define TYPE_VFIO_AMD_XGBE "vfio-amd-xgbe" - -/** - * This device exposes: - * - 5 MMIO regions: MAC, PCS, SerDes Rx/Tx regs, - SerDes Integration Registers 1/2 & 2/2 - * - 2 level sensitive IRQs and optional DMA channel IRQs - */ -struct VFIOAmdXgbeDevice { - VFIOPlatformDevice vdev; -}; - -typedef struct VFIOAmdXgbeDevice VFIOAmdXgbeDevice; - -struct VFIOAmdXgbeDeviceClass { - /*< private >*/ - VFIOPlatformDeviceClass parent_class; - /*< public >*/ - DeviceRealize parent_realize; -}; - -typedef struct VFIOAmdXgbeDeviceClass VFIOAmdXgbeDeviceClass; - -DECLARE_OBJ_CHECKERS(VFIOAmdXgbeDevice, VFIOAmdXgbeDeviceClass, - VFIO_AMD_XGBE_DEVICE, TYPE_VFIO_AMD_XGBE) - -#endif diff --git a/include/hw/vfio/vfio-calxeda-xgmac.h b/include/hw/vfio/vfio-calxeda-xgmac.h deleted file mode 100644 index 8482f151dd5ab..0000000000000 --- a/include/hw/vfio/vfio-calxeda-xgmac.h +++ /dev/null @@ -1,43 +0,0 @@ -/* - * VFIO calxeda xgmac device - * - * Copyright Linaro Limited, 2014 - * - * Authors: - * Eric Auger - * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. 
- * - */ - -#ifndef HW_VFIO_VFIO_CALXEDA_XGMAC_H -#define HW_VFIO_VFIO_CALXEDA_XGMAC_H - -#include "hw/vfio/vfio-platform.h" -#include "qom/object.h" - -#define TYPE_VFIO_CALXEDA_XGMAC "vfio-calxeda-xgmac" - -/** - * This device exposes: - * - a single MMIO region corresponding to its register space - * - 3 IRQS (main and 2 power related IRQs) - */ -struct VFIOCalxedaXgmacDevice { - VFIOPlatformDevice vdev; -}; -typedef struct VFIOCalxedaXgmacDevice VFIOCalxedaXgmacDevice; - -struct VFIOCalxedaXgmacDeviceClass { - /*< private >*/ - VFIOPlatformDeviceClass parent_class; - /*< public >*/ - DeviceRealize parent_realize; -}; -typedef struct VFIOCalxedaXgmacDeviceClass VFIOCalxedaXgmacDeviceClass; - -DECLARE_OBJ_CHECKERS(VFIOCalxedaXgmacDevice, VFIOCalxedaXgmacDeviceClass, - VFIO_CALXEDA_XGMAC_DEVICE, TYPE_VFIO_CALXEDA_XGMAC) - -#endif diff --git a/include/hw/vfio/vfio-container-base.h b/include/hw/vfio/vfio-container-base.h deleted file mode 100644 index bded6e993ffd3..0000000000000 --- a/include/hw/vfio/vfio-container-base.h +++ /dev/null @@ -1,278 +0,0 @@ -/* - * VFIO BASE CONTAINER - * - * Copyright (C) 2023 Intel Corporation. - * Copyright Red Hat, Inc. 
2023 - * - * Authors: Yi Liu - * Eric Auger - * - * SPDX-License-Identifier: GPL-2.0-or-later - */ - -#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H -#define HW_VFIO_VFIO_CONTAINER_BASE_H - -#include "system/memory.h" - -typedef struct VFIODevice VFIODevice; -typedef struct VFIOIOMMUClass VFIOIOMMUClass; - -typedef struct { - unsigned long *bitmap; - hwaddr size; - hwaddr pages; -} VFIOBitmap; - -typedef struct VFIOAddressSpace { - AddressSpace *as; - QLIST_HEAD(, VFIOContainerBase) containers; - QLIST_ENTRY(VFIOAddressSpace) list; -} VFIOAddressSpace; - -/* - * This is the base object for vfio container backends - */ -typedef struct VFIOContainerBase { - Object parent; - VFIOAddressSpace *space; - MemoryListener listener; - Error *error; - bool initialized; - uint64_t dirty_pgsizes; - uint64_t max_dirty_bitmap_size; - unsigned long pgsizes; - unsigned int dma_max_mappings; - bool dirty_pages_supported; - bool dirty_pages_started; /* Protected by BQL */ - QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; - QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; - QLIST_ENTRY(VFIOContainerBase) next; - QLIST_HEAD(, VFIODevice) device_list; - GList *iova_ranges; - NotifierWithReturn cpr_reboot_notifier; -} VFIOContainerBase; - -typedef struct VFIOGuestIOMMU { - VFIOContainerBase *bcontainer; - IOMMUMemoryRegion *iommu_mr; - hwaddr iommu_offset; - IOMMUNotifier n; - QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; -} VFIOGuestIOMMU; - -typedef struct VFIORamDiscardListener { - VFIOContainerBase *bcontainer; - MemoryRegion *mr; - hwaddr offset_within_address_space; - hwaddr size; - uint64_t granularity; - RamDiscardListener listener; - QLIST_ENTRY(VFIORamDiscardListener) next; -} VFIORamDiscardListener; - -VFIOAddressSpace *vfio_address_space_get(AddressSpace *as); -void vfio_address_space_put(VFIOAddressSpace *space); -void vfio_address_space_insert(VFIOAddressSpace *space, - VFIOContainerBase *bcontainer); - -int vfio_container_dma_map(VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - 
void *vaddr, bool readonly, MemoryRegion *mr); -int vfio_container_dma_unmap(VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb, bool unmap_all); -bool vfio_container_add_section_window(VFIOContainerBase *bcontainer, - MemoryRegionSection *section, - Error **errp); -void vfio_container_del_section_window(VFIOContainerBase *bcontainer, - MemoryRegionSection *section); -int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer, - bool start, Error **errp); -bool vfio_container_dirty_tracking_is_started( - const VFIOContainerBase *bcontainer); -bool vfio_container_devices_dirty_tracking_is_supported( - const VFIOContainerBase *bcontainer); -int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer, - uint64_t iova, uint64_t size, ram_addr_t ram_addr, Error **errp); - -GList *vfio_container_get_iova_ranges(const VFIOContainerBase *bcontainer); - -static inline uint64_t -vfio_container_get_page_size_mask(const VFIOContainerBase *bcontainer) -{ - assert(bcontainer); - return bcontainer->pgsizes; -} - -#define TYPE_VFIO_IOMMU "vfio-iommu" -#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy" -#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr" -#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd" -#define TYPE_VFIO_IOMMU_USER TYPE_VFIO_IOMMU "-user" - -OBJECT_DECLARE_TYPE(VFIOContainerBase, VFIOIOMMUClass, VFIO_IOMMU) - -struct VFIOIOMMUClass { - ObjectClass parent_class; - - /** - * @setup - * - * Perform basic setup of the container, including configuring IOMMU - * capabilities, IOVA ranges, supported page sizes, etc. - * - * @bcontainer: #VFIOContainerBase - * @errp: pointer to Error*, to store an error if it happens. - * - * Returns true to indicate success and false for error. - */ - bool (*setup)(VFIOContainerBase *bcontainer, Error **errp); - - /** - * @listener_begin - * - * Called at the beginning of an address space update transaction. - * See #MemoryListener. 
- * - * @bcontainer: #VFIOContainerBase - */ - void (*listener_begin)(VFIOContainerBase *bcontainer); - - /** - * @listener_commit - * - * Called at the end of an address space update transaction, - * See #MemoryListener. - * - * @bcontainer: #VFIOContainerBase - */ - void (*listener_commit)(VFIOContainerBase *bcontainer); - - /** - * @dma_map - * - * Map an address range into the container. Note that the memory region is - * referenced within an RCU read lock region across this call. - * - * @bcontainer: #VFIOContainerBase to use - * @iova: start address to map - * @size: size of the range to map - * @vaddr: process virtual address of mapping - * @readonly: true if mapping should be readonly - * @mr: the memory region for this mapping - * - * Returns 0 to indicate success and -errno otherwise. - */ - int (*dma_map)(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - void *vaddr, bool readonly, MemoryRegion *mr); - /** - * @dma_map_file - * - * Map a file range for the container. - * - * @bcontainer: #VFIOContainerBase to use for map - * @iova: start address to map - * @size: size of the range to map - * @fd: descriptor of the file to map - * @start: starting file offset of the range to map - * @readonly: map read only if true - */ - int (*dma_map_file)(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - int fd, unsigned long start, bool readonly); - /** - * @dma_unmap - * - * Unmap an address range from the container. - * - * @bcontainer: #VFIOContainerBase to use for unmap - * @iova: start address to unmap - * @size: size of the range to unmap - * @iotlb: The IOMMU TLB mapping entry (or NULL) - * @unmap_all: if set, unmap the entire address space - * - * Returns 0 to indicate success and -errno otherwise. 
- */ - int (*dma_unmap)(const VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, - IOMMUTLBEntry *iotlb, bool unmap_all); - - - /** - * @attach_device - * - * Associate the given device with a container and do some related - * initialization of the device context. - * - * @name: name of the device - * @vbasedev: the device - * @as: address space to use - * @errp: pointer to Error*, to store an error if it happens. - * - * Returns true to indicate success and false for error. - */ - bool (*attach_device)(const char *name, VFIODevice *vbasedev, - AddressSpace *as, Error **errp); - - /* - * @detach_device - * - * Detach the given device from its container and clean up any necessary - * state. - * - * @vbasedev: the device to disassociate - */ - void (*detach_device)(VFIODevice *vbasedev); - - /* migration feature */ - - /** - * @set_dirty_page_tracking - * - * Start or stop dirty pages tracking on VFIO container - * - * @bcontainer: #VFIOContainerBase on which to de/activate dirty - * page tracking - * @start: indicates whether to start or stop dirty pages tracking - * @errp: pointer to Error*, to store an error if it happens. - * - * Returns zero to indicate success and negative for error. - */ - int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer, - bool start, Error **errp); - /** - * @query_dirty_bitmap - * - * Get bitmap of dirty pages from container - * - * @bcontainer: #VFIOContainerBase from which to get dirty pages - * @vbmap: #VFIOBitmap internal bitmap structure - * @iova: iova base address - * @size: size of iova range - * @errp: pointer to Error*, to store an error if it happens. - * - * Returns zero to indicate success and negative for error. 
- */ - int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer, - VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp); - /* PCI specific */ - int (*pci_hot_reset)(VFIODevice *vbasedev, bool single); - - /* SPAPR specific */ - bool (*add_window)(VFIOContainerBase *bcontainer, - MemoryRegionSection *section, - Error **errp); - void (*del_window)(VFIOContainerBase *bcontainer, - MemoryRegionSection *section); - void (*release)(VFIOContainerBase *bcontainer); -}; - -VFIORamDiscardListener *vfio_find_ram_discard_listener( - VFIOContainerBase *bcontainer, MemoryRegionSection *section); - -void vfio_container_region_add(VFIOContainerBase *bcontainer, - MemoryRegionSection *section, bool cpr_remap); - -#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */ diff --git a/include/hw/vfio/vfio-container-legacy.h b/include/hw/vfio/vfio-container-legacy.h new file mode 100644 index 0000000000000..ffd594e80d64e --- /dev/null +++ b/include/hw/vfio/vfio-container-legacy.h @@ -0,0 +1,40 @@ +/* + * VFIO container + * + * Copyright Red Hat, Inc. 
2025 + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VFIO_CONTAINER_LEGACY_H +#define HW_VFIO_CONTAINER_LEGACY_H + +#include "hw/vfio/vfio-container.h" +#include "hw/vfio/vfio-cpr.h" + +typedef struct VFIOLegacyContainer VFIOLegacyContainer; +typedef struct VFIODevice VFIODevice; + +typedef struct VFIOGroup { + int fd; + int groupid; + VFIOLegacyContainer *container; + QLIST_HEAD(, VFIODevice) device_list; + QLIST_ENTRY(VFIOGroup) next; + QLIST_ENTRY(VFIOGroup) container_next; + bool ram_block_discard_allowed; +} VFIOGroup; + +struct VFIOLegacyContainer { + VFIOContainer parent_obj; + + int fd; /* /dev/vfio/vfio, empowered by the attached groups */ + unsigned iommu_type; + bool unmap_all_supported; + QLIST_HEAD(, VFIOGroup) group_list; + VFIOContainerCPR cpr; +}; + +OBJECT_DECLARE_SIMPLE_TYPE(VFIOLegacyContainer, VFIO_IOMMU_LEGACY); + +#endif /* HW_VFIO_CONTAINER_LEGACY_H */ diff --git a/include/hw/vfio/vfio-container.h b/include/hw/vfio/vfio-container.h index 21e5807e48e1b..c4b58d664b7e7 100644 --- a/include/hw/vfio/vfio-container.h +++ b/include/hw/vfio/vfio-container.h @@ -1,38 +1,280 @@ /* - * VFIO container + * VFIO BASE CONTAINER * - * Copyright Red Hat, Inc. 2025 + * Copyright (C) 2023 Intel Corporation. + * Copyright Red Hat, Inc. 
2023 + * + * Authors: Yi Liu + * Eric Auger * * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef HW_VFIO_CONTAINER_H -#define HW_VFIO_CONTAINER_H +#ifndef HW_VFIO_VFIO_CONTAINER_H +#define HW_VFIO_VFIO_CONTAINER_H -#include "hw/vfio/vfio-container-base.h" -#include "hw/vfio/vfio-cpr.h" +#include "system/memory.h" -typedef struct VFIOContainer VFIOContainer; typedef struct VFIODevice VFIODevice; +typedef struct VFIOIOMMUClass VFIOIOMMUClass; + +typedef struct { + unsigned long *bitmap; + hwaddr size; + hwaddr pages; +} VFIOBitmap; -typedef struct VFIOGroup { - int fd; - int groupid; - VFIOContainer *container; +typedef struct VFIOAddressSpace { + AddressSpace *as; + QLIST_HEAD(, VFIOContainer) containers; + QLIST_ENTRY(VFIOAddressSpace) list; +} VFIOAddressSpace; + +/* + * This is the base object for vfio container backends + */ +struct VFIOContainer { + Object parent_obj; + + VFIOAddressSpace *space; + MemoryListener listener; + Error *error; + bool initialized; + uint64_t dirty_pgsizes; + uint64_t max_dirty_bitmap_size; + unsigned long pgsizes; + unsigned int dma_max_mappings; + bool dirty_pages_supported; + bool dirty_pages_started; /* Protected by BQL */ + QLIST_HEAD(, VFIOGuestIOMMU) giommu_list; + QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; + QLIST_ENTRY(VFIOContainer) next; QLIST_HEAD(, VFIODevice) device_list; - QLIST_ENTRY(VFIOGroup) next; - QLIST_ENTRY(VFIOGroup) container_next; - bool ram_block_discard_allowed; -} VFIOGroup; - -typedef struct VFIOContainer { - VFIOContainerBase bcontainer; - int fd; /* /dev/vfio/vfio, empowered by the attached groups */ - unsigned iommu_type; - QLIST_HEAD(, VFIOGroup) group_list; - VFIOContainerCPR cpr; -} VFIOContainer; - -OBJECT_DECLARE_SIMPLE_TYPE(VFIOContainer, VFIO_IOMMU_LEGACY); - -#endif /* HW_VFIO_CONTAINER_H */ + GList *iova_ranges; + NotifierWithReturn cpr_reboot_notifier; +}; + +#define TYPE_VFIO_IOMMU "vfio-iommu" +OBJECT_DECLARE_TYPE(VFIOContainer, VFIOIOMMUClass, VFIO_IOMMU) + +typedef struct 
VFIOGuestIOMMU { + VFIOContainer *bcontainer; + IOMMUMemoryRegion *iommu_mr; + hwaddr iommu_offset; + IOMMUNotifier n; + QLIST_ENTRY(VFIOGuestIOMMU) giommu_next; +} VFIOGuestIOMMU; + +typedef struct VFIORamDiscardListener { + VFIOContainer *bcontainer; + MemoryRegion *mr; + hwaddr offset_within_address_space; + hwaddr size; + uint64_t granularity; + RamDiscardListener listener; + QLIST_ENTRY(VFIORamDiscardListener) next; +} VFIORamDiscardListener; + +VFIOAddressSpace *vfio_address_space_get(AddressSpace *as); +void vfio_address_space_put(VFIOAddressSpace *space); +void vfio_address_space_insert(VFIOAddressSpace *space, + VFIOContainer *bcontainer); + +int vfio_container_dma_map(VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + void *vaddr, bool readonly, MemoryRegion *mr); +int vfio_container_dma_unmap(VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + IOMMUTLBEntry *iotlb, bool unmap_all); +bool vfio_container_add_section_window(VFIOContainer *bcontainer, + MemoryRegionSection *section, + Error **errp); +void vfio_container_del_section_window(VFIOContainer *bcontainer, + MemoryRegionSection *section); +int vfio_container_set_dirty_page_tracking(VFIOContainer *bcontainer, + bool start, Error **errp); +bool vfio_container_dirty_tracking_is_started( + const VFIOContainer *bcontainer); +bool vfio_container_devices_dirty_tracking_is_supported( + const VFIOContainer *bcontainer); +int vfio_container_query_dirty_bitmap(const VFIOContainer *bcontainer, + uint64_t iova, uint64_t size, + hwaddr translated_addr, Error **errp); + +GList *vfio_container_get_iova_ranges(const VFIOContainer *bcontainer); + +static inline uint64_t +vfio_container_get_page_size_mask(const VFIOContainer *bcontainer) +{ + assert(bcontainer); + return bcontainer->pgsizes; +} + +#define TYPE_VFIO_IOMMU_LEGACY TYPE_VFIO_IOMMU "-legacy" +#define TYPE_VFIO_IOMMU_SPAPR TYPE_VFIO_IOMMU "-spapr" +#define TYPE_VFIO_IOMMU_IOMMUFD TYPE_VFIO_IOMMU "-iommufd" +#define TYPE_VFIO_IOMMU_USER 
TYPE_VFIO_IOMMU "-user" + +struct VFIOIOMMUClass { + ObjectClass parent_class; + + /** + * @setup + * + * Perform basic setup of the container, including configuring IOMMU + * capabilities, IOVA ranges, supported page sizes, etc. + * + * @bcontainer: #VFIOContainer + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns true to indicate success and false for error. + */ + bool (*setup)(VFIOContainer *bcontainer, Error **errp); + + /** + * @listener_begin + * + * Called at the beginning of an address space update transaction. + * See #MemoryListener. + * + * @bcontainer: #VFIOContainer + */ + void (*listener_begin)(VFIOContainer *bcontainer); + + /** + * @listener_commit + * + * Called at the end of an address space update transaction, + * See #MemoryListener. + * + * @bcontainer: #VFIOContainer + */ + void (*listener_commit)(VFIOContainer *bcontainer); + + /** + * @dma_map + * + * Map an address range into the container. Note that the memory region is + * referenced within an RCU read lock region across this call. + * + * @bcontainer: #VFIOContainer to use + * @iova: start address to map + * @size: size of the range to map + * @vaddr: process virtual address of mapping + * @readonly: true if mapping should be readonly + * @mr: the memory region for this mapping + * + * Returns 0 to indicate success and -errno otherwise. + */ + int (*dma_map)(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + void *vaddr, bool readonly, MemoryRegion *mr); + /** + * @dma_map_file + * + * Map a file range for the container. 
+ * + * @bcontainer: #VFIOContainer to use for map + * @iova: start address to map + * @size: size of the range to map + * @fd: descriptor of the file to map + * @start: starting file offset of the range to map + * @readonly: map read only if true + */ + int (*dma_map_file)(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + int fd, unsigned long start, bool readonly); + /** + * @dma_unmap + * + * Unmap an address range from the container. + * + * @bcontainer: #VFIOContainer to use for unmap + * @iova: start address to unmap + * @size: size of the range to unmap + * @iotlb: The IOMMU TLB mapping entry (or NULL) + * @unmap_all: if set, unmap the entire address space + * + * Returns 0 to indicate success and -errno otherwise. + */ + int (*dma_unmap)(const VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, + IOMMUTLBEntry *iotlb, bool unmap_all); + + + /** + * @attach_device + * + * Associate the given device with a container and do some related + * initialization of the device context. + * + * @name: name of the device + * @vbasedev: the device + * @as: address space to use + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns true to indicate success and false for error. + */ + bool (*attach_device)(const char *name, VFIODevice *vbasedev, + AddressSpace *as, Error **errp); + + /* + * @detach_device + * + * Detach the given device from its container and clean up any necessary + * state. + * + * @vbasedev: the device to disassociate + */ + void (*detach_device)(VFIODevice *vbasedev); + + /* migration feature */ + + /** + * @set_dirty_page_tracking + * + * Start or stop dirty pages tracking on VFIO container + * + * @bcontainer: #VFIOContainer on which to de/activate dirty + * page tracking + * @start: indicates whether to start or stop dirty pages tracking + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns zero to indicate success and negative for error. 
+ */ + int (*set_dirty_page_tracking)(const VFIOContainer *bcontainer, + bool start, Error **errp); + /** + * @query_dirty_bitmap + * + * Get bitmap of dirty pages from container + * + * @bcontainer: #VFIOContainer from which to get dirty pages + * @vbmap: #VFIOBitmap internal bitmap structure + * @iova: iova base address + * @size: size of iova range + * @errp: pointer to Error*, to store an error if it happens. + * + * Returns zero to indicate success and negative for error. + */ + int (*query_dirty_bitmap)(const VFIOContainer *bcontainer, + VFIOBitmap *vbmap, hwaddr iova, hwaddr size, Error **errp); + /* PCI specific */ + int (*pci_hot_reset)(VFIODevice *vbasedev, bool single); + + /* SPAPR specific */ + bool (*add_window)(VFIOContainer *bcontainer, + MemoryRegionSection *section, + Error **errp); + void (*del_window)(VFIOContainer *bcontainer, + MemoryRegionSection *section); + void (*release)(VFIOContainer *bcontainer); +}; + +VFIORamDiscardListener *vfio_find_ram_discard_listener( + VFIOContainer *bcontainer, MemoryRegionSection *section); + +void vfio_container_region_add(VFIOContainer *bcontainer, + MemoryRegionSection *section, bool cpr_remap); + +#endif /* HW_VFIO_VFIO_CONTAINER_H */ diff --git a/include/hw/vfio/vfio-cpr.h b/include/hw/vfio/vfio-cpr.h index 80ad20d216908..4606da500a796 100644 --- a/include/hw/vfio/vfio-cpr.h +++ b/include/hw/vfio/vfio-cpr.h @@ -12,16 +12,16 @@ #include "migration/misc.h" #include "system/memory.h" +struct VFIOLegacyContainer; struct VFIOContainer; -struct VFIOContainerBase; struct VFIOGroup; struct VFIODevice; struct VFIOPCIDevice; struct VFIOIOMMUFDContainer; struct IOMMUFDBackend; -typedef int (*dma_map_fn)(const struct VFIOContainerBase *bcontainer, - hwaddr iova, ram_addr_t size, void *vaddr, +typedef int (*dma_map_fn)(const struct VFIOContainer *bcontainer, + hwaddr iova, uint64_t size, void *vaddr, bool readonly, MemoryRegion *mr); typedef struct VFIOContainerCPR { @@ -38,9 +38,14 @@ typedef struct VFIODeviceCPR { 
uint32_t ioas_id; } VFIODeviceCPR; -bool vfio_legacy_cpr_register_container(struct VFIOContainer *container, +typedef struct VFIOPCICPR { + NotifierWithReturn transfer_notifier; +} VFIOPCICPR; + +bool vfio_legacy_cpr_register_container(struct VFIOLegacyContainer *container, Error **errp); -void vfio_legacy_cpr_unregister_container(struct VFIOContainer *container); +void vfio_legacy_cpr_unregister_container( + struct VFIOLegacyContainer *container); int vfio_cpr_reboot_notifier(NotifierWithReturn *notifier, MigrationEvent *e, Error **errp); @@ -57,14 +62,14 @@ void vfio_cpr_load_device(struct VFIODevice *vbasedev); int vfio_cpr_group_get_device_fd(int d, const char *name); -bool vfio_cpr_container_match(struct VFIOContainer *container, +bool vfio_cpr_container_match(struct VFIOLegacyContainer *container, struct VFIOGroup *group, int fd); -void vfio_cpr_giommu_remap(struct VFIOContainerBase *bcontainer, +void vfio_cpr_giommu_remap(struct VFIOContainer *bcontainer, MemoryRegionSection *section); -bool vfio_cpr_ram_discard_register_listener( - struct VFIOContainerBase *bcontainer, MemoryRegionSection *section); +bool vfio_cpr_ram_discard_replay_populated( + struct VFIOContainer *bcontainer, MemoryRegionSection *section); void vfio_cpr_save_vector_fd(struct VFIOPCIDevice *vdev, const char *name, int nr, int fd); @@ -77,5 +82,7 @@ extern const VMStateDescription vfio_cpr_pci_vmstate; extern const VMStateDescription vmstate_cpr_vfio_devices; void vfio_cpr_add_kvm_notifier(void); +void vfio_cpr_pci_register_device(struct VFIOPCIDevice *vdev); +void vfio_cpr_pci_unregister_device(struct VFIOPCIDevice *vdev); #endif /* HW_VFIO_VFIO_CPR_H */ diff --git a/include/hw/vfio/vfio-device.h b/include/hw/vfio/vfio-device.h index 6e4d5ccdac6ea..0fe6c60ba2d65 100644 --- a/include/hw/vfio/vfio-device.h +++ b/include/hw/vfio/vfio-device.h @@ -18,8 +18,8 @@ * Copyright (C) 2008, IBM, Muli Ben-Yehuda (muli@il.ibm.com) */ -#ifndef HW_VFIO_VFIO_COMMON_H -#define HW_VFIO_VFIO_COMMON_H 
+#ifndef HW_VFIO_VFIO_DEVICE_H +#define HW_VFIO_VFIO_DEVICE_H #include "system/memory.h" #include "qemu/queue.h" @@ -27,7 +27,7 @@ #include #endif #include "system/system.h" -#include "hw/vfio/vfio-container-base.h" +#include "hw/vfio/vfio-container.h" #include "hw/vfio/vfio-cpr.h" #include "system/host_iommu_device.h" #include "system/iommufd.h" @@ -36,7 +36,7 @@ enum { VFIO_DEVICE_TYPE_PCI = 0, - VFIO_DEVICE_TYPE_PLATFORM = 1, + VFIO_DEVICE_TYPE_UNUSED = 1, VFIO_DEVICE_TYPE_CCW = 2, VFIO_DEVICE_TYPE_AP = 3, }; @@ -54,7 +54,7 @@ typedef struct VFIODevice { QLIST_ENTRY(VFIODevice) container_next; QLIST_ENTRY(VFIODevice) global_next; struct VFIOGroup *group; - VFIOContainerBase *bcontainer; + VFIOContainer *bcontainer; char *sysfsdev; char *name; DeviceState *dev; @@ -74,7 +74,7 @@ typedef struct VFIODevice { VFIODeviceOps *ops; VFIODeviceIOOps *io_ops; unsigned int num_irqs; - unsigned int num_regions; + unsigned int num_initial_regions; unsigned int flags; VFIOMigration *migration; Error *migration_blocker; @@ -252,7 +252,7 @@ struct VFIODeviceIOOps { void *data, bool post); }; -void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainerBase *bcontainer, +void vfio_device_prepare(VFIODevice *vbasedev, VFIOContainer *bcontainer, struct vfio_device_info *info); void vfio_device_unprepare(VFIODevice *vbasedev); @@ -288,4 +288,4 @@ void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops, int vfio_device_get_aw_bits(VFIODevice *vdev); void vfio_kvm_device_close(void); -#endif /* HW_VFIO_VFIO_COMMON_H */ +#endif /* HW_VFIO_VFIO_DEVICE_H */ diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h deleted file mode 100644 index 256d8500b70a2..0000000000000 --- a/include/hw/vfio/vfio-platform.h +++ /dev/null @@ -1,78 +0,0 @@ -/* - * vfio based device assignment support - platform devices - * - * Copyright Linaro Limited, 2014 - * - * Authors: - * Kim Phillips - * - * This work is licensed under the terms of the GNU GPL, version 2. 
See - * the COPYING file in the top-level directory. - * - * Based on vfio based PCI device assignment support: - * Copyright Red Hat, Inc. 2012 - */ - -#ifndef HW_VFIO_VFIO_PLATFORM_H -#define HW_VFIO_VFIO_PLATFORM_H - -#include "hw/sysbus.h" -#include "hw/vfio/vfio-device.h" -#include "qemu/event_notifier.h" -#include "qemu/queue.h" -#include "qom/object.h" - -#define TYPE_VFIO_PLATFORM "vfio-platform" - -enum { - VFIO_IRQ_INACTIVE = 0, - VFIO_IRQ_PENDING = 1, - VFIO_IRQ_ACTIVE = 2, - /* VFIO_IRQ_ACTIVE_AND_PENDING cannot happen with VFIO */ -}; - -typedef struct VFIOINTp { - QLIST_ENTRY(VFIOINTp) next; /* entry for IRQ list */ - QSIMPLEQ_ENTRY(VFIOINTp) pqnext; /* entry for pending IRQ queue */ - EventNotifier *interrupt; /* eventfd triggered on interrupt */ - EventNotifier *unmask; /* eventfd for unmask on QEMU bypass */ - qemu_irq qemuirq; - struct VFIOPlatformDevice *vdev; /* back pointer to device */ - int state; /* inactive, pending, active */ - uint8_t pin; /* index */ - uint32_t flags; /* IRQ info flags */ - bool kvm_accel; /* set when QEMU bypass through KVM enabled */ -} VFIOINTp; - -/* function type for user side eventfd handler */ -typedef void (*eventfd_user_side_handler_t)(VFIOINTp *intp); - -typedef struct VFIORegion VFIORegion; - -struct VFIOPlatformDevice { - SysBusDevice sbdev; - VFIODevice vbasedev; /* not a QOM object */ - VFIORegion **regions; - QLIST_HEAD(, VFIOINTp) intp_list; /* list of IRQs */ - /* queue of pending IRQs */ - QSIMPLEQ_HEAD(, VFIOINTp) pending_intp_queue; - char *compat; /* DT compatible values, separated by NUL */ - unsigned int num_compat; /* number of compatible values */ - uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */ - QEMUTimer *mmap_timer; /* allows fast-path resume after IRQ hit */ - QemuMutex intp_mutex; /* protect the intp_list IRQ state */ - bool irqfd_allowed; /* debug option to force irqfd on/off */ -}; -typedef struct VFIOPlatformDevice VFIOPlatformDevice; - -struct 
VFIOPlatformDeviceClass { - /*< private >*/ - SysBusDeviceClass parent_class; - /*< public >*/ -}; -typedef struct VFIOPlatformDeviceClass VFIOPlatformDeviceClass; - -DECLARE_OBJ_CHECKERS(VFIOPlatformDevice, VFIOPlatformDeviceClass, - VFIO_PLATFORM_DEVICE, TYPE_VFIO_PLATFORM) - -#endif /* HW_VFIO_VFIO_PLATFORM_H */ diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index d6df209a2f0eb..ff94fa1734247 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -95,6 +95,10 @@ typedef int (*vhost_new_worker_op)(struct vhost_dev *dev, struct vhost_worker_state *worker); typedef int (*vhost_free_worker_op)(struct vhost_dev *dev, struct vhost_worker_state *worker); +typedef int (*vhost_set_features_ex_op)(struct vhost_dev *dev, + const uint64_t *features); +typedef int (*vhost_get_features_ex_op)(struct vhost_dev *dev, + uint64_t *features); typedef int (*vhost_set_features_op)(struct vhost_dev *dev, uint64_t features); typedef int (*vhost_get_features_op)(struct vhost_dev *dev, @@ -186,6 +190,8 @@ typedef struct VhostOps { vhost_free_worker_op vhost_free_worker; vhost_get_vring_worker_op vhost_get_vring_worker; vhost_attach_vring_worker_op vhost_attach_vring_worker; + vhost_set_features_ex_op vhost_set_features_ex; + vhost_get_features_ex_op vhost_get_features_ex; vhost_set_features_op vhost_set_features; vhost_get_features_op vhost_get_features; vhost_set_backend_cap_op vhost_set_backend_cap; diff --git a/include/hw/virtio/vhost-user-base.h b/include/hw/virtio/vhost-user-base.h index 51d0968b893aa..387e434b8046d 100644 --- a/include/hw/virtio/vhost-user-base.h +++ b/include/hw/virtio/vhost-user-base.h @@ -44,6 +44,6 @@ struct VHostUserBaseClass { }; -#define TYPE_VHOST_USER_DEVICE "vhost-user-device" +#define TYPE_VHOST_USER_TEST_DEVICE "vhost-user-test-device" #endif /* QEMU_VHOST_USER_BASE_H */ diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 66be6afc88427..08bbb4dfe98af 100644 --- 
a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -107,9 +107,9 @@ struct vhost_dev { * future use should be discouraged and the variable retired as * its easy to confuse with the VirtIO backend_features. */ - uint64_t features; - uint64_t acked_features; - uint64_t backend_features; + VIRTIO_DECLARE_FEATURES(features); + VIRTIO_DECLARE_FEATURES(acked_features); + VIRTIO_DECLARE_FEATURES(backend_features); /** * @protocol_features: is the vhost-user only feature set by @@ -320,6 +320,20 @@ bool vhost_virtqueue_pending(struct vhost_dev *hdev, int n); void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, bool mask); +/** + * vhost_get_features_ex() - sanitize the extended features set + * @hdev: common vhost_dev structure + * @feature_bits: pointer to terminated table of feature bits + * @features: original features set, filtered out on return + * + * This is the extended variant of vhost_get_features(), supporting the + * the extended features set. Filter it with the intersection of what is + * supported by the vhost backend (hdev->features) and the supported + * feature_bits. + */ +void vhost_get_features_ex(struct vhost_dev *hdev, + const int *feature_bits, + uint64_t *features); /** * vhost_get_features() - return a sanitised set of feature bits * @hdev: common vhost_dev structure @@ -330,8 +344,28 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, * is supported by the vhost backend (hdev->features), the supported * feature_bits and the requested feature set. 
*/ -uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, - uint64_t features); +static inline uint64_t vhost_get_features(struct vhost_dev *hdev, + const int *feature_bits, + uint64_t features) +{ + uint64_t features_ex[VIRTIO_FEATURES_NU64S]; + + virtio_features_from_u64(features_ex, features); + vhost_get_features_ex(hdev, feature_bits, features_ex); + return features_ex[0]; +} + +/** + * vhost_ack_features_ex() - set vhost full set of acked_features + * @hdev: common vhost_dev structure + * @feature_bits: pointer to terminated table of feature bits + * @features: requested feature set + * + * This sets the internal hdev->acked_features to the intersection of + * the backends advertised features and the supported feature_bits. + */ +void vhost_ack_features_ex(struct vhost_dev *hdev, const int *feature_bits, + const uint64_t *features); /** * vhost_ack_features() - set vhost acked_features @@ -342,8 +376,16 @@ uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits, * This sets the internal hdev->acked_features to the intersection of * the backends advertised features and the supported feature_bits. */ -void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits, - uint64_t features); +static inline void vhost_ack_features(struct vhost_dev *hdev, + const int *feature_bits, + uint64_t features) +{ + uint64_t features_ex[VIRTIO_FEATURES_NU64S]; + + virtio_features_from_u64(features_ex, features); + vhost_ack_features_ex(hdev, feature_bits, features_ex); +} + unsigned int vhost_get_max_memslots(void); unsigned int vhost_get_free_memslots(void); diff --git a/include/hw/virtio/virtio-features.h b/include/hw/virtio/virtio-features.h new file mode 100644 index 0000000000000..e29b7fe48fcef --- /dev/null +++ b/include/hw/virtio/virtio-features.h @@ -0,0 +1,126 @@ +/* + * Virtio features helpers + * + * Copyright 2025 Red Hat, Inc. 
+ * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef QEMU_VIRTIO_FEATURES_H +#define QEMU_VIRTIO_FEATURES_H + +#include "qemu/bitops.h" + +#define VIRTIO_FEATURES_FMT "%016"PRIx64"%016"PRIx64 +#define VIRTIO_FEATURES_PR(f) (f)[1], (f)[0] + +#define VIRTIO_FEATURES_MAX 128 +#define VIRTIO_FEATURES_BIT(b) BIT_ULL((b) % 64) +#define VIRTIO_FEATURES_U64(b) ((b) / 64) +#define VIRTIO_FEATURES_NU32S (VIRTIO_FEATURES_MAX / 32) +#define VIRTIO_FEATURES_NU64S (VIRTIO_FEATURES_MAX / 64) + +#define VIRTIO_DECLARE_FEATURES(name) \ + union { \ + uint64_t name; \ + uint64_t name##_ex[VIRTIO_FEATURES_NU64S]; \ + } + +#define VIRTIO_DEFINE_PROP_FEATURE(_name, _state, _field, _bit, _defval) \ + DEFINE_PROP_BIT64(_name, _state, _field[VIRTIO_FEATURES_U64(_bit)], \ + (_bit) % 64, _defval) + +static inline void virtio_features_clear(uint64_t *features) +{ + memset(features, 0, sizeof(features[0]) * VIRTIO_FEATURES_NU64S); +} + +static inline void virtio_features_from_u64(uint64_t *features, uint64_t from) +{ + virtio_features_clear(features); + features[0] = from; +} + +static inline bool virtio_has_feature_ex(const uint64_t *features, + unsigned int fbit) +{ + assert(fbit < VIRTIO_FEATURES_MAX); + return features[VIRTIO_FEATURES_U64(fbit)] & VIRTIO_FEATURES_BIT(fbit); +} + +static inline void virtio_add_feature_ex(uint64_t *features, + unsigned int fbit) +{ + assert(fbit < VIRTIO_FEATURES_MAX); + features[VIRTIO_FEATURES_U64(fbit)] |= VIRTIO_FEATURES_BIT(fbit); +} + +static inline void virtio_clear_feature_ex(uint64_t *features, + unsigned int fbit) +{ + assert(fbit < VIRTIO_FEATURES_MAX); + features[VIRTIO_FEATURES_U64(fbit)] &= ~VIRTIO_FEATURES_BIT(fbit); +} + +static inline bool virtio_features_equal(const uint64_t *f1, + const uint64_t *f2) +{ + return !memcmp(f1, f2, sizeof(uint64_t) * VIRTIO_FEATURES_NU64S); +} + +static inline bool virtio_features_use_ex(const uint64_t *features) +{ + int i; + + for (i = 1; i < VIRTIO_FEATURES_NU64S; ++i) { + if (features[i]) { + 
return true; + } + } + return false; +} + +static inline bool virtio_features_empty(const uint64_t *features) +{ + return !virtio_features_use_ex(features) && !features[0]; +} + +static inline void virtio_features_copy(uint64_t *to, const uint64_t *from) +{ + memcpy(to, from, sizeof(to[0]) * VIRTIO_FEATURES_NU64S); +} + +static inline bool virtio_features_andnot(uint64_t *to, const uint64_t *f1, + const uint64_t *f2) +{ + uint64_t diff = 0; + int i; + + for (i = 0; i < VIRTIO_FEATURES_NU64S; i++) { + to[i] = f1[i] & ~f2[i]; + diff |= to[i]; + } + return diff; +} + +static inline void virtio_features_and(uint64_t *to, const uint64_t *f1, + const uint64_t *f2) +{ + int i; + + for (i = 0; i < VIRTIO_FEATURES_NU64S; i++) { + to[i] = f1[i] & f2[i]; + } +} + +static inline void virtio_features_or(uint64_t *to, const uint64_t *f1, + const uint64_t *f2) +{ + int i; + + for (i = 0; i < VIRTIO_FEATURES_NU64S; i++) { + to[i] = f1[i] | f2[i]; + } +} + +#endif diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index 73fdefc0dcb89..5b8ab7bda796c 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -182,7 +182,7 @@ struct VirtIONet { uint32_t has_vnet_hdr; size_t host_hdr_len; size_t guest_hdr_len; - uint64_t host_features; + VIRTIO_DECLARE_FEATURES(host_features); uint32_t rsc_timeout; uint8_t rsc4_enabled; uint8_t rsc6_enabled; diff --git a/include/hw/virtio/virtio-pci.h b/include/hw/virtio/virtio-pci.h index eab5394898d99..639752977ee8a 100644 --- a/include/hw/virtio/virtio-pci.h +++ b/include/hw/virtio/virtio-pci.h @@ -158,7 +158,7 @@ struct VirtIOPCIProxy { uint32_t nvectors; uint32_t dfselect; uint32_t gfselect; - uint32_t guest_features[2]; + uint32_t guest_features[VIRTIO_FEATURES_NU32S]; VirtIOPCIQueue vqs[VIRTIO_QUEUE_MAX]; VirtIOIRQFD *vector_irqfd; diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index c594764f23f40..d97529c3f1eb3 100644 --- a/include/hw/virtio/virtio.h +++ 
b/include/hw/virtio/virtio.h @@ -16,6 +16,7 @@ #include "system/memory.h" #include "hw/qdev-core.h" +#include "hw/virtio/virtio-features.h" #include "net/net.h" #include "migration/vmstate.h" #include "qemu/event_notifier.h" @@ -121,9 +122,9 @@ struct VirtIODevice * backend (e.g. vhost) and could potentially be a subset of the * total feature set offered by QEMU. */ - uint64_t host_features; - uint64_t guest_features; - uint64_t backend_features; + VIRTIO_DECLARE_FEATURES(host_features); + VIRTIO_DECLARE_FEATURES(guest_features); + VIRTIO_DECLARE_FEATURES(backend_features); size_t config_len; void *config; @@ -177,6 +178,9 @@ struct VirtioDeviceClass { /* This is what a VirtioDevice must implement */ DeviceRealize realize; DeviceUnrealize unrealize; + void (*get_features_ex)(VirtIODevice *vdev, uint64_t *requested_features, + Error **errp); + void (*set_features_ex)(VirtIODevice *vdev, const uint64_t *val); uint64_t (*get_features)(VirtIODevice *vdev, uint64_t requested_features, Error **errp); @@ -290,7 +294,6 @@ int virtqueue_get_avail_bytes(VirtQueue *vq, unsigned int *in_bytes, unsigned int *out_bytes, unsigned max_in_bytes, unsigned max_out_bytes); -void virtio_notify_irqfd(VirtIODevice *vdev, VirtQueue *vq); void virtio_notify(VirtIODevice *vdev, VirtQueue *vq); int virtio_save(VirtIODevice *vdev, QEMUFile *f); @@ -372,6 +375,7 @@ void virtio_queue_reset(VirtIODevice *vdev, uint32_t queue_index); void virtio_queue_enable(VirtIODevice *vdev, uint32_t queue_index); void virtio_update_irq(VirtIODevice *vdev); int virtio_set_features(VirtIODevice *vdev, uint64_t val); +int virtio_set_features_ex(VirtIODevice *vdev, const uint64_t *val); /* Base devices. 
*/ typedef struct VirtIOBlkConf VirtIOBlkConf; diff --git a/include/io/channel.h b/include/io/channel.h index 62b657109c7d9..0f25ae0069f9e 100644 --- a/include/io/channel.h +++ b/include/io/channel.h @@ -36,6 +36,7 @@ OBJECT_DECLARE_TYPE(QIOChannel, QIOChannelClass, #define QIO_CHANNEL_READ_FLAG_MSG_PEEK 0x1 #define QIO_CHANNEL_READ_FLAG_RELAXED_EOF 0x2 +#define QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING 0x4 typedef enum QIOChannelFeature QIOChannelFeature; @@ -46,6 +47,7 @@ enum QIOChannelFeature { QIO_CHANNEL_FEATURE_WRITE_ZERO_COPY, QIO_CHANNEL_FEATURE_READ_MSG_PEEK, QIO_CHANNEL_FEATURE_SEEKABLE, + QIO_CHANNEL_FEATURE_CONCURRENT_IO, }; @@ -116,6 +118,15 @@ struct QIOChannelClass { size_t nfds, int flags, Error **errp); + + /* + * The io_readv handler must guarantee that all + * incoming fds are set BLOCKING (unless + * QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING flag is set) and + * CLOEXEC (if available). + * @fds and @nfds are set only on success path. Still, setting + * @fds and @nfds to zero is acceptable on failure path. + */ ssize_t (*io_readv)(QIOChannel *ioc, const struct iovec *iov, size_t niov, @@ -123,6 +134,7 @@ struct QIOChannelClass { size_t *nfds, int flags, Error **errp); + int (*io_close)(QIOChannel *ioc, Error **errp); GSource * (*io_create_watch)(QIOChannel *ioc, @@ -233,6 +245,13 @@ void qio_channel_set_name(QIOChannel *ioc, * was allocated. It is the callers responsibility * to call close() on each file descriptor and to * call g_free() on the array pointer in @fds. + * @fds allocated and set (and @nfds is set too) + * _only_ on success path. Still, @fds and @nfds + * may be set to zero on failure path. + * qio_channel_readv_full() guarantees that all + * incoming fds are set BLOCKING (unless + * QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING flag + * is set) and CLOEXEC (if available). 
* * It is an error to pass a non-NULL @fds parameter * unless qio_channel_has_feature() returns a true @@ -512,9 +531,9 @@ int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc, * return QIO_CHANNEL_ERR_BLOCK if they would otherwise * block on I/O */ -int qio_channel_set_blocking(QIOChannel *ioc, - bool enabled, - Error **errp); +bool qio_channel_set_blocking(QIOChannel *ioc, + bool enabled, + Error **errp); /** * qio_channel_set_follow_coroutine_ctx: diff --git a/include/migration/colo.h b/include/migration/colo.h index 43222ef5ae6ad..d4fe422e4d335 100644 --- a/include/migration/colo.h +++ b/include/migration/colo.h @@ -25,7 +25,7 @@ void migrate_start_colo_process(MigrationState *s); bool migration_in_colo_state(void); /* loadvm */ -int migration_incoming_enable_colo(void); +int migration_incoming_enable_colo(Error **errp); void migration_incoming_disable_colo(void); bool migration_incoming_colo_enabled(void); bool migration_incoming_in_colo_state(void); diff --git a/include/migration/cpr.h b/include/migration/cpr.h index 3fc19a74efdcf..a412d6663c3df 100644 --- a/include/migration/cpr.h +++ b/include/migration/cpr.h @@ -34,6 +34,9 @@ void cpr_resave_fd(const char *name, int id, int fd); int cpr_open_fd(const char *path, int flags, const char *name, int id, Error **errp); +typedef bool (*cpr_walk_fd_cb)(int fd); +bool cpr_walk_fd(cpr_walk_fd_cb cb); + MigMode cpr_get_incoming_mode(void); void cpr_set_incoming_mode(MigMode mode); bool cpr_is_incoming(void); @@ -50,4 +53,11 @@ int cpr_get_fd_param(const char *name, const char *fdname, int index, QEMUFile *cpr_transfer_output(MigrationChannel *channel, Error **errp); QEMUFile *cpr_transfer_input(MigrationChannel *channel, Error **errp); +void cpr_exec_init(void); +QEMUFile *cpr_exec_output(Error **errp); +QEMUFile *cpr_exec_input(Error **errp); +void cpr_exec_persist_state(QEMUFile *f); +bool cpr_exec_has_state(void); +void cpr_exec_unpersist_state(void); +void cpr_exec_unpreserve_fds(void); #endif diff --git 
a/include/migration/misc.h b/include/migration/misc.h index a261f99d89051..592b93021eb42 100644 --- a/include/migration/misc.h +++ b/include/migration/misc.h @@ -95,7 +95,19 @@ void migration_add_notifier(NotifierWithReturn *notify, void migration_add_notifier_mode(NotifierWithReturn *notify, MigrationNotifyFunc func, MigMode mode); +/* + * Same as migration_add_notifier, but applies to all @mode in the argument + * list. The list is terminated by -1 or MIG_MODE_ALL. For the latter, + * the notifier is added for all modes. + */ +void migration_add_notifier_modes(NotifierWithReturn *notify, + MigrationNotifyFunc func, MigMode mode, ...); + +/* + * Remove a notifier from all modes. + */ void migration_remove_notifier(NotifierWithReturn *notify); + void migration_file_set_error(int ret, Error *err); /* True if incoming migration entered POSTCOPY_INCOMING_DISCARD */ diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index 1ff7bd9ac425b..63ccaee07ad6e 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -163,6 +163,7 @@ typedef enum { MIG_PRI_IOMMU, /* Must happen before PCI devices */ MIG_PRI_PCI_BUS, /* Must happen before IOMMU */ MIG_PRI_VIRTIO_MEM, /* Must happen before IOMMU */ + MIG_PRI_APIC, /* Must happen before PCI devices */ MIG_PRI_GICV3_ITS, /* Must happen before PCI devices */ MIG_PRI_GICV3, /* Must happen before the ITS */ MIG_PRI_MAX, @@ -200,14 +201,28 @@ struct VMStateDescription { * exclusive. For this reason, also early_setup VMSDs are migrated in a * QEMU_VM_SECTION_FULL section, while save_setup() data is migrated in * a QEMU_VM_SECTION_START section. + * + * There are duplicate impls of the post/pre save/load hooks. + * New impls should preferentally use 'errp' variants of these + * methods and existing impls incrementally converted. + * The variants without 'errp' are intended to be removed + * once all usage is converted. 
+ * + * For the errp variants, + * Returns: 0 on success, + * <0 on error where -value is an error number from errno.h */ + bool early_setup; int version_id; int minimum_version_id; MigrationPriority priority; int (*pre_load)(void *opaque); + int (*pre_load_errp)(void *opaque, Error **errp); int (*post_load)(void *opaque, int version_id); + int (*post_load_errp)(void *opaque, int version_id, Error **errp); int (*pre_save)(void *opaque); + int (*pre_save_errp)(void *opaque, Error **errp); int (*post_save)(void *opaque); bool (*needed)(void *opaque); bool (*dev_unplug_pending)(void *opaque); @@ -522,6 +537,16 @@ extern const VMStateInfo vmstate_info_qlist; .offset = vmstate_offset_array(_s, _f, _type*, _n), \ } +#define VMSTATE_VARRAY_OF_POINTER_UINT32(_field, _state, _field_num, _version, _info, _type) { \ + .name = (stringify(_field)), \ + .version_id = (_version), \ + .num_offset = vmstate_offset_value(_state, _field_num, uint32_t), \ + .info = &(_info), \ + .size = sizeof(_type), \ + .flags = VMS_VARRAY_UINT32 | VMS_ARRAY_OF_POINTER | VMS_POINTER, \ + .offset = vmstate_offset_pointer(_state, _field, _type), \ +} + #define VMSTATE_STRUCT_SUB_ARRAY(_field, _state, _start, _num, _version, _vmsd, _type) { \ .name = (stringify(_field)), \ .version_id = (_version), \ @@ -1196,10 +1221,8 @@ extern const VMStateInfo vmstate_info_qlist; } int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque, int version_id); + void *opaque, int version_id, Error **errp); int vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque, JSONWriter *vmdesc); -int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc, Error **errp); int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc, diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index ae116d9804a3e..83721b5ffc6de 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ 
-24,6 +24,7 @@ strList *hmp_split_at_comma(const char *str); void hmp_info_name(Monitor *mon, const QDict *qdict); void hmp_info_version(Monitor *mon, const QDict *qdict); void hmp_info_kvm(Monitor *mon, const QDict *qdict); +void hmp_info_accelerators(Monitor *mon, const QDict *qdict); void hmp_info_status(Monitor *mon, const QDict *qdict); void hmp_info_uuid(Monitor *mon, const QDict *qdict); void hmp_info_chardev(Monitor *mon, const QDict *qdict); @@ -178,5 +179,6 @@ void hmp_boot_set(Monitor *mon, const QDict *qdict); void hmp_info_mtree(Monitor *mon, const QDict *qdict); void hmp_info_cryptodev(Monitor *mon, const QDict *qdict); void hmp_dumpdtb(Monitor *mon, const QDict *qdict); +void hmp_info_firmware_log(Monitor *mon, const QDict *qdict); #endif diff --git a/include/net/net.h b/include/net/net.h index 84ee18e0f9727..72b476ee1dc49 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -35,6 +35,18 @@ typedef struct NICConf { int32_t bootindex; } NICConf; +typedef struct NetOffloads { + bool csum; + bool tso4; + bool tso6; + bool ecn; + bool ufo; + bool uso4; + bool uso6; + bool tnl; + bool tnl_csum; +} NetOffloads; + #define DEFINE_NIC_PROPERTIES(_state, _conf) \ DEFINE_PROP_MACADDR("mac", _state, _conf.macaddr), \ DEFINE_PROP_NETDEV("netdev", _state, _conf.peers) @@ -55,9 +67,10 @@ typedef void (NetClientDestructor)(NetClientState *); typedef RxFilterInfo *(QueryRxFilter)(NetClientState *); typedef bool (HasUfo)(NetClientState *); typedef bool (HasUso)(NetClientState *); +typedef bool (HasTunnel)(NetClientState *); typedef bool (HasVnetHdr)(NetClientState *); typedef bool (HasVnetHdrLen)(NetClientState *, int); -typedef void (SetOffload)(NetClientState *, int, int, int, int, int, int, int); +typedef void (SetOffload)(NetClientState *, const NetOffloads *); typedef int (GetVnetHdrLen)(NetClientState *); typedef void (SetVnetHdrLen)(NetClientState *, int); typedef bool (GetVnetHashSupportedTypes)(NetClientState *, uint32_t *); @@ -85,6 +98,7 @@ typedef 
struct NetClientInfo { NetPoll *poll; HasUfo *has_ufo; HasUso *has_uso; + HasTunnel *has_tunnel; HasVnetHdr *has_vnet_hdr; HasVnetHdrLen *has_vnet_hdr_len; SetOffload *set_offload; @@ -187,10 +201,10 @@ void qemu_set_info_str(NetClientState *nc, void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]); bool qemu_has_ufo(NetClientState *nc); bool qemu_has_uso(NetClientState *nc); +bool qemu_has_tunnel(NetClientState *nc); bool qemu_has_vnet_hdr(NetClientState *nc); bool qemu_has_vnet_hdr_len(NetClientState *nc, int len); -void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6, - int ecn, int ufo, int uso4, int uso6); +void qemu_set_offload(NetClientState *nc, const NetOffloads *ol); int qemu_get_vnet_hdr_len(NetClientState *nc); void qemu_set_vnet_hdr_len(NetClientState *nc, int len); bool qemu_get_vnet_hash_supported_types(NetClientState *nc, uint32_t *types); diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index 879781dad7b63..0225207491287 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -2,6 +2,7 @@ #define VHOST_NET_H #include "net/net.h" +#include "hw/virtio/virtio-features.h" #include "hw/virtio/vhost-backend.h" struct vhost_net; @@ -33,8 +34,26 @@ void vhost_net_stop(VirtIODevice *dev, NetClientState *ncs, void vhost_net_cleanup(VHostNetState *net); -uint64_t vhost_net_get_features(VHostNetState *net, uint64_t features); -void vhost_net_ack_features(VHostNetState *net, uint64_t features); +void vhost_net_get_features_ex(VHostNetState *net, uint64_t *features); +static inline uint64_t vhost_net_get_features(VHostNetState *net, + uint64_t features) +{ + uint64_t features_array[VIRTIO_FEATURES_NU64S]; + + virtio_features_from_u64(features_array, features); + vhost_net_get_features_ex(net, features_array); + return features_array[0]; +} + +void vhost_net_ack_features_ex(VHostNetState *net, const uint64_t *features); +static inline void vhost_net_ack_features(VHostNetState *net, + uint64_t features) 
+{ + uint64_t features_array[VIRTIO_FEATURES_NU64S]; + + virtio_features_from_u64(features_array, features); + vhost_net_ack_features_ex(net, features_array); +} int vhost_net_get_config(struct vhost_net *net, uint8_t *config, uint32_t config_len); @@ -51,7 +70,15 @@ VHostNetState *get_vhost_net(NetClientState *nc); int vhost_net_set_vring_enable(NetClientState *nc, int enable); -uint64_t vhost_net_get_acked_features(VHostNetState *net); +void vhost_net_get_acked_features_ex(VHostNetState *net, uint64_t *features); +static inline uint64_t vhost_net_get_acked_features(VHostNetState *net) +{ + uint64_t features[VIRTIO_FEATURES_NU64S]; + + vhost_net_get_acked_features_ex(net, features); + assert(!virtio_features_use_ex(features)); + return features[0]; +} int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu); diff --git a/include/qapi/error.h b/include/qapi/error.h index 41e3816380490..b16c6303f83ee 100644 --- a/include/qapi/error.h +++ b/include/qapi/error.h @@ -533,12 +533,6 @@ static inline void error_propagator_cleanup(ErrorPropagator *prop) G_DEFINE_AUTO_CLEANUP_CLEAR_FUNC(ErrorPropagator, error_propagator_cleanup); -/* - * Special error destination to warn on error. - * See error_setg() and error_propagate() for details. - */ -extern Error *error_warn; - /* * Special error destination to abort on error. * See error_setg() and error_propagate() for details. diff --git a/include/qemu/compiler.h b/include/qemu/compiler.h index 65b89958d36fe..1c2b673c05836 100644 --- a/include/qemu/compiler.h +++ b/include/qemu/compiler.h @@ -182,19 +182,6 @@ #define QEMU_DISABLE_CFI #endif -/* - * Apple clang version 14 has a bug in its __builtin_subcll(); define - * BUILTIN_SUBCLL_BROKEN for the offending versions so we can avoid it. - * When a version of Apple clang which has this bug fixed is released - * we can add an upper bound to this check. - * See https://gitlab.com/qemu-project/qemu/-/issues/1631 - * and https://gitlab.com/qemu-project/qemu/-/issues/1659 for details. 
- * The bug never made it into any upstream LLVM releases, only Apple ones. - */ -#if defined(__apple_build_version__) && __clang_major__ >= 14 -#define BUILTIN_SUBCLL_BROKEN -#endif - #if __has_attribute(annotate) #define QEMU_ANNOTATE(x) __attribute__((annotate(x))) #else diff --git a/include/qemu/cpuid.h b/include/qemu/cpuid.h index b11161555b691..de7a9005096a8 100644 --- a/include/qemu/cpuid.h +++ b/include/qemu/cpuid.h @@ -68,6 +68,9 @@ #ifndef bit_AVX512VBMI2 #define bit_AVX512VBMI2 (1 << 6) #endif +#ifndef bit_GFNI +#define bit_GFNI (1 << 8) +#endif /* Leaf 0x80000001, %ecx */ #ifndef bit_LZCNT diff --git a/include/qemu/host-utils.h b/include/qemu/host-utils.h index 4d28fa22cfa8d..dd558589cb523 100644 --- a/include/qemu/host-utils.h +++ b/include/qemu/host-utils.h @@ -677,7 +677,7 @@ static inline uint64_t uadd64_carry(uint64_t x, uint64_t y, bool *pcarry) */ static inline uint64_t usub64_borrow(uint64_t x, uint64_t y, bool *pborrow) { -#if __has_builtin(__builtin_subcll) && !defined(BUILTIN_SUBCLL_BROKEN) +#if __has_builtin(__builtin_subcll) unsigned long long b = *pborrow; x = __builtin_subcll(x, y, b, &b); *pborrow = b & 1; diff --git a/include/qemu/log-for-trace.h b/include/qemu/log-for-trace.h index d47c9cd4462b7..f3a8791f1d4ec 100644 --- a/include/qemu/log-for-trace.h +++ b/include/qemu/log-for-trace.h @@ -19,9 +19,9 @@ #define QEMU_LOG_FOR_TRACE_H /* Private global variable, don't use */ -extern int qemu_loglevel; +extern unsigned qemu_loglevel; -#define LOG_TRACE (1 << 15) +#define LOG_TRACE (1u << 15) /* Returns true if a bit is set in the current loglevel mask */ static inline bool qemu_loglevel_mask(int mask) diff --git a/include/qemu/log.h b/include/qemu/log.h index aae72985f0dfc..7effba4da4cac 100644 --- a/include/qemu/log.h +++ b/include/qemu/log.h @@ -14,30 +14,30 @@ bool qemu_log_enabled(void); /* Returns true if qemu_log() will write somewhere other than stderr. 
*/ bool qemu_log_separate(void); -#define CPU_LOG_TB_OUT_ASM (1 << 0) -#define CPU_LOG_TB_IN_ASM (1 << 1) -#define CPU_LOG_TB_OP (1 << 2) -#define CPU_LOG_TB_OP_OPT (1 << 3) -#define CPU_LOG_INT (1 << 4) -#define CPU_LOG_EXEC (1 << 5) -#define CPU_LOG_PCALL (1 << 6) -#define CPU_LOG_TB_CPU (1 << 8) -#define CPU_LOG_RESET (1 << 9) -#define LOG_UNIMP (1 << 10) -#define LOG_GUEST_ERROR (1 << 11) -#define CPU_LOG_MMU (1 << 12) -#define CPU_LOG_TB_NOCHAIN (1 << 13) -#define CPU_LOG_PAGE (1 << 14) +#define CPU_LOG_TB_OUT_ASM (1u << 0) +#define CPU_LOG_TB_IN_ASM (1u << 1) +#define CPU_LOG_TB_OP (1u << 2) +#define CPU_LOG_TB_OP_OPT (1u << 3) +#define CPU_LOG_INT (1u << 4) +#define CPU_LOG_EXEC (1u << 5) +#define CPU_LOG_PCALL (1u << 6) +#define CPU_LOG_TB_CPU (1u << 8) +#define CPU_LOG_RESET (1u << 9) +#define LOG_UNIMP (1u << 10) +#define LOG_GUEST_ERROR (1u << 11) +#define CPU_LOG_MMU (1u << 12) +#define CPU_LOG_TB_NOCHAIN (1u << 13) +#define CPU_LOG_PAGE (1u << 14) /* LOG_TRACE (1 << 15) is defined in log-for-trace.h */ -#define CPU_LOG_TB_OP_IND (1 << 16) -#define CPU_LOG_TB_FPU (1 << 17) -#define CPU_LOG_PLUGIN (1 << 18) +#define CPU_LOG_TB_OP_IND (1u << 16) +#define CPU_LOG_TB_FPU (1u << 17) +#define CPU_LOG_PLUGIN (1u << 18) /* LOG_STRACE is used for user-mode strace logging. */ -#define LOG_STRACE (1 << 19) -#define LOG_PER_THREAD (1 << 20) -#define CPU_LOG_TB_VPU (1 << 21) -#define LOG_TB_OP_PLUGIN (1 << 22) -#define LOG_INVALID_MEM (1 << 23) +#define LOG_STRACE (1u << 19) +#define LOG_PER_THREAD (1u << 20) +#define CPU_LOG_TB_VPU (1u << 21) +#define LOG_TB_OP_PLUGIN (1u << 22) +#define LOG_INVALID_MEM (1u << 23) /* Lock/unlock output. 
*/ diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index 96fe51bc39046..cf8d7cf7e6132 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -133,6 +133,14 @@ QEMU_EXTERN_C int daemon(int, int); #include #include +/* + * Avoid conflict with linux/arch/powerpc/include/uapi/asm/elf.h, included + * from , but we might as well do this unconditionally. + */ +#undef ELF_CLASS +#undef ELF_DATA +#undef ELF_ARCH + #ifdef CONFIG_IOVEC #include #endif @@ -553,7 +561,7 @@ int madvise(char *, size_t, int); #if defined(__linux__) && \ (defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) \ - || defined(__powerpc64__)) + || defined(__powerpc64__) || defined(__riscv)) /* Use 2 MiB alignment so transparent hugepages can be used by KVM. Valgrind does not support alignments larger than 1 MiB, therefore we need special code which handles running on Valgrind. */ @@ -679,6 +687,16 @@ ssize_t qemu_write_full(int fd, const void *buf, size_t count) G_GNUC_WARN_UNUSED_RESULT; void qemu_set_cloexec(int fd); +bool qemu_set_blocking(int fd, bool block, Error **errp); + +/* + * Clear FD_CLOEXEC for a descriptor. + * + * The caller must guarantee that no other fork+exec's occur before the + * exec that is intended to inherit this descriptor, eg by suspending CPUs + * and blocking monitor commands. + */ +void qemu_clear_cloexec(int fd); /* Return a dynamically allocated directory path that is appropriate for storing * local state. 
diff --git a/include/qemu/sockets.h b/include/qemu/sockets.h index c562690d89371..be351d85f7be5 100644 --- a/include/qemu/sockets.h +++ b/include/qemu/sockets.h @@ -46,9 +46,6 @@ ssize_t qemu_send_full(int s, const void *buf, size_t count) G_GNUC_WARN_UNUSED_RESULT; int socket_set_cork(int fd, int v); int socket_set_nodelay(int fd); -void qemu_socket_set_block(int fd); -int qemu_socket_try_set_nonblock(int fd); -void qemu_socket_set_nonblock(int fd); int socket_set_fast_reuse(int fd); #ifdef WIN32 diff --git a/include/qemu/target-info-impl.h b/include/qemu/target-info-impl.h index 17887f64e26a4..e446585bf538f 100644 --- a/include/qemu/target-info-impl.h +++ b/include/qemu/target-info-impl.h @@ -9,6 +9,7 @@ #ifndef QEMU_TARGET_INFO_IMPL_H #define QEMU_TARGET_INFO_IMPL_H +#include "qapi/qapi-types-common.h" #include "qapi/qapi-types-machine.h" typedef struct TargetInfo { diff --git a/include/qemu/timer.h b/include/qemu/timer.h index abd2204f3beac..406d741120302 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -699,7 +699,7 @@ void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time); * * Returns: true if the timer is pending */ -bool timer_pending(QEMUTimer *ts); +bool timer_pending(const QEMUTimer *ts); /** * timer_expired: @@ -710,7 +710,7 @@ bool timer_pending(QEMUTimer *ts); * * Returns: true if the timer has expired */ -bool timer_expired(QEMUTimer *timer_head, int64_t current_time); +bool timer_expired(const QEMUTimer *timer_head, int64_t current_time); /** * timer_expire_time_ns: @@ -720,7 +720,7 @@ bool timer_expired(QEMUTimer *timer_head, int64_t current_time); * * Returns: the expiry time in nanoseconds */ -uint64_t timer_expire_time_ns(QEMUTimer *ts); +uint64_t timer_expire_time_ns(const QEMUTimer *ts); /** * timer_get: @@ -850,12 +850,11 @@ static inline int64_t get_clock(void) /*******************************************/ /* host CPU ticks (if available) */ -#if defined(_ARCH_PPC) +#if defined(_ARCH_PPC64) static inline int64_t 
cpu_get_host_ticks(void) { int64_t retval; -#ifdef _ARCH_PPC64 /* This reads timebase in one 64bit go and includes Cell workaround from: http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html */ @@ -863,16 +862,6 @@ static inline int64_t cpu_get_host_ticks(void) "cmpwi %0,0\n\t" "beq- $-8" : "=r" (retval)); -#else - /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */ - unsigned long junk; - __asm__ __volatile__ ("mfspr %1,269\n\t" /* mftbu */ - "mfspr %L0,268\n\t" /* mftb */ - "mfspr %0,269\n\t" /* mftbu */ - "cmpw %0,%1\n\t" - "bne $-16" - : "=r" (retval), "=r" (junk)); -#endif return retval; } diff --git a/include/qemu/xattr.h b/include/qemu/xattr.h index b08a934acc2db..224ba1276e685 100644 --- a/include/qemu/xattr.h +++ b/include/qemu/xattr.h @@ -26,7 +26,11 @@ # define ENOATTR ENODATA # endif # ifndef CONFIG_WIN32 -# include +# ifdef CONFIG_FREEBSD +# include +# else +# include +# endif # endif #endif diff --git a/include/qobject/qdict.h b/include/qobject/qdict.h index 903e6e5462fb2..861996f08dd6a 100644 --- a/include/qobject/qdict.h +++ b/include/qobject/qdict.h @@ -57,6 +57,7 @@ void qdict_put_str(QDict *qdict, const char *key, const char *value); double qdict_get_double(const QDict *qdict, const char *key); int64_t qdict_get_int(const QDict *qdict, const char *key); +uint64_t qdict_get_uint(const QDict *qdict, const char *key); bool qdict_get_bool(const QDict *qdict, const char *key); QList *qdict_get_qlist(const QDict *qdict, const char *key); QDict *qdict_get_qdict(const QDict *qdict, const char *key); diff --git a/include/semihosting/common-semi.h b/include/semihosting/common-semi.h index 0a91db7c4149a..aa511a46f42c8 100644 --- a/include/semihosting/common-semi.h +++ b/include/semihosting/common-semi.h @@ -35,5 +35,11 @@ #define COMMON_SEMI_H void do_common_semihosting(CPUState *cs); +uint64_t common_semi_arg(CPUState *cs, int argno); +void common_semi_set_ret(CPUState *cs, uint64_t ret); +bool 
is_64bit_semihosting(CPUArchState *env); +bool common_semi_sys_exit_is_extended(CPUState *cs); +uint64_t common_semi_stack_bottom(CPUState *cs); +bool common_semi_has_synccache(CPUArchState *env); #endif /* COMMON_SEMI_H */ diff --git a/include/semihosting/guestfd.h b/include/semihosting/guestfd.h index 3d426fedab390..a7ea1041ea0b6 100644 --- a/include/semihosting/guestfd.h +++ b/include/semihosting/guestfd.h @@ -35,13 +35,6 @@ typedef struct GuestFD { }; } GuestFD; -/* - * For ARM semihosting, we have a separate structure for routing - * data for the console which is outside the guest fd address space. - */ -extern GuestFD console_in_gf; -extern GuestFD console_out_gf; - /** * alloc_guestfd: * diff --git a/include/semihosting/semihost.h b/include/semihosting/semihost.h index b03e6375787f7..231dc8903955d 100644 --- a/include/semihosting/semihost.h +++ b/include/semihosting/semihost.h @@ -33,6 +33,8 @@ typedef enum SemihostingTarget { * Return true if guest code is allowed to make semihosting calls. 
*/ bool semihosting_enabled(bool is_user); +bool semihosting_arm_compatible(void); +void semihosting_arm_compatible_init(void); SemihostingTarget semihosting_get_target(void); const char *semihosting_get_arg(int i); diff --git a/include/semihosting/syscalls.h b/include/semihosting/syscalls.h index 6627c45fb281a..03aa45b7bb957 100644 --- a/include/semihosting/syscalls.h +++ b/include/semihosting/syscalls.h @@ -9,7 +9,7 @@ #ifndef SEMIHOSTING_SYSCALLS_H #define SEMIHOSTING_SYSCALLS_H -#include "exec/cpu-defs.h" +#include "exec/vaddr.h" #include "gdbstub/syscalls.h" /* @@ -24,23 +24,23 @@ typedef struct GuestFD GuestFD; void semihost_sys_open(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, + vaddr fname, uint64_t fname_len, int gdb_flags, int mode); void semihost_sys_close(CPUState *cs, gdb_syscall_complete_cb complete, int fd); void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete, - int fd, target_ulong buf, target_ulong len); + int fd, vaddr buf, uint64_t len); void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len); + GuestFD *gf, vaddr buf, uint64_t len); void semihost_sys_write(CPUState *cs, gdb_syscall_complete_cb complete, - int fd, target_ulong buf, target_ulong len); + int fd, vaddr buf, uint64_t len); void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len); + GuestFD *gf, vaddr buf, uint64_t len); void semihost_sys_lseek(CPUState *cs, gdb_syscall_complete_cb complete, int fd, int64_t off, int gdb_whence); @@ -50,27 +50,27 @@ void semihost_sys_isatty(CPUState *cs, gdb_syscall_complete_cb complete, void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb, gdb_syscall_complete_cb flen_cb, - int fd, target_ulong fstat_addr); + int fd, vaddr fstat_addr); void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete, - int fd, target_ulong 
addr); + int fd, vaddr addr); void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, - target_ulong addr); + vaddr fname, uint64_t fname_len, + vaddr addr); void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len); + vaddr fname, uint64_t fname_len); void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong oname, target_ulong oname_len, - target_ulong nname, target_ulong nname_len); + vaddr oname, uint64_t oname_len, + vaddr nname, uint64_t nname_len); void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong cmd, target_ulong cmd_len); + vaddr cmd, uint64_t cmd_len); void semihost_sys_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong tv_addr, target_ulong tz_addr); + vaddr tv_addr, vaddr tz_addr); void semihost_sys_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, int fd, GIOCondition cond, int timeout); diff --git a/include/standard-headers/drm/drm_fourcc.h b/include/standard-headers/drm/drm_fourcc.h index c8309d378bff7..cef077dfb35a6 100644 --- a/include/standard-headers/drm/drm_fourcc.h +++ b/include/standard-headers/drm/drm_fourcc.h @@ -209,6 +209,10 @@ extern "C" { #define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */ #define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */ +/* 48 bpp RGB */ +#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */ +#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */ + /* 64 bpp RGB */ #define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */ #define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */ @@ 
-217,7 +221,7 @@ extern "C" { #define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */ /* - * Floating point 64bpp RGB + * Half-Floating point - 16b/component * IEEE 754-2008 binary16 half-precision float * [15:0] sign:exponent:mantissa 1:5:10 */ @@ -227,6 +231,20 @@ extern "C" { #define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */ #define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */ +#define DRM_FORMAT_R16F fourcc_code('R', ' ', ' ', 'H') /* [15:0] R 16 little endian */ +#define DRM_FORMAT_GR1616F fourcc_code('G', 'R', ' ', 'H') /* [31:0] G:R 16:16 little endian */ +#define DRM_FORMAT_BGR161616F fourcc_code('B', 'G', 'R', 'H') /* [47:0] B:G:R 16:16:16 little endian */ + +/* + * Floating point - 32b/component + * IEEE 754-2008 binary32 float + * [31:0] sign:exponent:mantissa 1:8:23 + */ +#define DRM_FORMAT_R32F fourcc_code('R', ' ', ' ', 'F') /* [31:0] R 32 little endian */ +#define DRM_FORMAT_GR3232F fourcc_code('G', 'R', ' ', 'F') /* [63:0] R:G 32:32 little endian */ +#define DRM_FORMAT_BGR323232F fourcc_code('B', 'G', 'R', 'F') /* [95:0] R:G:B 32:32:32 little endian */ +#define DRM_FORMAT_ABGR32323232F fourcc_code('A', 'B', '8', 'F') /* [127:0] R:G:B:A 32:32:32:32 little endian */ + /* * RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits * of unused padding per component: @@ -376,6 +394,42 @@ extern "C" { */ #define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1') +/* + * 3 plane YCbCr LSB aligned + * In order to use these formats in a similar fashion to MSB aligned ones + * implementation can multiply the values by 2^6=64. For that reason the padding + * must only contain zeros. 
+ * index 0 = Y plane, [15:0] z:Y [6:10] little endian + * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian + * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian + */ +#define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */ +#define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */ +#define DRM_FORMAT_S410 fourcc_code('S', '4', '1', '0') /* non-subsampled Cb (1) and Cr (2) planes 10 bits per channel */ + +/* + * 3 plane YCbCr LSB aligned + * In order to use these formats in a similar fashion to MSB aligned ones + * implementation can multiply the values by 2^4=16. For that reason the padding + * must only contain zeros. + * index 0 = Y plane, [15:0] z:Y [4:12] little endian + * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian + * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian + */ +#define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */ +#define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */ +#define DRM_FORMAT_S412 fourcc_code('S', '4', '1', '2') /* non-subsampled Cb (1) and Cr (2) planes 12 bits per channel */ + +/* + * 3 plane YCbCr + * index 0 = Y plane, [15:0] Y little endian + * index 1 = Cr plane, [15:0] Cr little endian + * index 2 = Cb plane, [15:0] Cb little endian + */ +#define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */ +#define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */ +#define DRM_FORMAT_S416 fourcc_code('S', '4', '1', '6') /* non-subsampled Cb (1) and Cr (2) planes 16 bits per channel */ + /* * 3 plane YCbCr * index 0: Y plane, [7:0] Y diff --git a/include/standard-headers/linux/ethtool.h b/include/standard-headers/linux/ethtool.h index 
cef0d207a6279..eb803140283d2 100644 --- a/include/standard-headers/linux/ethtool.h +++ b/include/standard-headers/linux/ethtool.h @@ -2314,7 +2314,7 @@ enum { IPV6_USER_FLOW = 0x0e, /* spec only (usr_ip6_spec; nfc only) */ IPV4_FLOW = 0x10, /* hash only */ IPV6_FLOW = 0x11, /* hash only */ - ETHER_FLOW = 0x12, /* spec only (ether_spec) */ + ETHER_FLOW = 0x12, /* hash or spec (ether_spec) */ /* Used for GTP-U IPv4 and IPv6. * The format of GTP packets only includes @@ -2371,7 +2371,7 @@ enum { /* Flag to enable RSS spreading of traffic matching rule (nfc only) */ #define FLOW_RSS 0x20000000 -/* L3-L4 network traffic flow hash options */ +/* L2-L4 network traffic flow hash options */ #define RXH_L2DA (1 << 1) #define RXH_VLAN (1 << 2) #define RXH_L3_PROTO (1 << 3) diff --git a/include/standard-headers/linux/input-event-codes.h b/include/standard-headers/linux/input-event-codes.h index a82ff795e068b..00dc9caac923a 100644 --- a/include/standard-headers/linux/input-event-codes.h +++ b/include/standard-headers/linux/input-event-codes.h @@ -601,6 +601,11 @@ #define BTN_DPAD_LEFT 0x222 #define BTN_DPAD_RIGHT 0x223 +#define BTN_GRIPL 0x224 +#define BTN_GRIPR 0x225 +#define BTN_GRIPL2 0x226 +#define BTN_GRIPR2 0x227 + #define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ #define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */ #define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */ @@ -765,6 +770,9 @@ #define KEY_KBD_LCD_MENU4 0x2bb #define KEY_KBD_LCD_MENU5 0x2bc +/* Performance Boost key (Alienware)/G-Mode key (Dell) */ +#define KEY_PERFORMANCE 0x2bd + #define BTN_TRIGGER_HAPPY 0x2c0 #define BTN_TRIGGER_HAPPY1 0x2c0 #define BTN_TRIGGER_HAPPY2 0x2c1 diff --git a/include/standard-headers/linux/input.h b/include/standard-headers/linux/input.h index 942ea6aaa9773..d4512c20b543f 100644 --- a/include/standard-headers/linux/input.h +++ b/include/standard-headers/linux/input.h @@ -272,6 +272,7 @@ struct input_mask { #define BUS_CEC 0x1E #define 
BUS_INTEL_ISHTP 0x1F #define BUS_AMD_SFH 0x20 +#define BUS_SDW 0x21 /* * MT_TOOL types diff --git a/include/standard-headers/linux/pci_regs.h b/include/standard-headers/linux/pci_regs.h index a3a3e942dedff..f5b17745de607 100644 --- a/include/standard-headers/linux/pci_regs.h +++ b/include/standard-headers/linux/pci_regs.h @@ -745,6 +745,7 @@ #define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ #define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */ +#define PCI_EXT_CAP_ID_VF_REBAR 0x24 /* VF Resizable BAR */ #define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ #define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ #define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */ @@ -1141,6 +1142,14 @@ #define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */ #define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff) +/* VF Resizable BARs, same layout as PCI_REBAR */ +#define PCI_VF_REBAR_CAP PCI_REBAR_CAP +#define PCI_VF_REBAR_CAP_SIZES PCI_REBAR_CAP_SIZES +#define PCI_VF_REBAR_CTRL PCI_REBAR_CTRL +#define PCI_VF_REBAR_CTRL_BAR_IDX PCI_REBAR_CTRL_BAR_IDX +#define PCI_VF_REBAR_CTRL_NBAR_MASK PCI_REBAR_CTRL_NBAR_MASK +#define PCI_VF_REBAR_CTRL_BAR_SIZE PCI_REBAR_CTRL_BAR_SIZE + /* Data Link Feature */ #define PCI_DLF_CAP 0x04 /* Capabilities Register */ #define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */ diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h index fd54044936fc5..79b53a931a8c3 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -110,6 +110,11 @@ struct vhost_msg_v2 { }; }; +struct vhost_features_array { + uint64_t count; /* number of entries present in features array */ + uint64_t features[] ; +}; + struct vhost_memory_region { uint64_t guest_phys_addr; uint64_t memory_size; /* bytes */ diff --git 
a/include/standard-headers/linux/virtio_net.h b/include/standard-headers/linux/virtio_net.h index 982e854f14e49..93abaae0b9079 100644 --- a/include/standard-headers/linux/virtio_net.h +++ b/include/standard-headers/linux/virtio_net.h @@ -70,6 +70,28 @@ * with the same MAC. */ #define VIRTIO_NET_F_SPEED_DUPLEX 63 /* Device set linkspeed and duplex */ +#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO 65 /* Driver can receive + * GSO-over-UDP-tunnel packets + */ +#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM 66 /* Driver handles + * GSO-over-UDP-tunnel + * packets with partial csum + * for the outer header + */ +#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO 67 /* Device can receive + * GSO-over-UDP-tunnel packets + */ +#define VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO_CSUM 68 /* Device handles + * GSO-over-UDP-tunnel + * packets with partial csum + * for the outer header + */ + +/* Offloads bits corresponding to VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO{,_CSUM} + * features + */ +#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_MAPPED 46 +#define VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO_CSUM_MAPPED 47 #ifndef VIRTIO_NET_NO_LEGACY #define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ @@ -131,12 +153,17 @@ struct virtio_net_hdr_v1 { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */ #define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ #define VIRTIO_NET_HDR_F_RSC_INFO 4 /* rsc info in csum_ fields */ +#define VIRTIO_NET_HDR_F_UDP_TUNNEL_CSUM 8 /* UDP tunnel csum offload */ uint8_t flags; #define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */ #define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */ #define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */ #define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */ #define VIRTIO_NET_HDR_GSO_UDP_L4 5 /* GSO frame, IPv4& IPv6 UDP (USO) */ +#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 0x20 /* UDPv4 tunnel present */ +#define VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6 0x40 /* UDPv6 tunnel present */ +#define 
VIRTIO_NET_HDR_GSO_UDP_TUNNEL (VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV4 | \ + VIRTIO_NET_HDR_GSO_UDP_TUNNEL_IPV6) #define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */ uint8_t gso_type; __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ @@ -181,6 +208,12 @@ struct virtio_net_hdr_v1_hash { uint16_t padding; }; +struct virtio_net_hdr_v1_hash_tunnel { + struct virtio_net_hdr_v1_hash hash_hdr; + uint16_t outer_th_offset; + uint16_t inner_nh_offset; +}; + #ifndef VIRTIO_NET_NO_LEGACY /* This header comes first in the scatter-gather list. * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must diff --git a/include/system/accel-irq.h b/include/system/accel-irq.h new file mode 100644 index 0000000000000..671fb7dfdbbc0 --- /dev/null +++ b/include/system/accel-irq.h @@ -0,0 +1,37 @@ +/* + * Accelerated irqchip abstraction + * + * Copyright Microsoft, Corp. 2025 + * + * Authors: Ziqiao Zhou + * Magnus Kulke + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef SYSTEM_ACCEL_IRQ_H +#define SYSTEM_ACCEL_IRQ_H +#include "hw/pci/msi.h" +#include "qemu/osdep.h" +#include "system/kvm.h" +#include "system/mshv.h" + +static inline bool accel_msi_via_irqfd_enabled(void) +{ + return mshv_msi_via_irqfd_enabled() || kvm_msi_via_irqfd_enabled(); +} + +static inline bool accel_irqchip_is_split(void) +{ + return mshv_msi_via_irqfd_enabled() || kvm_irqchip_is_split(); +} + +int accel_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev); +int accel_irqchip_update_msi_route(int vector, MSIMessage msg, PCIDevice *dev); +void accel_irqchip_commit_route_changes(KVMRouteChange *c); +void accel_irqchip_commit_routes(void); +void accel_irqchip_release_virq(int virq); +int accel_irqchip_add_irqfd_notifier_gsi(EventNotifier *n, EventNotifier *rn, + int virq); +int accel_irqchip_remove_irqfd_notifier_gsi(EventNotifier *n, int virq); +#endif diff --git a/include/system/cpus.h b/include/system/cpus.h index 69be6a77a75ac..508444ccf1c30 100644 --- 
a/include/system/cpus.h +++ b/include/system/cpus.h @@ -17,8 +17,7 @@ bool cpu_work_list_empty(CPUState *cpu); bool cpu_thread_is_idle(CPUState *cpu); bool all_cpu_threads_idle(void); bool cpu_can_run(CPUState *cpu); -void qemu_wait_io_event_common(CPUState *cpu); -void qemu_wait_io_event(CPUState *cpu); +void qemu_process_cpu_events_common(CPUState *cpu); void cpu_thread_signal_created(CPUState *cpu); void cpu_thread_signal_destroyed(CPUState *cpu); void cpu_handle_guest_debug(CPUState *cpu); diff --git a/include/system/hw_accel.h b/include/system/hw_accel.h index fa9228d5d2dca..55497edc2936d 100644 --- a/include/system/hw_accel.h +++ b/include/system/hw_accel.h @@ -14,6 +14,7 @@ #include "hw/core/cpu.h" #include "system/kvm.h" #include "system/hvf.h" +#include "system/mshv.h" #include "system/whpx.h" #include "system/nvmm.h" diff --git a/include/system/iommufd.h b/include/system/iommufd.h index c9c72ffc4509d..a659f36a20fdc 100644 --- a/include/system/iommufd.h +++ b/include/system/iommufd.h @@ -45,12 +45,12 @@ bool iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id, Error **errp); void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id); int iommufd_backend_map_file_dma(IOMMUFDBackend *be, uint32_t ioas_id, - hwaddr iova, ram_addr_t size, int fd, + hwaddr iova, uint64_t size, int fd, unsigned long start, bool readonly); int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova, - ram_addr_t size, void *vaddr, bool readonly); + uint64_t size, void *vaddr, bool readonly); int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id, - hwaddr iova, ram_addr_t size); + hwaddr iova, uint64_t size); bool iommufd_backend_get_device_info(IOMMUFDBackend *be, uint32_t devid, uint32_t *type, void *data, uint32_t len, uint64_t *caps, Error **errp); diff --git a/include/system/kvm.h b/include/system/kvm.h index 3c7d31473663b..8f9eecf044c2a 100644 --- a/include/system/kvm.h +++ b/include/system/kvm.h @@ -317,23 +317,6 @@ int 
kvm_create_device(KVMState *s, uint64_t type, bool test); */ bool kvm_device_supported(int vmfd, uint64_t type); -/** - * kvm_park_vcpu - Park QEMU KVM vCPU context - * @cpu: QOM CPUState object for which QEMU KVM vCPU context has to be parked. - * - * @returns: none - */ -void kvm_park_vcpu(CPUState *cpu); - -/** - * kvm_unpark_vcpu - unpark QEMU KVM vCPU context - * @s: KVM State - * @vcpu_id: Architecture vCPU ID of the parked vCPU - * - * @returns: KVM fd - */ -int kvm_unpark_vcpu(KVMState *s, unsigned long vcpu_id); - /** * kvm_create_and_park_vcpu - Create and park a KVM vCPU * @cpu: QOM CPUState object for which KVM vCPU has to be created and parked. @@ -357,14 +340,16 @@ int kvm_arch_process_async_events(CPUState *cpu); int kvm_arch_get_registers(CPUState *cpu, Error **errp); -/* state subset only touched by the VCPU itself during runtime */ -#define KVM_PUT_RUNTIME_STATE 1 -/* state subset modified during VCPU reset */ -#define KVM_PUT_RESET_STATE 2 -/* full state set, modified during initialization or on vmload */ -#define KVM_PUT_FULL_STATE 3 +typedef enum kvm_put_state { + /* state subset only touched by the VCPU itself during runtime */ + KVM_PUT_RUNTIME_STATE = 1, + /* state subset modified during VCPU reset */ + KVM_PUT_RESET_STATE = 2, + /* full state set, modified during initialization or on vmload */ + KVM_PUT_FULL_STATE = 3, +} KvmPutState; -int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp); +int kvm_arch_put_registers(CPUState *cpu, KvmPutState level, Error **errp); int kvm_arch_get_default_type(MachineState *ms); diff --git a/include/system/memory.h b/include/system/memory.h index e2cd6ed126144..3bd5ffa5e0d07 100644 --- a/include/system/memory.h +++ b/include/system/memory.h @@ -833,6 +833,7 @@ struct MemoryRegion { bool nonvolatile; bool rom_device; bool flush_coalesced_mmio; + bool lockless_io; bool unmergeable; uint8_t dirty_log_mask; bool is_iommu; @@ -2341,6 +2342,17 @@ void memory_region_set_flush_coalesced(MemoryRegion 
*mr); */ void memory_region_clear_flush_coalesced(MemoryRegion *mr); +/** + * memory_region_enable_lockless_io: Enable lockless (BQL free) acceess. + * + * Enable BQL-free access for devices that are well prepared to handle + * locking during I/O themselves: either by doing fine grained locking or + * by providing lock-free I/O schemes. + * + * @mr: the memory region to be updated. + */ +void memory_region_enable_lockless_io(MemoryRegion *mr); + /** * memory_region_add_eventfd: Request an eventfd to be triggered when a word * is written to a location. @@ -2715,14 +2727,32 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name); /** * address_space_destroy: destroy an address space * - * Releases all resources associated with an address space. After an address space - * is destroyed, its root memory region (given by address_space_init()) may be destroyed - * as well. + * Releases all resources associated with an address space. After an + * address space is destroyed, the reference the AddressSpace had to + * its root memory region is dropped, which may result in the + * destruction of that memory region as well. + * + * Note that destruction of the AddressSpace is done via RCU; + * it is therefore not valid to free the memory the AddressSpace + * struct is in until after that RCU callback has completed. + * If you want to g_free() the AddressSpace after destruction you + * can do that with address_space_destroy_free(). * * @as: address space to be destroyed */ void address_space_destroy(AddressSpace *as); +/** + * address_space_destroy_free: destroy an address space and free it + * + * This does the same thing as address_space_destroy(), and then also + * frees (via g_free()) the AddressSpace itself once the destruction + * is complete. 
+ * + * @as: address space to be destroyed + */ +void address_space_destroy_free(AddressSpace *as); + /** * address_space_remove_listeners: unregister all listeners of an address space * @@ -2965,6 +2995,8 @@ void address_space_cache_invalidate(MemoryRegionCache *cache, */ void address_space_cache_destroy(MemoryRegionCache *cache); +void address_space_flush_icache_range(AddressSpace *as, hwaddr addr, hwaddr len); + /* address_space_get_iotlb_entry: translate an address into an IOTLB * entry. Should be called from an RCU critical section. */ @@ -3017,6 +3049,15 @@ static inline MemoryRegion *address_space_translate(AddressSpace *as, bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, bool is_write, MemTxAttrs attrs); +/** + * address_space_is_io: check whether an guest physical addresses + * whithin an address space is I/O memory. + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + */ +bool address_space_is_io(AddressSpace *as, hwaddr addr); + /* address_space_map: map a physical memory region into a host virtual address * * May map a subset of the requested range, given by and returned in @plen. diff --git a/include/system/mshv.h b/include/system/mshv.h new file mode 100644 index 0000000000000..8b1fc20c80ddf --- /dev/null +++ b/include/system/mshv.h @@ -0,0 +1,64 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: Ziqiao Zhou + * Magnus Kulke + * Jinank Jain + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + */ + +#ifndef QEMU_MSHV_H +#define QEMU_MSHV_H + +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "hw/hyperv/hyperv-proto.h" +#include "hw/hyperv/hvhdk.h" +#include "hw/hyperv/hvgdk_mini.h" +#include "qapi/qapi-types-common.h" +#include "system/memory.h" +#include "accel/accel-ops.h" + +#ifdef COMPILING_PER_TARGET +#ifdef CONFIG_MSHV +#include +#define CONFIG_MSHV_IS_POSSIBLE +#endif +#else +#define CONFIG_MSHV_IS_POSSIBLE +#endif + +#define MSHV_MAX_MSI_ROUTES 4096 + +#define MSHV_PAGE_SHIFT 12 + +#ifdef CONFIG_MSHV_IS_POSSIBLE +extern bool mshv_allowed; +#define mshv_enabled() (mshv_allowed) +#define mshv_msi_via_irqfd_enabled() mshv_enabled() +#else /* CONFIG_MSHV_IS_POSSIBLE */ +#define mshv_enabled() false +#define mshv_msi_via_irqfd_enabled() mshv_enabled() +#endif + +typedef struct MshvState MshvState; +extern MshvState *mshv_state; + +/* interrupt */ +int mshv_request_interrupt(MshvState *mshv_state, uint32_t interrupt_type, uint32_t vector, + uint32_t vp_index, bool logical_destination_mode, + bool level_triggered); + +int mshv_irqchip_add_msi_route(int vector, PCIDevice *dev); +int mshv_irqchip_update_msi_route(int virq, MSIMessage msg, PCIDevice *dev); +void mshv_irqchip_commit_routes(void); +void mshv_irqchip_release_virq(int virq); +int mshv_irqchip_add_irqfd_notifier_gsi(const EventNotifier *n, + const EventNotifier *rn, int virq); +int mshv_irqchip_remove_irqfd_notifier_gsi(const EventNotifier *n, int virq); + +#endif diff --git a/include/system/mshv_int.h b/include/system/mshv_int.h new file mode 100644 index 0000000000000..490563c1ab294 --- /dev/null +++ b/include/system/mshv_int.h @@ -0,0 +1,155 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: Ziqiao Zhou + * Magnus Kulke + * Jinank Jain + * + * SPDX-License-Identifier: GPL-2.0-or-later + * + */ + +#ifndef QEMU_MSHV_INT_H +#define QEMU_MSHV_INT_H + +#define MSHV_MSR_ENTRIES_COUNT 64 + +#define MSHV_MAX_MEM_SLOTS 32 + +typedef struct hyperv_message hv_message; + +typedef struct MshvHvCallArgs { + void *base; + void *input_page; + void *output_page; +} MshvHvCallArgs; + +struct AccelCPUState { + int cpufd; + bool dirty; + MshvHvCallArgs hvcall_args; +}; + +typedef struct MshvMemoryListener { + MemoryListener listener; + int as_id; +} MshvMemoryListener; + +typedef struct MshvAddressSpace { + MshvMemoryListener *ml; + AddressSpace *as; +} MshvAddressSpace; + +typedef struct MshvMemorySlotManager { + size_t n_slots; + GList *slots; + QemuMutex mutex; +} MshvMemorySlotManager; + +struct MshvState { + AccelState parent_obj; + int vm; + MshvMemoryListener memory_listener; + /* number of listeners */ + int nr_as; + MshvAddressSpace *as; + int fd; + MshvMemorySlotManager msm; +}; + +typedef struct MshvMsiControl { + bool updated; + GHashTable *gsi_routes; +} MshvMsiControl; + +#define mshv_vcpufd(cpu) (cpu->accel->cpufd) + +/* cpu */ +typedef struct MshvFPU { + uint8_t fpr[8][16]; + uint16_t fcw; + uint16_t fsw; + uint8_t ftwx; + uint8_t pad1; + uint16_t last_opcode; + uint64_t last_ip; + uint64_t last_dp; + uint8_t xmm[16][16]; + uint32_t mxcsr; + uint32_t pad2; +} MshvFPU; + +typedef enum MshvVmExit { + MshvVmExitIgnore = 0, + MshvVmExitShutdown = 1, + MshvVmExitSpecial = 2, +} MshvVmExit; + +typedef enum MshvRemapResult { + MshvRemapOk = 0, + MshvRemapNoMapping = 1, + MshvRemapNoOverlap = 2, +} MshvRemapResult; + +void mshv_init_mmio_emu(void); +int mshv_create_vcpu(int vm_fd, uint8_t vp_index, int *cpu_fd); +void mshv_remove_vcpu(int vm_fd, int cpu_fd); +int mshv_configure_vcpu(const CPUState *cpu, const MshvFPU *fpu, uint64_t xcr0); +int mshv_get_standard_regs(CPUState *cpu); +int mshv_get_special_regs(CPUState *cpu); +int 
mshv_run_vcpu(int vm_fd, CPUState *cpu, hv_message *msg, MshvVmExit *exit); +int mshv_load_regs(CPUState *cpu); +int mshv_store_regs(CPUState *cpu); +int mshv_set_generic_regs(const CPUState *cpu, const hv_register_assoc *assocs, + size_t n_regs); +int mshv_arch_put_registers(const CPUState *cpu); +void mshv_arch_init_vcpu(CPUState *cpu); +void mshv_arch_destroy_vcpu(CPUState *cpu); +void mshv_arch_amend_proc_features( + union hv_partition_synthetic_processor_features *features); +int mshv_arch_post_init_vm(int vm_fd); + +#if defined COMPILING_PER_TARGET && defined CONFIG_MSHV_IS_POSSIBLE +int mshv_hvcall(int fd, const struct mshv_root_hvcall *args); +#endif + +/* memory */ +typedef struct MshvMemorySlot { + uint64_t guest_phys_addr; + uint64_t memory_size; + uint64_t userspace_addr; + bool readonly; + bool mapped; +} MshvMemorySlot; + +MshvRemapResult mshv_remap_overlap_region(int vm_fd, uint64_t gpa); +int mshv_guest_mem_read(uint64_t gpa, uint8_t *data, uintptr_t size, + bool is_secure_mode, bool instruction_fetch); +int mshv_guest_mem_write(uint64_t gpa, const uint8_t *data, uintptr_t size, + bool is_secure_mode); +void mshv_set_phys_mem(MshvMemoryListener *mml, MemoryRegionSection *section, + bool add); +void mshv_init_memory_slot_manager(MshvState *mshv_state); + +/* msr */ +typedef struct MshvMsrEntry { + uint32_t index; + uint32_t reserved; + uint64_t data; +} MshvMsrEntry; + +typedef struct MshvMsrEntries { + MshvMsrEntry entries[MSHV_MSR_ENTRIES_COUNT]; + uint32_t nmsrs; +} MshvMsrEntries; + +int mshv_configure_msr(const CPUState *cpu, const MshvMsrEntry *msrs, + size_t n_msrs); + +/* interrupt */ +void mshv_init_msicontrol(void); +int mshv_reserve_ioapic_msi_routes(int vm_fd); + +#endif diff --git a/include/system/os-win32.h b/include/system/os-win32.h index 3aa6cee4c232c..22d72babdf407 100644 --- a/include/system/os-win32.h +++ b/include/system/os-win32.h @@ -168,11 +168,14 @@ static inline void qemu_funlockfile(FILE *f) #endif } -/* Helper for 
WSAEventSelect, to report errors */ +/* Helpers for WSAEventSelect() */ bool qemu_socket_select(int sockfd, WSAEVENT hEventObject, long lNetworkEvents, Error **errp); +void qemu_socket_select_nofail(int sockfd, WSAEVENT hEventObject, + long lNetworkEvents); bool qemu_socket_unselect(int sockfd, Error **errp); +void qemu_socket_unselect_nofail(int sockfd); /* We wrap all the sockets functions so that we can set errno based on * WSAGetLastError(), and use file-descriptors instead of SOCKET. diff --git a/include/system/physmem.h b/include/system/physmem.h new file mode 100644 index 0000000000000..879f6eae38b07 --- /dev/null +++ b/include/system/physmem.h @@ -0,0 +1,54 @@ +/* + * QEMU physical memory interfaces (target independent). + * + * Copyright (c) 2003 Fabrice Bellard + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef QEMU_SYSTEM_PHYSMEM_H +#define QEMU_SYSTEM_PHYSMEM_H + +#include "exec/hwaddr.h" +#include "exec/ramlist.h" + +#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1) +#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE)) + +bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client); + +bool physical_memory_is_clean(ram_addr_t addr); + +uint8_t physical_memory_range_includes_clean(ram_addr_t start, + ram_addr_t length, + uint8_t mask); + +void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client); + +void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length, + uint8_t mask); + +/* + * Contrary to physical_memory_sync_dirty_bitmap() this function returns + * the number of dirty pages in @bitmap passed as argument. On the other hand, + * physical_memory_sync_dirty_bitmap() returns newly dirtied pages that + * weren't set in the global migration bitmap. 
+ */ +uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap, + ram_addr_t start, + ram_addr_t pages); + +void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length); + +bool physical_memory_test_and_clear_dirty(ram_addr_t start, + ram_addr_t length, + unsigned client); + +DirtyBitmapSnapshot * +physical_memory_snapshot_and_clear_dirty(MemoryRegion *mr, hwaddr offset, + hwaddr length, unsigned client); + +bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, + ram_addr_t start, + ram_addr_t length); + +#endif diff --git a/include/system/ram_addr.h b/include/system/ram_addr.h index 15a1b1a4fa2bd..683485980cea6 100644 --- a/include/system/ram_addr.h +++ b/include/system/ram_addr.h @@ -19,17 +19,9 @@ #ifndef SYSTEM_RAM_ADDR_H #define SYSTEM_RAM_ADDR_H -#include "system/xen.h" -#include "system/tcg.h" -#include "exec/cputlb.h" -#include "exec/ramlist.h" #include "system/ramblock.h" -#include "system/memory.h" #include "exec/target_page.h" -#include "qemu/rcu.h" - #include "exec/hwaddr.h" -#include "exec/cpu-common.h" extern uint64_t total_dirty_pages; @@ -80,17 +72,6 @@ static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page) return bitmap_test_and_clear(rb->clear_bmap, page >> shift, 1); } -static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset) -{ - return (b && b->host && offset < b->used_length) ? 
true : false; -} - -static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset) -{ - assert(offset_in_ramblock(block, offset)); - return (char *)block->host + offset; -} - static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr, RAMBlock *rb) { @@ -99,8 +80,6 @@ static inline unsigned long int ramblock_recv_bitmap_offset(void *host_addr, return host_addr_offset >> TARGET_PAGE_BITS; } -bool ramblock_is_pmem(RAMBlock *rb); - /** * qemu_ram_alloc_from_file, * qemu_ram_alloc_from_fd: Allocate a ram block from the specified backing @@ -153,409 +132,4 @@ static inline void qemu_ram_block_writeback(RAMBlock *block) qemu_ram_msync(block, 0, block->used_length); } -#define DIRTY_CLIENTS_ALL ((1 << DIRTY_MEMORY_NUM) - 1) -#define DIRTY_CLIENTS_NOCODE (DIRTY_CLIENTS_ALL & ~(1 << DIRTY_MEMORY_CODE)) - -static inline bool cpu_physical_memory_get_dirty(ram_addr_t start, - ram_addr_t length, - unsigned client) -{ - DirtyMemoryBlocks *blocks; - unsigned long end, page; - unsigned long idx, offset, base; - bool dirty = false; - - assert(client < DIRTY_MEMORY_NUM); - - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - - WITH_RCU_READ_LOCK_GUARD() { - blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); - - idx = page / DIRTY_MEMORY_BLOCK_SIZE; - offset = page % DIRTY_MEMORY_BLOCK_SIZE; - base = page - offset; - while (page < end) { - unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE); - unsigned long num = next - base; - unsigned long found = find_next_bit(blocks->blocks[idx], - num, offset); - if (found < num) { - dirty = true; - break; - } - - page = next; - idx++; - offset = 0; - base += DIRTY_MEMORY_BLOCK_SIZE; - } - } - - return dirty; -} - -static inline bool cpu_physical_memory_all_dirty(ram_addr_t start, - ram_addr_t length, - unsigned client) -{ - DirtyMemoryBlocks *blocks; - unsigned long end, page; - unsigned long idx, offset, base; - bool dirty = true; - - assert(client < 
DIRTY_MEMORY_NUM); - - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - - RCU_READ_LOCK_GUARD(); - - blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); - - idx = page / DIRTY_MEMORY_BLOCK_SIZE; - offset = page % DIRTY_MEMORY_BLOCK_SIZE; - base = page - offset; - while (page < end) { - unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE); - unsigned long num = next - base; - unsigned long found = find_next_zero_bit(blocks->blocks[idx], num, offset); - if (found < num) { - dirty = false; - break; - } - - page = next; - idx++; - offset = 0; - base += DIRTY_MEMORY_BLOCK_SIZE; - } - - return dirty; -} - -static inline bool cpu_physical_memory_get_dirty_flag(ram_addr_t addr, - unsigned client) -{ - return cpu_physical_memory_get_dirty(addr, 1, client); -} - -static inline bool cpu_physical_memory_is_clean(ram_addr_t addr) -{ - bool vga = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA); - bool code = cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE); - bool migration = - cpu_physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION); - return !(vga && code && migration); -} - -static inline uint8_t cpu_physical_memory_range_includes_clean(ram_addr_t start, - ram_addr_t length, - uint8_t mask) -{ - uint8_t ret = 0; - - if (mask & (1 << DIRTY_MEMORY_VGA) && - !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) { - ret |= (1 << DIRTY_MEMORY_VGA); - } - if (mask & (1 << DIRTY_MEMORY_CODE) && - !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) { - ret |= (1 << DIRTY_MEMORY_CODE); - } - if (mask & (1 << DIRTY_MEMORY_MIGRATION) && - !cpu_physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) { - ret |= (1 << DIRTY_MEMORY_MIGRATION); - } - return ret; -} - -static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr, - unsigned client) -{ - unsigned long page, idx, offset; - DirtyMemoryBlocks *blocks; - - assert(client < DIRTY_MEMORY_NUM); - 
- page = addr >> TARGET_PAGE_BITS; - idx = page / DIRTY_MEMORY_BLOCK_SIZE; - offset = page % DIRTY_MEMORY_BLOCK_SIZE; - - RCU_READ_LOCK_GUARD(); - - blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); - - set_bit_atomic(offset, blocks->blocks[idx]); -} - -static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start, - ram_addr_t length, - uint8_t mask) -{ - DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM]; - unsigned long end, page; - unsigned long idx, offset, base; - int i; - - if (!mask && !xen_enabled()) { - return; - } - - end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; - page = start >> TARGET_PAGE_BITS; - - WITH_RCU_READ_LOCK_GUARD() { - for (i = 0; i < DIRTY_MEMORY_NUM; i++) { - blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]); - } - - idx = page / DIRTY_MEMORY_BLOCK_SIZE; - offset = page % DIRTY_MEMORY_BLOCK_SIZE; - base = page - offset; - while (page < end) { - unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE); - - if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) { - bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx], - offset, next - page); - } - if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) { - bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx], - offset, next - page); - } - if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) { - bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx], - offset, next - page); - } - - page = next; - idx++; - offset = 0; - base += DIRTY_MEMORY_BLOCK_SIZE; - } - } - - if (xen_enabled()) { - xen_hvm_modified_memory(start, length); - } -} - -#if !defined(_WIN32) - -/* - * Contrary to cpu_physical_memory_sync_dirty_bitmap() this function returns - * the number of dirty pages in @bitmap passed as argument. On the other hand, - * cpu_physical_memory_sync_dirty_bitmap() returns newly dirtied pages that - * weren't set in the global migration bitmap. 
- */ -static inline -uint64_t cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap, - ram_addr_t start, - ram_addr_t pages) -{ - unsigned long i, j; - unsigned long page_number, c, nbits; - hwaddr addr; - ram_addr_t ram_addr; - uint64_t num_dirty = 0; - unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; - unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE; - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); - - /* start address is aligned at the start of a word? */ - if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) && - (hpratio == 1)) { - unsigned long **blocks[DIRTY_MEMORY_NUM]; - unsigned long idx; - unsigned long offset; - long k; - long nr = BITS_TO_LONGS(pages); - - idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE; - offset = BIT_WORD((start >> TARGET_PAGE_BITS) % - DIRTY_MEMORY_BLOCK_SIZE); - - WITH_RCU_READ_LOCK_GUARD() { - for (i = 0; i < DIRTY_MEMORY_NUM; i++) { - blocks[i] = - qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks; - } - - for (k = 0; k < nr; k++) { - if (bitmap[k]) { - unsigned long temp = leul_to_cpu(bitmap[k]); - - nbits = ctpopl(temp); - qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp); - - if (global_dirty_tracking) { - qatomic_or( - &blocks[DIRTY_MEMORY_MIGRATION][idx][offset], - temp); - if (unlikely( - global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { - total_dirty_pages += nbits; - } - } - - num_dirty += nbits; - - if (tcg_enabled()) { - qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], - temp); - } - } - - if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) { - offset = 0; - idx++; - } - } - } - - if (xen_enabled()) { - xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS); - } - } else { - uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE; - - if (!global_dirty_tracking) { - clients &= ~(1 << DIRTY_MEMORY_MIGRATION); - } - - /* - * bitmap-traveling is faster than memory-traveling (for addr...) 
- * especially when most of the memory is not dirty. - */ - for (i = 0; i < len; i++) { - if (bitmap[i] != 0) { - c = leul_to_cpu(bitmap[i]); - nbits = ctpopl(c); - if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { - total_dirty_pages += nbits; - } - num_dirty += nbits; - do { - j = ctzl(c); - c &= ~(1ul << j); - page_number = (i * HOST_LONG_BITS + j) * hpratio; - addr = page_number * TARGET_PAGE_SIZE; - ram_addr = start + addr; - cpu_physical_memory_set_dirty_range(ram_addr, - TARGET_PAGE_SIZE * hpratio, clients); - } while (c != 0); - } - } - } - - return num_dirty; -} -#endif /* not _WIN32 */ - -static inline void cpu_physical_memory_dirty_bits_cleared(ram_addr_t start, - ram_addr_t length) -{ - if (tcg_enabled()) { - tlb_reset_dirty_range_all(start, length); - } - -} -bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, - ram_addr_t length, - unsigned client); - -DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty - (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client); - -bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, - ram_addr_t start, - ram_addr_t length); - -static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, - ram_addr_t length) -{ - cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_MIGRATION); - cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_VGA); - cpu_physical_memory_test_and_clear_dirty(start, length, DIRTY_MEMORY_CODE); -} - - -/* Called with RCU critical section */ -static inline -uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb, - ram_addr_t start, - ram_addr_t length) -{ - ram_addr_t addr; - unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); - uint64_t num_dirty = 0; - unsigned long *dest = rb->bmap; - - /* start address and length is aligned at the start of a word? 
*/ - if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == - (start + rb->offset) && - !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { - int k; - int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); - unsigned long * const *src; - unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; - unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % - DIRTY_MEMORY_BLOCK_SIZE); - unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); - - src = qatomic_rcu_read( - &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; - - for (k = page; k < page + nr; k++) { - if (src[idx][offset]) { - unsigned long bits = qatomic_xchg(&src[idx][offset], 0); - unsigned long new_dirty; - new_dirty = ~dest[k]; - dest[k] |= bits; - new_dirty &= bits; - num_dirty += ctpopl(new_dirty); - } - - if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) { - offset = 0; - idx++; - } - } - if (num_dirty) { - cpu_physical_memory_dirty_bits_cleared(start, length); - } - - if (rb->clear_bmap) { - /* - * Postpone the dirty bitmap clear to the point before we - * really send the pages, also we will split the clear - * dirty procedure into smaller chunks. - */ - clear_bmap_set(rb, start >> TARGET_PAGE_BITS, - length >> TARGET_PAGE_BITS); - } else { - /* Slow path - still do that in a huge chunk */ - memory_region_clear_dirty_bitmap(rb->mr, start, length); - } - } else { - ram_addr_t offset = rb->offset; - - for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { - if (cpu_physical_memory_test_and_clear_dirty( - start + addr + offset, - TARGET_PAGE_SIZE, - DIRTY_MEMORY_MIGRATION)) { - long k = (start + addr) >> TARGET_PAGE_BITS; - if (!test_and_set_bit(k, dest)) { - num_dirty++; - } - } - } - } - - return num_dirty; -} - #endif diff --git a/include/system/ramblock.h b/include/system/ramblock.h index 87e847e184aa4..76694fe1b5bb9 100644 --- a/include/system/ramblock.h +++ b/include/system/ramblock.h @@ -11,11 +11,6 @@ * */ -/* - * This header is for use by exec.c and memory.c ONLY. 
Do not include it. - * The functions declared here will be removed soon. - */ - #ifndef SYSTEM_RAMBLOCK_H #define SYSTEM_RAMBLOCK_H @@ -108,9 +103,31 @@ struct RamBlockAttributes { QLIST_HEAD(, RamDiscardListener) rdl_list; }; +/* @offset: the offset within the RAMBlock */ +int ram_block_discard_range(RAMBlock *rb, uint64_t offset, size_t length); +/* @offset: the offset within the RAMBlock */ +int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t offset, + size_t length); + RamBlockAttributes *ram_block_attributes_create(RAMBlock *ram_block); void ram_block_attributes_destroy(RamBlockAttributes *attr); int ram_block_attributes_state_change(RamBlockAttributes *attr, uint64_t offset, uint64_t size, bool to_discard); +/** + * ram_block_is_pmem: Whether the RAM block is of persistent memory + */ +bool ram_block_is_pmem(RAMBlock *rb); + +static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset) +{ + return b && b->host && (offset < b->used_length); +} + +static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset) +{ + assert(offset_in_ramblock(block, offset)); + return (char *)block->host + offset; +} + #endif diff --git a/include/system/system.h b/include/system/system.h index a7effe7dfd8b2..03a2d0e9005d6 100644 --- a/include/system/system.h +++ b/include/system/system.h @@ -42,7 +42,6 @@ extern int graphic_height; extern int graphic_depth; extern int display_opengl; extern const char *keyboard_layout; -extern int old_param; extern uint8_t *boot_splash_filedata; extern bool enable_cpu_pm; extern QEMUClockType rtc_clock; diff --git a/include/tcg/tcg-op-common.h b/include/tcg/tcg-op-common.h index e1071adebf26a..f752ef440b209 100644 --- a/include/tcg/tcg-op-common.h +++ b/include/tcg/tcg-op-common.h @@ -344,6 +344,8 @@ void tcg_gen_atomic_xchg_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGArg, MemOp, TCGType); void tcg_gen_atomic_xchg_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGArg, MemOp, TCGType); +void tcg_gen_atomic_xchg_i128_chk(TCGv_i128, 
TCGTemp *, TCGv_i128, + TCGArg, MemOp, TCGType); void tcg_gen_atomic_fetch_add_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, TCGArg, MemOp, TCGType); @@ -411,6 +413,11 @@ void tcg_gen_atomic_umax_fetch_i32_chk(TCGv_i32, TCGTemp *, TCGv_i32, void tcg_gen_atomic_umax_fetch_i64_chk(TCGv_i64, TCGTemp *, TCGv_i64, TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_and_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128, + TCGArg, MemOp, TCGType); +void tcg_gen_atomic_fetch_or_i128_chk(TCGv_i128, TCGTemp *, TCGv_i128, + TCGArg, MemOp, TCGType); + /* Vector ops */ void tcg_gen_mov_vec(TCGv_vec, TCGv_vec); diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h index c912578fdd2ee..232733cb718ae 100644 --- a/include/tcg/tcg-op.h +++ b/include/tcg/tcg-op.h @@ -134,13 +134,16 @@ DEF_ATOMIC3(tcg_gen_nonatomic_cmpxchg, i128) DEF_ATOMIC2(tcg_gen_atomic_xchg, i32) DEF_ATOMIC2(tcg_gen_atomic_xchg, i64) +DEF_ATOMIC2(tcg_gen_atomic_xchg, i128) DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i32) DEF_ATOMIC2(tcg_gen_atomic_fetch_add, i64) DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i32) DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_and, i128) DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i32) DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i64) +DEF_ATOMIC2(tcg_gen_atomic_fetch_or, i128) DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i32) DEF_ATOMIC2(tcg_gen_atomic_fetch_xor, i64) DEF_ATOMIC2(tcg_gen_atomic_fetch_smin, i32) diff --git a/include/user/cpu_loop.h b/include/user/cpu_loop.h index ad8a1d711f0bd..346e37ede8bea 100644 --- a/include/user/cpu_loop.h +++ b/include/user/cpu_loop.h @@ -81,8 +81,4 @@ void target_exception_dump(CPUArchState *env, const char *fmt, int code); #define EXCP_DUMP(env, fmt, code) \ target_exception_dump(env, fmt, code) -typedef struct target_pt_regs target_pt_regs; - -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs); - #endif diff --git a/include/user/page-protection.h b/include/user/page-protection.h index 4bde664e4a74f..41b23e72fcb88 100644 --- 
a/include/user/page-protection.h +++ b/include/user/page-protection.h @@ -23,14 +23,19 @@ int page_get_flags(vaddr address); * page_set_flags: * @start: first byte of range * @last: last byte of range - * @flags: flags to set + * @set_flags: flags to set + * @clr_flags: flags to clear * Context: holding mmap lock * * Modify the flags of a page and invalidate the code if necessary. * The flag PAGE_WRITE_ORG is positioned automatically depending * on PAGE_WRITE. The mmap_lock should already be held. + * + * For each page, flags = (flags & ~clr_flags) | set_flags. + * If clr_flags includes PAGE_VALID, this indicates a new mapping + * and page_reset_target_data will be called as well. */ -void page_set_flags(vaddr start, vaddr last, int flags); +void page_set_flags(vaddr start, vaddr last, int set_flags, int clr_flags); void page_reset_target_data(vaddr start, vaddr last); diff --git a/io/channel-command.c b/io/channel-command.c index 8966dd3a2babf..8ae9a026b34c0 100644 --- a/io/channel-command.c +++ b/io/channel-command.c @@ -277,9 +277,12 @@ static int qio_channel_command_set_blocking(QIOChannel *ioc, cioc->blocking = enabled; #else - if ((cioc->writefd >= 0 && !g_unix_set_fd_nonblocking(cioc->writefd, !enabled, NULL)) || - (cioc->readfd >= 0 && !g_unix_set_fd_nonblocking(cioc->readfd, !enabled, NULL))) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (cioc->writefd >= 0 && + !qemu_set_blocking(cioc->writefd, enabled, errp)) { + return -1; + } + if (cioc->readfd >= 0 && + !qemu_set_blocking(cioc->readfd, enabled, errp)) { return -1; } #endif diff --git a/io/channel-file.c b/io/channel-file.c index ca3f180cc2fd9..5cef75a67c627 100644 --- a/io/channel-file.c +++ b/io/channel-file.c @@ -223,8 +223,7 @@ static int qio_channel_file_set_blocking(QIOChannel *ioc, #else QIOChannelFile *fioc = QIO_CHANNEL_FILE(ioc); - if (!g_unix_set_fd_nonblocking(fioc->fd, !enabled, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if 
(!qemu_set_blocking(fioc->fd, enabled, errp)) { return -1; } return 0; diff --git a/io/channel-socket.c b/io/channel-socket.c index 3b7ca924ff349..712b793eaf2cd 100644 --- a/io/channel-socket.c +++ b/io/channel-socket.c @@ -454,7 +454,7 @@ static void qio_channel_socket_finalize(Object *obj) } } #ifdef WIN32 - qemu_socket_unselect(ioc->fd, NULL); + qemu_socket_unselect_nofail(ioc->fd); #endif close(ioc->fd); ioc->fd = -1; @@ -472,7 +472,7 @@ static void qio_channel_socket_copy_fds(struct msghdr *msg, *fds = NULL; for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { - int fd_size, i; + int fd_size; int gotfds; if (cmsg->cmsg_len < CMSG_LEN(sizeof(int)) || @@ -490,22 +490,53 @@ static void qio_channel_socket_copy_fds(struct msghdr *msg, gotfds = fd_size / sizeof(int); *fds = g_renew(int, *fds, *nfds + gotfds); memcpy(*fds + *nfds, CMSG_DATA(cmsg), fd_size); + *nfds += gotfds; + } +} - for (i = 0; i < gotfds; i++) { - int fd = (*fds)[*nfds + i]; - if (fd < 0) { - continue; - } +static bool qio_channel_handle_fds(int *fds, size_t nfds, + bool preserve_blocking, Error **errp) +{ + int *end = fds + nfds, *fd; +#ifdef MSG_CMSG_CLOEXEC + if (preserve_blocking) { + /* Nothing to do */ + return true; + } +#endif + + for (fd = fds; fd != end; fd++) { + if (*fd < 0) { + continue; + } + + if (!preserve_blocking) { /* O_NONBLOCK is preserved across SCM_RIGHTS so reset it */ - qemu_socket_set_block(fd); + if (!qemu_set_blocking(*fd, true, errp)) { + return false; + } + } #ifndef MSG_CMSG_CLOEXEC - qemu_set_cloexec(fd); + qemu_set_cloexec(*fd); #endif + } + + return true; +} + +static void qio_channel_cleanup_fds(int **fds, size_t *nfds) +{ + for (size_t i = 0; i < *nfds; i++) { + if ((*fds)[i] < 0) { + continue; } - *nfds += gotfds; + close((*fds)[i]); } + + g_clear_pointer(fds, g_free); + *nfds = 0; } @@ -556,7 +587,16 @@ static ssize_t qio_channel_socket_readv(QIOChannel *ioc, } if (fds && nfds) { + bool preserve_blocking = + flags & 
QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING; + qio_channel_socket_copy_fds(&msg, fds, nfds); + + if (!qio_channel_handle_fds(*fds, *nfds, + preserve_blocking, errp)) { + qio_channel_cleanup_fds(fds, nfds); + return -1; + } } return ret; @@ -820,11 +860,10 @@ qio_channel_socket_set_blocking(QIOChannel *ioc, { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(ioc); - if (enabled) { - qemu_socket_set_block(sioc->fd); - } else { - qemu_socket_set_nonblock(sioc->fd); + if (!qemu_set_blocking(sioc->fd, enabled, errp)) { + return -1; } + return 0; } @@ -890,7 +929,7 @@ qio_channel_socket_close(QIOChannel *ioc, if (sioc->fd != -1) { #ifdef WIN32 - qemu_socket_unselect(sioc->fd, NULL); + qemu_socket_unselect_nofail(sioc->fd); #endif if (qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_LISTEN)) { socket_listen_cleanup(sioc->fd, errp); diff --git a/io/channel-tls.c b/io/channel-tls.c index db2ac1deae63e..1fbed4be0c670 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -241,6 +241,11 @@ void qio_channel_tls_handshake(QIOChannelTLS *ioc, { QIOTask *task; + if (qio_channel_has_feature(QIO_CHANNEL(ioc), + QIO_CHANNEL_FEATURE_CONCURRENT_IO)) { + qcrypto_tls_session_require_thread_safety(ioc->session); + } + task = qio_task_new(OBJECT(ioc), func, opaque, destroy); @@ -341,6 +346,19 @@ static void qio_channel_tls_finalize(Object *obj) qcrypto_tls_session_free(ioc->session); } +static bool +qio_channel_tls_allow_premature_termination(QIOChannelTLS *tioc, int flags) +{ + if (flags & QIO_CHANNEL_READ_FLAG_RELAXED_EOF) { + return true; + } + + if (qatomic_read(&tioc->shutdown) & QIO_CHANNEL_SHUTDOWN_READ) { + return true; + } + + return false; +} static ssize_t qio_channel_tls_readv(QIOChannel *ioc, const struct iovec *iov, @@ -359,8 +377,6 @@ static ssize_t qio_channel_tls_readv(QIOChannel *ioc, tioc->session, iov[i].iov_base, iov[i].iov_len, - flags & QIO_CHANNEL_READ_FLAG_RELAXED_EOF || - qatomic_load_acquire(&tioc->shutdown) & QIO_CHANNEL_SHUTDOWN_READ, errp); if (ret == 
QCRYPTO_TLS_SESSION_ERR_BLOCK) { if (got) { @@ -368,6 +384,12 @@ static ssize_t qio_channel_tls_readv(QIOChannel *ioc, } else { return QIO_CHANNEL_ERR_BLOCK; } + } else if (ret == QCRYPTO_TLS_SESSION_PREMATURE_TERMINATION) { + if (qio_channel_tls_allow_premature_termination(tioc, flags)) { + ret = 0; + } else { + return -1; + } } else if (ret < 0) { return -1; } @@ -420,7 +442,7 @@ static int qio_channel_tls_set_blocking(QIOChannel *ioc, { QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc); - return qio_channel_set_blocking(tioc->master, enabled, errp); + return qio_channel_set_blocking(tioc->master, enabled, errp) ? 0 : -1; } static void qio_channel_tls_set_delay(QIOChannel *ioc, diff --git a/io/channel-watch.c b/io/channel-watch.c index 64b486e378184..018648b36b4cf 100644 --- a/io/channel-watch.c +++ b/io/channel-watch.c @@ -281,9 +281,9 @@ GSource *qio_channel_create_socket_watch(QIOChannel *ioc, GSource *source; QIOChannelSocketSource *ssource; - qemu_socket_select(sockfd, ioc->event, - FD_READ | FD_ACCEPT | FD_CLOSE | - FD_CONNECT | FD_WRITE | FD_OOB, NULL); + qemu_socket_select_nofail(sockfd, ioc->event, + FD_READ | FD_ACCEPT | FD_CLOSE | + FD_CONNECT | FD_WRITE | FD_OOB); source = g_source_new(&qio_channel_socket_source_funcs, sizeof(QIOChannelSocketSource)); diff --git a/io/channel-websock.c b/io/channel-websock.c index 08ddb274f0c92..0a8c5c47123e6 100644 --- a/io/channel-websock.c +++ b/io/channel-websock.c @@ -1184,8 +1184,7 @@ static int qio_channel_websock_set_blocking(QIOChannel *ioc, { QIOChannelWebsock *wioc = QIO_CHANNEL_WEBSOCK(ioc); - qio_channel_set_blocking(wioc->master, enabled, errp); - return 0; + return qio_channel_set_blocking(wioc->master, enabled, errp) ? 
0 : -1; } static void qio_channel_websock_set_delay(QIOChannel *ioc, diff --git a/io/channel.c b/io/channel.c index ebd93227651f1..852e684938c5f 100644 --- a/io/channel.c +++ b/io/channel.c @@ -359,12 +359,12 @@ int coroutine_mixed_fn qio_channel_write_all(QIOChannel *ioc, } -int qio_channel_set_blocking(QIOChannel *ioc, +bool qio_channel_set_blocking(QIOChannel *ioc, bool enabled, Error **errp) { QIOChannelClass *klass = QIO_CHANNEL_GET_CLASS(ioc); - return klass->io_set_blocking(ioc, enabled, errp); + return klass->io_set_blocking(ioc, enabled, errp) == 0; } diff --git a/linux-headers/LICENSES/preferred/GPL-2.0 b/linux-headers/LICENSES/preferred/GPL-2.0 index ff0812fd89cc4..ea8e93dc44bc4 100644 --- a/linux-headers/LICENSES/preferred/GPL-2.0 +++ b/linux-headers/LICENSES/preferred/GPL-2.0 @@ -20,8 +20,8 @@ License-Text: GNU GENERAL PUBLIC LICENSE Version 2, June 1991 - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -322,10 +322,8 @@ the "copyright" line and a pointer to where the full notice is found. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - + You should have received a copy of the GNU General Public License along + with this program; if not, see . Also add information on how to contact you by electronic and paper mail. 
diff --git a/linux-headers/asm-arm/bitsperlong.h b/linux-headers/asm-arm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b29..0000000000000 --- a/linux-headers/asm-arm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/linux-headers/asm-arm/kvm.h b/linux-headers/asm-arm/kvm.h deleted file mode 100644 index 0db5644e27afb..0000000000000 --- a/linux-headers/asm-arm/kvm.h +++ /dev/null @@ -1,312 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __ARM_KVM_H__ -#define __ARM_KVM_H__ - -#include -#include -#include - -#define __KVM_HAVE_GUEST_DEBUG -#define __KVM_HAVE_IRQ_LINE -#define __KVM_HAVE_READONLY_MEM -#define __KVM_HAVE_VCPU_EVENTS - -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 - -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - -/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ -#define KVM_ARM_SVC_sp svc_regs[0] -#define KVM_ARM_SVC_lr svc_regs[1] -#define KVM_ARM_SVC_spsr svc_regs[2] -#define KVM_ARM_ABT_sp abt_regs[0] -#define KVM_ARM_ABT_lr abt_regs[1] -#define KVM_ARM_ABT_spsr abt_regs[2] -#define KVM_ARM_UND_sp und_regs[0] -#define KVM_ARM_UND_lr und_regs[1] -#define KVM_ARM_UND_spsr und_regs[2] -#define KVM_ARM_IRQ_sp irq_regs[0] -#define KVM_ARM_IRQ_lr irq_regs[1] -#define KVM_ARM_IRQ_spsr irq_regs[2] - -/* Valid only for fiq_regs in struct kvm_regs */ -#define KVM_ARM_FIQ_r8 fiq_regs[0] -#define KVM_ARM_FIQ_r9 fiq_regs[1] -#define KVM_ARM_FIQ_r10 fiq_regs[2] -#define KVM_ARM_FIQ_fp fiq_regs[3] -#define KVM_ARM_FIQ_ip fiq_regs[4] -#define KVM_ARM_FIQ_sp fiq_regs[5] -#define KVM_ARM_FIQ_lr fiq_regs[6] -#define KVM_ARM_FIQ_spsr fiq_regs[7] - -struct kvm_regs { - struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */ - unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ - unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ - unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */ - unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ - unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ -}; - -/* Supported Processor Types */ -#define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_TARGET_CORTEX_A7 1 -#define KVM_ARM_NUM_TARGETS 2 - -/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */ -#define KVM_ARM_DEVICE_TYPE_SHIFT 0 -#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT) -#define KVM_ARM_DEVICE_ID_SHIFT 16 -#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT) - 
-/* Supported device IDs */ -#define KVM_ARM_DEVICE_VGIC_V2 0 - -/* Supported VGIC address types */ -#define KVM_VGIC_V2_ADDR_TYPE_DIST 0 -#define KVM_VGIC_V2_ADDR_TYPE_CPU 1 - -#define KVM_VGIC_V2_DIST_SIZE 0x1000 -#define KVM_VGIC_V2_CPU_SIZE 0x2000 - -/* Supported VGICv3 address types */ -#define KVM_VGIC_V3_ADDR_TYPE_DIST 2 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3 -#define KVM_VGIC_ITS_ADDR_TYPE 4 -#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5 - -#define KVM_VGIC_V3_DIST_SIZE SZ_64K -#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K) -#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K) - -#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ -#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */ - -struct kvm_vcpu_init { - __u32 target; - __u32 features[7]; -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -struct kvm_guest_debug_arch { -}; - -struct kvm_debug_exit_arch { -}; - -struct kvm_sync_regs { - /* Used with KVM_CAP_ARM_USER_IRQ */ - __u64 device_irq_level; -}; - -struct kvm_arch_memory_slot { -}; - -/* for KVM_GET/SET_VCPU_EVENTS */ -struct kvm_vcpu_events { - struct { - __u8 serror_pending; - __u8 serror_has_esr; - __u8 ext_dabt_pending; - /* Align it to 8 bytes */ - __u8 pad[5]; - __u64 serror_esr; - } exception; - __u32 reserved[12]; -}; - -/* If you need to interpret the index values, here is the key: */ -#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 -#define KVM_REG_ARM_COPROC_SHIFT 16 -#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 -#define KVM_REG_ARM_32_OPC2_SHIFT 0 -#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 -#define KVM_REG_ARM_OPC1_SHIFT 3 -#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 -#define KVM_REG_ARM_CRM_SHIFT 7 -#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 -#define KVM_REG_ARM_32_CRN_SHIFT 11 -/* - * For KVM currently all guest registers are nonsecure, but we reserve a bit - * in the encoding to distinguish secure from nonsecure for AArch32 system - * registers that are banked by security. 
This is 1 for the secure banked - * register, and 0 for the nonsecure banked register or if the register is - * not banked by security. - */ -#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 -#define KVM_REG_ARM_SECURE_SHIFT 28 - -#define ARM_CP15_REG_SHIFT_MASK(x,n) \ - (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) - -#define __ARM_CP15_REG(op1,crn,crm,op2) \ - (KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \ - ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \ - ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \ - ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \ - ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2)) - -#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32) - -#define __ARM_CP15_REG64(op1,crm) \ - (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) -#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) - -/* PL1 Physical Timer Registers */ -#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) -#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) -#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) - -/* Virtual Timer Registers */ -#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) -#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) -#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) - -/* Normal registers are mapped as coprocessor 16. */ -#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) - -/* Some registers need more space to represent values. */ -#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 -#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 -#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) -#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF -#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 - -/* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
*/ -#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF -#define KVM_REG_ARM_VFP_BASE_REG 0x0 -#define KVM_REG_ARM_VFP_FPSID 0x1000 -#define KVM_REG_ARM_VFP_FPSCR 0x1001 -#define KVM_REG_ARM_VFP_MVFR1 0x1006 -#define KVM_REG_ARM_VFP_MVFR0 0x1007 -#define KVM_REG_ARM_VFP_FPEXC 0x1008 -#define KVM_REG_ARM_VFP_FPINST 0x1009 -#define KVM_REG_ARM_VFP_FPINST2 0x100A - -/* KVM-as-firmware specific pseudo-registers */ -#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \ - KVM_REG_ARM_FW | ((r) & 0xffff)) -#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1) - /* Higher values mean better protection. */ -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2) - /* Higher values mean better protection. 
*/ -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3 -#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4) - -/* Device Control API: ARM VGIC */ -#define KVM_DEV_ARM_VGIC_GRP_ADDR 0 -#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1 -#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2 -#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32 -#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT) -#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32 -#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \ - (0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT) -#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0 -#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT) -#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff) -#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3 -#define KVM_DEV_ARM_VGIC_GRP_CTRL 4 -#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5 -#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6 -#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7 -#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10 -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \ - (0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT) -#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff -#define VGIC_LEVEL_INFO_LINE_LEVEL 0 - -/* Device Control API on vcpu fd */ -#define KVM_ARM_VCPU_PMU_V3_CTRL 0 -#define KVM_ARM_VCPU_PMU_V3_IRQ 0 -#define KVM_ARM_VCPU_PMU_V3_INIT 1 -#define KVM_ARM_VCPU_TIMER_CTRL 1 -#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0 -#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1 - -#define KVM_DEV_ARM_VGIC_CTRL_INIT 0 -#define KVM_DEV_ARM_ITS_SAVE_TABLES 1 -#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 -#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 -#define KVM_DEV_ARM_ITS_CTRL_RESET 4 - -/* KVM_IRQ_LINE irq field index values */ -#define KVM_ARM_IRQ_VCPU2_SHIFT 28 -#define KVM_ARM_IRQ_VCPU2_MASK 0xf -#define 
KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xf -#define KVM_ARM_IRQ_VCPU_SHIFT 16 -#define KVM_ARM_IRQ_VCPU_MASK 0xff -#define KVM_ARM_IRQ_NUM_SHIFT 0 -#define KVM_ARM_IRQ_NUM_MASK 0xffff - -/* irq_type field */ -#define KVM_ARM_IRQ_TYPE_CPU 0 -#define KVM_ARM_IRQ_TYPE_SPI 1 -#define KVM_ARM_IRQ_TYPE_PPI 2 - -/* out-of-kernel GIC cpu interrupt injection irq_number field */ -#define KVM_ARM_IRQ_CPU_IRQ 0 -#define KVM_ARM_IRQ_CPU_FIQ 1 - -/* - * This used to hold the highest supported SPI, but it is now obsolete - * and only here to provide source code level compatibility with older - * userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS. - */ -#define KVM_ARM_IRQ_GIC_MAX 127 - -/* One single KVM irqchip, ie. the VGIC */ -#define KVM_NR_IRQCHIPS 1 - -/* PSCI interface */ -#define KVM_PSCI_FN_BASE 0x95c1ba5e -#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) - -#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) -#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) -#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) -#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) - -#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS -#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED -#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS -#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED - -#endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-arm/mman.h b/linux-headers/asm-arm/mman.h deleted file mode 100644 index 41f99c573b93c..0000000000000 --- a/linux-headers/asm-arm/mman.h +++ /dev/null @@ -1,4 +0,0 @@ -#include - -#define arch_mmap_check(addr, len, flags) \ - (((flags) & MAP_FIXED && (addr) < FIRST_USER_ADDRESS) ? 
-EINVAL : 0) diff --git a/linux-headers/asm-arm/unistd-common.h b/linux-headers/asm-arm/unistd-common.h deleted file mode 100644 index 57cd1f21dbd5f..0000000000000 --- a/linux-headers/asm-arm/unistd-common.h +++ /dev/null @@ -1,397 +0,0 @@ -#ifndef _ASM_ARM_UNISTD_COMMON_H -#define _ASM_ARM_UNISTD_COMMON_H 1 - -#define __NR_restart_syscall (__NR_SYSCALL_BASE + 0) -#define __NR_exit (__NR_SYSCALL_BASE + 1) -#define __NR_fork (__NR_SYSCALL_BASE + 2) -#define __NR_read (__NR_SYSCALL_BASE + 3) -#define __NR_write (__NR_SYSCALL_BASE + 4) -#define __NR_open (__NR_SYSCALL_BASE + 5) -#define __NR_close (__NR_SYSCALL_BASE + 6) -#define __NR_creat (__NR_SYSCALL_BASE + 8) -#define __NR_link (__NR_SYSCALL_BASE + 9) -#define __NR_unlink (__NR_SYSCALL_BASE + 10) -#define __NR_execve (__NR_SYSCALL_BASE + 11) -#define __NR_chdir (__NR_SYSCALL_BASE + 12) -#define __NR_mknod (__NR_SYSCALL_BASE + 14) -#define __NR_chmod (__NR_SYSCALL_BASE + 15) -#define __NR_lchown (__NR_SYSCALL_BASE + 16) -#define __NR_lseek (__NR_SYSCALL_BASE + 19) -#define __NR_getpid (__NR_SYSCALL_BASE + 20) -#define __NR_mount (__NR_SYSCALL_BASE + 21) -#define __NR_setuid (__NR_SYSCALL_BASE + 23) -#define __NR_getuid (__NR_SYSCALL_BASE + 24) -#define __NR_ptrace (__NR_SYSCALL_BASE + 26) -#define __NR_pause (__NR_SYSCALL_BASE + 29) -#define __NR_access (__NR_SYSCALL_BASE + 33) -#define __NR_nice (__NR_SYSCALL_BASE + 34) -#define __NR_sync (__NR_SYSCALL_BASE + 36) -#define __NR_kill (__NR_SYSCALL_BASE + 37) -#define __NR_rename (__NR_SYSCALL_BASE + 38) -#define __NR_mkdir (__NR_SYSCALL_BASE + 39) -#define __NR_rmdir (__NR_SYSCALL_BASE + 40) -#define __NR_dup (__NR_SYSCALL_BASE + 41) -#define __NR_pipe (__NR_SYSCALL_BASE + 42) -#define __NR_times (__NR_SYSCALL_BASE + 43) -#define __NR_brk (__NR_SYSCALL_BASE + 45) -#define __NR_setgid (__NR_SYSCALL_BASE + 46) -#define __NR_getgid (__NR_SYSCALL_BASE + 47) -#define __NR_geteuid (__NR_SYSCALL_BASE + 49) -#define __NR_getegid (__NR_SYSCALL_BASE + 50) -#define __NR_acct 
(__NR_SYSCALL_BASE + 51) -#define __NR_umount2 (__NR_SYSCALL_BASE + 52) -#define __NR_ioctl (__NR_SYSCALL_BASE + 54) -#define __NR_fcntl (__NR_SYSCALL_BASE + 55) -#define __NR_setpgid (__NR_SYSCALL_BASE + 57) -#define __NR_umask (__NR_SYSCALL_BASE + 60) -#define __NR_chroot (__NR_SYSCALL_BASE + 61) -#define __NR_ustat (__NR_SYSCALL_BASE + 62) -#define __NR_dup2 (__NR_SYSCALL_BASE + 63) -#define __NR_getppid (__NR_SYSCALL_BASE + 64) -#define __NR_getpgrp (__NR_SYSCALL_BASE + 65) -#define __NR_setsid (__NR_SYSCALL_BASE + 66) -#define __NR_sigaction (__NR_SYSCALL_BASE + 67) -#define __NR_setreuid (__NR_SYSCALL_BASE + 70) -#define __NR_setregid (__NR_SYSCALL_BASE + 71) -#define __NR_sigsuspend (__NR_SYSCALL_BASE + 72) -#define __NR_sigpending (__NR_SYSCALL_BASE + 73) -#define __NR_sethostname (__NR_SYSCALL_BASE + 74) -#define __NR_setrlimit (__NR_SYSCALL_BASE + 75) -#define __NR_getrusage (__NR_SYSCALL_BASE + 77) -#define __NR_gettimeofday (__NR_SYSCALL_BASE + 78) -#define __NR_settimeofday (__NR_SYSCALL_BASE + 79) -#define __NR_getgroups (__NR_SYSCALL_BASE + 80) -#define __NR_setgroups (__NR_SYSCALL_BASE + 81) -#define __NR_symlink (__NR_SYSCALL_BASE + 83) -#define __NR_readlink (__NR_SYSCALL_BASE + 85) -#define __NR_uselib (__NR_SYSCALL_BASE + 86) -#define __NR_swapon (__NR_SYSCALL_BASE + 87) -#define __NR_reboot (__NR_SYSCALL_BASE + 88) -#define __NR_munmap (__NR_SYSCALL_BASE + 91) -#define __NR_truncate (__NR_SYSCALL_BASE + 92) -#define __NR_ftruncate (__NR_SYSCALL_BASE + 93) -#define __NR_fchmod (__NR_SYSCALL_BASE + 94) -#define __NR_fchown (__NR_SYSCALL_BASE + 95) -#define __NR_getpriority (__NR_SYSCALL_BASE + 96) -#define __NR_setpriority (__NR_SYSCALL_BASE + 97) -#define __NR_statfs (__NR_SYSCALL_BASE + 99) -#define __NR_fstatfs (__NR_SYSCALL_BASE + 100) -#define __NR_syslog (__NR_SYSCALL_BASE + 103) -#define __NR_setitimer (__NR_SYSCALL_BASE + 104) -#define __NR_getitimer (__NR_SYSCALL_BASE + 105) -#define __NR_stat (__NR_SYSCALL_BASE + 106) -#define 
__NR_lstat (__NR_SYSCALL_BASE + 107) -#define __NR_fstat (__NR_SYSCALL_BASE + 108) -#define __NR_vhangup (__NR_SYSCALL_BASE + 111) -#define __NR_wait4 (__NR_SYSCALL_BASE + 114) -#define __NR_swapoff (__NR_SYSCALL_BASE + 115) -#define __NR_sysinfo (__NR_SYSCALL_BASE + 116) -#define __NR_fsync (__NR_SYSCALL_BASE + 118) -#define __NR_sigreturn (__NR_SYSCALL_BASE + 119) -#define __NR_clone (__NR_SYSCALL_BASE + 120) -#define __NR_setdomainname (__NR_SYSCALL_BASE + 121) -#define __NR_uname (__NR_SYSCALL_BASE + 122) -#define __NR_adjtimex (__NR_SYSCALL_BASE + 124) -#define __NR_mprotect (__NR_SYSCALL_BASE + 125) -#define __NR_sigprocmask (__NR_SYSCALL_BASE + 126) -#define __NR_init_module (__NR_SYSCALL_BASE + 128) -#define __NR_delete_module (__NR_SYSCALL_BASE + 129) -#define __NR_quotactl (__NR_SYSCALL_BASE + 131) -#define __NR_getpgid (__NR_SYSCALL_BASE + 132) -#define __NR_fchdir (__NR_SYSCALL_BASE + 133) -#define __NR_bdflush (__NR_SYSCALL_BASE + 134) -#define __NR_sysfs (__NR_SYSCALL_BASE + 135) -#define __NR_personality (__NR_SYSCALL_BASE + 136) -#define __NR_setfsuid (__NR_SYSCALL_BASE + 138) -#define __NR_setfsgid (__NR_SYSCALL_BASE + 139) -#define __NR__llseek (__NR_SYSCALL_BASE + 140) -#define __NR_getdents (__NR_SYSCALL_BASE + 141) -#define __NR__newselect (__NR_SYSCALL_BASE + 142) -#define __NR_flock (__NR_SYSCALL_BASE + 143) -#define __NR_msync (__NR_SYSCALL_BASE + 144) -#define __NR_readv (__NR_SYSCALL_BASE + 145) -#define __NR_writev (__NR_SYSCALL_BASE + 146) -#define __NR_getsid (__NR_SYSCALL_BASE + 147) -#define __NR_fdatasync (__NR_SYSCALL_BASE + 148) -#define __NR__sysctl (__NR_SYSCALL_BASE + 149) -#define __NR_mlock (__NR_SYSCALL_BASE + 150) -#define __NR_munlock (__NR_SYSCALL_BASE + 151) -#define __NR_mlockall (__NR_SYSCALL_BASE + 152) -#define __NR_munlockall (__NR_SYSCALL_BASE + 153) -#define __NR_sched_setparam (__NR_SYSCALL_BASE + 154) -#define __NR_sched_getparam (__NR_SYSCALL_BASE + 155) -#define __NR_sched_setscheduler (__NR_SYSCALL_BASE + 156) 
-#define __NR_sched_getscheduler (__NR_SYSCALL_BASE + 157) -#define __NR_sched_yield (__NR_SYSCALL_BASE + 158) -#define __NR_sched_get_priority_max (__NR_SYSCALL_BASE + 159) -#define __NR_sched_get_priority_min (__NR_SYSCALL_BASE + 160) -#define __NR_sched_rr_get_interval (__NR_SYSCALL_BASE + 161) -#define __NR_nanosleep (__NR_SYSCALL_BASE + 162) -#define __NR_mremap (__NR_SYSCALL_BASE + 163) -#define __NR_setresuid (__NR_SYSCALL_BASE + 164) -#define __NR_getresuid (__NR_SYSCALL_BASE + 165) -#define __NR_poll (__NR_SYSCALL_BASE + 168) -#define __NR_nfsservctl (__NR_SYSCALL_BASE + 169) -#define __NR_setresgid (__NR_SYSCALL_BASE + 170) -#define __NR_getresgid (__NR_SYSCALL_BASE + 171) -#define __NR_prctl (__NR_SYSCALL_BASE + 172) -#define __NR_rt_sigreturn (__NR_SYSCALL_BASE + 173) -#define __NR_rt_sigaction (__NR_SYSCALL_BASE + 174) -#define __NR_rt_sigprocmask (__NR_SYSCALL_BASE + 175) -#define __NR_rt_sigpending (__NR_SYSCALL_BASE + 176) -#define __NR_rt_sigtimedwait (__NR_SYSCALL_BASE + 177) -#define __NR_rt_sigqueueinfo (__NR_SYSCALL_BASE + 178) -#define __NR_rt_sigsuspend (__NR_SYSCALL_BASE + 179) -#define __NR_pread64 (__NR_SYSCALL_BASE + 180) -#define __NR_pwrite64 (__NR_SYSCALL_BASE + 181) -#define __NR_chown (__NR_SYSCALL_BASE + 182) -#define __NR_getcwd (__NR_SYSCALL_BASE + 183) -#define __NR_capget (__NR_SYSCALL_BASE + 184) -#define __NR_capset (__NR_SYSCALL_BASE + 185) -#define __NR_sigaltstack (__NR_SYSCALL_BASE + 186) -#define __NR_sendfile (__NR_SYSCALL_BASE + 187) -#define __NR_vfork (__NR_SYSCALL_BASE + 190) -#define __NR_ugetrlimit (__NR_SYSCALL_BASE + 191) -#define __NR_mmap2 (__NR_SYSCALL_BASE + 192) -#define __NR_truncate64 (__NR_SYSCALL_BASE + 193) -#define __NR_ftruncate64 (__NR_SYSCALL_BASE + 194) -#define __NR_stat64 (__NR_SYSCALL_BASE + 195) -#define __NR_lstat64 (__NR_SYSCALL_BASE + 196) -#define __NR_fstat64 (__NR_SYSCALL_BASE + 197) -#define __NR_lchown32 (__NR_SYSCALL_BASE + 198) -#define __NR_getuid32 (__NR_SYSCALL_BASE + 199) -#define 
__NR_getgid32 (__NR_SYSCALL_BASE + 200) -#define __NR_geteuid32 (__NR_SYSCALL_BASE + 201) -#define __NR_getegid32 (__NR_SYSCALL_BASE + 202) -#define __NR_setreuid32 (__NR_SYSCALL_BASE + 203) -#define __NR_setregid32 (__NR_SYSCALL_BASE + 204) -#define __NR_getgroups32 (__NR_SYSCALL_BASE + 205) -#define __NR_setgroups32 (__NR_SYSCALL_BASE + 206) -#define __NR_fchown32 (__NR_SYSCALL_BASE + 207) -#define __NR_setresuid32 (__NR_SYSCALL_BASE + 208) -#define __NR_getresuid32 (__NR_SYSCALL_BASE + 209) -#define __NR_setresgid32 (__NR_SYSCALL_BASE + 210) -#define __NR_getresgid32 (__NR_SYSCALL_BASE + 211) -#define __NR_chown32 (__NR_SYSCALL_BASE + 212) -#define __NR_setuid32 (__NR_SYSCALL_BASE + 213) -#define __NR_setgid32 (__NR_SYSCALL_BASE + 214) -#define __NR_setfsuid32 (__NR_SYSCALL_BASE + 215) -#define __NR_setfsgid32 (__NR_SYSCALL_BASE + 216) -#define __NR_getdents64 (__NR_SYSCALL_BASE + 217) -#define __NR_pivot_root (__NR_SYSCALL_BASE + 218) -#define __NR_mincore (__NR_SYSCALL_BASE + 219) -#define __NR_madvise (__NR_SYSCALL_BASE + 220) -#define __NR_fcntl64 (__NR_SYSCALL_BASE + 221) -#define __NR_gettid (__NR_SYSCALL_BASE + 224) -#define __NR_readahead (__NR_SYSCALL_BASE + 225) -#define __NR_setxattr (__NR_SYSCALL_BASE + 226) -#define __NR_lsetxattr (__NR_SYSCALL_BASE + 227) -#define __NR_fsetxattr (__NR_SYSCALL_BASE + 228) -#define __NR_getxattr (__NR_SYSCALL_BASE + 229) -#define __NR_lgetxattr (__NR_SYSCALL_BASE + 230) -#define __NR_fgetxattr (__NR_SYSCALL_BASE + 231) -#define __NR_listxattr (__NR_SYSCALL_BASE + 232) -#define __NR_llistxattr (__NR_SYSCALL_BASE + 233) -#define __NR_flistxattr (__NR_SYSCALL_BASE + 234) -#define __NR_removexattr (__NR_SYSCALL_BASE + 235) -#define __NR_lremovexattr (__NR_SYSCALL_BASE + 236) -#define __NR_fremovexattr (__NR_SYSCALL_BASE + 237) -#define __NR_tkill (__NR_SYSCALL_BASE + 238) -#define __NR_sendfile64 (__NR_SYSCALL_BASE + 239) -#define __NR_futex (__NR_SYSCALL_BASE + 240) -#define __NR_sched_setaffinity (__NR_SYSCALL_BASE + 
241) -#define __NR_sched_getaffinity (__NR_SYSCALL_BASE + 242) -#define __NR_io_setup (__NR_SYSCALL_BASE + 243) -#define __NR_io_destroy (__NR_SYSCALL_BASE + 244) -#define __NR_io_getevents (__NR_SYSCALL_BASE + 245) -#define __NR_io_submit (__NR_SYSCALL_BASE + 246) -#define __NR_io_cancel (__NR_SYSCALL_BASE + 247) -#define __NR_exit_group (__NR_SYSCALL_BASE + 248) -#define __NR_lookup_dcookie (__NR_SYSCALL_BASE + 249) -#define __NR_epoll_create (__NR_SYSCALL_BASE + 250) -#define __NR_epoll_ctl (__NR_SYSCALL_BASE + 251) -#define __NR_epoll_wait (__NR_SYSCALL_BASE + 252) -#define __NR_remap_file_pages (__NR_SYSCALL_BASE + 253) -#define __NR_set_tid_address (__NR_SYSCALL_BASE + 256) -#define __NR_timer_create (__NR_SYSCALL_BASE + 257) -#define __NR_timer_settime (__NR_SYSCALL_BASE + 258) -#define __NR_timer_gettime (__NR_SYSCALL_BASE + 259) -#define __NR_timer_getoverrun (__NR_SYSCALL_BASE + 260) -#define __NR_timer_delete (__NR_SYSCALL_BASE + 261) -#define __NR_clock_settime (__NR_SYSCALL_BASE + 262) -#define __NR_clock_gettime (__NR_SYSCALL_BASE + 263) -#define __NR_clock_getres (__NR_SYSCALL_BASE + 264) -#define __NR_clock_nanosleep (__NR_SYSCALL_BASE + 265) -#define __NR_statfs64 (__NR_SYSCALL_BASE + 266) -#define __NR_fstatfs64 (__NR_SYSCALL_BASE + 267) -#define __NR_tgkill (__NR_SYSCALL_BASE + 268) -#define __NR_utimes (__NR_SYSCALL_BASE + 269) -#define __NR_arm_fadvise64_64 (__NR_SYSCALL_BASE + 270) -#define __NR_pciconfig_iobase (__NR_SYSCALL_BASE + 271) -#define __NR_pciconfig_read (__NR_SYSCALL_BASE + 272) -#define __NR_pciconfig_write (__NR_SYSCALL_BASE + 273) -#define __NR_mq_open (__NR_SYSCALL_BASE + 274) -#define __NR_mq_unlink (__NR_SYSCALL_BASE + 275) -#define __NR_mq_timedsend (__NR_SYSCALL_BASE + 276) -#define __NR_mq_timedreceive (__NR_SYSCALL_BASE + 277) -#define __NR_mq_notify (__NR_SYSCALL_BASE + 278) -#define __NR_mq_getsetattr (__NR_SYSCALL_BASE + 279) -#define __NR_waitid (__NR_SYSCALL_BASE + 280) -#define __NR_socket (__NR_SYSCALL_BASE + 281) 
-#define __NR_bind (__NR_SYSCALL_BASE + 282) -#define __NR_connect (__NR_SYSCALL_BASE + 283) -#define __NR_listen (__NR_SYSCALL_BASE + 284) -#define __NR_accept (__NR_SYSCALL_BASE + 285) -#define __NR_getsockname (__NR_SYSCALL_BASE + 286) -#define __NR_getpeername (__NR_SYSCALL_BASE + 287) -#define __NR_socketpair (__NR_SYSCALL_BASE + 288) -#define __NR_send (__NR_SYSCALL_BASE + 289) -#define __NR_sendto (__NR_SYSCALL_BASE + 290) -#define __NR_recv (__NR_SYSCALL_BASE + 291) -#define __NR_recvfrom (__NR_SYSCALL_BASE + 292) -#define __NR_shutdown (__NR_SYSCALL_BASE + 293) -#define __NR_setsockopt (__NR_SYSCALL_BASE + 294) -#define __NR_getsockopt (__NR_SYSCALL_BASE + 295) -#define __NR_sendmsg (__NR_SYSCALL_BASE + 296) -#define __NR_recvmsg (__NR_SYSCALL_BASE + 297) -#define __NR_semop (__NR_SYSCALL_BASE + 298) -#define __NR_semget (__NR_SYSCALL_BASE + 299) -#define __NR_semctl (__NR_SYSCALL_BASE + 300) -#define __NR_msgsnd (__NR_SYSCALL_BASE + 301) -#define __NR_msgrcv (__NR_SYSCALL_BASE + 302) -#define __NR_msgget (__NR_SYSCALL_BASE + 303) -#define __NR_msgctl (__NR_SYSCALL_BASE + 304) -#define __NR_shmat (__NR_SYSCALL_BASE + 305) -#define __NR_shmdt (__NR_SYSCALL_BASE + 306) -#define __NR_shmget (__NR_SYSCALL_BASE + 307) -#define __NR_shmctl (__NR_SYSCALL_BASE + 308) -#define __NR_add_key (__NR_SYSCALL_BASE + 309) -#define __NR_request_key (__NR_SYSCALL_BASE + 310) -#define __NR_keyctl (__NR_SYSCALL_BASE + 311) -#define __NR_semtimedop (__NR_SYSCALL_BASE + 312) -#define __NR_vserver (__NR_SYSCALL_BASE + 313) -#define __NR_ioprio_set (__NR_SYSCALL_BASE + 314) -#define __NR_ioprio_get (__NR_SYSCALL_BASE + 315) -#define __NR_inotify_init (__NR_SYSCALL_BASE + 316) -#define __NR_inotify_add_watch (__NR_SYSCALL_BASE + 317) -#define __NR_inotify_rm_watch (__NR_SYSCALL_BASE + 318) -#define __NR_mbind (__NR_SYSCALL_BASE + 319) -#define __NR_get_mempolicy (__NR_SYSCALL_BASE + 320) -#define __NR_set_mempolicy (__NR_SYSCALL_BASE + 321) -#define __NR_openat (__NR_SYSCALL_BASE 
+ 322) -#define __NR_mkdirat (__NR_SYSCALL_BASE + 323) -#define __NR_mknodat (__NR_SYSCALL_BASE + 324) -#define __NR_fchownat (__NR_SYSCALL_BASE + 325) -#define __NR_futimesat (__NR_SYSCALL_BASE + 326) -#define __NR_fstatat64 (__NR_SYSCALL_BASE + 327) -#define __NR_unlinkat (__NR_SYSCALL_BASE + 328) -#define __NR_renameat (__NR_SYSCALL_BASE + 329) -#define __NR_linkat (__NR_SYSCALL_BASE + 330) -#define __NR_symlinkat (__NR_SYSCALL_BASE + 331) -#define __NR_readlinkat (__NR_SYSCALL_BASE + 332) -#define __NR_fchmodat (__NR_SYSCALL_BASE + 333) -#define __NR_faccessat (__NR_SYSCALL_BASE + 334) -#define __NR_pselect6 (__NR_SYSCALL_BASE + 335) -#define __NR_ppoll (__NR_SYSCALL_BASE + 336) -#define __NR_unshare (__NR_SYSCALL_BASE + 337) -#define __NR_set_robust_list (__NR_SYSCALL_BASE + 338) -#define __NR_get_robust_list (__NR_SYSCALL_BASE + 339) -#define __NR_splice (__NR_SYSCALL_BASE + 340) -#define __NR_arm_sync_file_range (__NR_SYSCALL_BASE + 341) -#define __NR_tee (__NR_SYSCALL_BASE + 342) -#define __NR_vmsplice (__NR_SYSCALL_BASE + 343) -#define __NR_move_pages (__NR_SYSCALL_BASE + 344) -#define __NR_getcpu (__NR_SYSCALL_BASE + 345) -#define __NR_epoll_pwait (__NR_SYSCALL_BASE + 346) -#define __NR_kexec_load (__NR_SYSCALL_BASE + 347) -#define __NR_utimensat (__NR_SYSCALL_BASE + 348) -#define __NR_signalfd (__NR_SYSCALL_BASE + 349) -#define __NR_timerfd_create (__NR_SYSCALL_BASE + 350) -#define __NR_eventfd (__NR_SYSCALL_BASE + 351) -#define __NR_fallocate (__NR_SYSCALL_BASE + 352) -#define __NR_timerfd_settime (__NR_SYSCALL_BASE + 353) -#define __NR_timerfd_gettime (__NR_SYSCALL_BASE + 354) -#define __NR_signalfd4 (__NR_SYSCALL_BASE + 355) -#define __NR_eventfd2 (__NR_SYSCALL_BASE + 356) -#define __NR_epoll_create1 (__NR_SYSCALL_BASE + 357) -#define __NR_dup3 (__NR_SYSCALL_BASE + 358) -#define __NR_pipe2 (__NR_SYSCALL_BASE + 359) -#define __NR_inotify_init1 (__NR_SYSCALL_BASE + 360) -#define __NR_preadv (__NR_SYSCALL_BASE + 361) -#define __NR_pwritev 
(__NR_SYSCALL_BASE + 362) -#define __NR_rt_tgsigqueueinfo (__NR_SYSCALL_BASE + 363) -#define __NR_perf_event_open (__NR_SYSCALL_BASE + 364) -#define __NR_recvmmsg (__NR_SYSCALL_BASE + 365) -#define __NR_accept4 (__NR_SYSCALL_BASE + 366) -#define __NR_fanotify_init (__NR_SYSCALL_BASE + 367) -#define __NR_fanotify_mark (__NR_SYSCALL_BASE + 368) -#define __NR_prlimit64 (__NR_SYSCALL_BASE + 369) -#define __NR_name_to_handle_at (__NR_SYSCALL_BASE + 370) -#define __NR_open_by_handle_at (__NR_SYSCALL_BASE + 371) -#define __NR_clock_adjtime (__NR_SYSCALL_BASE + 372) -#define __NR_syncfs (__NR_SYSCALL_BASE + 373) -#define __NR_sendmmsg (__NR_SYSCALL_BASE + 374) -#define __NR_setns (__NR_SYSCALL_BASE + 375) -#define __NR_process_vm_readv (__NR_SYSCALL_BASE + 376) -#define __NR_process_vm_writev (__NR_SYSCALL_BASE + 377) -#define __NR_kcmp (__NR_SYSCALL_BASE + 378) -#define __NR_finit_module (__NR_SYSCALL_BASE + 379) -#define __NR_sched_setattr (__NR_SYSCALL_BASE + 380) -#define __NR_sched_getattr (__NR_SYSCALL_BASE + 381) -#define __NR_renameat2 (__NR_SYSCALL_BASE + 382) -#define __NR_seccomp (__NR_SYSCALL_BASE + 383) -#define __NR_getrandom (__NR_SYSCALL_BASE + 384) -#define __NR_memfd_create (__NR_SYSCALL_BASE + 385) -#define __NR_bpf (__NR_SYSCALL_BASE + 386) -#define __NR_execveat (__NR_SYSCALL_BASE + 387) -#define __NR_userfaultfd (__NR_SYSCALL_BASE + 388) -#define __NR_membarrier (__NR_SYSCALL_BASE + 389) -#define __NR_mlock2 (__NR_SYSCALL_BASE + 390) -#define __NR_copy_file_range (__NR_SYSCALL_BASE + 391) -#define __NR_preadv2 (__NR_SYSCALL_BASE + 392) -#define __NR_pwritev2 (__NR_SYSCALL_BASE + 393) -#define __NR_pkey_mprotect (__NR_SYSCALL_BASE + 394) -#define __NR_pkey_alloc (__NR_SYSCALL_BASE + 395) -#define __NR_pkey_free (__NR_SYSCALL_BASE + 396) -#define __NR_statx (__NR_SYSCALL_BASE + 397) -#define __NR_rseq (__NR_SYSCALL_BASE + 398) -#define __NR_io_pgetevents (__NR_SYSCALL_BASE + 399) -#define __NR_migrate_pages (__NR_SYSCALL_BASE + 400) -#define 
__NR_kexec_file_load (__NR_SYSCALL_BASE + 401) -#define __NR_clock_gettime64 (__NR_SYSCALL_BASE + 403) -#define __NR_clock_settime64 (__NR_SYSCALL_BASE + 404) -#define __NR_clock_adjtime64 (__NR_SYSCALL_BASE + 405) -#define __NR_clock_getres_time64 (__NR_SYSCALL_BASE + 406) -#define __NR_clock_nanosleep_time64 (__NR_SYSCALL_BASE + 407) -#define __NR_timer_gettime64 (__NR_SYSCALL_BASE + 408) -#define __NR_timer_settime64 (__NR_SYSCALL_BASE + 409) -#define __NR_timerfd_gettime64 (__NR_SYSCALL_BASE + 410) -#define __NR_timerfd_settime64 (__NR_SYSCALL_BASE + 411) -#define __NR_utimensat_time64 (__NR_SYSCALL_BASE + 412) -#define __NR_pselect6_time64 (__NR_SYSCALL_BASE + 413) -#define __NR_ppoll_time64 (__NR_SYSCALL_BASE + 414) -#define __NR_io_pgetevents_time64 (__NR_SYSCALL_BASE + 416) -#define __NR_recvmmsg_time64 (__NR_SYSCALL_BASE + 417) -#define __NR_mq_timedsend_time64 (__NR_SYSCALL_BASE + 418) -#define __NR_mq_timedreceive_time64 (__NR_SYSCALL_BASE + 419) -#define __NR_semtimedop_time64 (__NR_SYSCALL_BASE + 420) -#define __NR_rt_sigtimedwait_time64 (__NR_SYSCALL_BASE + 421) -#define __NR_futex_time64 (__NR_SYSCALL_BASE + 422) -#define __NR_sched_rr_get_interval_time64 (__NR_SYSCALL_BASE + 423) -#define __NR_pidfd_send_signal (__NR_SYSCALL_BASE + 424) -#define __NR_io_uring_setup (__NR_SYSCALL_BASE + 425) -#define __NR_io_uring_enter (__NR_SYSCALL_BASE + 426) -#define __NR_io_uring_register (__NR_SYSCALL_BASE + 427) -#define __NR_open_tree (__NR_SYSCALL_BASE + 428) -#define __NR_move_mount (__NR_SYSCALL_BASE + 429) -#define __NR_fsopen (__NR_SYSCALL_BASE + 430) -#define __NR_fsconfig (__NR_SYSCALL_BASE + 431) -#define __NR_fsmount (__NR_SYSCALL_BASE + 432) -#define __NR_fspick (__NR_SYSCALL_BASE + 433) -#define __NR_pidfd_open (__NR_SYSCALL_BASE + 434) -#define __NR_clone3 (__NR_SYSCALL_BASE + 435) -#define __NR_openat2 (__NR_SYSCALL_BASE + 437) -#define __NR_pidfd_getfd (__NR_SYSCALL_BASE + 438) -#define __NR_faccessat2 (__NR_SYSCALL_BASE + 439) - -#endif /* 
_ASM_ARM_UNISTD_COMMON_H */ diff --git a/linux-headers/asm-arm/unistd-eabi.h b/linux-headers/asm-arm/unistd-eabi.h deleted file mode 100644 index 266f1fcdfb370..0000000000000 --- a/linux-headers/asm-arm/unistd-eabi.h +++ /dev/null @@ -1,5 +0,0 @@ -#ifndef _ASM_ARM_UNISTD_EABI_H -#define _ASM_ARM_UNISTD_EABI_H 1 - - -#endif /* _ASM_ARM_UNISTD_EABI_H */ diff --git a/linux-headers/asm-arm/unistd-oabi.h b/linux-headers/asm-arm/unistd-oabi.h deleted file mode 100644 index 47d9afb96d59d..0000000000000 --- a/linux-headers/asm-arm/unistd-oabi.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef _ASM_ARM_UNISTD_OABI_H -#define _ASM_ARM_UNISTD_OABI_H 1 - -#define __NR_time (__NR_SYSCALL_BASE + 13) -#define __NR_umount (__NR_SYSCALL_BASE + 22) -#define __NR_stime (__NR_SYSCALL_BASE + 25) -#define __NR_alarm (__NR_SYSCALL_BASE + 27) -#define __NR_utime (__NR_SYSCALL_BASE + 30) -#define __NR_getrlimit (__NR_SYSCALL_BASE + 76) -#define __NR_select (__NR_SYSCALL_BASE + 82) -#define __NR_readdir (__NR_SYSCALL_BASE + 89) -#define __NR_mmap (__NR_SYSCALL_BASE + 90) -#define __NR_socketcall (__NR_SYSCALL_BASE + 102) -#define __NR_syscall (__NR_SYSCALL_BASE + 113) -#define __NR_ipc (__NR_SYSCALL_BASE + 117) - -#endif /* _ASM_ARM_UNISTD_OABI_H */ diff --git a/linux-headers/asm-arm/unistd.h b/linux-headers/asm-arm/unistd.h deleted file mode 100644 index 18b0825885ead..0000000000000 --- a/linux-headers/asm-arm/unistd.h +++ /dev/null @@ -1,41 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * arch/arm/include/asm/unistd.h - * - * Copyright (C) 2001-2005 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Please forward _all_ changes to this file to rmk@arm.linux.org.uk, - * no matter what the change is. Thanks! 
- */ -#ifndef __ASM_ARM_UNISTD_H -#define __ASM_ARM_UNISTD_H - -#define __NR_OABI_SYSCALL_BASE 0x900000 - -#if defined(__thumb__) || defined(__ARM_EABI__) -#define __NR_SYSCALL_BASE 0 -#include -#else -#define __NR_SYSCALL_BASE __NR_OABI_SYSCALL_BASE -#include -#endif - -#include -#define __NR_sync_file_range2 __NR_arm_sync_file_range - -/* - * The following SWIs are ARM private. - */ -#define __ARM_NR_BASE (__NR_SYSCALL_BASE+0x0f0000) -#define __ARM_NR_breakpoint (__ARM_NR_BASE+1) -#define __ARM_NR_cacheflush (__ARM_NR_BASE+2) -#define __ARM_NR_usr26 (__ARM_NR_BASE+3) -#define __ARM_NR_usr32 (__ARM_NR_BASE+4) -#define __ARM_NR_set_tls (__ARM_NR_BASE+5) -#define __ARM_NR_get_tls (__ARM_NR_BASE+6) - -#endif /* __ASM_ARM_UNISTD_H */ diff --git a/linux-headers/asm-arm64/unistd_64.h b/linux-headers/asm-arm64/unistd_64.h index ee9aaebdf3252..4ae25c2b91685 100644 --- a/linux-headers/asm-arm64/unistd_64.h +++ b/linux-headers/asm-arm64/unistd_64.h @@ -324,6 +324,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-generic/unistd.h b/linux-headers/asm-generic/unistd.h index 2892a45023af6..04e0077fb4c97 100644 --- a/linux-headers/asm-generic/unistd.h +++ b/linux-headers/asm-generic/unistd.h @@ -852,8 +852,14 @@ __SYSCALL(__NR_removexattrat, sys_removexattrat) #define __NR_open_tree_attr 467 __SYSCALL(__NR_open_tree_attr, sys_open_tree_attr) +/* fs/inode.c */ +#define __NR_file_getattr 468 +__SYSCALL(__NR_file_getattr, sys_file_getattr) +#define __NR_file_setattr 469 +__SYSCALL(__NR_file_setattr, sys_file_setattr) + #undef __NR_syscalls -#define __NR_syscalls 468 +#define __NR_syscalls 470 /* * 32 bit systems traditionally used different diff --git a/linux-headers/asm-loongarch/unistd_64.h b/linux-headers/asm-loongarch/unistd_64.h index 50d22df8f7ce8..5033fc8f2fe9e 100644 --- 
a/linux-headers/asm-loongarch/unistd_64.h +++ b/linux-headers/asm-loongarch/unistd_64.h @@ -320,6 +320,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-mips/unistd_n32.h b/linux-headers/asm-mips/unistd_n32.h index bdcc2f460baba..c99c10e5bf45d 100644 --- a/linux-headers/asm-mips/unistd_n32.h +++ b/linux-headers/asm-mips/unistd_n32.h @@ -396,5 +396,7 @@ #define __NR_listxattrat (__NR_Linux + 465) #define __NR_removexattrat (__NR_Linux + 466) #define __NR_open_tree_attr (__NR_Linux + 467) +#define __NR_file_getattr (__NR_Linux + 468) +#define __NR_file_setattr (__NR_Linux + 469) #endif /* _ASM_UNISTD_N32_H */ diff --git a/linux-headers/asm-mips/unistd_n64.h b/linux-headers/asm-mips/unistd_n64.h index 3b6b0193b6905..0d975bb185605 100644 --- a/linux-headers/asm-mips/unistd_n64.h +++ b/linux-headers/asm-mips/unistd_n64.h @@ -372,5 +372,7 @@ #define __NR_listxattrat (__NR_Linux + 465) #define __NR_removexattrat (__NR_Linux + 466) #define __NR_open_tree_attr (__NR_Linux + 467) +#define __NR_file_getattr (__NR_Linux + 468) +#define __NR_file_setattr (__NR_Linux + 469) #endif /* _ASM_UNISTD_N64_H */ diff --git a/linux-headers/asm-mips/unistd_o32.h b/linux-headers/asm-mips/unistd_o32.h index 4609a4b4d38b4..86ac0ac84b39d 100644 --- a/linux-headers/asm-mips/unistd_o32.h +++ b/linux-headers/asm-mips/unistd_o32.h @@ -442,5 +442,7 @@ #define __NR_listxattrat (__NR_Linux + 465) #define __NR_removexattrat (__NR_Linux + 466) #define __NR_open_tree_attr (__NR_Linux + 467) +#define __NR_file_getattr (__NR_Linux + 468) +#define __NR_file_setattr (__NR_Linux + 469) #endif /* _ASM_UNISTD_O32_H */ diff --git a/linux-headers/asm-powerpc/kvm.h b/linux-headers/asm-powerpc/kvm.h index eaeda001784eb..077c5437f5219 100644 --- a/linux-headers/asm-powerpc/kvm.h +++ b/linux-headers/asm-powerpc/kvm.h @@ -1,18 +1,5 @@ /* 
SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - * * Copyright IBM Corp. 2007 * * Authors: Hollis Blanchard diff --git a/linux-headers/asm-powerpc/unistd_32.h b/linux-headers/asm-powerpc/unistd_32.h index 5d38a427e0af7..d7a32c5e06903 100644 --- a/linux-headers/asm-powerpc/unistd_32.h +++ b/linux-headers/asm-powerpc/unistd_32.h @@ -449,6 +449,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-powerpc/unistd_64.h b/linux-headers/asm-powerpc/unistd_64.h index 860a488e4d1a1..ff35c51fc6566 100644 --- a/linux-headers/asm-powerpc/unistd_64.h +++ b/linux-headers/asm-powerpc/unistd_64.h @@ -421,6 +421,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-riscv/kvm.h b/linux-headers/asm-riscv/kvm.h index 5f59fd226cc57..ef27d4289da11 100644 --- a/linux-headers/asm-riscv/kvm.h +++ b/linux-headers/asm-riscv/kvm.h @@ -18,6 +18,7 @@ #define __KVM_HAVE_IRQ_LINE #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_DIRTY_LOG_PAGE_OFFSET 64 #define KVM_INTERRUPT_SET -1U #define 
KVM_INTERRUPT_UNSET -2U diff --git a/linux-headers/asm-riscv/unistd_32.h b/linux-headers/asm-riscv/unistd_32.h index a5e769f1d9967..6083373e884ca 100644 --- a/linux-headers/asm-riscv/unistd_32.h +++ b/linux-headers/asm-riscv/unistd_32.h @@ -315,6 +315,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-riscv/unistd_64.h b/linux-headers/asm-riscv/unistd_64.h index 8df4d64841c2a..f0c7585c60edf 100644 --- a/linux-headers/asm-riscv/unistd_64.h +++ b/linux-headers/asm-riscv/unistd_64.h @@ -325,6 +325,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-s390/unistd_32.h b/linux-headers/asm-s390/unistd_32.h index 85eedbd18e31f..37b8f6f3585df 100644 --- a/linux-headers/asm-s390/unistd_32.h +++ b/linux-headers/asm-s390/unistd_32.h @@ -440,5 +440,7 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_S390_UNISTD_32_H */ diff --git a/linux-headers/asm-s390/unistd_64.h b/linux-headers/asm-s390/unistd_64.h index c03b1b9701024..0652ba63318b0 100644 --- a/linux-headers/asm-s390/unistd_64.h +++ b/linux-headers/asm-s390/unistd_64.h @@ -388,5 +388,7 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_S390_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_32.h b/linux-headers/asm-x86/unistd_32.h index 491d6b4eb68ee..8f784a56347d8 100644 --- a/linux-headers/asm-x86/unistd_32.h +++ b/linux-headers/asm-x86/unistd_32.h @@ -458,6 +458,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define 
__NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_32_H */ diff --git a/linux-headers/asm-x86/unistd_64.h b/linux-headers/asm-x86/unistd_64.h index 7cf88bf9bdac6..2f55bebb81cca 100644 --- a/linux-headers/asm-x86/unistd_64.h +++ b/linux-headers/asm-x86/unistd_64.h @@ -381,6 +381,8 @@ #define __NR_listxattrat 465 #define __NR_removexattrat 466 #define __NR_open_tree_attr 467 +#define __NR_file_getattr 468 +#define __NR_file_setattr 469 #endif /* _ASM_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/unistd_x32.h b/linux-headers/asm-x86/unistd_x32.h index 82959111e6a5e..8cc8673f15471 100644 --- a/linux-headers/asm-x86/unistd_x32.h +++ b/linux-headers/asm-x86/unistd_x32.h @@ -334,6 +334,8 @@ #define __NR_listxattrat (__X32_SYSCALL_BIT + 465) #define __NR_removexattrat (__X32_SYSCALL_BIT + 466) #define __NR_open_tree_attr (__X32_SYSCALL_BIT + 467) +#define __NR_file_getattr (__X32_SYSCALL_BIT + 468) +#define __NR_file_setattr (__X32_SYSCALL_BIT + 469) #define __NR_rt_sigaction (__X32_SYSCALL_BIT + 512) #define __NR_rt_sigreturn (__X32_SYSCALL_BIT + 513) #define __NR_ioctl (__X32_SYSCALL_BIT + 514) diff --git a/linux-headers/linux/iommufd.h b/linux-headers/linux/iommufd.h index cb0f7d6b4dec9..2105a039551e9 100644 --- a/linux-headers/linux/iommufd.h +++ b/linux-headers/linux/iommufd.h @@ -56,6 +56,7 @@ enum { IOMMUFD_CMD_VDEVICE_ALLOC = 0x91, IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92, IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93, + IOMMUFD_CMD_HW_QUEUE_ALLOC = 0x94, }; /** @@ -590,17 +591,44 @@ struct iommu_hw_info_arm_smmuv3 { __u32 aidr; }; +/** + * struct iommu_hw_info_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Hardware + * Information (IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV) + * + * @flags: Must be 0 + * @version: Version number for the CMDQ-V HW for PARAM bits[03:00] + * @log2vcmdqs: Log2 of the total number of VCMDQs for PARAM bits[07:04] + * @log2vsids: Log2 of the total number of SID replacements for PARAM bits[15:12] + * 
@__reserved: Must be 0 + * + * VMM can use these fields directly in its emulated global PARAM register. Note + * that only one Virtual Interface (VINTF) should be exposed to a VM, i.e. PARAM + * bits[11:08] should be set to 0 for log2 of the total number of VINTFs. + */ +struct iommu_hw_info_tegra241_cmdqv { + __u32 flags; + __u8 version; + __u8 log2vcmdqs; + __u8 log2vsids; + __u8 __reserved; +}; + /** * enum iommu_hw_info_type - IOMMU Hardware Info Types - * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware + * @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware * info + * @IOMMU_HW_INFO_TYPE_DEFAULT: Input to request for a default type * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type + * @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) info type */ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_NONE = 0, + IOMMU_HW_INFO_TYPE_DEFAULT = 0, IOMMU_HW_INFO_TYPE_INTEL_VTD = 1, IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2, + IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3, }; /** @@ -625,6 +653,15 @@ enum iommufd_hw_capabilities { IOMMU_HW_CAP_PCI_PASID_PRIV = 1 << 2, }; +/** + * enum iommufd_hw_info_flags - Flags for iommu_hw_info + * @IOMMU_HW_INFO_FLAG_INPUT_TYPE: If set, @in_data_type carries an input type + * for user space to request for a specific info + */ +enum iommufd_hw_info_flags { + IOMMU_HW_INFO_FLAG_INPUT_TYPE = 1 << 0, +}; + /** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) @@ -634,6 +671,12 @@ enum iommufd_hw_capabilities { * data that kernel supports * @data_uptr: User pointer to a user-space buffer used by the kernel to fill * the iommu type specific hardware information data + * @in_data_type: This shares the same field with @out_data_type, making it be + * a bidirectional field. 
When IOMMU_HW_INFO_FLAG_INPUT_TYPE is + * set, an input type carried via this @in_data_type field will + * be valid, requesting for the info data to the given type. If + * IOMMU_HW_INFO_FLAG_INPUT_TYPE is unset, any input value will + * be seen as IOMMU_HW_INFO_TYPE_DEFAULT * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. * @out_capabilities: Output the generic iommu capability info type as defined @@ -663,7 +706,10 @@ struct iommu_hw_info { __u32 dev_id; __u32 data_len; __aligned_u64 data_uptr; - __u32 out_data_type; + union { + __u32 in_data_type; + __u32 out_data_type; + }; __u8 out_max_pasid_log2; __u8 __reserved[3]; __aligned_u64 out_capabilities; @@ -951,10 +997,29 @@ struct iommu_fault_alloc { * enum iommu_viommu_type - Virtual IOMMU Type * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type + * @IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) enabled ARM SMMUv3 type */ enum iommu_viommu_type { IOMMU_VIOMMU_TYPE_DEFAULT = 0, IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1, + IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV = 2, +}; + +/** + * struct iommu_viommu_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Virtual Interface + * (IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV) + * @out_vintf_mmap_offset: mmap offset argument for VINTF's page0 + * @out_vintf_mmap_length: mmap length argument for VINTF's page0 + * + * Both @out_vintf_mmap_offset and @out_vintf_mmap_length are reported by kernel + * for user space to mmap the VINTF page0 from the host physical address space + * to the guest physical address space so that a guest kernel can directly R/W + * access to the VINTF page0 in order to control its virtual command queues. 
+ */ +struct iommu_viommu_tegra241_cmdqv { + __aligned_u64 out_vintf_mmap_offset; + __aligned_u64 out_vintf_mmap_length; }; /** @@ -965,6 +1030,9 @@ enum iommu_viommu_type { * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU * @hwpt_id: ID of a nesting parent HWPT to associate to * @out_viommu_id: Output virtual IOMMU ID for the allocated object + * @data_len: Length of the type specific data + * @__reserved: Must be 0 + * @data_uptr: User pointer to a driver-specific virtual IOMMU data * * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's * virtualization support that is a security-isolated slice of the real IOMMU HW @@ -985,6 +1053,9 @@ struct iommu_viommu_alloc { __u32 dev_id; __u32 hwpt_id; __u32 out_viommu_id; + __u32 data_len; + __u32 __reserved; + __aligned_u64 data_uptr; }; #define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC) @@ -995,10 +1066,15 @@ struct iommu_viommu_alloc { * @dev_id: The physical device to allocate a virtual instance on the vIOMMU * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTORY * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID - * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table + * of AMD IOMMU, and vRID of Intel VT-d * * Allocate a virtual device instance (for a physical device) against a vIOMMU. * This instance holds the device's information (related to its vIOMMU) in a VM. + * User should use IOMMU_DESTROY to destroy the virtual device before + * destroying the physical device (by closing vfio_cdev fd). Otherwise the + * virtual device would be forcibly destroyed on physical device destruction, + * its vdevice_id would be permanently leaked (unremovable & unreusable) until + * iommu fd closed. 
*/ struct iommu_vdevice_alloc { __u32 size; @@ -1075,10 +1151,12 @@ struct iommufd_vevent_header { * enum iommu_veventq_type - Virtual Event Queue Type * @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use * @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue + * @IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV Extension IRQ */ enum iommu_veventq_type { IOMMU_VEVENTQ_TYPE_DEFAULT = 0, IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1, + IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV = 2, }; /** @@ -1102,6 +1180,19 @@ struct iommu_vevent_arm_smmuv3 { __aligned_le64 evt[4]; }; +/** + * struct iommu_vevent_tegra241_cmdqv - Tegra241 CMDQV IRQ + * (IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV) + * @lvcmdq_err_map: 128-bit logical vcmdq error map, little-endian. + * (Refer to register LVCMDQ_ERR_MAPs per VINTF ) + * + * The 128-bit register value from HW exclusively reflect the error bits for a + * Virtual Interface represented by a vIOMMU object. Read and report directly. + */ +struct iommu_vevent_tegra241_cmdqv { + __aligned_le64 lvcmdq_err_map[2]; +}; + /** * struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC) * @size: sizeof(struct iommu_veventq_alloc) @@ -1141,4 +1232,61 @@ struct iommu_veventq_alloc { __u32 __reserved; }; #define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC) + +/** + * enum iommu_hw_queue_type - HW Queue Type + * @IOMMU_HW_QUEUE_TYPE_DEFAULT: Reserved for future use + * @IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) Virtual Command Queue (VCMDQ) + */ +enum iommu_hw_queue_type { + IOMMU_HW_QUEUE_TYPE_DEFAULT = 0, + /* + * TEGRA241_CMDQV requirements (otherwise, allocation will fail) + * - alloc starts from the lowest @index=0 in ascending order + * - destroy starts from the last allocated @index in descending order + * - @base_addr must be aligned to @length in bytes and mapped in IOAS + * - @length must be a power of 2, with a minimum 32 bytes and a maximum + * 2 ^ idr[1].CMDQS * 16 bytes (use 
GET_HW_INFO call to read idr[1] + * from struct iommu_hw_info_arm_smmuv3) + * - suggest to back the queue memory with contiguous physical pages or + * a single huge page with alignment of the queue size, and limit the + * emulated vSMMU's IDR1.CMDQS to log2(huge page size / 16 bytes) + */ + IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV = 1, +}; + +/** + * struct iommu_hw_queue_alloc - ioctl(IOMMU_HW_QUEUE_ALLOC) + * @size: sizeof(struct iommu_hw_queue_alloc) + * @flags: Must be 0 + * @viommu_id: Virtual IOMMU ID to associate the HW queue with + * @type: One of enum iommu_hw_queue_type + * @index: The logical index to the HW queue per virtual IOMMU for a multi-queue + * model + * @out_hw_queue_id: The ID of the new HW queue + * @nesting_parent_iova: Base address of the queue memory in the guest physical + * address space + * @length: Length of the queue memory + * + * Allocate a HW queue object for a vIOMMU-specific HW-accelerated queue, which + * allows HW to access a guest queue memory described using @nesting_parent_iova + * and @length. + * + * A vIOMMU can allocate multiple queues, but it must use a different @index per + * type to separate each allocation, e.g:: + * + * Type1 HW queue0, Type1 HW queue1, Type2 HW queue0, ... 
+ */ +struct iommu_hw_queue_alloc { + __u32 size; + __u32 flags; + __u32 viommu_id; + __u32 type; + __u32 index; + __u32 out_hw_queue_id; + __aligned_u64 nesting_parent_iova; + __aligned_u64 length; +}; +#define IOMMU_HW_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HW_QUEUE_ALLOC) #endif diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index 32c5885a3c200..be704965d8651 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -636,6 +636,7 @@ struct kvm_ioeventfd { #define KVM_X86_DISABLE_EXITS_HLT (1 << 1) #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2) #define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3) +#define KVM_X86_DISABLE_EXITS_APERFMPERF (1 << 4) /* for KVM_ENABLE_CAP */ struct kvm_enable_cap { @@ -952,6 +953,7 @@ struct kvm_enable_cap { #define KVM_CAP_ARM_EL2 240 #define KVM_CAP_ARM_EL2_E2H0 241 #define KVM_CAP_RISCV_MP_STATE_RESET 242 +#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243 struct kvm_irq_routing_irqchip { __u32 irqchip; diff --git a/linux-headers/linux/mshv.h b/linux-headers/linux/mshv.h new file mode 100644 index 0000000000000..5bc83db6a3254 --- /dev/null +++ b/linux-headers/linux/mshv.h @@ -0,0 +1,291 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Userspace interfaces for /dev/mshv* devices and derived fds + * + * This file is divided into sections containing data structures and IOCTLs for + * a particular set of related devices or derived file descriptors. + * + * The IOCTL definitions are at the end of each section. They are grouped by + * device/fd, so that new IOCTLs can easily be added with a monotonically + * increasing number. 
+ */ +#ifndef _LINUX_MSHV_H +#define _LINUX_MSHV_H + +#include <linux/types.h> + +#define MSHV_IOCTL 0xB8 + +/* + ******************************************* + * Entry point to main VMM APIs: /dev/mshv * + ******************************************* + */ + +enum { + MSHV_PT_BIT_LAPIC, + MSHV_PT_BIT_X2APIC, + MSHV_PT_BIT_GPA_SUPER_PAGES, + MSHV_PT_BIT_COUNT, +}; + +#define MSHV_PT_FLAGS_MASK ((1 << MSHV_PT_BIT_COUNT) - 1) + +enum { + MSHV_PT_ISOLATION_NONE, + MSHV_PT_ISOLATION_COUNT, +}; + +/** + * struct mshv_create_partition - arguments for MSHV_CREATE_PARTITION + * @pt_flags: Bitmask of 1 << MSHV_PT_BIT_* + * @pt_isolation: MSHV_PT_ISOLATION_* + * + * Returns a file descriptor to act as a handle to a guest partition. + * At this point the partition is not yet initialized in the hypervisor. + * Some operations must be done with the partition in this state, e.g. setting + * so-called "early" partition properties. The partition can then be + * initialized with MSHV_INITIALIZE_PARTITION. + */ +struct mshv_create_partition { + __u64 pt_flags; + __u64 pt_isolation; +}; + +/* /dev/mshv */ +#define MSHV_CREATE_PARTITION _IOW(MSHV_IOCTL, 0x00, struct mshv_create_partition) + +/* + ************************ + * Child partition APIs * + ************************ + */ + +struct mshv_create_vp { + __u32 vp_index; +}; + +enum { + MSHV_SET_MEM_BIT_WRITABLE, + MSHV_SET_MEM_BIT_EXECUTABLE, + MSHV_SET_MEM_BIT_UNMAP, + MSHV_SET_MEM_BIT_COUNT +}; + +#define MSHV_SET_MEM_FLAGS_MASK ((1 << MSHV_SET_MEM_BIT_COUNT) - 1) + +/* The hypervisor's "native" page size */ +#define MSHV_HV_PAGE_SIZE 0x1000 + +/** + * struct mshv_user_mem_region - arguments for MSHV_SET_GUEST_MEMORY + * @size: Size of the memory region (bytes). Must be aligned to + * MSHV_HV_PAGE_SIZE + * @guest_pfn: Base guest page number to map + * @userspace_addr: Base address of userspace memory. Must be aligned to + * MSHV_HV_PAGE_SIZE + * @flags: Bitmask of 1 << MSHV_SET_MEM_BIT_*. 
If (1 << MSHV_SET_MEM_BIT_UNMAP) + * is set, ignore other bits. + * @rsvd: MBZ + * + * Map or unmap a region of userspace memory to Guest Physical Addresses (GPA). + * Mappings can't overlap in GPA space or userspace. + * To unmap, these fields must match an existing mapping. + */ +struct mshv_user_mem_region { + __u64 size; + __u64 guest_pfn; + __u64 userspace_addr; + __u8 flags; + __u8 rsvd[7]; +}; + +enum { + MSHV_IRQFD_BIT_DEASSIGN, + MSHV_IRQFD_BIT_RESAMPLE, + MSHV_IRQFD_BIT_COUNT, +}; + +#define MSHV_IRQFD_FLAGS_MASK ((1 << MSHV_IRQFD_BIT_COUNT) - 1) + +struct mshv_user_irqfd { + __s32 fd; + __s32 resamplefd; + __u32 gsi; + __u32 flags; +}; + +enum { + MSHV_IOEVENTFD_BIT_DATAMATCH, + MSHV_IOEVENTFD_BIT_PIO, + MSHV_IOEVENTFD_BIT_DEASSIGN, + MSHV_IOEVENTFD_BIT_COUNT, +}; + +#define MSHV_IOEVENTFD_FLAGS_MASK ((1 << MSHV_IOEVENTFD_BIT_COUNT) - 1) + +struct mshv_user_ioeventfd { + __u64 datamatch; + __u64 addr; /* legal pio/mmio address */ + __u32 len; /* 1, 2, 4, or 8 bytes */ + __s32 fd; + __u32 flags; + __u8 rsvd[4]; +}; + +struct mshv_user_irq_entry { + __u32 gsi; + __u32 address_lo; + __u32 address_hi; + __u32 data; +}; + +struct mshv_user_irq_table { + __u32 nr; + __u32 rsvd; /* MBZ */ + struct mshv_user_irq_entry entries[]; +}; + +enum { + MSHV_GPAP_ACCESS_TYPE_ACCESSED, + MSHV_GPAP_ACCESS_TYPE_DIRTY, + MSHV_GPAP_ACCESS_TYPE_COUNT /* Count of enum members */ +}; + +enum { + MSHV_GPAP_ACCESS_OP_NOOP, + MSHV_GPAP_ACCESS_OP_CLEAR, + MSHV_GPAP_ACCESS_OP_SET, + MSHV_GPAP_ACCESS_OP_COUNT /* Count of enum members */ +}; + +/** + * struct mshv_gpap_access_bitmap - arguments for MSHV_GET_GPAP_ACCESS_BITMAP + * @access_type: MSHV_GPAP_ACCESS_TYPE_* - The type of access to record in the + * bitmap + * @access_op: MSHV_GPAP_ACCESS_OP_* - Allows an optional clear or set of all + * the access states in the range, after retrieving the current + * states. 
+ * @rsvd: MBZ + * @page_count: Number of pages + * @gpap_base: Base gpa page number + * @bitmap_ptr: Output buffer for bitmap, at least (page_count + 7) / 8 bytes + * + * Retrieve a bitmap of either ACCESSED or DIRTY bits for a given range of guest + * memory, and optionally clear or set the bits. + */ +struct mshv_gpap_access_bitmap { + __u8 access_type; + __u8 access_op; + __u8 rsvd[6]; + __u64 page_count; + __u64 gpap_base; + __u64 bitmap_ptr; +}; + +/** + * struct mshv_root_hvcall - arguments for MSHV_ROOT_HVCALL + * @code: Hypercall code (HVCALL_*) + * @reps: in: Rep count ('repcount') + * out: Reps completed ('repcomp'). MBZ unless rep hvcall + * @in_sz: Size of input incl rep data. <= MSHV_HV_PAGE_SIZE + * @out_sz: Size of output buffer. <= MSHV_HV_PAGE_SIZE. MBZ if out_ptr is 0 + * @status: in: MBZ + * out: HV_STATUS_* from hypercall + * @rsvd: MBZ + * @in_ptr: Input data buffer (struct hv_input_*). If used with partition or + * vp fd, partition id field is populated by kernel. 
+ * @out_ptr: Output data buffer (optional) + */ +struct mshv_root_hvcall { + __u16 code; + __u16 reps; + __u16 in_sz; + __u16 out_sz; + __u16 status; + __u8 rsvd[6]; + __u64 in_ptr; + __u64 out_ptr; +}; + +/* Partition fds created with MSHV_CREATE_PARTITION */ +#define MSHV_INITIALIZE_PARTITION _IO(MSHV_IOCTL, 0x00) +#define MSHV_CREATE_VP _IOW(MSHV_IOCTL, 0x01, struct mshv_create_vp) +#define MSHV_SET_GUEST_MEMORY _IOW(MSHV_IOCTL, 0x02, struct mshv_user_mem_region) +#define MSHV_IRQFD _IOW(MSHV_IOCTL, 0x03, struct mshv_user_irqfd) +#define MSHV_IOEVENTFD _IOW(MSHV_IOCTL, 0x04, struct mshv_user_ioeventfd) +#define MSHV_SET_MSI_ROUTING _IOW(MSHV_IOCTL, 0x05, struct mshv_user_irq_table) +#define MSHV_GET_GPAP_ACCESS_BITMAP _IOWR(MSHV_IOCTL, 0x06, struct mshv_gpap_access_bitmap) +/* Generic hypercall */ +#define MSHV_ROOT_HVCALL _IOWR(MSHV_IOCTL, 0x07, struct mshv_root_hvcall) + +/* + ******************************** + * VP APIs for child partitions * + ******************************** + */ + +#define MSHV_RUN_VP_BUF_SZ 256 + +/* + * VP state pages may be mapped to userspace via mmap(). + * To specify which state page, use MSHV_VP_MMAP_OFFSET_ values multiplied by + * the system page size. + * e.g. + * long page_size = sysconf(_SC_PAGE_SIZE); + * void *reg_page = mmap(NULL, MSHV_HV_PAGE_SIZE, PROT_READ|PROT_WRITE, + * MAP_SHARED, vp_fd, + * MSHV_VP_MMAP_OFFSET_REGISTERS * page_size); + */ +enum { + MSHV_VP_MMAP_OFFSET_REGISTERS, + MSHV_VP_MMAP_OFFSET_INTERCEPT_MESSAGE, + MSHV_VP_MMAP_OFFSET_GHCB, + MSHV_VP_MMAP_OFFSET_COUNT +}; + +/** + * struct mshv_run_vp - argument for MSHV_RUN_VP + * @msg_buf: On success, the intercept message is copied here. It can be + * interpreted using the relevant hypervisor definitions. 
+ */ +struct mshv_run_vp { + __u8 msg_buf[MSHV_RUN_VP_BUF_SZ]; +}; + +enum { + MSHV_VP_STATE_LAPIC, /* Local interrupt controller state (either arch) */ + MSHV_VP_STATE_XSAVE, /* XSAVE data in compacted form (x86_64) */ + MSHV_VP_STATE_SIMP, + MSHV_VP_STATE_SIEFP, + MSHV_VP_STATE_SYNTHETIC_TIMERS, + MSHV_VP_STATE_COUNT, +}; + +/** + * struct mshv_get_set_vp_state - arguments for MSHV_[GET,SET]_VP_STATE + * @type: MSHV_VP_STATE_* + * @rsvd: MBZ + * @buf_sz: in: 4k page-aligned size of buffer + * out: Actual size of data (on EINVAL, check this to see if buffer + * was too small) + * @buf_ptr: 4k page-aligned data buffer + */ +struct mshv_get_set_vp_state { + __u8 type; + __u8 rsvd[3]; + __u32 buf_sz; + __u64 buf_ptr; +}; + +/* VP fds created with MSHV_CREATE_VP */ +#define MSHV_RUN_VP _IOR(MSHV_IOCTL, 0x00, struct mshv_run_vp) +#define MSHV_GET_VP_STATE _IOWR(MSHV_IOCTL, 0x01, struct mshv_get_set_vp_state) +#define MSHV_SET_VP_STATE _IOWR(MSHV_IOCTL, 0x02, struct mshv_get_set_vp_state) +/* + * Generic hypercall + * Defined above in partition IOCTLs, avoid redefining it here + * #define MSHV_ROOT_HVCALL _IOWR(MSHV_IOCTL, 0x07, struct mshv_root_hvcall) + */ + +#endif diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index 79bf8c0cc5e40..4d96d1fc12faf 100644 --- a/linux-headers/linux/vfio.h +++ b/linux-headers/linux/vfio.h @@ -905,10 +905,12 @@ struct vfio_device_feature { * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18, * struct vfio_device_bind_iommufd) * @argsz: User filled size of this data. - * @flags: Must be 0. + * @flags: Must be 0 or a bit flags of VFIO_DEVICE_BIND_* * @iommufd: iommufd to bind. * @out_devid: The device id generated by this bind. devid is a handle for * this device/iommufd bond and can be used in IOMMUFD commands. + * @token_uuid_ptr: Valid if VFIO_DEVICE_BIND_FLAG_TOKEN. Points to a 16 byte + * UUID in the same format as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN. * * Bind a vfio_device to the specified iommufd. 
* @@ -917,13 +919,21 @@ struct vfio_device_feature { * * Unbind is automatically conducted when device fd is closed. * + * A token is sometimes required to open the device, unless this is known to be + * needed VFIO_DEVICE_BIND_FLAG_TOKEN should not be set and token_uuid_ptr is + * ignored. The only case today is a PF/VF relationship where the VF bind must + * be provided the same token as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN provided to + * the PF. + * * Return: 0 on success, -errno on failure. */ struct vfio_device_bind_iommufd { __u32 argsz; __u32 flags; +#define VFIO_DEVICE_BIND_FLAG_TOKEN (1 << 0) __s32 iommufd; __u32 out_devid; + __aligned_u64 token_uuid_ptr; }; #define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18) diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h index d4b3e2ae1314d..283348b64af9a 100644 --- a/linux-headers/linux/vhost.h +++ b/linux-headers/linux/vhost.h @@ -235,4 +235,39 @@ */ #define VHOST_VDPA_GET_VRING_SIZE _IOWR(VHOST_VIRTIO, 0x82, \ struct vhost_vring_state) + +/* Extended features manipulation */ +#define VHOST_GET_FEATURES_ARRAY _IOR(VHOST_VIRTIO, 0x83, \ + struct vhost_features_array) +#define VHOST_SET_FEATURES_ARRAY _IOW(VHOST_VIRTIO, 0x83, \ + struct vhost_features_array) + +/* fork_owner values for vhost */ +#define VHOST_FORK_OWNER_KTHREAD 0 +#define VHOST_FORK_OWNER_TASK 1 + +/** + * VHOST_SET_FORK_FROM_OWNER - Set the fork_owner flag for the vhost device, + * This ioctl must called before VHOST_SET_OWNER. + * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y + * + * @param fork_owner: An 8-bit value that determines the vhost thread mode + * + * When fork_owner is set to VHOST_FORK_OWNER_TASK(default value): + * - Vhost will create vhost worker as tasks forked from the owner, + * inheriting all of the owner's attributes. + * + * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD: + * - Vhost will create vhost workers as kernel threads. 
+ */ +#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x83, __u8) + +/** + * VHOST_GET_FORK_OWNER - Get the current fork_owner flag for the vhost device. + * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y + * + * @return: An 8-bit value indicating the current thread mode. + */ +#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x84, __u8) + #endif diff --git a/linux-user/aarch64/cpu_loop.c b/linux-user/aarch64/cpu_loop.c index fea43cefa6bc9..7f66a879ea98d 100644 --- a/linux-user/aarch64/cpu_loop.c +++ b/linux-user/aarch64/cpu_loop.c @@ -27,18 +27,144 @@ #include "target/arm/syndrome.h" #include "target/arm/cpu-features.h" +/* Use the exception syndrome to map a cpu exception to a signal. */ +static void signal_for_exception(CPUARMState *env, vaddr addr) +{ + uint32_t syn = env->exception.syndrome; + int si_code, si_signo; + + /* Let signal delivery see that ESR is live. */ + env->cp15.esr_el[1] = syn; + + switch (syn_get_ec(syn)) { + case EC_DATAABORT: + case EC_INSNABORT: + /* Both EC have the same format for FSC, or close enough. */ + switch (extract32(syn, 0, 6)) { + case 0x04 ... 0x07: /* Translation fault, level {0-3} */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MAPERR; + break; + case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */ + case 0x0d ... 0x0f: /* Permission fault, level {1-3} */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_ACCERR; + break; + case 0x11: /* Synchronous Tag Check Fault */ + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_MTESERR; + break; + case 0x21: /* Alignment fault */ + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; + default: + g_assert_not_reached(); + } + break; + + case EC_PCALIGNMENT: + si_signo = TARGET_SIGBUS; + si_code = TARGET_BUS_ADRALN; + break; + + case EC_UNCATEGORIZED: /* E.g. undefined instruction */ + case EC_SYSTEMREGISTERTRAP: /* E.g. inaccessible register */ + case EC_SMETRAP: /* E.g. invalid insn in streaming state */ + case EC_BTITRAP: /* E.g. 
invalid guarded branch target */ + case EC_ILLEGALSTATE: + /* + * Illegal state happens via an ERET from a privileged mode, + * so is not normally possible from user-only. However, gdbstub + * is not prevented from writing CPSR_IL, aka PSTATE.IL, which + * would generate a trap from the next translated block. + * In the kernel, default case -> el0_inv -> bad_el0_sync. + */ + si_signo = TARGET_SIGILL; + si_code = TARGET_ILL_ILLOPC; + break; + + case EC_PACFAIL: + si_signo = TARGET_SIGILL; + si_code = TARGET_ILL_ILLOPN; + break; + + case EC_GCS: + si_signo = TARGET_SIGSEGV; + si_code = TARGET_SEGV_CPERR; + break; + + case EC_MOP: + /* + * FIXME: The kernel fixes up wrong-option exceptions. + * For QEMU linux-user mode, you can only get these if + * the process is doing something silly (not executing + * the MOPS instructions in the required P/M/E sequence), + * so it is not a problem in practice that we do not. + * + * We ought ideally to implement the same "rewind to the + * start of the sequence" logic that the kernel does in + * arm64_mops_reset_regs(). In the meantime, deliver + * the guest a SIGILL, with the same ILLOPN si_code + * we've always used for this. 
+ */ + si_signo = TARGET_SIGILL; + si_code = TARGET_ILL_ILLOPN; + break; + + case EC_WFX_TRAP: /* user-only WFI implemented as NOP */ + case EC_CP15RTTRAP: /* AArch32 */ + case EC_CP15RRTTRAP: /* AArch32 */ + case EC_CP14RTTRAP: /* AArch32 */ + case EC_CP14DTTRAP: /* AArch32 */ + case EC_ADVSIMDFPACCESSTRAP: /* user-only does not disable fpu */ + case EC_FPIDTRAP: /* AArch32 */ + case EC_PACTRAP: /* user-only does not disable pac regs */ + case EC_BXJTRAP: /* AArch32 */ + case EC_CP14RRTTRAP: /* AArch32 */ + case EC_AA32_SVC: /* AArch32 */ + case EC_AA32_HVC: /* AArch32 */ + case EC_AA32_SMC: /* AArch32 */ + case EC_AA64_SVC: /* generates EXCP_SWI */ + case EC_AA64_HVC: /* user-only generates EC_UNCATEGORIZED */ + case EC_AA64_SMC: /* user-only generates EC_UNCATEGORIZED */ + case EC_SVEACCESSTRAP: /* user-only does not disable sve */ + case EC_ERETTRAP: /* user-only generates EC_UNCATEGORIZED */ + case EC_GPC: /* user-only has no EL3 gpc tables */ + case EC_INSNABORT_SAME_EL: /* el0 cannot trap to el0 */ + case EC_DATAABORT_SAME_EL: /* el0 cannot trap to el0 */ + case EC_SPALIGNMENT: /* sp alignment checks not implemented */ + case EC_AA32_FPTRAP: /* fp exceptions not implemented */ + case EC_AA64_FPTRAP: /* fp exceptions not implemented */ + case EC_SERROR: /* user-only does not have hw faults */ + case EC_BREAKPOINT: /* user-only does not have hw debug */ + case EC_BREAKPOINT_SAME_EL: /* user-only does not have hw debug */ + case EC_SOFTWARESTEP: /* user-only does not have hw debug */ + case EC_SOFTWARESTEP_SAME_EL: /* user-only does not have hw debug */ + case EC_WATCHPOINT: /* user-only does not have hw debug */ + case EC_WATCHPOINT_SAME_EL: /* user-only does not have hw debug */ + case EC_AA32_BKPT: /* AArch32 */ + case EC_VECTORCATCH: /* AArch32 */ + case EC_AA64_BKPT: /* generates EXCP_BKPT */ + default: + g_assert_not_reached(); + } + + force_sig_fault(si_signo, si_code, addr); +} + /* AArch64 main loop */ void cpu_loop(CPUARMState *env) { CPUState *cs = 
env_cpu(env); - int trapnr, ec, fsc, si_code, si_signo; + int trapnr; abi_long ret; for (;;) { cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_SWI: @@ -63,46 +189,11 @@ void cpu_loop(CPUARMState *env) /* just indicate that signals should be handled asap */ break; case EXCP_UDEF: - force_sig_fault(TARGET_SIGILL, TARGET_ILL_ILLOPN, env->pc); + signal_for_exception(env, env->pc); break; case EXCP_PREFETCH_ABORT: case EXCP_DATA_ABORT: - ec = syn_get_ec(env->exception.syndrome); - switch (ec) { - case EC_DATAABORT: - case EC_INSNABORT: - /* Both EC have the same format for FSC, or close enough. */ - fsc = extract32(env->exception.syndrome, 0, 6); - switch (fsc) { - case 0x04 ... 0x07: /* Translation fault, level {0-3} */ - si_signo = TARGET_SIGSEGV; - si_code = TARGET_SEGV_MAPERR; - break; - case 0x09 ... 0x0b: /* Access flag fault, level {1-3} */ - case 0x0d ... 0x0f: /* Permission fault, level {1-3} */ - si_signo = TARGET_SIGSEGV; - si_code = TARGET_SEGV_ACCERR; - break; - case 0x11: /* Synchronous Tag Check Fault */ - si_signo = TARGET_SIGSEGV; - si_code = TARGET_SEGV_MTESERR; - break; - case 0x21: /* Alignment fault */ - si_signo = TARGET_SIGBUS; - si_code = TARGET_BUS_ADRALN; - break; - default: - g_assert_not_reached(); - } - break; - case EC_PCALIGNMENT: - si_signo = TARGET_SIGBUS; - si_code = TARGET_BUS_ADRALN; - break; - default: - g_assert_not_reached(); - } - force_sig_fault(si_signo, si_code, env->exception.vaddress); + signal_for_exception(env, env->exception.vaddress); break; case EXCP_DEBUG: case EXCP_BKPT: @@ -137,13 +228,10 @@ void cpu_loop(CPUARMState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { + CPUARMState *env = cpu_env(cs); ARMCPU *cpu = env_archcpu(env); - CPUState *cs = env_cpu(env); - TaskState *ts = get_task_state(cs); - struct image_info *info = 
ts->info; - int i; if (!(arm_feature(env, ARM_FEATURE_AARCH64))) { fprintf(stderr, @@ -151,14 +239,12 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) exit(EXIT_FAILURE); } - for (i = 0; i < 31; i++) { - env->xregs[i] = regs->regs[i]; - } - env->pc = regs->pc; - env->xregs[31] = regs->sp; + env->pc = info->entry & ~0x3ULL; + env->xregs[31] = info->start_stack; + #if TARGET_BIG_ENDIAN env->cp15.sctlr_el[1] |= SCTLR_E0E; - for (i = 1; i < 4; ++i) { + for (int i = 1; i < 4; ++i) { env->cp15.sctlr_el[i] |= SCTLR_EE; } arm_rebuild_hflags(env); @@ -167,9 +253,4 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) if (cpu_isar_feature(aa64_pauth, cpu)) { qemu_guest_getrandom_nofail(&env->keys, sizeof(env->keys)); } - - ts->stack_base = info->start_stack; - ts->heap_base = info->brk; - /* This will be filled in on the first SYS_HEAPINFO call. */ - ts->heap_limit = 0; } diff --git a/linux-user/aarch64/elfload.c b/linux-user/aarch64/elfload.c new file mode 100644 index 0000000000000..3af5a3777616a --- /dev/null +++ b/linux-user/aarch64/elfload.c @@ -0,0 +1,381 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu.h" +#include "loader.h" +#include "target/arm/cpu-features.h" +#include "target_elf.h" +#include "elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "any"; +} + +enum { + ARM_HWCAP_A64_FP = 1 << 0, + ARM_HWCAP_A64_ASIMD = 1 << 1, + ARM_HWCAP_A64_EVTSTRM = 1 << 2, + ARM_HWCAP_A64_AES = 1 << 3, + ARM_HWCAP_A64_PMULL = 1 << 4, + ARM_HWCAP_A64_SHA1 = 1 << 5, + ARM_HWCAP_A64_SHA2 = 1 << 6, + ARM_HWCAP_A64_CRC32 = 1 << 7, + ARM_HWCAP_A64_ATOMICS = 1 << 8, + ARM_HWCAP_A64_FPHP = 1 << 9, + ARM_HWCAP_A64_ASIMDHP = 1 << 10, + ARM_HWCAP_A64_CPUID = 1 << 11, + ARM_HWCAP_A64_ASIMDRDM = 1 << 12, + ARM_HWCAP_A64_JSCVT = 1 << 13, + ARM_HWCAP_A64_FCMA = 1 << 14, + ARM_HWCAP_A64_LRCPC = 1 << 15, + ARM_HWCAP_A64_DCPOP = 1 << 16, + ARM_HWCAP_A64_SHA3 = 1 << 17, + 
ARM_HWCAP_A64_SM3 = 1 << 18, + ARM_HWCAP_A64_SM4 = 1 << 19, + ARM_HWCAP_A64_ASIMDDP = 1 << 20, + ARM_HWCAP_A64_SHA512 = 1 << 21, + ARM_HWCAP_A64_SVE = 1 << 22, + ARM_HWCAP_A64_ASIMDFHM = 1 << 23, + ARM_HWCAP_A64_DIT = 1 << 24, + ARM_HWCAP_A64_USCAT = 1 << 25, + ARM_HWCAP_A64_ILRCPC = 1 << 26, + ARM_HWCAP_A64_FLAGM = 1 << 27, + ARM_HWCAP_A64_SSBS = 1 << 28, + ARM_HWCAP_A64_SB = 1 << 29, + ARM_HWCAP_A64_PACA = 1 << 30, + ARM_HWCAP_A64_PACG = 1ULL << 31, + ARM_HWCAP_A64_GCS = 1ULL << 32, + ARM_HWCAP_A64_CMPBR = 1ULL << 33, + ARM_HWCAP_A64_FPRCVT = 1ULL << 34, + ARM_HWCAP_A64_F8MM8 = 1ULL << 35, + ARM_HWCAP_A64_F8MM4 = 1ULL << 36, + ARM_HWCAP_A64_SVE_F16MM = 1ULL << 37, + ARM_HWCAP_A64_SVE_ELTPERM = 1ULL << 38, + ARM_HWCAP_A64_SVE_AES2 = 1ULL << 39, + ARM_HWCAP_A64_SVE_BFSCALE = 1ULL << 40, + ARM_HWCAP_A64_SVE2P2 = 1ULL << 41, + ARM_HWCAP_A64_SME2P2 = 1ULL << 42, + ARM_HWCAP_A64_SME_SBITPERM = 1ULL << 43, + ARM_HWCAP_A64_SME_AES = 1ULL << 44, + ARM_HWCAP_A64_SME_SFEXPA = 1ULL << 45, + ARM_HWCAP_A64_SME_STMOP = 1ULL << 46, + ARM_HWCAP_A64_SME_SMOP4 = 1ULL << 47, + + ARM_HWCAP2_A64_DCPODP = 1 << 0, + ARM_HWCAP2_A64_SVE2 = 1 << 1, + ARM_HWCAP2_A64_SVEAES = 1 << 2, + ARM_HWCAP2_A64_SVEPMULL = 1 << 3, + ARM_HWCAP2_A64_SVEBITPERM = 1 << 4, + ARM_HWCAP2_A64_SVESHA3 = 1 << 5, + ARM_HWCAP2_A64_SVESM4 = 1 << 6, + ARM_HWCAP2_A64_FLAGM2 = 1 << 7, + ARM_HWCAP2_A64_FRINT = 1 << 8, + ARM_HWCAP2_A64_SVEI8MM = 1 << 9, + ARM_HWCAP2_A64_SVEF32MM = 1 << 10, + ARM_HWCAP2_A64_SVEF64MM = 1 << 11, + ARM_HWCAP2_A64_SVEBF16 = 1 << 12, + ARM_HWCAP2_A64_I8MM = 1 << 13, + ARM_HWCAP2_A64_BF16 = 1 << 14, + ARM_HWCAP2_A64_DGH = 1 << 15, + ARM_HWCAP2_A64_RNG = 1 << 16, + ARM_HWCAP2_A64_BTI = 1 << 17, + ARM_HWCAP2_A64_MTE = 1 << 18, + ARM_HWCAP2_A64_ECV = 1 << 19, + ARM_HWCAP2_A64_AFP = 1 << 20, + ARM_HWCAP2_A64_RPRES = 1 << 21, + ARM_HWCAP2_A64_MTE3 = 1 << 22, + ARM_HWCAP2_A64_SME = 1 << 23, + ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, + ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, + ARM_HWCAP2_A64_SME_I8I32 = 1 << 
26, + ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, + ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, + ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, + ARM_HWCAP2_A64_SME_FA64 = 1 << 30, + ARM_HWCAP2_A64_WFXT = 1ULL << 31, + ARM_HWCAP2_A64_EBF16 = 1ULL << 32, + ARM_HWCAP2_A64_SVE_EBF16 = 1ULL << 33, + ARM_HWCAP2_A64_CSSC = 1ULL << 34, + ARM_HWCAP2_A64_RPRFM = 1ULL << 35, + ARM_HWCAP2_A64_SVE2P1 = 1ULL << 36, + ARM_HWCAP2_A64_SME2 = 1ULL << 37, + ARM_HWCAP2_A64_SME2P1 = 1ULL << 38, + ARM_HWCAP2_A64_SME_I16I32 = 1ULL << 39, + ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40, + ARM_HWCAP2_A64_SME_B16B16 = 1ULL << 41, + ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42, + ARM_HWCAP2_A64_MOPS = 1ULL << 43, + ARM_HWCAP2_A64_HBC = 1ULL << 44, + ARM_HWCAP2_A64_SVE_B16B16 = 1ULL << 45, + ARM_HWCAP2_A64_LRCPC3 = 1ULL << 46, + ARM_HWCAP2_A64_LSE128 = 1ULL << 47, + ARM_HWCAP2_A64_FPMR = 1ULL << 48, + ARM_HWCAP2_A64_LUT = 1ULL << 49, + ARM_HWCAP2_A64_FAMINMAX = 1ULL << 50, + ARM_HWCAP2_A64_F8CVT = 1ULL << 51, + ARM_HWCAP2_A64_F8FMA = 1ULL << 52, + ARM_HWCAP2_A64_F8DP4 = 1ULL << 53, + ARM_HWCAP2_A64_F8DP2 = 1ULL << 54, + ARM_HWCAP2_A64_F8E4M3 = 1ULL << 55, + ARM_HWCAP2_A64_F8E5M2 = 1ULL << 56, + ARM_HWCAP2_A64_SME_LUTV2 = 1ULL << 57, + ARM_HWCAP2_A64_SME_F8F16 = 1ULL << 58, + ARM_HWCAP2_A64_SME_F8F32 = 1ULL << 59, + ARM_HWCAP2_A64_SME_SF8FMA = 1ULL << 60, + ARM_HWCAP2_A64_SME_SF8DP4 = 1ULL << 61, + ARM_HWCAP2_A64_SME_SF8DP2 = 1ULL << 62, + ARM_HWCAP2_A64_POE = 1ULL << 63, +}; + +#define GET_FEATURE_ID(feat, hwcap) \ + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + abi_ulong hwcaps = 0; + + hwcaps |= ARM_HWCAP_A64_FP; + hwcaps |= ARM_HWCAP_A64_ASIMD; + hwcaps |= ARM_HWCAP_A64_CPUID; + + /* probe for the extra features */ + + GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES); + GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL); + GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1); + GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2); + 
GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512); + GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32); + GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); + GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); + GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); + GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); + GET_FEATURE_ID(aa64_lse, ARM_HWCAP_A64_ATOMICS); + GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT); + GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); + GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); + GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); + GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); + GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG); + GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM); + GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT); + GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT); + GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB); + GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM); + GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP); + GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC); + GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC); + GET_FEATURE_ID(aa64_gcs, ARM_HWCAP_A64_GCS); + + return hwcaps; +} + +abi_ulong get_elf_hwcap2(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + abi_ulong hwcaps = 0; + + GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP); + GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2); + GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES); + GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL); + GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM); + GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3); + GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4); + GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2); + GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT); + GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM); + GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM); + GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM); + GET_FEATURE_ID(aa64_sve_bf16, 
ARM_HWCAP2_A64_SVEBF16); + GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM); + GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16); + GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); + GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); + GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); + GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3); + GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | + ARM_HWCAP2_A64_SME_F32F32 | + ARM_HWCAP2_A64_SME_B16F32 | + ARM_HWCAP2_A64_SME_F16F32 | + ARM_HWCAP2_A64_SME_I8I32)); + GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); + GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); + GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); + GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC); + GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS); + GET_FEATURE_ID(aa64_sve2p1, ARM_HWCAP2_A64_SVE2P1); + GET_FEATURE_ID(aa64_sme2, (ARM_HWCAP2_A64_SME2 | + ARM_HWCAP2_A64_SME_I16I32 | + ARM_HWCAP2_A64_SME_BI32I32)); + GET_FEATURE_ID(aa64_sme2p1, ARM_HWCAP2_A64_SME2P1); + GET_FEATURE_ID(aa64_sme_b16b16, ARM_HWCAP2_A64_SME_B16B16); + GET_FEATURE_ID(aa64_sme_f16f16, ARM_HWCAP2_A64_SME_F16F16); + GET_FEATURE_ID(aa64_sve_b16b16, ARM_HWCAP2_A64_SVE_B16B16); + GET_FEATURE_ID(aa64_cssc, ARM_HWCAP2_A64_CSSC); + GET_FEATURE_ID(aa64_lse128, ARM_HWCAP2_A64_LSE128); + + return hwcaps; +} + +const char *elf_hwcap_str(uint32_t bit) +{ + static const char * const hwcap_str[] = { + [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp", + [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd", + [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm", + [__builtin_ctz(ARM_HWCAP_A64_AES )] = "aes", + [__builtin_ctz(ARM_HWCAP_A64_PMULL )] = "pmull", + [__builtin_ctz(ARM_HWCAP_A64_SHA1 )] = "sha1", + [__builtin_ctz(ARM_HWCAP_A64_SHA2 )] = "sha2", + [__builtin_ctz(ARM_HWCAP_A64_CRC32 )] = "crc32", + [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics", + [__builtin_ctz(ARM_HWCAP_A64_FPHP )] = "fphp", + [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp", + [__builtin_ctz(ARM_HWCAP_A64_CPUID )] = "cpuid", 
+ [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm", + [__builtin_ctz(ARM_HWCAP_A64_JSCVT )] = "jscvt", + [__builtin_ctz(ARM_HWCAP_A64_FCMA )] = "fcma", + [__builtin_ctz(ARM_HWCAP_A64_LRCPC )] = "lrcpc", + [__builtin_ctz(ARM_HWCAP_A64_DCPOP )] = "dcpop", + [__builtin_ctz(ARM_HWCAP_A64_SHA3 )] = "sha3", + [__builtin_ctz(ARM_HWCAP_A64_SM3 )] = "sm3", + [__builtin_ctz(ARM_HWCAP_A64_SM4 )] = "sm4", + [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp", + [__builtin_ctz(ARM_HWCAP_A64_SHA512 )] = "sha512", + [__builtin_ctz(ARM_HWCAP_A64_SVE )] = "sve", + [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm", + [__builtin_ctz(ARM_HWCAP_A64_DIT )] = "dit", + [__builtin_ctz(ARM_HWCAP_A64_USCAT )] = "uscat", + [__builtin_ctz(ARM_HWCAP_A64_ILRCPC )] = "ilrcpc", + [__builtin_ctz(ARM_HWCAP_A64_FLAGM )] = "flagm", + [__builtin_ctz(ARM_HWCAP_A64_SSBS )] = "ssbs", + [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb", + [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca", + [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg", + [__builtin_ctzll(ARM_HWCAP_A64_GCS )] = "gcs", + [__builtin_ctzll(ARM_HWCAP_A64_CMPBR )] = "cmpbr", + [__builtin_ctzll(ARM_HWCAP_A64_FPRCVT)] = "fprcvt", + [__builtin_ctzll(ARM_HWCAP_A64_F8MM8 )] = "f8mm8", + [__builtin_ctzll(ARM_HWCAP_A64_F8MM4 )] = "f8mm4", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_F16MM)] = "svef16mm", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_ELTPERM)] = "sveeltperm", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_AES2)] = "sveaes2", + [__builtin_ctzll(ARM_HWCAP_A64_SVE_BFSCALE)] = "svebfscale", + [__builtin_ctzll(ARM_HWCAP_A64_SVE2P2)] = "sve2p2", + [__builtin_ctzll(ARM_HWCAP_A64_SME2P2)] = "sme2p2", + [__builtin_ctzll(ARM_HWCAP_A64_SME_SBITPERM)] = "smesbitperm", + [__builtin_ctzll(ARM_HWCAP_A64_SME_AES)] = "smeaes", + [__builtin_ctzll(ARM_HWCAP_A64_SME_SFEXPA)] = "smesfexpa", + [__builtin_ctzll(ARM_HWCAP_A64_SME_STMOP)] = "smestmop", + [__builtin_ctzll(ARM_HWCAP_A64_SME_SMOP4)] = "smesmop4", + }; + + return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; +} + +const char *elf_hwcap2_str(uint32_t bit) +{ + static const char * const hwcap_str[] = { + [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp", + [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2", + [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes", + [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL )] = "svepmull", + [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm", + [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3 )] = "svesha3", + [__builtin_ctz(ARM_HWCAP2_A64_SVESM4 )] = "svesm4", + [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2 )] = "flagm2", + [__builtin_ctz(ARM_HWCAP2_A64_FRINT )] = "frint", + [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM )] = "svei8mm", + [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM )] = "svef32mm", + [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM )] = "svef64mm", + [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16 )] = "svebf16", + [__builtin_ctz(ARM_HWCAP2_A64_I8MM )] = "i8mm", + [__builtin_ctz(ARM_HWCAP2_A64_BF16 )] = "bf16", + [__builtin_ctz(ARM_HWCAP2_A64_DGH )] = "dgh", + [__builtin_ctz(ARM_HWCAP2_A64_RNG )] = "rng", + [__builtin_ctz(ARM_HWCAP2_A64_BTI )] = "bti", + [__builtin_ctz(ARM_HWCAP2_A64_MTE )] = "mte", + [__builtin_ctz(ARM_HWCAP2_A64_ECV )] = "ecv", + [__builtin_ctz(ARM_HWCAP2_A64_AFP )] = "afp", + [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres", + [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3", + [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme", + [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64", + [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64", + [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "smei8i32", + [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32", + [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32", + [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32", + [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64", + [__builtin_ctz(ARM_HWCAP2_A64_WFXT )] = "wfxt", + [__builtin_ctzll(ARM_HWCAP2_A64_EBF16 )] = "ebf16", + [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16 )] = "sveebf16", + 
[__builtin_ctzll(ARM_HWCAP2_A64_CSSC )] = "cssc", + [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM )] = "rprfm", + [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1 )] = "sve2p1", + [__builtin_ctzll(ARM_HWCAP2_A64_SME2 )] = "sme2", + [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1 )] = "sme2p1", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16", + [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops", + [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc", + [__builtin_ctzll(ARM_HWCAP2_A64_SVE_B16B16 )] = "sveb16b16", + [__builtin_ctzll(ARM_HWCAP2_A64_LRCPC3 )] = "lrcpc3", + [__builtin_ctzll(ARM_HWCAP2_A64_LSE128 )] = "lse128", + [__builtin_ctzll(ARM_HWCAP2_A64_FPMR )] = "fpmr", + [__builtin_ctzll(ARM_HWCAP2_A64_LUT )] = "lut", + [__builtin_ctzll(ARM_HWCAP2_A64_FAMINMAX )] = "faminmax", + [__builtin_ctzll(ARM_HWCAP2_A64_F8CVT )] = "f8cvt", + [__builtin_ctzll(ARM_HWCAP2_A64_F8FMA )] = "f8fma", + [__builtin_ctzll(ARM_HWCAP2_A64_F8DP4 )] = "f8dp4", + [__builtin_ctzll(ARM_HWCAP2_A64_F8DP2 )] = "f8dp2", + [__builtin_ctzll(ARM_HWCAP2_A64_F8E4M3 )] = "f8e4m3", + [__builtin_ctzll(ARM_HWCAP2_A64_F8E5M2 )] = "f8e5m2", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_LUTV2 )] = "smelutv2", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F16 )] = "smef8f16", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F32 )] = "smef8f32", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP4 )] = "smesf8dp4", + [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP2 )] = "smesf8dp2", + [__builtin_ctzll(ARM_HWCAP2_A64_POE )] = "poe", + }; + + return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; +} + +const char *get_elf_platform(CPUState *cs) +{ + return TARGET_BIG_ENDIAN ? 
"aarch64_be" : "aarch64"; +} + +bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, + const uint32_t *data, + struct image_info *info, + Error **errp) +{ + if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { + if (pr_datasz != sizeof(uint32_t)) { + error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); + return false; + } + /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ + info->note_flags = *data; + } + return true; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUARMState *env) +{ + for (int i = 0; i < 31; i++) { + r->pt.regs[i] = tswap64(env->xregs[i]); + } + r->pt.sp = tswap64(env->xregs[31]); + r->pt.pc = tswap64(env->pc); + r->pt.pstate = tswap64(pstate_read((CPUARMState *)env)); +} diff --git a/linux-user/aarch64/gcs-internal.h b/linux-user/aarch64/gcs-internal.h new file mode 100644 index 0000000000000..e586c7e80e223 --- /dev/null +++ b/linux-user/aarch64/gcs-internal.h @@ -0,0 +1,38 @@ +/* + * AArch64 gcs functions for linux-user + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ +#ifndef AARCH64_GCS_INTERNAL_H +#define AARCH64_GCS_INTERNAL_H + +#ifndef PR_SHADOW_STACK_ENABLE +# define PR_SHADOW_STACK_ENABLE (1U << 0) +# define PR_SHADOW_STACK_WRITE (1U << 1) +# define PR_SHADOW_STACK_PUSH (1U << 2) +#endif + +static inline uint64_t gcs_get_el0_mode(CPUArchState *env) +{ + uint64_t cr = env->cp15.gcscr_el[0]; + abi_ulong flags = 0; + + flags |= cr & GCSCR_PCRSEL ? PR_SHADOW_STACK_ENABLE : 0; + flags |= cr & GCSCR_STREN ? PR_SHADOW_STACK_WRITE : 0; + flags |= cr & GCSCR_PUSHMEN ? PR_SHADOW_STACK_PUSH : 0; + + return flags; +} + +static inline void gcs_set_el0_mode(CPUArchState *env, uint64_t flags) +{ + uint64_t cr = GCSCRE0_NTR; + + cr |= flags & PR_SHADOW_STACK_ENABLE ? GCSCR_RVCHKEN | GCSCR_PCRSEL : 0; + cr |= flags & PR_SHADOW_STACK_WRITE ? GCSCR_STREN : 0; + cr |= flags & PR_SHADOW_STACK_PUSH ? 
GCSCR_PUSHMEN : 0; + + env->cp15.gcscr_el[0] = cr; +} + +#endif diff --git a/linux-user/aarch64/signal.c b/linux-user/aarch64/signal.c index d50cab78d83ab..f7edfa249e5d5 100644 --- a/linux-user/aarch64/signal.c +++ b/linux-user/aarch64/signal.c @@ -22,6 +22,7 @@ #include "signal-common.h" #include "linux-user/trace.h" #include "target/arm/cpu-features.h" +#include "gcs-internal.h" struct target_sigcontext { uint64_t fault_address; @@ -65,6 +66,13 @@ struct target_fpsimd_context { uint64_t vregs[32 * 2]; /* really uint128_t vregs[32] */ }; +#define TARGET_ESR_MAGIC 0x45535201 + +struct target_esr_context { + struct target_aarch64_ctx head; + uint64_t esr; +}; + #define TARGET_EXTRA_MAGIC 0x45585401 struct target_extra_context { @@ -121,6 +129,40 @@ struct target_za_context { #define TARGET_ZA_SIG_CONTEXT_SIZE(VQ) \ TARGET_ZA_SIG_ZAV_OFFSET(VQ, VQ * TARGET_SVE_VQ_BYTES) +#define TARGET_TPIDR2_MAGIC 0x54504902 + +struct target_tpidr2_context { + struct target_aarch64_ctx head; + uint64_t tpidr2; +}; + +#define TARGET_ZT_MAGIC 0x5a544e01 + +struct target_zt_context { + struct target_aarch64_ctx head; + uint16_t nregs; + uint16_t reserved[3]; + /* ZTn register data immediately follows */ +}; + +#define TARGET_ZT_SIG_REG_BYTES (512 / 8) +#define TARGET_ZT_SIG_REGS_SIZE(n) (TARGET_ZT_SIG_REG_BYTES * (n)) +#define TARGET_ZT_SIG_CONTEXT_SIZE(n) (sizeof(struct target_zt_context) + \ + TARGET_ZT_SIG_REGS_SIZE(n)) +#define TARGET_ZT_SIG_REGS_OFFSET sizeof(struct target_zt_context) +QEMU_BUILD_BUG_ON(TARGET_ZT_SIG_REG_BYTES != \ + sizeof_field(CPUARMState, za_state.zt0)); + +#define TARGET_GCS_MAGIC 0x47435300 +#define GCS_SIGNAL_CAP(X) ((X) & TARGET_PAGE_MASK) + +struct target_gcs_context { + struct target_aarch64_ctx head; + uint64_t gcspr; + uint64_t features_enabled; + uint64_t reserved; +}; + struct target_rt_sigframe { struct target_siginfo info; struct target_ucontext uc; @@ -177,6 +219,14 @@ static void target_setup_fpsimd_record(struct target_fpsimd_context *fpsimd, } 
} +static void target_setup_esr_record(struct target_esr_context *ctx, + CPUARMState *env) +{ + __put_user(TARGET_ESR_MAGIC, &ctx->head.magic); + __put_user(sizeof(*ctx), &ctx->head.size); + __put_user(env->cp15.esr_el[1], &ctx->esr); +} + static void target_setup_extra_record(struct target_extra_context *extra, uint64_t datap, uint32_t extra_size) { @@ -253,6 +303,65 @@ static void target_setup_za_record(struct target_za_context *za, } } +static void target_setup_tpidr2_record(struct target_tpidr2_context *tpidr2, + CPUARMState *env) +{ + __put_user(TARGET_TPIDR2_MAGIC, &tpidr2->head.magic); + __put_user(sizeof(struct target_tpidr2_context), &tpidr2->head.size); + __put_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2); +} + +static void target_setup_zt_record(struct target_zt_context *zt, + CPUARMState *env, int size) +{ + uint64_t *z; + + memset(zt, 0, sizeof(*zt)); + __put_user(TARGET_ZT_MAGIC, &zt->head.magic); + __put_user(size, &zt->head.size); + /* + * The record format allows for multiple ZT regs, but + * currently there is only one, ZT0. + */ + __put_user(1, &zt->nregs); + assert(size == TARGET_ZT_SIG_CONTEXT_SIZE(1)); + + /* ZT0 is the same byte-stream format as SVE regs and ZA */ + z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET; + for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) { + __put_user_e(env->za_state.zt0[i], z + i, le); + } +} + +static bool target_setup_gcs_record(struct target_gcs_context *ctx, + CPUARMState *env, uint64_t return_addr) +{ + uint64_t mode = gcs_get_el0_mode(env); + uint64_t gcspr = env->cp15.gcspr_el[0]; + + if (mode & PR_SHADOW_STACK_ENABLE) { + /* Push a cap for the signal frame. */ + gcspr -= 8; + if (put_user_u64(GCS_SIGNAL_CAP(gcspr), gcspr)) { + return false; + } + + /* Push a gcs entry for the trampoline. 
*/ + if (put_user_u64(return_addr, gcspr - 8)) { + return false; + } + env->cp15.gcspr_el[0] = gcspr - 8; + } + + __put_user(TARGET_GCS_MAGIC, &ctx->head.magic); + __put_user(sizeof(*ctx), &ctx->head.size); + __put_user(gcspr, &ctx->gcspr); + __put_user(mode, &ctx->features_enabled); + __put_user(0, &ctx->reserved); + + return true; +} + static void target_restore_general_frame(CPUARMState *env, struct target_rt_sigframe *sf) { @@ -403,6 +512,94 @@ static bool target_restore_za_record(CPUARMState *env, return true; } +static void target_restore_tpidr2_record(CPUARMState *env, + struct target_tpidr2_context *tpidr2) +{ + __get_user(env->cp15.tpidr2_el0, &tpidr2->tpidr2); +} + +static bool target_restore_zt_record(CPUARMState *env, + struct target_zt_context *zt, int size, + int svcr) +{ + uint16_t nregs; + uint64_t *z; + + if (!(FIELD_EX64(svcr, SVCR, ZA))) { + return false; + } + + __get_user(nregs, &zt->nregs); + + if (nregs != 1) { + return false; + } + + z = (void *)zt + TARGET_ZT_SIG_REGS_OFFSET; + for (int i = 0; i < ARRAY_SIZE(env->za_state.zt0); i++) { + __get_user_e(env->za_state.zt0[i], z + i, le); + } + return true; +} + +static bool target_restore_gcs_record(CPUARMState *env, + struct target_gcs_context *ctx, + bool *rebuild_hflags) +{ + TaskState *ts = get_task_state(env_cpu(env)); + uint64_t cur_mode = gcs_get_el0_mode(env); + uint64_t new_mode, gcspr; + + __get_user(new_mode, &ctx->features_enabled); + __get_user(gcspr, &ctx->gcspr); + + /* + * The kernel pushes the value through the hw register: + * write_sysreg_s(gcspr, SYS_GCSPR_EL0) in restore_gcs_context, + * then read_sysreg_s(SYS_GCSPR_EL0) in gcs_restore_signal. + * Since the bottom 3 bits are RES0, this can (CONSTRAINED UNPREDICTABLE) + * force align the value. Mirror the choice from gcspr_write(). 
+ */ + gcspr &= ~7; + + if (new_mode & ~(PR_SHADOW_STACK_ENABLE | + PR_SHADOW_STACK_WRITE | + PR_SHADOW_STACK_PUSH)) { + return false; + } + if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) { + return false; + } + if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) { + return false; + } + + if (new_mode & PR_SHADOW_STACK_ENABLE) { + uint64_t cap; + + /* Pop and clear the signal cap. */ + if (get_user_u64(cap, gcspr)) { + return false; + } + if (cap != GCS_SIGNAL_CAP(gcspr)) { + return false; + } + if (put_user_u64(0, gcspr)) { + return false; + } + gcspr += 8; + } else { + new_mode = 0; + } + + env->cp15.gcspr_el[0] = gcspr; + if (new_mode != cur_mode) { + *rebuild_hflags = true; + gcs_set_el0_mode(env, new_mode); + } + return true; +} + static int target_restore_sigframe(CPUARMState *env, struct target_rt_sigframe *sf) { @@ -410,10 +607,15 @@ static int target_restore_sigframe(CPUARMState *env, struct target_fpsimd_context *fpsimd = NULL; struct target_sve_context *sve = NULL; struct target_za_context *za = NULL; + struct target_tpidr2_context *tpidr2 = NULL; + struct target_zt_context *zt = NULL; + struct target_gcs_context *gcs = NULL; uint64_t extra_datap = 0; bool used_extra = false; + bool rebuild_hflags = false; int sve_size = 0; int za_size = 0; + int zt_size = 0; int svcr = 0; target_restore_general_frame(env, sf); @@ -444,6 +646,9 @@ static int target_restore_sigframe(CPUARMState *env, fpsimd = (struct target_fpsimd_context *)ctx; break; + case TARGET_ESR_MAGIC: + break; /* ignore */ + case TARGET_SVE_MAGIC: if (sve || size < sizeof(struct target_sve_context)) { goto err; @@ -460,6 +665,32 @@ static int target_restore_sigframe(CPUARMState *env, za_size = size; break; + case TARGET_TPIDR2_MAGIC: + if (tpidr2 || size != sizeof(struct target_tpidr2_context) || + !cpu_isar_feature(aa64_sme, env_archcpu(env))) { + goto err; + } + tpidr2 = (struct target_tpidr2_context *)ctx; + break; + + case TARGET_ZT_MAGIC: + if (zt || size != TARGET_ZT_SIG_CONTEXT_SIZE(1) || 
+ !cpu_isar_feature(aa64_sme2, env_archcpu(env))) { + goto err; + } + zt = (struct target_zt_context *)ctx; + zt_size = size; + break; + + case TARGET_GCS_MAGIC: + if (gcs + || size != sizeof(struct target_gcs_context) + || !cpu_isar_feature(aa64_gcs, env_archcpu(env))) { + goto err; + } + gcs = (struct target_gcs_context *)ctx; + break; + case TARGET_EXTRA_MAGIC: if (extra || size != sizeof(struct target_extra_context)) { goto err; @@ -490,6 +721,10 @@ static int target_restore_sigframe(CPUARMState *env, goto err; } + if (gcs && !target_restore_gcs_record(env, gcs, &rebuild_hflags)) { + goto err; + } + /* SVE data, if present, overwrites FPSIMD data. */ if (sve && !target_restore_sve_record(env, sve, sve_size, &svcr)) { goto err; @@ -497,8 +732,21 @@ static int target_restore_sigframe(CPUARMState *env, if (za && !target_restore_za_record(env, za, za_size, &svcr)) { goto err; } + if (tpidr2) { + target_restore_tpidr2_record(env, tpidr2); + } + /* + * NB that we must restore ZT after ZA so the check that there's + * no ZT record if SVCR.ZA is 0 gets the right value of SVCR. 
+ */ + if (zt && !target_restore_zt_record(env, zt, zt_size, svcr)) { + goto err; + } if (env->svcr != svcr) { env->svcr = svcr; + rebuild_hflags = true; + } + if (rebuild_hflags) { arm_rebuild_hflags(env); } unlock_user(extra, extra_datap, 0); @@ -568,8 +816,9 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, .total_size = offsetof(struct target_rt_sigframe, uc.tuc_mcontext.__reserved), }; - int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0; - int sve_size = 0, za_size = 0; + int fpsimd_ofs, fr_ofs, sve_ofs = 0, za_ofs = 0, tpidr2_ofs = 0; + int zt_ofs = 0, esr_ofs = 0, gcs_ofs = 0; + int sve_size = 0, za_size = 0, tpidr2_size = 0, zt_size = 0; struct target_rt_sigframe *frame; struct target_rt_frame_record *fr; abi_ulong frame_addr, return_addr; @@ -578,6 +827,20 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, fpsimd_ofs = alloc_sigframe_space(sizeof(struct target_fpsimd_context), &layout); + /* + * In user mode, ESR_EL1 is only set by cpu_loop while queueing the + * signal, and it's only valid for the one sync insn. + */ + if (env->cp15.esr_el[1]) { + esr_ofs = alloc_sigframe_space(sizeof(struct target_esr_context), + &layout); + } + + if (env->cp15.gcspr_el[0]) { + gcs_ofs = alloc_sigframe_space(sizeof(struct target_gcs_context), + &layout); + } + /* SVE state needs saving only if it exists. */ if (cpu_isar_feature(aa64_sve, env_archcpu(env)) || cpu_isar_feature(aa64_sme, env_archcpu(env))) { @@ -585,6 +848,8 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, sve_ofs = alloc_sigframe_space(sve_size, &layout); } if (cpu_isar_feature(aa64_sme, env_archcpu(env))) { + tpidr2_size = sizeof(struct target_tpidr2_context); + tpidr2_ofs = alloc_sigframe_space(tpidr2_size, &layout); /* ZA state needs saving only if it is enabled. 
*/ if (FIELD_EX64(env->svcr, SVCR, ZA)) { za_size = TARGET_ZA_SIG_CONTEXT_SIZE(sme_vq(env)); @@ -593,6 +858,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, } za_ofs = alloc_sigframe_space(za_size, &layout); } + if (cpu_isar_feature(aa64_sme2, env_archcpu(env)) && + FIELD_EX64(env->svcr, SVCR, ZA)) { + /* If SME ZA storage is enabled, we must also save SME2 ZT0 */ + zt_size = TARGET_ZT_SIG_CONTEXT_SIZE(1); + zt_ofs = alloc_sigframe_space(zt_size, &layout); + } if (layout.extra_ofs) { /* Reserve space for the extra end marker. The standard end marker @@ -629,8 +900,23 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, goto give_sigsegv; } + if (ka->sa_flags & TARGET_SA_RESTORER) { + return_addr = ka->sa_restorer; + } else { + return_addr = default_rt_sigreturn; + } + target_setup_general_frame(frame, env, set); target_setup_fpsimd_record((void *)frame + fpsimd_ofs, env); + if (esr_ofs) { + target_setup_esr_record((void *)frame + esr_ofs, env); + /* Leave ESR_EL1 clear while it's not relevant. */ + env->cp15.esr_el[1] = 0; + } + if (gcs_ofs && + !target_setup_gcs_record((void *)frame + gcs_ofs, env, return_addr)) { + goto give_sigsegv; + } target_setup_end_record((void *)frame + layout.std_end_ofs); if (layout.extra_ofs) { target_setup_extra_record((void *)frame + layout.extra_ofs, @@ -644,17 +930,18 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, if (za_ofs) { target_setup_za_record((void *)frame + za_ofs, env, za_size); } + if (tpidr2_ofs) { + target_setup_tpidr2_record((void *)frame + tpidr2_ofs, env); + } + if (zt_ofs) { + target_setup_zt_record((void *)frame + zt_ofs, env, zt_size); + } /* Set up the stack frame for unwinding. 
*/ fr = (void *)frame + fr_ofs; __put_user(env->xregs[29], &fr->fp); __put_user(env->xregs[30], &fr->lr); - if (ka->sa_flags & TARGET_SA_RESTORER) { - return_addr = ka->sa_restorer; - } else { - return_addr = default_rt_sigreturn; - } env->xregs[0] = usig; env->xregs[29] = frame_addr + fr_ofs; env->xregs[30] = return_addr; @@ -666,8 +953,12 @@ static void target_setup_frame(int usig, struct target_sigaction *ka, env->btype = 2; } - /* Invoke the signal handler with both SM and ZA disabled. */ + /* + * Invoke the signal handler with a clean SME state: both SM and ZA + * disabled and TPIDR2_EL0 cleared. + */ aarch64_set_svcr(env, 0, R_SVCR_SM_MASK | R_SVCR_ZA_MASK); + env->cp15.tpidr2_el0 = 0; if (info) { frame->info = *info; diff --git a/linux-user/aarch64/target_elf.h b/linux-user/aarch64/target_elf.h index a7eb962fba754..4cdeb64b0d4e1 100644 --- a/linux-user/aarch64/target_elf.h +++ b/linux-user/aarch64/target_elf.h @@ -7,8 +7,30 @@ #ifndef AARCH64_TARGET_ELF_H #define AARCH64_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "any"; -} + +#include "target_ptrace.h" + +#define ELF_MACHINE EM_AARCH64 +#define ELF_CLASS ELFCLASS64 + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_HWCAP2 1 +#define HAVE_ELF_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 +#define HAVE_ELF_GNU_PROPERTY 1 + +/* + * See linux kernel: arch/arm64/include/asm/elf.h, where + * elf_gregset_t is mapped to struct user_pt_regs via sizeof. 
+ */ +typedef struct target_elf_gregset_t { + struct target_user_pt_regs pt; +} target_elf_gregset_t; + +#if TARGET_BIG_ENDIAN +# define VDSO_HEADER "vdso-be.c.inc" +#else +# define VDSO_HEADER "vdso-le.c.inc" +#endif + #endif diff --git a/linux-user/aarch64/target_prctl.h b/linux-user/aarch64/target_prctl.h index ed75b9e4b5ae8..621be5727fc3a 100644 --- a/linux-user/aarch64/target_prctl.h +++ b/linux-user/aarch64/target_prctl.h @@ -6,8 +6,10 @@ #ifndef AARCH64_TARGET_PRCTL_H #define AARCH64_TARGET_PRCTL_H +#include "qemu/units.h" #include "target/arm/cpu-features.h" #include "mte_user_helper.h" +#include "gcs-internal.h" static abi_long do_prctl_sve_get_vl(CPUArchState *env) { @@ -206,4 +208,98 @@ static abi_long do_prctl_get_tagged_addr_ctrl(CPUArchState *env) } #define do_prctl_get_tagged_addr_ctrl do_prctl_get_tagged_addr_ctrl +static abi_long do_prctl_get_shadow_stack_status(CPUArchState *env, + abi_long arg2) +{ + ARMCPU *cpu = env_archcpu(env); + + if (!cpu_isar_feature(aa64_gcs, cpu)) { + return -TARGET_EINVAL; + } + return put_user_ual(gcs_get_el0_mode(env), arg2); +} +#define do_prctl_get_shadow_stack_status do_prctl_get_shadow_stack_status + +static abi_long gcs_alloc(abi_ulong hint, abi_ulong size) +{ + /* + * Without softmmu, we cannot protect GCS memory properly. + * Make do with normal read/write permissions. This at least allows + * emulation of correct programs which don't access the gcs stack + * with normal instructions. + */ + return target_mmap(hint, size, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | + (hint ? MAP_FIXED_NOREPLACE : 0), -1, 0); +} + +static abi_ulong gcs_new_stack(TaskState *ts) +{ + /* Use guest_stack_size as a proxy for RLIMIT_STACK. 
*/ + abi_ulong size = MIN(MAX(guest_stack_size / 2, TARGET_PAGE_SIZE), 2 * GiB); + abi_ulong base = gcs_alloc(0, size); + + if (base == -1) { + return -1; + } + + ts->gcs_base = base; + ts->gcs_size = size; + return base + size - 8; +} + +static abi_long do_prctl_set_shadow_stack_status(CPUArchState *env, + abi_long new_mode) +{ + ARMCPU *cpu = env_archcpu(env); + TaskState *ts = get_task_state(env_cpu(env)); + abi_long cur_mode; + + if (!cpu_isar_feature(aa64_gcs, cpu)) { + return -TARGET_EINVAL; + } + if (new_mode & ~(PR_SHADOW_STACK_ENABLE | + PR_SHADOW_STACK_WRITE | + PR_SHADOW_STACK_PUSH)) { + return -TARGET_EINVAL; + } + + cur_mode = gcs_get_el0_mode(env); + if ((new_mode ^ cur_mode) & ts->gcs_el0_locked) { + return -TARGET_EBUSY; + } + + if (new_mode & ~cur_mode & PR_SHADOW_STACK_ENABLE) { + abi_long gcspr; + + if (ts->gcs_base || env->cp15.gcspr_el[0]) { + return -EINVAL; + } + gcspr = gcs_new_stack(ts); + if (gcspr == -1) { + return -TARGET_ENOMEM; + } + env->cp15.gcspr_el[0] = gcspr; + } + + gcs_set_el0_mode(env, new_mode); + arm_rebuild_hflags(env); + return 0; +} +#define do_prctl_set_shadow_stack_status do_prctl_set_shadow_stack_status + +static abi_long do_prctl_lock_shadow_stack_status(CPUArchState *env, + abi_long arg2) +{ + ARMCPU *cpu = env_archcpu(env); + TaskState *ts = get_task_state(env_cpu(env)); + + if (!cpu_isar_feature(aa64_gcs, cpu)) { + return -EINVAL; + } + ts->gcs_el0_locked |= arg2; + return 0; +} +#define do_prctl_lock_shadow_stack_status do_prctl_lock_shadow_stack_status + #endif /* AARCH64_TARGET_PRCTL_H */ diff --git a/linux-user/aarch64/target_ptrace.h b/linux-user/aarch64/target_ptrace.h new file mode 100644 index 0000000000000..10681338ba45d --- /dev/null +++ b/linux-user/aarch64/target_ptrace.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef AARCH64_TARGET_PTRACE_H +#define AARCH64_TARGET_PTRACE_H + +/* See arch/arm64/include/uapi/asm/ptrace.h. 
*/ +struct target_user_pt_regs { + uint64_t regs[31]; + uint64_t sp; + uint64_t pc; + uint64_t pstate; +}; + +#endif /* AARCH64_TARGET_PTRACE_H */ diff --git a/linux-user/aarch64/target_signal.h b/linux-user/aarch64/target_signal.h index 6f66a50bfd2a6..e509ac10327b0 100644 --- a/linux-user/aarch64/target_signal.h +++ b/linux-user/aarch64/target_signal.h @@ -7,6 +7,7 @@ #define TARGET_SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */ #define TARGET_SEGV_MTESERR 9 /* Synchronous ARM MTE exception */ +#define TARGET_SEGV_CPERR 10 /* Control protection fault */ #define TARGET_ARCH_HAS_SETUP_FRAME #define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 diff --git a/linux-user/aarch64/target_syscall.h b/linux-user/aarch64/target_syscall.h index c055133725eca..bd05f6c7fe5d3 100644 --- a/linux-user/aarch64/target_syscall.h +++ b/linux-user/aarch64/target_syscall.h @@ -1,13 +1,6 @@ #ifndef AARCH64_TARGET_SYSCALL_H #define AARCH64_TARGET_SYSCALL_H -struct target_pt_regs { - uint64_t regs[31]; - uint64_t sp; - uint64_t pc; - uint64_t pstate; -}; - #if TARGET_BIG_ENDIAN #define UNAME_MACHINE "aarch64_be" #else diff --git a/linux-user/aarch64/vdso-be.so b/linux-user/aarch64/vdso-be.so index d43c3b19cdf65..4089838b30535 100755 Binary files a/linux-user/aarch64/vdso-be.so and b/linux-user/aarch64/vdso-be.so differ diff --git a/linux-user/aarch64/vdso-le.so b/linux-user/aarch64/vdso-le.so index aaedc9d85e568..240802821c329 100755 Binary files a/linux-user/aarch64/vdso-le.so and b/linux-user/aarch64/vdso-le.so differ diff --git a/linux-user/aarch64/vdso.S b/linux-user/aarch64/vdso.S index a0ac1487b0991..59dd94dc8ffd4 100644 --- a/linux-user/aarch64/vdso.S +++ b/linux-user/aarch64/vdso.S @@ -71,5 +71,7 @@ vdso_syscall __kernel_clock_getres, __NR_clock_getres __kernel_rt_sigreturn: /* No BTI C insn here -- we arrive via RET. 
*/ mov x8, #__NR_rt_sigreturn +sigreturn_region_start: svc #0 +sigreturn_region_end: endf __kernel_rt_sigreturn diff --git a/linux-user/alpha/cpu_loop.c b/linux-user/alpha/cpu_loop.c index 80ad536c5f235..f93597c400db1 100644 --- a/linux-user/alpha/cpu_loop.c +++ b/linux-user/alpha/cpu_loop.c @@ -35,7 +35,7 @@ void cpu_loop(CPUAlphaState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_RESET: @@ -94,11 +94,6 @@ void cpu_loop(CPUAlphaState *env) break; case 0x86: /* IMB */ - /* ??? We can probably elide the code using page_unprotect - that is checking for self-modifying code. Instead we - could simply call tb_flush here. Until we work out the - changes required to turn off the extra write protection, - this can be a no-op. */ break; case 0x9E: /* RDUNIQUE */ @@ -173,13 +168,10 @@ void cpu_loop(CPUAlphaState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; + CPUArchState *env = cpu_env(cs); - for(i = 0; i < 28; i++) { - env->ir[i] = ((abi_ulong *)regs)[i]; - } - env->ir[IR_SP] = regs->usp; - env->pc = regs->pc; + env->pc = info->entry; + env->ir[IR_SP] = info->start_stack; } diff --git a/linux-user/alpha/elfload.c b/linux-user/alpha/elfload.c new file mode 100644 index 0000000000000..1e44475c47586 --- /dev/null +++ b/linux-user/alpha/elfload.c @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "ev67"; +} diff --git a/linux-user/alpha/target_elf.h b/linux-user/alpha/target_elf.h index b77d638f6d4a8..864dc6e2e6fe7 100644 --- a/linux-user/alpha/target_elf.h +++ b/linux-user/alpha/target_elf.h @@ -7,8 +7,8 @@ #ifndef ALPHA_TARGET_ELF_H #define ALPHA_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ 
- return "ev67"; -} + +#define ELF_CLASS ELFCLASS64 +#define ELF_MACHINE EM_ALPHA + #endif diff --git a/linux-user/alpha/target_syscall.h b/linux-user/alpha/target_syscall.h index fda3a49f29be9..53706b749fc0c 100644 --- a/linux-user/alpha/target_syscall.h +++ b/linux-user/alpha/target_syscall.h @@ -1,46 +1,6 @@ #ifndef ALPHA_TARGET_SYSCALL_H #define ALPHA_TARGET_SYSCALL_H -/* default linux values for the selectors */ -#define __USER_DS (1) - -struct target_pt_regs { - abi_ulong r0; - abi_ulong r1; - abi_ulong r2; - abi_ulong r3; - abi_ulong r4; - abi_ulong r5; - abi_ulong r6; - abi_ulong r7; - abi_ulong r8; - abi_ulong r19; - abi_ulong r20; - abi_ulong r21; - abi_ulong r22; - abi_ulong r23; - abi_ulong r24; - abi_ulong r25; - abi_ulong r26; - abi_ulong r27; - abi_ulong r28; - abi_ulong hae; -/* JRP - These are the values provided to a0-a2 by PALcode */ - abi_ulong trap_a0; - abi_ulong trap_a1; - abi_ulong trap_a2; -/* These are saved by PAL-code: */ - abi_ulong ps; - abi_ulong pc; - abi_ulong gp; - abi_ulong r16; - abi_ulong r17; - abi_ulong r18; -/* Those is needed by qemu to temporary store the user stack pointer */ - abi_ulong usp; - abi_ulong unique; -}; - #define UNAME_MACHINE "alpha" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c index 33f63951a958a..cd89b7d6f5e88 100644 --- a/linux-user/arm/cpu_loop.c +++ b/linux-user/arm/cpu_loop.c @@ -295,7 +295,7 @@ void cpu_loop(CPUARMState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch(trapnr) { case EXCP_UDEF: @@ -480,32 +480,57 @@ void cpu_loop(CPUARMState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - CPUState *cpu = env_cpu(env); - TaskState *ts = get_task_state(cpu); - struct image_info *info = ts->info; - int i; - - cpsr_write(env, regs->uregs[16], CPSR_USER | CPSR_EXEC, - 
CPSRWriteByInstr); - for(i = 0; i < 16; i++) { - env->regs[i] = regs->uregs[i]; - } -#if TARGET_BIG_ENDIAN - /* Enable BE8. */ - if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4 - && (info->elf_flags & EF_ARM_BE8)) { - env->uncached_cpsr |= CPSR_E; - env->cp15.sctlr_el[1] |= SCTLR_E0E; - } else { - env->cp15.sctlr_el[1] |= SCTLR_B; + CPUARMState *env = cpu_env(cs); + abi_ptr stack = info->start_stack; + abi_ptr entry = info->entry; + + cpsr_write(env, ARM_CPU_MODE_USR | (entry & 1 ? CPSR_T : 0), + CPSR_USER | CPSR_EXEC, CPSRWriteByInstr); + + env->regs[15] = entry & 0xfffffffe; + env->regs[13] = stack; + + /* + * Per the SVR4 ABI, r0 contains a pointer to a function to be + * registered with atexit. A value of 0 means we have no such handler. + */ + env->regs[0] = 0; + + /* For uClinux PIC binaries. */ + /* XXX: Linux does this only on ARM with no MMU (do we care?) */ + env->regs[10] = info->start_data; + + /* Support ARM FDPIC. */ + if (info_is_fdpic(info)) { + /* + * As described in the ABI document, r7 points to the loadmap info + * prepared by the kernel. If an interpreter is needed, r8 points + * to the interpreter loadmap and r9 points to the interpreter + * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and + * r9 points to the main program PT_DYNAMIC info. + */ + env->regs[7] = info->loadmap_addr; + if (info->interpreter_loadmap_addr) { + /* Executable is dynamically loaded. */ + env->regs[8] = info->interpreter_loadmap_addr; + env->regs[9] = info->interpreter_pt_dynamic_addr; + } else { + env->regs[8] = 0; + env->regs[9] = info->pt_dynamic_addr; + } } - arm_rebuild_hflags(env); -#endif - ts->stack_base = info->start_stack; - ts->heap_base = info->brk; - /* This will be filled in on the first SYS_HEAPINFO call. */ - ts->heap_limit = 0; + if (TARGET_BIG_ENDIAN) { + /* Enable BE8. 
*/ + if (EF_ARM_EABI_VERSION(info->elf_flags) >= EF_ARM_EABI_VER4 + && (info->elf_flags & EF_ARM_BE8)) { + env->uncached_cpsr |= CPSR_E; + env->cp15.sctlr_el[1] |= SCTLR_E0E; + } else { + env->cp15.sctlr_el[1] |= SCTLR_B; + } + arm_rebuild_hflags(env); + } } diff --git a/linux-user/arm/elfload.c b/linux-user/arm/elfload.c new file mode 100644 index 0000000000000..fef61022a3df7 --- /dev/null +++ b/linux-user/arm/elfload.c @@ -0,0 +1,276 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "user-internals.h" +#include "target_elf.h" +#include "target/arm/cpu-features.h" +#include "target_elf.h" +#include "elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "any"; +} + +enum +{ + ARM_HWCAP_ARM_SWP = 1 << 0, + ARM_HWCAP_ARM_HALF = 1 << 1, + ARM_HWCAP_ARM_THUMB = 1 << 2, + ARM_HWCAP_ARM_26BIT = 1 << 3, + ARM_HWCAP_ARM_FAST_MULT = 1 << 4, + ARM_HWCAP_ARM_FPA = 1 << 5, + ARM_HWCAP_ARM_VFP = 1 << 6, + ARM_HWCAP_ARM_EDSP = 1 << 7, + ARM_HWCAP_ARM_JAVA = 1 << 8, + ARM_HWCAP_ARM_IWMMXT = 1 << 9, + ARM_HWCAP_ARM_CRUNCH = 1 << 10, + ARM_HWCAP_ARM_THUMBEE = 1 << 11, + ARM_HWCAP_ARM_NEON = 1 << 12, + ARM_HWCAP_ARM_VFPv3 = 1 << 13, + ARM_HWCAP_ARM_VFPv3D16 = 1 << 14, + ARM_HWCAP_ARM_TLS = 1 << 15, + ARM_HWCAP_ARM_VFPv4 = 1 << 16, + ARM_HWCAP_ARM_IDIVA = 1 << 17, + ARM_HWCAP_ARM_IDIVT = 1 << 18, + ARM_HWCAP_ARM_VFPD32 = 1 << 19, + ARM_HWCAP_ARM_LPAE = 1 << 20, + ARM_HWCAP_ARM_EVTSTRM = 1 << 21, + ARM_HWCAP_ARM_FPHP = 1 << 22, + ARM_HWCAP_ARM_ASIMDHP = 1 << 23, + ARM_HWCAP_ARM_ASIMDDP = 1 << 24, + ARM_HWCAP_ARM_ASIMDFHM = 1 << 25, + ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26, + ARM_HWCAP_ARM_I8MM = 1 << 27, +}; + +enum { + ARM_HWCAP2_ARM_AES = 1 << 0, + ARM_HWCAP2_ARM_PMULL = 1 << 1, + ARM_HWCAP2_ARM_SHA1 = 1 << 2, + ARM_HWCAP2_ARM_SHA2 = 1 << 3, + ARM_HWCAP2_ARM_CRC32 = 1 << 4, + ARM_HWCAP2_ARM_SB = 1 << 5, + ARM_HWCAP2_ARM_SSBS = 1 << 6, +}; + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + ARMCPU 
*cpu = ARM_CPU(cs); + abi_ulong hwcaps = 0; + + hwcaps |= ARM_HWCAP_ARM_SWP; + hwcaps |= ARM_HWCAP_ARM_HALF; + hwcaps |= ARM_HWCAP_ARM_THUMB; + hwcaps |= ARM_HWCAP_ARM_FAST_MULT; + + /* probe for the extra features */ +#define GET_FEATURE(feat, hwcap) \ + do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) + +#define GET_FEATURE_ID(feat, hwcap) \ + do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) + + /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ + GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); + GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); + GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); + GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); + GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE); + GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA); + GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT); + GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP); + + if (cpu_isar_feature(aa32_fpsp_v3, cpu) || + cpu_isar_feature(aa32_fpdp_v3, cpu)) { + hwcaps |= ARM_HWCAP_ARM_VFPv3; + if (cpu_isar_feature(aa32_simd_r32, cpu)) { + hwcaps |= ARM_HWCAP_ARM_VFPD32; + } else { + hwcaps |= ARM_HWCAP_ARM_VFPv3D16; + } + } + GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4); + /* + * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same + * isar_feature function for both. The kernel reports them as two hwcaps. 
+ */ + GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP); + GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP); + GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP); + GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM); + GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16); + GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM); + + return hwcaps; +} + +abi_ulong get_elf_hwcap2(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + abi_ulong hwcaps = 0; + + GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); + GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); + GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); + GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); + GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); + GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB); + GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS); + return hwcaps; +} + +const char *elf_hwcap_str(uint32_t bit) +{ + static const char *hwcap_str[] = { + [__builtin_ctz(ARM_HWCAP_ARM_SWP )] = "swp", + [__builtin_ctz(ARM_HWCAP_ARM_HALF )] = "half", + [__builtin_ctz(ARM_HWCAP_ARM_THUMB )] = "thumb", + [__builtin_ctz(ARM_HWCAP_ARM_26BIT )] = "26bit", + [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult", + [__builtin_ctz(ARM_HWCAP_ARM_FPA )] = "fpa", + [__builtin_ctz(ARM_HWCAP_ARM_VFP )] = "vfp", + [__builtin_ctz(ARM_HWCAP_ARM_EDSP )] = "edsp", + [__builtin_ctz(ARM_HWCAP_ARM_JAVA )] = "java", + [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT )] = "iwmmxt", + [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH )] = "crunch", + [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE )] = "thumbee", + [__builtin_ctz(ARM_HWCAP_ARM_NEON )] = "neon", + [__builtin_ctz(ARM_HWCAP_ARM_VFPv3 )] = "vfpv3", + [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16", + [__builtin_ctz(ARM_HWCAP_ARM_TLS )] = "tls", + [__builtin_ctz(ARM_HWCAP_ARM_VFPv4 )] = "vfpv4", + [__builtin_ctz(ARM_HWCAP_ARM_IDIVA )] = "idiva", + [__builtin_ctz(ARM_HWCAP_ARM_IDIVT )] = "idivt", + [__builtin_ctz(ARM_HWCAP_ARM_VFPD32 )] = "vfpd32", + [__builtin_ctz(ARM_HWCAP_ARM_LPAE )] = "lpae", + 
[__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM )] = "evtstrm", + [__builtin_ctz(ARM_HWCAP_ARM_FPHP )] = "fphp", + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP )] = "asimdhp", + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP )] = "asimddp", + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm", + [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16", + [__builtin_ctz(ARM_HWCAP_ARM_I8MM )] = "i8mm", + }; + + return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; +} + +const char *elf_hwcap2_str(uint32_t bit) +{ + static const char *hwcap_str[] = { + [__builtin_ctz(ARM_HWCAP2_ARM_AES )] = "aes", + [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull", + [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1", + [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2", + [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32", + [__builtin_ctz(ARM_HWCAP2_ARM_SB )] = "sb", + [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs", + }; + + return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; +} + +const char *get_elf_platform(CPUState *cs) +{ + CPUARMState *env = cpu_env(cs); + +#if TARGET_BIG_ENDIAN +# define END "b" +#else +# define END "l" +#endif + + if (arm_feature(env, ARM_FEATURE_V8)) { + return "v8" END; + } else if (arm_feature(env, ARM_FEATURE_V7)) { + if (arm_feature(env, ARM_FEATURE_M)) { + return "v7m" END; + } else { + return "v7" END; + } + } else if (arm_feature(env, ARM_FEATURE_V6)) { + return "v6" END; + } else if (arm_feature(env, ARM_FEATURE_V5)) { + return "v5" END; + } else { + return "v4" END; + } + +#undef END +} + +bool init_guest_commpage(void) +{ + ARMCPU *cpu = ARM_CPU(thread_cpu); + int host_page_size = qemu_real_host_page_size(); + abi_ptr commpage; + void *want; + void *addr; + + /* + * M-profile allocates maximum of 2GB address space, so can never + * allocate the commpage. Skip it. 
+ */ + if (arm_feature(&cpu->env, ARM_FEATURE_M)) { + return true; + } + + commpage = HI_COMMPAGE & -host_page_size; + want = g2h_untagged(commpage); + addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE | + (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE), + -1, 0); + + if (addr == MAP_FAILED) { + perror("Allocating guest commpage"); + exit(EXIT_FAILURE); + } + if (addr != want) { + return false; + } + + /* Set kernel helper versions; rest of page is 0. */ + __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu)); + + if (mprotect(addr, host_page_size, PROT_READ)) { + perror("Protecting guest commpage"); + exit(EXIT_FAILURE); + } + + page_set_flags(commpage, commpage | (host_page_size - 1), + PAGE_READ | PAGE_EXEC | PAGE_VALID, PAGE_VALID); + return true; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUARMState *env) +{ + for (int i = 0; i < 16; ++i) { + r->pt.regs[i] = tswapal(env->regs[i]); + } + r->pt.cpsr = tswapal(cpsr_read((CPUARMState *)env)); + r->pt.orig_r0 = tswapal(env->regs[0]); /* FIXME */ +} + +#if TARGET_BIG_ENDIAN +# include "vdso-be8.c.inc" +# include "vdso-be32.c.inc" +#else +# include "vdso-le.c.inc" +#endif + +const VdsoImageInfo *get_vdso_image_info(uint32_t elf_flags) +{ +#if TARGET_BIG_ENDIAN + return (EF_ARM_EABI_VERSION(elf_flags) >= EF_ARM_EABI_VER4 + && (elf_flags & EF_ARM_BE8) + ? 
&vdso_be8_image_info + : &vdso_be32_image_info); +#else + return &vdso_image_info; +#endif +} diff --git a/linux-user/arm/signal.c b/linux-user/arm/signal.c index 8db1c4b233877..3b387cd6d78fd 100644 --- a/linux-user/arm/signal.c +++ b/linux-user/arm/signal.c @@ -76,21 +76,7 @@ struct target_vfp_sigframe { struct target_user_vfp_exc ufp_exc; } __attribute__((__aligned__(8))); -struct target_iwmmxt_sigframe { - abi_ulong magic; - abi_ulong size; - uint64_t regs[16]; - /* Note that not all the coprocessor control registers are stored here */ - uint32_t wcssf; - uint32_t wcasf; - uint32_t wcgr0; - uint32_t wcgr1; - uint32_t wcgr2; - uint32_t wcgr3; -} __attribute__((__aligned__(8))); - #define TARGET_VFP_MAGIC 0x56465001 -#define TARGET_IWMMXT_MAGIC 0x12ef842a struct sigframe { @@ -267,25 +253,6 @@ static abi_ulong *setup_sigframe_vfp(abi_ulong *regspace, CPUARMState *env) return (abi_ulong*)(vfpframe+1); } -static abi_ulong *setup_sigframe_iwmmxt(abi_ulong *regspace, CPUARMState *env) -{ - int i; - struct target_iwmmxt_sigframe *iwmmxtframe; - iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; - __put_user(TARGET_IWMMXT_MAGIC, &iwmmxtframe->magic); - __put_user(sizeof(*iwmmxtframe), &iwmmxtframe->size); - for (i = 0; i < 16; i++) { - __put_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); - } - __put_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); - __put_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); - __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); - __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); - __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); - __put_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); - return (abi_ulong*)(iwmmxtframe+1); -} - static void setup_sigframe(struct target_ucontext *uc, target_sigset_t *set, CPUARMState *env) { @@ -306,9 +273,6 @@ static void setup_sigframe(struct target_ucontext *uc, if (cpu_isar_feature(aa32_vfp_simd, 
env_archcpu(env))) { regspace = setup_sigframe_vfp(regspace, env); } - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - regspace = setup_sigframe_iwmmxt(regspace, env); - } /* Write terminating magic word */ __put_user(0, regspace); @@ -435,31 +399,6 @@ static abi_ulong *restore_sigframe_vfp(CPUARMState *env, abi_ulong *regspace) return (abi_ulong*)(vfpframe + 1); } -static abi_ulong *restore_sigframe_iwmmxt(CPUARMState *env, - abi_ulong *regspace) -{ - int i; - abi_ulong magic, sz; - struct target_iwmmxt_sigframe *iwmmxtframe; - iwmmxtframe = (struct target_iwmmxt_sigframe *)regspace; - - __get_user(magic, &iwmmxtframe->magic); - __get_user(sz, &iwmmxtframe->size); - if (magic != TARGET_IWMMXT_MAGIC || sz != sizeof(*iwmmxtframe)) { - return 0; - } - for (i = 0; i < 16; i++) { - __get_user(env->iwmmxt.regs[i], &iwmmxtframe->regs[i]); - } - __get_user(env->vfp.xregs[ARM_IWMMXT_wCSSF], &iwmmxtframe->wcssf); - __get_user(env->vfp.xregs[ARM_IWMMXT_wCASF], &iwmmxtframe->wcssf); - __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR0], &iwmmxtframe->wcgr0); - __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR1], &iwmmxtframe->wcgr1); - __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR2], &iwmmxtframe->wcgr2); - __get_user(env->vfp.xregs[ARM_IWMMXT_wCGR3], &iwmmxtframe->wcgr3); - return (abi_ulong*)(iwmmxtframe + 1); -} - static int do_sigframe_return(CPUARMState *env, target_ulong context_addr, struct target_ucontext *uc) @@ -482,12 +421,6 @@ static int do_sigframe_return(CPUARMState *env, return 1; } } - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - regspace = restore_sigframe_iwmmxt(env, regspace); - if (!regspace) { - return 1; - } - } target_restore_altstack(&uc->tuc_stack, env); diff --git a/linux-user/arm/target_elf.h b/linux-user/arm/target_elf.h index 58ff6a0986fbe..12cdc8e5a71a0 100644 --- a/linux-user/arm/target_elf.h +++ b/linux-user/arm/target_elf.h @@ -7,8 +7,27 @@ #ifndef ARM_TARGET_ELF_H #define ARM_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return 
"any"; -} + +#include "target_ptrace.h" + +#define ELF_MACHINE EM_ARM +#define ELF_CLASS ELFCLASS32 +#define EXSTACK_DEFAULT true + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_HWCAP2 1 +#define HAVE_ELF_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 +#define HAVE_VDSO_IMAGE_INFO 1 + +#define HI_COMMPAGE ((intptr_t)0xffff0f00u) + +/* + * See linux kernel: arch/arm/include/asm/elf.h, where + * elf_gregset_t is mapped to struct pt_regs via sizeof. + */ +typedef struct target_elf_gregset_t { + struct target_pt_regs pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/arm/target_proc.h b/linux-user/arm/target_proc.h index ac75af9ca6498..a28d7231cdcfa 100644 --- a/linux-user/arm/target_proc.h +++ b/linux-user/arm/target_proc.h @@ -6,12 +6,14 @@ #ifndef ARM_TARGET_PROC_H #define ARM_TARGET_PROC_H +#include "target/arm/cpu-features.h" /* for MIDR_EL1 field definitions */ + static int open_cpuinfo(CPUArchState *cpu_env, int fd) { ARMCPU *cpu = env_archcpu(cpu_env); int arch, midr_rev, midr_part, midr_var, midr_impl; - target_ulong elf_hwcap = get_elf_hwcap(); - target_ulong elf_hwcap2 = get_elf_hwcap2(); + target_ulong elf_hwcap = get_elf_hwcap(env_cpu(cpu_env)); + target_ulong elf_hwcap2 = get_elf_hwcap2(env_cpu(cpu_env)); const char *elf_name; int num_cpus, len_part, len_var; diff --git a/linux-user/arm/target_ptrace.h b/linux-user/arm/target_ptrace.h new file mode 100644 index 0000000000000..1610b8e03c193 --- /dev/null +++ b/linux-user/arm/target_ptrace.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef ARM_TARGET_PTRACE_H +#define ARM_TARGET_PTRACE_H + +/* + * See arch/arm/include/uapi/asm/ptrace.h. + * Instead of an array and ARM_xx defines, use proper fields. 
+ */ +struct target_pt_regs { + abi_ulong regs[16]; + abi_ulong cpsr; + abi_ulong orig_r0; +}; + +#endif /* ARM_TARGET_PTRACE_H */ diff --git a/linux-user/arm/target_syscall.h b/linux-user/arm/target_syscall.h index 412ad434cfc20..8c4ddba717737 100644 --- a/linux-user/arm/target_syscall.h +++ b/linux-user/arm/target_syscall.h @@ -1,14 +1,6 @@ #ifndef ARM_TARGET_SYSCALL_H #define ARM_TARGET_SYSCALL_H -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -/* uregs[0..15] are r0 to r15; uregs[16] is CPSR; uregs[17] is ORIG_r0 */ -struct target_pt_regs { - abi_long uregs[18]; -}; - #define ARM_SYSCALL_BASE 0x900000 #define ARM_THUMB_SYSCALL 0 diff --git a/linux-user/arm/vdso-be32.so b/linux-user/arm/vdso-be32.so index b896d3d545ebf..6d71cd9c36d18 100755 Binary files a/linux-user/arm/vdso-be32.so and b/linux-user/arm/vdso-be32.so differ diff --git a/linux-user/arm/vdso-be8.so b/linux-user/arm/vdso-be8.so index 784b7bdb2a930..6446a96fcf637 100755 Binary files a/linux-user/arm/vdso-be8.so and b/linux-user/arm/vdso-be8.so differ diff --git a/linux-user/arm/vdso-le.so b/linux-user/arm/vdso-le.so index 38d3d51047372..d34e577b69e6c 100755 Binary files a/linux-user/arm/vdso-le.so and b/linux-user/arm/vdso-le.so differ diff --git a/linux-user/arm/vdso.S b/linux-user/arm/vdso.S index b3bb6491dc2eb..d84d964730803 100644 --- a/linux-user/arm/vdso.S +++ b/linux-user/arm/vdso.S @@ -140,6 +140,7 @@ SYSCALL __vdso_gettimeofday, __NR_gettimeofday .balign 16 sigreturn_codes: +sigreturn_region_start: /* [EO]ABI sigreturn */ slot 0 raw_syscall __NR_sigreturn @@ -172,3 +173,4 @@ sigreturn_codes: .balign 16 endf sigreturn_codes +sigreturn_region_end: diff --git a/linux-user/elfload.c b/linux-user/elfload.c index ea214105ff86b..0002d5be2f5f9 100644 --- a/linux-user/elfload.c +++ b/linux-user/elfload.c @@ -28,6 +28,7 @@ #include "qemu/lockable.h" #include "qapi/error.h" #include "qemu/error-report.h" +#include "target_elf.h" #include 
"target_signal.h" #include "tcg/debuginfo.h" @@ -35,2091 +36,98 @@ #include "target/arm/cpu-features.h" #endif -#ifdef _ARCH_PPC64 -#undef ARCH_DLINFO -#undef ELF_PLATFORM -#undef ELF_HWCAP -#undef ELF_HWCAP2 -#undef ELF_CLASS -#undef ELF_DATA -#undef ELF_ARCH -#endif - -#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE -#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 -#endif - -typedef struct { - const uint8_t *image; - const uint32_t *relocs; - unsigned image_size; - unsigned reloc_count; - unsigned sigreturn_ofs; - unsigned rt_sigreturn_ofs; -} VdsoImageInfo; - -#define ELF_OSABI ELFOSABI_SYSV - -/* from personality.h */ - -/* - * Flags for bug emulation. - * - * These occupy the top three bytes. - */ -enum { - ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ - FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to - descriptors (signal handling) */ - MMAP_PAGE_ZERO = 0x0100000, - ADDR_COMPAT_LAYOUT = 0x0200000, - READ_IMPLIES_EXEC = 0x0400000, - ADDR_LIMIT_32BIT = 0x0800000, - SHORT_INODE = 0x1000000, - WHOLE_SECONDS = 0x2000000, - STICKY_TIMEOUTS = 0x4000000, - ADDR_LIMIT_3GB = 0x8000000, -}; - -/* - * Personality types. - * - * These go in the low byte. Avoid using the top bit, it will - * conflict with error returns. 
- */ -enum { - PER_LINUX = 0x0000, - PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, - PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, - PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, - PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, - PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE, - PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, - PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, - PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, - PER_BSD = 0x0006, - PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, - PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, - PER_LINUX32 = 0x0008, - PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, - PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ - PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ - PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* IRIX6 64-bit */ - PER_RISCOS = 0x000c, - PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, - PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, - PER_OSF4 = 0x000f, /* OSF/1 v4 */ - PER_HPUX = 0x0010, - PER_MASK = 0x00ff, -}; - -/* - * Return the base personality without flags. 
- */ -#define personality(pers) (pers & PER_MASK) - -int info_is_fdpic(struct image_info *info) -{ - return info->personality == PER_LINUX_FDPIC; -} - -/* this flag is uneffective under linux too, should be deleted */ -#ifndef MAP_DENYWRITE -#define MAP_DENYWRITE 0 -#endif - -/* should probably go in elf.h */ -#ifndef ELIBBAD -#define ELIBBAD 80 -#endif - -#if TARGET_BIG_ENDIAN -#define ELF_DATA ELFDATA2MSB -#else -#define ELF_DATA ELFDATA2LSB -#endif - -#ifdef TARGET_ABI_MIPSN32 -typedef abi_ullong target_elf_greg_t; -#define tswapreg(ptr) tswap64(ptr) -#else -typedef abi_ulong target_elf_greg_t; -#define tswapreg(ptr) tswapal(ptr) -#endif - -#ifdef USE_UID16 -typedef abi_ushort target_uid_t; -typedef abi_ushort target_gid_t; -#else -typedef abi_uint target_uid_t; -typedef abi_uint target_gid_t; -#endif -typedef abi_int target_pid_t; - -#ifdef TARGET_I386 - -#define ELF_HWCAP get_elf_hwcap() - -static uint32_t get_elf_hwcap(void) -{ - X86CPU *cpu = X86_CPU(thread_cpu); - - return cpu->env.features[FEAT_1_EDX]; -} - -#ifdef TARGET_X86_64 -#define ELF_CLASS ELFCLASS64 -#define ELF_ARCH EM_X86_64 - -#define ELF_PLATFORM "x86_64" - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->rax = 0; - regs->rsp = infop->start_stack; - regs->rip = infop->entry; -} - -#define ELF_NREG 27 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -/* - * Note that ELF_NREG should be 29 as there should be place for - * TRAPNO and ERR "registers" as well but linux doesn't dump - * those. 
- * - * See linux kernel: arch/x86/include/asm/elf.h - */ -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) -{ - (*regs)[0] = tswapreg(env->regs[15]); - (*regs)[1] = tswapreg(env->regs[14]); - (*regs)[2] = tswapreg(env->regs[13]); - (*regs)[3] = tswapreg(env->regs[12]); - (*regs)[4] = tswapreg(env->regs[R_EBP]); - (*regs)[5] = tswapreg(env->regs[R_EBX]); - (*regs)[6] = tswapreg(env->regs[11]); - (*regs)[7] = tswapreg(env->regs[10]); - (*regs)[8] = tswapreg(env->regs[9]); - (*regs)[9] = tswapreg(env->regs[8]); - (*regs)[10] = tswapreg(env->regs[R_EAX]); - (*regs)[11] = tswapreg(env->regs[R_ECX]); - (*regs)[12] = tswapreg(env->regs[R_EDX]); - (*regs)[13] = tswapreg(env->regs[R_ESI]); - (*regs)[14] = tswapreg(env->regs[R_EDI]); - (*regs)[15] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax); - (*regs)[16] = tswapreg(env->eip); - (*regs)[17] = tswapreg(env->segs[R_CS].selector & 0xffff); - (*regs)[18] = tswapreg(env->eflags); - (*regs)[19] = tswapreg(env->regs[R_ESP]); - (*regs)[20] = tswapreg(env->segs[R_SS].selector & 0xffff); - (*regs)[21] = tswapreg(env->segs[R_FS].selector & 0xffff); - (*regs)[22] = tswapreg(env->segs[R_GS].selector & 0xffff); - (*regs)[23] = tswapreg(env->segs[R_DS].selector & 0xffff); - (*regs)[24] = tswapreg(env->segs[R_ES].selector & 0xffff); - (*regs)[25] = tswapreg(env->segs[R_FS].selector & 0xffff); - (*regs)[26] = tswapreg(env->segs[R_GS].selector & 0xffff); -} - -#if ULONG_MAX > UINT32_MAX -#define INIT_GUEST_COMMPAGE -static bool init_guest_commpage(void) -{ - /* - * The vsyscall page is at a high negative address aka kernel space, - * which means that we cannot actually allocate it with target_mmap. - * We still should be able to use page_set_flags, unless the user - * has specified -R reserved_va, which would trigger an assert(). 
- */ - if (reserved_va != 0 && - TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) { - error_report("Cannot allocate vsyscall page"); - exit(EXIT_FAILURE); - } - page_set_flags(TARGET_VSYSCALL_PAGE, - TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK, - PAGE_EXEC | PAGE_VALID); - return true; -} -#endif -#else - -/* - * This is used to ensure we don't load something for the wrong architecture. - */ -#define elf_check_arch(x) ( ((x) == EM_386) || ((x) == EM_486) ) - -/* - * These are used to set parameters in the core dumps. - */ -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_386 - -#define ELF_PLATFORM get_elf_platform() -#define EXSTACK_DEFAULT true - -static const char *get_elf_platform(void) -{ - static char elf_platform[] = "i386"; - int family = object_property_get_int(OBJECT(thread_cpu), "family", NULL); - if (family > 6) { - family = 6; - } - if (family >= 3) { - elf_platform[1] = '0' + family; - } - return elf_platform; -} - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->esp = infop->start_stack; - regs->eip = infop->entry; - - /* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program - starts %edx contains a pointer to a function which might be - registered using `atexit'. This provides a mean for the - dynamic linker to call DT_FINI functions for shared libraries - that have been loaded before the code runs. - - A value of 0 tells we have no such handler. */ - regs->edx = 0; -} - -#define ELF_NREG 17 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -/* - * Note that ELF_NREG should be 19 as there should be place for - * TRAPNO and ERR "registers" as well but linux doesn't dump - * those. 
- * - * See linux kernel: arch/x86/include/asm/elf.h - */ -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUX86State *env) -{ - (*regs)[0] = tswapreg(env->regs[R_EBX]); - (*regs)[1] = tswapreg(env->regs[R_ECX]); - (*regs)[2] = tswapreg(env->regs[R_EDX]); - (*regs)[3] = tswapreg(env->regs[R_ESI]); - (*regs)[4] = tswapreg(env->regs[R_EDI]); - (*regs)[5] = tswapreg(env->regs[R_EBP]); - (*regs)[6] = tswapreg(env->regs[R_EAX]); - (*regs)[7] = tswapreg(env->segs[R_DS].selector & 0xffff); - (*regs)[8] = tswapreg(env->segs[R_ES].selector & 0xffff); - (*regs)[9] = tswapreg(env->segs[R_FS].selector & 0xffff); - (*regs)[10] = tswapreg(env->segs[R_GS].selector & 0xffff); - (*regs)[11] = tswapreg(get_task_state(env_cpu_const(env))->orig_ax); - (*regs)[12] = tswapreg(env->eip); - (*regs)[13] = tswapreg(env->segs[R_CS].selector & 0xffff); - (*regs)[14] = tswapreg(env->eflags); - (*regs)[15] = tswapreg(env->regs[R_ESP]); - (*regs)[16] = tswapreg(env->segs[R_SS].selector & 0xffff); -} - -/* - * i386 is the only target which supplies AT_SYSINFO for the vdso. - * All others only supply AT_SYSINFO_EHDR. 
- */ -#define DLINFO_ARCH_ITEMS (vdso_info != NULL) -#define ARCH_DLINFO \ - do { \ - if (vdso_info) { \ - NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \ - } \ - } while (0) - -#endif /* TARGET_X86_64 */ - -#define VDSO_HEADER "vdso.c.inc" - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#endif /* TARGET_I386 */ - -#ifdef TARGET_ARM - -#ifndef TARGET_AARCH64 -/* 32 bit ARM definitions */ - -#define ELF_ARCH EM_ARM -#define ELF_CLASS ELFCLASS32 -#define EXSTACK_DEFAULT true - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - abi_long stack = infop->start_stack; - memset(regs, 0, sizeof(*regs)); - - regs->uregs[16] = ARM_CPU_MODE_USR; - if (infop->entry & 1) { - regs->uregs[16] |= CPSR_T; - } - regs->uregs[15] = infop->entry & 0xfffffffe; - regs->uregs[13] = infop->start_stack; - /* FIXME - what to for failure of get_user()? */ - get_user_ual(regs->uregs[2], stack + 8); /* envp */ - get_user_ual(regs->uregs[1], stack + 4); /* envp */ - /* XXX: it seems that r0 is zeroed after ! */ - regs->uregs[0] = 0; - /* For uClinux PIC binaries. */ - /* XXX: Linux does this only on ARM with no MMU (do we care ?) */ - regs->uregs[10] = infop->start_data; - - /* Support ARM FDPIC. */ - if (info_is_fdpic(infop)) { - /* As described in the ABI document, r7 points to the loadmap info - * prepared by the kernel. If an interpreter is needed, r8 points - * to the interpreter loadmap and r9 points to the interpreter - * PT_DYNAMIC info. If no interpreter is needed, r8 is zero, and - * r9 points to the main program PT_DYNAMIC info. - */ - regs->uregs[7] = infop->loadmap_addr; - if (infop->interpreter_loadmap_addr) { - /* Executable is dynamically loaded. 
*/ - regs->uregs[8] = infop->interpreter_loadmap_addr; - regs->uregs[9] = infop->interpreter_pt_dynamic_addr; - } else { - regs->uregs[8] = 0; - regs->uregs[9] = infop->pt_dynamic_addr; - } - } -} - -#define ELF_NREG 18 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUARMState *env) -{ - (*regs)[0] = tswapreg(env->regs[0]); - (*regs)[1] = tswapreg(env->regs[1]); - (*regs)[2] = tswapreg(env->regs[2]); - (*regs)[3] = tswapreg(env->regs[3]); - (*regs)[4] = tswapreg(env->regs[4]); - (*regs)[5] = tswapreg(env->regs[5]); - (*regs)[6] = tswapreg(env->regs[6]); - (*regs)[7] = tswapreg(env->regs[7]); - (*regs)[8] = tswapreg(env->regs[8]); - (*regs)[9] = tswapreg(env->regs[9]); - (*regs)[10] = tswapreg(env->regs[10]); - (*regs)[11] = tswapreg(env->regs[11]); - (*regs)[12] = tswapreg(env->regs[12]); - (*regs)[13] = tswapreg(env->regs[13]); - (*regs)[14] = tswapreg(env->regs[14]); - (*regs)[15] = tswapreg(env->regs[15]); - - (*regs)[16] = tswapreg(cpsr_read((CPUARMState *)env)); - (*regs)[17] = tswapreg(env->regs[0]); /* XXX */ -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -enum -{ - ARM_HWCAP_ARM_SWP = 1 << 0, - ARM_HWCAP_ARM_HALF = 1 << 1, - ARM_HWCAP_ARM_THUMB = 1 << 2, - ARM_HWCAP_ARM_26BIT = 1 << 3, - ARM_HWCAP_ARM_FAST_MULT = 1 << 4, - ARM_HWCAP_ARM_FPA = 1 << 5, - ARM_HWCAP_ARM_VFP = 1 << 6, - ARM_HWCAP_ARM_EDSP = 1 << 7, - ARM_HWCAP_ARM_JAVA = 1 << 8, - ARM_HWCAP_ARM_IWMMXT = 1 << 9, - ARM_HWCAP_ARM_CRUNCH = 1 << 10, - ARM_HWCAP_ARM_THUMBEE = 1 << 11, - ARM_HWCAP_ARM_NEON = 1 << 12, - ARM_HWCAP_ARM_VFPv3 = 1 << 13, - ARM_HWCAP_ARM_VFPv3D16 = 1 << 14, - ARM_HWCAP_ARM_TLS = 1 << 15, - ARM_HWCAP_ARM_VFPv4 = 1 << 16, - ARM_HWCAP_ARM_IDIVA = 1 << 17, - ARM_HWCAP_ARM_IDIVT = 1 << 18, - ARM_HWCAP_ARM_VFPD32 = 1 << 19, - ARM_HWCAP_ARM_LPAE = 1 << 20, - ARM_HWCAP_ARM_EVTSTRM = 1 << 21, - ARM_HWCAP_ARM_FPHP = 1 << 22, - ARM_HWCAP_ARM_ASIMDHP = 1 << 23, - ARM_HWCAP_ARM_ASIMDDP = 1 
<< 24, - ARM_HWCAP_ARM_ASIMDFHM = 1 << 25, - ARM_HWCAP_ARM_ASIMDBF16 = 1 << 26, - ARM_HWCAP_ARM_I8MM = 1 << 27, -}; - -enum { - ARM_HWCAP2_ARM_AES = 1 << 0, - ARM_HWCAP2_ARM_PMULL = 1 << 1, - ARM_HWCAP2_ARM_SHA1 = 1 << 2, - ARM_HWCAP2_ARM_SHA2 = 1 << 3, - ARM_HWCAP2_ARM_CRC32 = 1 << 4, - ARM_HWCAP2_ARM_SB = 1 << 5, - ARM_HWCAP2_ARM_SSBS = 1 << 6, -}; - -/* The commpage only exists for 32 bit kernels */ - -#define HI_COMMPAGE (intptr_t)0xffff0f00u - -static bool init_guest_commpage(void) -{ - ARMCPU *cpu = ARM_CPU(thread_cpu); - int host_page_size = qemu_real_host_page_size(); - abi_ptr commpage; - void *want; - void *addr; - - /* - * M-profile allocates maximum of 2GB address space, so can never - * allocate the commpage. Skip it. - */ - if (arm_feature(&cpu->env, ARM_FEATURE_M)) { - return true; - } - - commpage = HI_COMMPAGE & -host_page_size; - want = g2h_untagged(commpage); - addr = mmap(want, host_page_size, PROT_READ | PROT_WRITE, - MAP_ANONYMOUS | MAP_PRIVATE | - (commpage < reserved_va ? MAP_FIXED : MAP_FIXED_NOREPLACE), - -1, 0); - - if (addr == MAP_FAILED) { - perror("Allocating guest commpage"); - exit(EXIT_FAILURE); - } - if (addr != want) { - return false; - } - - /* Set kernel helper versions; rest of page is 0. 
*/ - __put_user(5, (uint32_t *)g2h_untagged(0xffff0ffcu)); - - if (mprotect(addr, host_page_size, PROT_READ)) { - perror("Protecting guest commpage"); - exit(EXIT_FAILURE); - } - - page_set_flags(commpage, commpage | (host_page_size - 1), - PAGE_READ | PAGE_EXEC | PAGE_VALID); - return true; -} - -#define ELF_HWCAP get_elf_hwcap() -#define ELF_HWCAP2 get_elf_hwcap2() - -uint32_t get_elf_hwcap(void) -{ - ARMCPU *cpu = ARM_CPU(thread_cpu); - uint32_t hwcaps = 0; - - hwcaps |= ARM_HWCAP_ARM_SWP; - hwcaps |= ARM_HWCAP_ARM_HALF; - hwcaps |= ARM_HWCAP_ARM_THUMB; - hwcaps |= ARM_HWCAP_ARM_FAST_MULT; - - /* probe for the extra features */ -#define GET_FEATURE(feat, hwcap) \ - do { if (arm_feature(&cpu->env, feat)) { hwcaps |= hwcap; } } while (0) - -#define GET_FEATURE_ID(feat, hwcap) \ - do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) - - /* EDSP is in v5TE and above, but all our v5 CPUs are v5TE */ - GET_FEATURE(ARM_FEATURE_V5, ARM_HWCAP_ARM_EDSP); - GET_FEATURE(ARM_FEATURE_IWMMXT, ARM_HWCAP_ARM_IWMMXT); - GET_FEATURE(ARM_FEATURE_THUMB2EE, ARM_HWCAP_ARM_THUMBEE); - GET_FEATURE(ARM_FEATURE_NEON, ARM_HWCAP_ARM_NEON); - GET_FEATURE(ARM_FEATURE_V6K, ARM_HWCAP_ARM_TLS); - GET_FEATURE(ARM_FEATURE_LPAE, ARM_HWCAP_ARM_LPAE); - GET_FEATURE_ID(aa32_arm_div, ARM_HWCAP_ARM_IDIVA); - GET_FEATURE_ID(aa32_thumb_div, ARM_HWCAP_ARM_IDIVT); - GET_FEATURE_ID(aa32_vfp, ARM_HWCAP_ARM_VFP); - - if (cpu_isar_feature(aa32_fpsp_v3, cpu) || - cpu_isar_feature(aa32_fpdp_v3, cpu)) { - hwcaps |= ARM_HWCAP_ARM_VFPv3; - if (cpu_isar_feature(aa32_simd_r32, cpu)) { - hwcaps |= ARM_HWCAP_ARM_VFPD32; - } else { - hwcaps |= ARM_HWCAP_ARM_VFPv3D16; - } - } - GET_FEATURE_ID(aa32_simdfmac, ARM_HWCAP_ARM_VFPv4); - /* - * MVFR1.FPHP and .SIMDHP must be in sync, and QEMU uses the same - * isar_feature function for both. The kernel reports them as two hwcaps. 
- */ - GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_FPHP); - GET_FEATURE_ID(aa32_fp16_arith, ARM_HWCAP_ARM_ASIMDHP); - GET_FEATURE_ID(aa32_dp, ARM_HWCAP_ARM_ASIMDDP); - GET_FEATURE_ID(aa32_fhm, ARM_HWCAP_ARM_ASIMDFHM); - GET_FEATURE_ID(aa32_bf16, ARM_HWCAP_ARM_ASIMDBF16); - GET_FEATURE_ID(aa32_i8mm, ARM_HWCAP_ARM_I8MM); - - return hwcaps; -} - -uint64_t get_elf_hwcap2(void) -{ - ARMCPU *cpu = ARM_CPU(thread_cpu); - uint64_t hwcaps = 0; - - GET_FEATURE_ID(aa32_aes, ARM_HWCAP2_ARM_AES); - GET_FEATURE_ID(aa32_pmull, ARM_HWCAP2_ARM_PMULL); - GET_FEATURE_ID(aa32_sha1, ARM_HWCAP2_ARM_SHA1); - GET_FEATURE_ID(aa32_sha2, ARM_HWCAP2_ARM_SHA2); - GET_FEATURE_ID(aa32_crc32, ARM_HWCAP2_ARM_CRC32); - GET_FEATURE_ID(aa32_sb, ARM_HWCAP2_ARM_SB); - GET_FEATURE_ID(aa32_ssbs, ARM_HWCAP2_ARM_SSBS); - return hwcaps; -} - -const char *elf_hwcap_str(uint32_t bit) -{ - static const char *hwcap_str[] = { - [__builtin_ctz(ARM_HWCAP_ARM_SWP )] = "swp", - [__builtin_ctz(ARM_HWCAP_ARM_HALF )] = "half", - [__builtin_ctz(ARM_HWCAP_ARM_THUMB )] = "thumb", - [__builtin_ctz(ARM_HWCAP_ARM_26BIT )] = "26bit", - [__builtin_ctz(ARM_HWCAP_ARM_FAST_MULT)] = "fast_mult", - [__builtin_ctz(ARM_HWCAP_ARM_FPA )] = "fpa", - [__builtin_ctz(ARM_HWCAP_ARM_VFP )] = "vfp", - [__builtin_ctz(ARM_HWCAP_ARM_EDSP )] = "edsp", - [__builtin_ctz(ARM_HWCAP_ARM_JAVA )] = "java", - [__builtin_ctz(ARM_HWCAP_ARM_IWMMXT )] = "iwmmxt", - [__builtin_ctz(ARM_HWCAP_ARM_CRUNCH )] = "crunch", - [__builtin_ctz(ARM_HWCAP_ARM_THUMBEE )] = "thumbee", - [__builtin_ctz(ARM_HWCAP_ARM_NEON )] = "neon", - [__builtin_ctz(ARM_HWCAP_ARM_VFPv3 )] = "vfpv3", - [__builtin_ctz(ARM_HWCAP_ARM_VFPv3D16 )] = "vfpv3d16", - [__builtin_ctz(ARM_HWCAP_ARM_TLS )] = "tls", - [__builtin_ctz(ARM_HWCAP_ARM_VFPv4 )] = "vfpv4", - [__builtin_ctz(ARM_HWCAP_ARM_IDIVA )] = "idiva", - [__builtin_ctz(ARM_HWCAP_ARM_IDIVT )] = "idivt", - [__builtin_ctz(ARM_HWCAP_ARM_VFPD32 )] = "vfpd32", - [__builtin_ctz(ARM_HWCAP_ARM_LPAE )] = "lpae", - 
[__builtin_ctz(ARM_HWCAP_ARM_EVTSTRM )] = "evtstrm", - [__builtin_ctz(ARM_HWCAP_ARM_FPHP )] = "fphp", - [__builtin_ctz(ARM_HWCAP_ARM_ASIMDHP )] = "asimdhp", - [__builtin_ctz(ARM_HWCAP_ARM_ASIMDDP )] = "asimddp", - [__builtin_ctz(ARM_HWCAP_ARM_ASIMDFHM )] = "asimdfhm", - [__builtin_ctz(ARM_HWCAP_ARM_ASIMDBF16)] = "asimdbf16", - [__builtin_ctz(ARM_HWCAP_ARM_I8MM )] = "i8mm", - }; - - return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; -} - -const char *elf_hwcap2_str(uint32_t bit) -{ - static const char *hwcap_str[] = { - [__builtin_ctz(ARM_HWCAP2_ARM_AES )] = "aes", - [__builtin_ctz(ARM_HWCAP2_ARM_PMULL)] = "pmull", - [__builtin_ctz(ARM_HWCAP2_ARM_SHA1 )] = "sha1", - [__builtin_ctz(ARM_HWCAP2_ARM_SHA2 )] = "sha2", - [__builtin_ctz(ARM_HWCAP2_ARM_CRC32)] = "crc32", - [__builtin_ctz(ARM_HWCAP2_ARM_SB )] = "sb", - [__builtin_ctz(ARM_HWCAP2_ARM_SSBS )] = "ssbs", - }; - - return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; -} - -#undef GET_FEATURE -#undef GET_FEATURE_ID - -#define ELF_PLATFORM get_elf_platform() - -static const char *get_elf_platform(void) -{ - CPUARMState *env = cpu_env(thread_cpu); - -#if TARGET_BIG_ENDIAN -# define END "b" -#else -# define END "l" -#endif - - if (arm_feature(env, ARM_FEATURE_V8)) { - return "v8" END; - } else if (arm_feature(env, ARM_FEATURE_V7)) { - if (arm_feature(env, ARM_FEATURE_M)) { - return "v7m" END; - } else { - return "v7" END; - } - } else if (arm_feature(env, ARM_FEATURE_V6)) { - return "v6" END; - } else if (arm_feature(env, ARM_FEATURE_V5)) { - return "v5" END; - } else { - return "v4" END; - } - -#undef END -} - -#if TARGET_BIG_ENDIAN -#include "elf.h" -#include "vdso-be8.c.inc" -#include "vdso-be32.c.inc" - -static const VdsoImageInfo *vdso_image_info(uint32_t elf_flags) -{ - return (EF_ARM_EABI_VERSION(elf_flags) >= EF_ARM_EABI_VER4 - && (elf_flags & EF_ARM_BE8) - ? 
&vdso_be8_image_info - : &vdso_be32_image_info); -} -#define vdso_image_info vdso_image_info -#else -# define VDSO_HEADER "vdso-le.c.inc" -#endif - -#else -/* 64 bit ARM definitions */ - -#define ELF_ARCH EM_AARCH64 -#define ELF_CLASS ELFCLASS64 -#if TARGET_BIG_ENDIAN -# define ELF_PLATFORM "aarch64_be" -#else -# define ELF_PLATFORM "aarch64" -#endif - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - abi_long stack = infop->start_stack; - memset(regs, 0, sizeof(*regs)); - - regs->pc = infop->entry & ~0x3ULL; - regs->sp = stack; -} - -#define ELF_NREG 34 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -static void elf_core_copy_regs(target_elf_gregset_t *regs, - const CPUARMState *env) -{ - int i; - - for (i = 0; i < 32; i++) { - (*regs)[i] = tswapreg(env->xregs[i]); - } - (*regs)[32] = tswapreg(env->pc); - (*regs)[33] = tswapreg(pstate_read((CPUARMState *)env)); -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -enum { - ARM_HWCAP_A64_FP = 1 << 0, - ARM_HWCAP_A64_ASIMD = 1 << 1, - ARM_HWCAP_A64_EVTSTRM = 1 << 2, - ARM_HWCAP_A64_AES = 1 << 3, - ARM_HWCAP_A64_PMULL = 1 << 4, - ARM_HWCAP_A64_SHA1 = 1 << 5, - ARM_HWCAP_A64_SHA2 = 1 << 6, - ARM_HWCAP_A64_CRC32 = 1 << 7, - ARM_HWCAP_A64_ATOMICS = 1 << 8, - ARM_HWCAP_A64_FPHP = 1 << 9, - ARM_HWCAP_A64_ASIMDHP = 1 << 10, - ARM_HWCAP_A64_CPUID = 1 << 11, - ARM_HWCAP_A64_ASIMDRDM = 1 << 12, - ARM_HWCAP_A64_JSCVT = 1 << 13, - ARM_HWCAP_A64_FCMA = 1 << 14, - ARM_HWCAP_A64_LRCPC = 1 << 15, - ARM_HWCAP_A64_DCPOP = 1 << 16, - ARM_HWCAP_A64_SHA3 = 1 << 17, - ARM_HWCAP_A64_SM3 = 1 << 18, - ARM_HWCAP_A64_SM4 = 1 << 19, - ARM_HWCAP_A64_ASIMDDP = 1 << 20, - ARM_HWCAP_A64_SHA512 = 1 << 21, - ARM_HWCAP_A64_SVE = 1 << 22, - ARM_HWCAP_A64_ASIMDFHM = 1 << 23, - ARM_HWCAP_A64_DIT = 1 << 24, - ARM_HWCAP_A64_USCAT = 1 << 25, - ARM_HWCAP_A64_ILRCPC = 1 << 26, - ARM_HWCAP_A64_FLAGM = 1 << 27, - ARM_HWCAP_A64_SSBS = 1 << 28, - ARM_HWCAP_A64_SB = 1 << 29, - 
ARM_HWCAP_A64_PACA = 1 << 30, - ARM_HWCAP_A64_PACG = 1ULL << 31, - ARM_HWCAP_A64_GCS = 1ULL << 32, - ARM_HWCAP_A64_CMPBR = 1ULL << 33, - ARM_HWCAP_A64_FPRCVT = 1ULL << 34, - ARM_HWCAP_A64_F8MM8 = 1ULL << 35, - ARM_HWCAP_A64_F8MM4 = 1ULL << 36, - ARM_HWCAP_A64_SVE_F16MM = 1ULL << 37, - ARM_HWCAP_A64_SVE_ELTPERM = 1ULL << 38, - ARM_HWCAP_A64_SVE_AES2 = 1ULL << 39, - ARM_HWCAP_A64_SVE_BFSCALE = 1ULL << 40, - ARM_HWCAP_A64_SVE2P2 = 1ULL << 41, - ARM_HWCAP_A64_SME2P2 = 1ULL << 42, - ARM_HWCAP_A64_SME_SBITPERM = 1ULL << 43, - ARM_HWCAP_A64_SME_AES = 1ULL << 44, - ARM_HWCAP_A64_SME_SFEXPA = 1ULL << 45, - ARM_HWCAP_A64_SME_STMOP = 1ULL << 46, - ARM_HWCAP_A64_SME_SMOP4 = 1ULL << 47, - - ARM_HWCAP2_A64_DCPODP = 1 << 0, - ARM_HWCAP2_A64_SVE2 = 1 << 1, - ARM_HWCAP2_A64_SVEAES = 1 << 2, - ARM_HWCAP2_A64_SVEPMULL = 1 << 3, - ARM_HWCAP2_A64_SVEBITPERM = 1 << 4, - ARM_HWCAP2_A64_SVESHA3 = 1 << 5, - ARM_HWCAP2_A64_SVESM4 = 1 << 6, - ARM_HWCAP2_A64_FLAGM2 = 1 << 7, - ARM_HWCAP2_A64_FRINT = 1 << 8, - ARM_HWCAP2_A64_SVEI8MM = 1 << 9, - ARM_HWCAP2_A64_SVEF32MM = 1 << 10, - ARM_HWCAP2_A64_SVEF64MM = 1 << 11, - ARM_HWCAP2_A64_SVEBF16 = 1 << 12, - ARM_HWCAP2_A64_I8MM = 1 << 13, - ARM_HWCAP2_A64_BF16 = 1 << 14, - ARM_HWCAP2_A64_DGH = 1 << 15, - ARM_HWCAP2_A64_RNG = 1 << 16, - ARM_HWCAP2_A64_BTI = 1 << 17, - ARM_HWCAP2_A64_MTE = 1 << 18, - ARM_HWCAP2_A64_ECV = 1 << 19, - ARM_HWCAP2_A64_AFP = 1 << 20, - ARM_HWCAP2_A64_RPRES = 1 << 21, - ARM_HWCAP2_A64_MTE3 = 1 << 22, - ARM_HWCAP2_A64_SME = 1 << 23, - ARM_HWCAP2_A64_SME_I16I64 = 1 << 24, - ARM_HWCAP2_A64_SME_F64F64 = 1 << 25, - ARM_HWCAP2_A64_SME_I8I32 = 1 << 26, - ARM_HWCAP2_A64_SME_F16F32 = 1 << 27, - ARM_HWCAP2_A64_SME_B16F32 = 1 << 28, - ARM_HWCAP2_A64_SME_F32F32 = 1 << 29, - ARM_HWCAP2_A64_SME_FA64 = 1 << 30, - ARM_HWCAP2_A64_WFXT = 1ULL << 31, - ARM_HWCAP2_A64_EBF16 = 1ULL << 32, - ARM_HWCAP2_A64_SVE_EBF16 = 1ULL << 33, - ARM_HWCAP2_A64_CSSC = 1ULL << 34, - ARM_HWCAP2_A64_RPRFM = 1ULL << 35, - ARM_HWCAP2_A64_SVE2P1 = 1ULL << 36, - 
ARM_HWCAP2_A64_SME2 = 1ULL << 37, - ARM_HWCAP2_A64_SME2P1 = 1ULL << 38, - ARM_HWCAP2_A64_SME_I16I32 = 1ULL << 39, - ARM_HWCAP2_A64_SME_BI32I32 = 1ULL << 40, - ARM_HWCAP2_A64_SME_B16B16 = 1ULL << 41, - ARM_HWCAP2_A64_SME_F16F16 = 1ULL << 42, - ARM_HWCAP2_A64_MOPS = 1ULL << 43, - ARM_HWCAP2_A64_HBC = 1ULL << 44, - ARM_HWCAP2_A64_SVE_B16B16 = 1ULL << 45, - ARM_HWCAP2_A64_LRCPC3 = 1ULL << 46, - ARM_HWCAP2_A64_LSE128 = 1ULL << 47, - ARM_HWCAP2_A64_FPMR = 1ULL << 48, - ARM_HWCAP2_A64_LUT = 1ULL << 49, - ARM_HWCAP2_A64_FAMINMAX = 1ULL << 50, - ARM_HWCAP2_A64_F8CVT = 1ULL << 51, - ARM_HWCAP2_A64_F8FMA = 1ULL << 52, - ARM_HWCAP2_A64_F8DP4 = 1ULL << 53, - ARM_HWCAP2_A64_F8DP2 = 1ULL << 54, - ARM_HWCAP2_A64_F8E4M3 = 1ULL << 55, - ARM_HWCAP2_A64_F8E5M2 = 1ULL << 56, - ARM_HWCAP2_A64_SME_LUTV2 = 1ULL << 57, - ARM_HWCAP2_A64_SME_F8F16 = 1ULL << 58, - ARM_HWCAP2_A64_SME_F8F32 = 1ULL << 59, - ARM_HWCAP2_A64_SME_SF8FMA = 1ULL << 60, - ARM_HWCAP2_A64_SME_SF8DP4 = 1ULL << 61, - ARM_HWCAP2_A64_SME_SF8DP2 = 1ULL << 62, - ARM_HWCAP2_A64_POE = 1ULL << 63, -}; - -#define ELF_HWCAP get_elf_hwcap() -#define ELF_HWCAP2 get_elf_hwcap2() - -#define GET_FEATURE_ID(feat, hwcap) \ - do { if (cpu_isar_feature(feat, cpu)) { hwcaps |= hwcap; } } while (0) - -uint32_t get_elf_hwcap(void) -{ - ARMCPU *cpu = ARM_CPU(thread_cpu); - uint32_t hwcaps = 0; - - hwcaps |= ARM_HWCAP_A64_FP; - hwcaps |= ARM_HWCAP_A64_ASIMD; - hwcaps |= ARM_HWCAP_A64_CPUID; - - /* probe for the extra features */ - - GET_FEATURE_ID(aa64_aes, ARM_HWCAP_A64_AES); - GET_FEATURE_ID(aa64_pmull, ARM_HWCAP_A64_PMULL); - GET_FEATURE_ID(aa64_sha1, ARM_HWCAP_A64_SHA1); - GET_FEATURE_ID(aa64_sha256, ARM_HWCAP_A64_SHA2); - GET_FEATURE_ID(aa64_sha512, ARM_HWCAP_A64_SHA512); - GET_FEATURE_ID(aa64_crc32, ARM_HWCAP_A64_CRC32); - GET_FEATURE_ID(aa64_sha3, ARM_HWCAP_A64_SHA3); - GET_FEATURE_ID(aa64_sm3, ARM_HWCAP_A64_SM3); - GET_FEATURE_ID(aa64_sm4, ARM_HWCAP_A64_SM4); - GET_FEATURE_ID(aa64_fp16, ARM_HWCAP_A64_FPHP | ARM_HWCAP_A64_ASIMDHP); - 
GET_FEATURE_ID(aa64_atomics, ARM_HWCAP_A64_ATOMICS); - GET_FEATURE_ID(aa64_lse2, ARM_HWCAP_A64_USCAT); - GET_FEATURE_ID(aa64_rdm, ARM_HWCAP_A64_ASIMDRDM); - GET_FEATURE_ID(aa64_dp, ARM_HWCAP_A64_ASIMDDP); - GET_FEATURE_ID(aa64_fcma, ARM_HWCAP_A64_FCMA); - GET_FEATURE_ID(aa64_sve, ARM_HWCAP_A64_SVE); - GET_FEATURE_ID(aa64_pauth, ARM_HWCAP_A64_PACA | ARM_HWCAP_A64_PACG); - GET_FEATURE_ID(aa64_fhm, ARM_HWCAP_A64_ASIMDFHM); - GET_FEATURE_ID(aa64_dit, ARM_HWCAP_A64_DIT); - GET_FEATURE_ID(aa64_jscvt, ARM_HWCAP_A64_JSCVT); - GET_FEATURE_ID(aa64_sb, ARM_HWCAP_A64_SB); - GET_FEATURE_ID(aa64_condm_4, ARM_HWCAP_A64_FLAGM); - GET_FEATURE_ID(aa64_dcpop, ARM_HWCAP_A64_DCPOP); - GET_FEATURE_ID(aa64_rcpc_8_3, ARM_HWCAP_A64_LRCPC); - GET_FEATURE_ID(aa64_rcpc_8_4, ARM_HWCAP_A64_ILRCPC); - - return hwcaps; -} - -uint64_t get_elf_hwcap2(void) -{ - ARMCPU *cpu = ARM_CPU(thread_cpu); - uint64_t hwcaps = 0; - - GET_FEATURE_ID(aa64_dcpodp, ARM_HWCAP2_A64_DCPODP); - GET_FEATURE_ID(aa64_sve2, ARM_HWCAP2_A64_SVE2); - GET_FEATURE_ID(aa64_sve2_aes, ARM_HWCAP2_A64_SVEAES); - GET_FEATURE_ID(aa64_sve2_pmull128, ARM_HWCAP2_A64_SVEPMULL); - GET_FEATURE_ID(aa64_sve2_bitperm, ARM_HWCAP2_A64_SVEBITPERM); - GET_FEATURE_ID(aa64_sve2_sha3, ARM_HWCAP2_A64_SVESHA3); - GET_FEATURE_ID(aa64_sve2_sm4, ARM_HWCAP2_A64_SVESM4); - GET_FEATURE_ID(aa64_condm_5, ARM_HWCAP2_A64_FLAGM2); - GET_FEATURE_ID(aa64_frint, ARM_HWCAP2_A64_FRINT); - GET_FEATURE_ID(aa64_sve_i8mm, ARM_HWCAP2_A64_SVEI8MM); - GET_FEATURE_ID(aa64_sve_f32mm, ARM_HWCAP2_A64_SVEF32MM); - GET_FEATURE_ID(aa64_sve_f64mm, ARM_HWCAP2_A64_SVEF64MM); - GET_FEATURE_ID(aa64_sve_bf16, ARM_HWCAP2_A64_SVEBF16); - GET_FEATURE_ID(aa64_i8mm, ARM_HWCAP2_A64_I8MM); - GET_FEATURE_ID(aa64_bf16, ARM_HWCAP2_A64_BF16); - GET_FEATURE_ID(aa64_rndr, ARM_HWCAP2_A64_RNG); - GET_FEATURE_ID(aa64_bti, ARM_HWCAP2_A64_BTI); - GET_FEATURE_ID(aa64_mte, ARM_HWCAP2_A64_MTE); - GET_FEATURE_ID(aa64_mte3, ARM_HWCAP2_A64_MTE3); - GET_FEATURE_ID(aa64_sme, (ARM_HWCAP2_A64_SME | - 
ARM_HWCAP2_A64_SME_F32F32 | - ARM_HWCAP2_A64_SME_B16F32 | - ARM_HWCAP2_A64_SME_F16F32 | - ARM_HWCAP2_A64_SME_I8I32)); - GET_FEATURE_ID(aa64_sme_f64f64, ARM_HWCAP2_A64_SME_F64F64); - GET_FEATURE_ID(aa64_sme_i16i64, ARM_HWCAP2_A64_SME_I16I64); - GET_FEATURE_ID(aa64_sme_fa64, ARM_HWCAP2_A64_SME_FA64); - GET_FEATURE_ID(aa64_hbc, ARM_HWCAP2_A64_HBC); - GET_FEATURE_ID(aa64_mops, ARM_HWCAP2_A64_MOPS); - GET_FEATURE_ID(aa64_sve2p1, ARM_HWCAP2_A64_SVE2P1); - GET_FEATURE_ID(aa64_sme2, (ARM_HWCAP2_A64_SME2 | - ARM_HWCAP2_A64_SME_I16I32 | - ARM_HWCAP2_A64_SME_BI32I32)); - GET_FEATURE_ID(aa64_sme2p1, ARM_HWCAP2_A64_SME2P1); - GET_FEATURE_ID(aa64_sme_b16b16, ARM_HWCAP2_A64_SME_B16B16); - GET_FEATURE_ID(aa64_sme_f16f16, ARM_HWCAP2_A64_SME_F16F16); - GET_FEATURE_ID(aa64_sve_b16b16, ARM_HWCAP2_A64_SVE_B16B16); - - return hwcaps; -} - -const char *elf_hwcap_str(uint32_t bit) -{ - static const char * const hwcap_str[] = { - [__builtin_ctz(ARM_HWCAP_A64_FP )] = "fp", - [__builtin_ctz(ARM_HWCAP_A64_ASIMD )] = "asimd", - [__builtin_ctz(ARM_HWCAP_A64_EVTSTRM )] = "evtstrm", - [__builtin_ctz(ARM_HWCAP_A64_AES )] = "aes", - [__builtin_ctz(ARM_HWCAP_A64_PMULL )] = "pmull", - [__builtin_ctz(ARM_HWCAP_A64_SHA1 )] = "sha1", - [__builtin_ctz(ARM_HWCAP_A64_SHA2 )] = "sha2", - [__builtin_ctz(ARM_HWCAP_A64_CRC32 )] = "crc32", - [__builtin_ctz(ARM_HWCAP_A64_ATOMICS )] = "atomics", - [__builtin_ctz(ARM_HWCAP_A64_FPHP )] = "fphp", - [__builtin_ctz(ARM_HWCAP_A64_ASIMDHP )] = "asimdhp", - [__builtin_ctz(ARM_HWCAP_A64_CPUID )] = "cpuid", - [__builtin_ctz(ARM_HWCAP_A64_ASIMDRDM)] = "asimdrdm", - [__builtin_ctz(ARM_HWCAP_A64_JSCVT )] = "jscvt", - [__builtin_ctz(ARM_HWCAP_A64_FCMA )] = "fcma", - [__builtin_ctz(ARM_HWCAP_A64_LRCPC )] = "lrcpc", - [__builtin_ctz(ARM_HWCAP_A64_DCPOP )] = "dcpop", - [__builtin_ctz(ARM_HWCAP_A64_SHA3 )] = "sha3", - [__builtin_ctz(ARM_HWCAP_A64_SM3 )] = "sm3", - [__builtin_ctz(ARM_HWCAP_A64_SM4 )] = "sm4", - [__builtin_ctz(ARM_HWCAP_A64_ASIMDDP )] = "asimddp", - 
[__builtin_ctz(ARM_HWCAP_A64_SHA512 )] = "sha512", - [__builtin_ctz(ARM_HWCAP_A64_SVE )] = "sve", - [__builtin_ctz(ARM_HWCAP_A64_ASIMDFHM)] = "asimdfhm", - [__builtin_ctz(ARM_HWCAP_A64_DIT )] = "dit", - [__builtin_ctz(ARM_HWCAP_A64_USCAT )] = "uscat", - [__builtin_ctz(ARM_HWCAP_A64_ILRCPC )] = "ilrcpc", - [__builtin_ctz(ARM_HWCAP_A64_FLAGM )] = "flagm", - [__builtin_ctz(ARM_HWCAP_A64_SSBS )] = "ssbs", - [__builtin_ctz(ARM_HWCAP_A64_SB )] = "sb", - [__builtin_ctz(ARM_HWCAP_A64_PACA )] = "paca", - [__builtin_ctz(ARM_HWCAP_A64_PACG )] = "pacg", - [__builtin_ctzll(ARM_HWCAP_A64_GCS )] = "gcs", - [__builtin_ctzll(ARM_HWCAP_A64_CMPBR )] = "cmpbr", - [__builtin_ctzll(ARM_HWCAP_A64_FPRCVT)] = "fprcvt", - [__builtin_ctzll(ARM_HWCAP_A64_F8MM8 )] = "f8mm8", - [__builtin_ctzll(ARM_HWCAP_A64_F8MM4 )] = "f8mm4", - [__builtin_ctzll(ARM_HWCAP_A64_SVE_F16MM)] = "svef16mm", - [__builtin_ctzll(ARM_HWCAP_A64_SVE_ELTPERM)] = "sveeltperm", - [__builtin_ctzll(ARM_HWCAP_A64_SVE_AES2)] = "sveaes2", - [__builtin_ctzll(ARM_HWCAP_A64_SVE_BFSCALE)] = "svebfscale", - [__builtin_ctzll(ARM_HWCAP_A64_SVE2P2)] = "sve2p2", - [__builtin_ctzll(ARM_HWCAP_A64_SME2P2)] = "sme2p2", - [__builtin_ctzll(ARM_HWCAP_A64_SME_SBITPERM)] = "smesbitperm", - [__builtin_ctzll(ARM_HWCAP_A64_SME_AES)] = "smeaes", - [__builtin_ctzll(ARM_HWCAP_A64_SME_SFEXPA)] = "smesfexpa", - [__builtin_ctzll(ARM_HWCAP_A64_SME_STMOP)] = "smestmop", - [__builtin_ctzll(ARM_HWCAP_A64_SME_SMOP4)] = "smesmop4", - }; - - return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; -} - -const char *elf_hwcap2_str(uint32_t bit) -{ - static const char * const hwcap_str[] = { - [__builtin_ctz(ARM_HWCAP2_A64_DCPODP )] = "dcpodp", - [__builtin_ctz(ARM_HWCAP2_A64_SVE2 )] = "sve2", - [__builtin_ctz(ARM_HWCAP2_A64_SVEAES )] = "sveaes", - [__builtin_ctz(ARM_HWCAP2_A64_SVEPMULL )] = "svepmull", - [__builtin_ctz(ARM_HWCAP2_A64_SVEBITPERM )] = "svebitperm", - [__builtin_ctz(ARM_HWCAP2_A64_SVESHA3 )] = "svesha3", - [__builtin_ctz(ARM_HWCAP2_A64_SVESM4 )] = "svesm4", - [__builtin_ctz(ARM_HWCAP2_A64_FLAGM2 )] = "flagm2", - [__builtin_ctz(ARM_HWCAP2_A64_FRINT )] = "frint", - [__builtin_ctz(ARM_HWCAP2_A64_SVEI8MM )] = "svei8mm", - [__builtin_ctz(ARM_HWCAP2_A64_SVEF32MM )] = "svef32mm", - [__builtin_ctz(ARM_HWCAP2_A64_SVEF64MM )] = "svef64mm", - [__builtin_ctz(ARM_HWCAP2_A64_SVEBF16 )] = "svebf16", - [__builtin_ctz(ARM_HWCAP2_A64_I8MM )] = "i8mm", - [__builtin_ctz(ARM_HWCAP2_A64_BF16 )] = "bf16", - [__builtin_ctz(ARM_HWCAP2_A64_DGH )] = "dgh", - [__builtin_ctz(ARM_HWCAP2_A64_RNG )] = "rng", - [__builtin_ctz(ARM_HWCAP2_A64_BTI )] = "bti", - [__builtin_ctz(ARM_HWCAP2_A64_MTE )] = "mte", - [__builtin_ctz(ARM_HWCAP2_A64_ECV )] = "ecv", - [__builtin_ctz(ARM_HWCAP2_A64_AFP )] = "afp", - [__builtin_ctz(ARM_HWCAP2_A64_RPRES )] = "rpres", - [__builtin_ctz(ARM_HWCAP2_A64_MTE3 )] = "mte3", - [__builtin_ctz(ARM_HWCAP2_A64_SME )] = "sme", - [__builtin_ctz(ARM_HWCAP2_A64_SME_I16I64 )] = "smei16i64", - [__builtin_ctz(ARM_HWCAP2_A64_SME_F64F64 )] = "smef64f64", - [__builtin_ctz(ARM_HWCAP2_A64_SME_I8I32 )] = "smei8i32", - [__builtin_ctz(ARM_HWCAP2_A64_SME_F16F32 )] = "smef16f32", - [__builtin_ctz(ARM_HWCAP2_A64_SME_B16F32 )] = "smeb16f32", - [__builtin_ctz(ARM_HWCAP2_A64_SME_F32F32 )] = "smef32f32", - [__builtin_ctz(ARM_HWCAP2_A64_SME_FA64 )] = "smefa64", - [__builtin_ctz(ARM_HWCAP2_A64_WFXT )] = "wfxt", - [__builtin_ctzll(ARM_HWCAP2_A64_EBF16 )] = "ebf16", - [__builtin_ctzll(ARM_HWCAP2_A64_SVE_EBF16 )] = "sveebf16", - 
[__builtin_ctzll(ARM_HWCAP2_A64_CSSC )] = "cssc", - [__builtin_ctzll(ARM_HWCAP2_A64_RPRFM )] = "rprfm", - [__builtin_ctzll(ARM_HWCAP2_A64_SVE2P1 )] = "sve2p1", - [__builtin_ctzll(ARM_HWCAP2_A64_SME2 )] = "sme2", - [__builtin_ctzll(ARM_HWCAP2_A64_SME2P1 )] = "sme2p1", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_I16I32 )] = "smei16i32", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_BI32I32)] = "smebi32i32", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_B16B16 )] = "smeb16b16", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_F16F16 )] = "smef16f16", - [__builtin_ctzll(ARM_HWCAP2_A64_MOPS )] = "mops", - [__builtin_ctzll(ARM_HWCAP2_A64_HBC )] = "hbc", - [__builtin_ctzll(ARM_HWCAP2_A64_SVE_B16B16 )] = "sveb16b16", - [__builtin_ctzll(ARM_HWCAP2_A64_LRCPC3 )] = "lrcpc3", - [__builtin_ctzll(ARM_HWCAP2_A64_LSE128 )] = "lse128", - [__builtin_ctzll(ARM_HWCAP2_A64_FPMR )] = "fpmr", - [__builtin_ctzll(ARM_HWCAP2_A64_LUT )] = "lut", - [__builtin_ctzll(ARM_HWCAP2_A64_FAMINMAX )] = "faminmax", - [__builtin_ctzll(ARM_HWCAP2_A64_F8CVT )] = "f8cvt", - [__builtin_ctzll(ARM_HWCAP2_A64_F8FMA )] = "f8fma", - [__builtin_ctzll(ARM_HWCAP2_A64_F8DP4 )] = "f8dp4", - [__builtin_ctzll(ARM_HWCAP2_A64_F8DP2 )] = "f8dp2", - [__builtin_ctzll(ARM_HWCAP2_A64_F8E4M3 )] = "f8e4m3", - [__builtin_ctzll(ARM_HWCAP2_A64_F8E5M2 )] = "f8e5m2", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_LUTV2 )] = "smelutv2", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F16 )] = "smef8f16", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_F8F32 )] = "smef8f32", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP4 )] = "smesf8dp4", - [__builtin_ctzll(ARM_HWCAP2_A64_SME_SF8DP2 )] = "smesf8dp2", - [__builtin_ctzll(ARM_HWCAP2_A64_POE )] = "poe", - }; - - return bit < ARRAY_SIZE(hwcap_str) ? 
hwcap_str[bit] : NULL; -} - -#undef GET_FEATURE_ID - -#if TARGET_BIG_ENDIAN -# define VDSO_HEADER "vdso-be.c.inc" -#else -# define VDSO_HEADER "vdso-le.c.inc" -#endif - -#endif /* not TARGET_AARCH64 */ - -#endif /* TARGET_ARM */ - -#ifdef TARGET_SPARC - -#ifndef TARGET_SPARC64 -# define ELF_CLASS ELFCLASS32 -# define ELF_ARCH EM_SPARC -#elif defined(TARGET_ABI32) -# define ELF_CLASS ELFCLASS32 -# define elf_check_arch(x) ((x) == EM_SPARC32PLUS || (x) == EM_SPARC) -#else -# define ELF_CLASS ELFCLASS64 -# define ELF_ARCH EM_SPARCV9 -#endif - -#include "elf.h" - -#define ELF_HWCAP get_elf_hwcap() - -static uint32_t get_elf_hwcap(void) -{ - /* There are not many sparc32 hwcap bits -- we have all of them. */ - uint32_t r = HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | - HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV; - -#ifdef TARGET_SPARC64 - CPUSPARCState *env = cpu_env(thread_cpu); - uint32_t features = env->def.features; - - r |= HWCAP_SPARC_V9 | HWCAP_SPARC_V8PLUS; - /* 32x32 multiply and divide are efficient. */ - r |= HWCAP_SPARC_MUL32 | HWCAP_SPARC_DIV32; - /* We don't have an internal feature bit for this. */ - r |= HWCAP_SPARC_POPC; - r |= features & CPU_FEATURE_FSMULD ? HWCAP_SPARC_FSMULD : 0; - r |= features & CPU_FEATURE_VIS1 ? HWCAP_SPARC_VIS : 0; - r |= features & CPU_FEATURE_VIS2 ? HWCAP_SPARC_VIS2 : 0; - r |= features & CPU_FEATURE_FMAF ? HWCAP_SPARC_FMAF : 0; - r |= features & CPU_FEATURE_VIS3 ? HWCAP_SPARC_VIS3 : 0; - r |= features & CPU_FEATURE_IMA ? HWCAP_SPARC_IMA : 0; -#endif - - return r; -} - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - /* Note that target_cpu_copy_regs does not read psr/tstate. 
*/ - regs->pc = infop->entry; - regs->npc = regs->pc + 4; - regs->y = 0; - regs->u_regs[14] = (infop->start_stack - 16 * sizeof(abi_ulong) - - TARGET_STACK_BIAS); -} -#endif /* TARGET_SPARC */ - -#ifdef TARGET_PPC - -#define ELF_MACHINE PPC_ELF_MACHINE - -#if defined(TARGET_PPC64) - -#define elf_check_arch(x) ( (x) == EM_PPC64 ) - -#define ELF_CLASS ELFCLASS64 - -#else - -#define ELF_CLASS ELFCLASS32 -#define EXSTACK_DEFAULT true - -#endif - -#define ELF_ARCH EM_PPC - -/* Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP). - See arch/powerpc/include/asm/cputable.h. */ -enum { - QEMU_PPC_FEATURE_32 = 0x80000000, - QEMU_PPC_FEATURE_64 = 0x40000000, - QEMU_PPC_FEATURE_601_INSTR = 0x20000000, - QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000, - QEMU_PPC_FEATURE_HAS_FPU = 0x08000000, - QEMU_PPC_FEATURE_HAS_MMU = 0x04000000, - QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000, - QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000, - QEMU_PPC_FEATURE_HAS_SPE = 0x00800000, - QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000, - QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000, - QEMU_PPC_FEATURE_NO_TB = 0x00100000, - QEMU_PPC_FEATURE_POWER4 = 0x00080000, - QEMU_PPC_FEATURE_POWER5 = 0x00040000, - QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000, - QEMU_PPC_FEATURE_CELL = 0x00010000, - QEMU_PPC_FEATURE_BOOKE = 0x00008000, - QEMU_PPC_FEATURE_SMT = 0x00004000, - QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000, - QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000, - QEMU_PPC_FEATURE_PA6T = 0x00000800, - QEMU_PPC_FEATURE_HAS_DFP = 0x00000400, - QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200, - QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100, - QEMU_PPC_FEATURE_HAS_VSX = 0x00000080, - QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 0x00000040, - - QEMU_PPC_FEATURE_TRUE_LE = 0x00000002, - QEMU_PPC_FEATURE_PPC_LE = 0x00000001, - - /* Feature definitions in AT_HWCAP2. 
*/ - QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */ - QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */ - QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */ - QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */ - QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */ - QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */ - QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000, - QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000, - QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */ - QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */ - QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */ - QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */ - QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */ - QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */ - QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */ -}; - -#define ELF_HWCAP get_elf_hwcap() - -static uint32_t get_elf_hwcap(void) -{ - PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); - uint32_t features = 0; - - /* We don't have to be terribly complete here; the high points are - Altivec/FP/SPE support. Anything else is just a bonus. 
*/ -#define GET_FEATURE(flag, feature) \ - do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) -#define GET_FEATURE2(flags, feature) \ - do { \ - if ((cpu->env.insns_flags2 & flags) == flags) { \ - features |= feature; \ - } \ - } while (0) - GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64); - GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU); - GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC); - GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE); - GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE); - GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE); - GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE); - GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC); - GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP); - GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX); - GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | - PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206), - QEMU_PPC_FEATURE_ARCH_2_06); -#undef GET_FEATURE -#undef GET_FEATURE2 - - return features; -} - -#define ELF_HWCAP2 get_elf_hwcap2() - -static uint32_t get_elf_hwcap2(void) -{ - PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); - uint32_t features = 0; - -#define GET_FEATURE(flag, feature) \ - do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) -#define GET_FEATURE2(flag, feature) \ - do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0) - - GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL); - GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR); - GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | - PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 | - QEMU_PPC_FEATURE2_VEC_CRYPTO); - GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 | - QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128); - GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 | - QEMU_PPC_FEATURE2_MMA); - -#undef GET_FEATURE -#undef GET_FEATURE2 - - return features; -} - -/* - * The requirements here are: - * - keep the 
final alignment of sp (sp & 0xf) - * - make sure the 32-bit value at the first 16 byte aligned position of - * AUXV is greater than 16 for glibc compatibility. - * AT_IGNOREPPC is used for that. - * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, - * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. - */ -#define DLINFO_ARCH_ITEMS 5 -#define ARCH_DLINFO \ - do { \ - PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); \ - /* \ - * Handle glibc compatibility: these magic entries must \ - * be at the lowest addresses in the final auxv. \ - */ \ - NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ - NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ - NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \ - NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \ - NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ - } while (0) - -static inline void init_thread(struct target_pt_regs *_regs, struct image_info *infop) -{ - _regs->gpr[1] = infop->start_stack; -#if defined(TARGET_PPC64) - if (get_ppc64_abi(infop) < 2) { - uint64_t val; - get_user_u64(val, infop->entry + 8); - _regs->gpr[2] = val + infop->load_bias; - get_user_u64(val, infop->entry); - infop->entry = val + infop->load_bias; - } else { - _regs->gpr[12] = infop->entry; /* r12 set to global entry address */ - } -#endif - _regs->nip = infop->entry; -} - -/* See linux kernel: arch/powerpc/include/asm/elf.h. 
*/ -#define ELF_NREG 48 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env) -{ - int i; - target_ulong ccr = 0; - - for (i = 0; i < ARRAY_SIZE(env->gpr); i++) { - (*regs)[i] = tswapreg(env->gpr[i]); - } - - (*regs)[32] = tswapreg(env->nip); - (*regs)[33] = tswapreg(env->msr); - (*regs)[35] = tswapreg(env->ctr); - (*regs)[36] = tswapreg(env->lr); - (*regs)[37] = tswapreg(cpu_read_xer(env)); - - ccr = ppc_get_cr(env); - (*regs)[38] = tswapreg(ccr); -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#ifndef TARGET_PPC64 -# define VDSO_HEADER "vdso-32.c.inc" -#elif TARGET_BIG_ENDIAN -# define VDSO_HEADER "vdso-64.c.inc" -#else -# define VDSO_HEADER "vdso-64le.c.inc" -#endif - -#endif - -#ifdef TARGET_LOONGARCH64 - -#define ELF_CLASS ELFCLASS64 -#define ELF_ARCH EM_LOONGARCH -#define EXSTACK_DEFAULT true - -#define elf_check_arch(x) ((x) == EM_LOONGARCH) - -#define VDSO_HEADER "vdso.c.inc" - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - /*Set crmd PG,DA = 1,0 */ - regs->csr.crmd = 2 << 3; - regs->csr.era = infop->entry; - regs->regs[3] = infop->start_stack; -} - -/* See linux kernel: arch/loongarch/include/asm/elf.h */ -#define ELF_NREG 45 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -enum { - TARGET_EF_R0 = 0, - TARGET_EF_CSR_ERA = TARGET_EF_R0 + 33, - TARGET_EF_CSR_BADV = TARGET_EF_R0 + 34, -}; - -static void elf_core_copy_regs(target_elf_gregset_t *regs, - const CPULoongArchState *env) -{ - int i; - - (*regs)[TARGET_EF_R0] = 0; - - for (i = 1; i < ARRAY_SIZE(env->gpr); i++) { - (*regs)[TARGET_EF_R0 + i] = tswapreg(env->gpr[i]); - } - - (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->pc); - (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV); -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#define ELF_HWCAP get_elf_hwcap() - -/* See arch/loongarch/include/uapi/asm/hwcap.h */ -enum { 
- HWCAP_LOONGARCH_CPUCFG = (1 << 0), - HWCAP_LOONGARCH_LAM = (1 << 1), - HWCAP_LOONGARCH_UAL = (1 << 2), - HWCAP_LOONGARCH_FPU = (1 << 3), - HWCAP_LOONGARCH_LSX = (1 << 4), - HWCAP_LOONGARCH_LASX = (1 << 5), - HWCAP_LOONGARCH_CRC32 = (1 << 6), - HWCAP_LOONGARCH_COMPLEX = (1 << 7), - HWCAP_LOONGARCH_CRYPTO = (1 << 8), - HWCAP_LOONGARCH_LVZ = (1 << 9), - HWCAP_LOONGARCH_LBT_X86 = (1 << 10), - HWCAP_LOONGARCH_LBT_ARM = (1 << 11), - HWCAP_LOONGARCH_LBT_MIPS = (1 << 12), -}; - -static uint32_t get_elf_hwcap(void) -{ - LoongArchCPU *cpu = LOONGARCH_CPU(thread_cpu); - uint32_t hwcaps = 0; - - hwcaps |= HWCAP_LOONGARCH_CRC32; - - if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) { - hwcaps |= HWCAP_LOONGARCH_UAL; - } - - if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) { - hwcaps |= HWCAP_LOONGARCH_FPU; - } - - if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) { - hwcaps |= HWCAP_LOONGARCH_LAM; - } - - if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) { - hwcaps |= HWCAP_LOONGARCH_LSX; - } - - if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) { - hwcaps |= HWCAP_LOONGARCH_LASX; - } - - return hwcaps; -} - -#define ELF_PLATFORM "loongarch" - -#endif /* TARGET_LOONGARCH64 */ - -#ifdef TARGET_MIPS - -#ifdef TARGET_MIPS64 -#define ELF_CLASS ELFCLASS64 -#else -#define ELF_CLASS ELFCLASS32 -#endif -#define ELF_ARCH EM_MIPS -#define EXSTACK_DEFAULT true - -#ifdef TARGET_ABI_MIPSN32 -#define elf_check_abi(x) ((x) & EF_MIPS_ABI2) -#else -#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2)) -#endif - -#define ELF_BASE_PLATFORM get_elf_base_platform() - -#define MATCH_PLATFORM_INSN(_flags, _base_platform) \ - do { if ((cpu->env.insn_flags & (_flags)) == _flags) \ - { return _base_platform; } } while (0) - -static const char *get_elf_base_platform(void) -{ - MIPSCPU *cpu = MIPS_CPU(thread_cpu); - - /* 64 bit ISAs goes first */ - MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6"); - MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5"); - MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2"); - 
MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64"); - MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5"); - MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4"); - MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3"); - - /* 32 bit ISAs */ - MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6"); - MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5"); - MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2"); - MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32"); - MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2"); - - /* Fallback */ - return "mips"; -} -#undef MATCH_PLATFORM_INSN - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->cp0_status = 2 << CP0St_KSU; - regs->cp0_epc = infop->entry; - regs->regs[29] = infop->start_stack; -} - -/* See linux kernel: arch/mips/include/asm/elf.h. */ -#define ELF_NREG 45 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -/* See linux kernel: arch/mips/include/asm/reg.h. */ -enum { -#ifdef TARGET_MIPS64 - TARGET_EF_R0 = 0, -#else - TARGET_EF_R0 = 6, -#endif - TARGET_EF_R26 = TARGET_EF_R0 + 26, - TARGET_EF_R27 = TARGET_EF_R0 + 27, - TARGET_EF_LO = TARGET_EF_R0 + 32, - TARGET_EF_HI = TARGET_EF_R0 + 33, - TARGET_EF_CP0_EPC = TARGET_EF_R0 + 34, - TARGET_EF_CP0_BADVADDR = TARGET_EF_R0 + 35, - TARGET_EF_CP0_STATUS = TARGET_EF_R0 + 36, - TARGET_EF_CP0_CAUSE = TARGET_EF_R0 + 37 -}; - -/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. 
*/ -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMIPSState *env) -{ - int i; - - for (i = 0; i < TARGET_EF_R0; i++) { - (*regs)[i] = 0; - } - (*regs)[TARGET_EF_R0] = 0; - - for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { - (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]); - } - - (*regs)[TARGET_EF_R26] = 0; - (*regs)[TARGET_EF_R27] = 0; - (*regs)[TARGET_EF_LO] = tswapreg(env->active_tc.LO[0]); - (*regs)[TARGET_EF_HI] = tswapreg(env->active_tc.HI[0]); - (*regs)[TARGET_EF_CP0_EPC] = tswapreg(env->active_tc.PC); - (*regs)[TARGET_EF_CP0_BADVADDR] = tswapreg(env->CP0_BadVAddr); - (*regs)[TARGET_EF_CP0_STATUS] = tswapreg(env->CP0_Status); - (*regs)[TARGET_EF_CP0_CAUSE] = tswapreg(env->CP0_Cause); -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -/* See arch/mips/include/uapi/asm/hwcap.h. */ -enum { - HWCAP_MIPS_R6 = (1 << 0), - HWCAP_MIPS_MSA = (1 << 1), - HWCAP_MIPS_CRC32 = (1 << 2), - HWCAP_MIPS_MIPS16 = (1 << 3), - HWCAP_MIPS_MDMX = (1 << 4), - HWCAP_MIPS_MIPS3D = (1 << 5), - HWCAP_MIPS_SMARTMIPS = (1 << 6), - HWCAP_MIPS_DSP = (1 << 7), - HWCAP_MIPS_DSP2 = (1 << 8), - HWCAP_MIPS_DSP3 = (1 << 9), - HWCAP_MIPS_MIPS16E2 = (1 << 10), - HWCAP_LOONGSON_MMI = (1 << 11), - HWCAP_LOONGSON_EXT = (1 << 12), - HWCAP_LOONGSON_EXT2 = (1 << 13), - HWCAP_LOONGSON_CPUCFG = (1 << 14), -}; - -#define ELF_HWCAP get_elf_hwcap() - -#define GET_FEATURE_INSN(_flag, _hwcap) \ - do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0) - -#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \ - do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0) - -#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \ - do { \ - if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \ - hwcaps |= _hwcap; \ - } \ - } while (0) - -static uint32_t get_elf_hwcap(void) -{ - MIPSCPU *cpu = MIPS_CPU(thread_cpu); - uint32_t hwcaps = 0; - - GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH, - 2, 
HWCAP_MIPS_R6); - GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA); - GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI); - GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT); - - return hwcaps; -} - -#undef GET_FEATURE_REG_EQU -#undef GET_FEATURE_REG_SET -#undef GET_FEATURE_INSN - -#endif /* TARGET_MIPS */ - -#ifdef TARGET_MICROBLAZE - -#define elf_check_arch(x) ( (x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD) - -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_MICROBLAZE - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->pc = infop->entry; - regs->r1 = infop->start_stack; - -} - -#define ELF_EXEC_PAGESIZE 4096 - -#define USE_ELF_CORE_DUMP -#define ELF_NREG 38 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env) -{ - int i, pos = 0; - - for (i = 0; i < 32; i++) { - (*regs)[pos++] = tswapreg(env->regs[i]); - } - - (*regs)[pos++] = tswapreg(env->pc); - (*regs)[pos++] = tswapreg(mb_cpu_read_msr(env)); - (*regs)[pos++] = 0; - (*regs)[pos++] = tswapreg(env->ear); - (*regs)[pos++] = 0; - (*regs)[pos++] = tswapreg(env->esr); -} - -#endif /* TARGET_MICROBLAZE */ - -#ifdef TARGET_OPENRISC - -#define ELF_ARCH EM_OPENRISC -#define ELF_CLASS ELFCLASS32 -#define ELF_DATA ELFDATA2MSB - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->pc = infop->entry; - regs->gpr[1] = infop->start_stack; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 8192 - -/* See linux kernel arch/openrisc/include/asm/elf.h. 
*/ -#define ELF_NREG 34 /* gprs and pc, sr */ -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -static void elf_core_copy_regs(target_elf_gregset_t *regs, - const CPUOpenRISCState *env) -{ - int i; - - for (i = 0; i < 32; i++) { - (*regs)[i] = tswapreg(cpu_get_gpr(env, i)); - } - (*regs)[32] = tswapreg(env->pc); - (*regs)[33] = tswapreg(cpu_get_sr(env)); -} -#define ELF_HWCAP 0 -#define ELF_PLATFORM NULL - -#endif /* TARGET_OPENRISC */ - -#ifdef TARGET_SH4 - -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_SH - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - /* Check other registers XXXXX */ - regs->pc = infop->entry; - regs->regs[15] = infop->start_stack; -} - -/* See linux kernel: arch/sh/include/asm/elf.h. */ -#define ELF_NREG 23 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -/* See linux kernel: arch/sh/include/asm/ptrace.h. */ -enum { - TARGET_REG_PC = 16, - TARGET_REG_PR = 17, - TARGET_REG_SR = 18, - TARGET_REG_GBR = 19, - TARGET_REG_MACH = 20, - TARGET_REG_MACL = 21, - TARGET_REG_SYSCALL = 22 -}; - -static inline void elf_core_copy_regs(target_elf_gregset_t *regs, - const CPUSH4State *env) -{ - int i; - - for (i = 0; i < 16; i++) { - (*regs)[i] = tswapreg(env->gregs[i]); - } - - (*regs)[TARGET_REG_PC] = tswapreg(env->pc); - (*regs)[TARGET_REG_PR] = tswapreg(env->pr); - (*regs)[TARGET_REG_SR] = tswapreg(env->sr); - (*regs)[TARGET_REG_GBR] = tswapreg(env->gbr); - (*regs)[TARGET_REG_MACH] = tswapreg(env->mach); - (*regs)[TARGET_REG_MACL] = tswapreg(env->macl); - (*regs)[TARGET_REG_SYSCALL] = 0; /* FIXME */ -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -enum { - SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */ - SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */ - SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */ - SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */ - SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware 
performance counters */ - SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */ - SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */ - SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */ - SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */ - SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */ -}; - -#define ELF_HWCAP get_elf_hwcap() - -static uint32_t get_elf_hwcap(void) -{ - SuperHCPU *cpu = SUPERH_CPU(thread_cpu); - uint32_t hwcap = 0; - - hwcap |= SH_CPU_HAS_FPU; - - if (cpu->env.features & SH_FEATURE_SH4A) { - hwcap |= SH_CPU_HAS_LLSC; - } - - return hwcap; -} - -#endif - -#ifdef TARGET_M68K - -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_68K - -/* ??? Does this need to do anything? - #define ELF_PLAT_INIT(_r) */ - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->usp = infop->start_stack; - regs->sr = 0; - regs->pc = infop->entry; -} - -/* See linux kernel: arch/m68k/include/asm/elf.h. */ -#define ELF_NREG 20 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - -static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUM68KState *env) -{ - (*regs)[0] = tswapreg(env->dregs[1]); - (*regs)[1] = tswapreg(env->dregs[2]); - (*regs)[2] = tswapreg(env->dregs[3]); - (*regs)[3] = tswapreg(env->dregs[4]); - (*regs)[4] = tswapreg(env->dregs[5]); - (*regs)[5] = tswapreg(env->dregs[6]); - (*regs)[6] = tswapreg(env->dregs[7]); - (*regs)[7] = tswapreg(env->aregs[0]); - (*regs)[8] = tswapreg(env->aregs[1]); - (*regs)[9] = tswapreg(env->aregs[2]); - (*regs)[10] = tswapreg(env->aregs[3]); - (*regs)[11] = tswapreg(env->aregs[4]); - (*regs)[12] = tswapreg(env->aregs[5]); - (*regs)[13] = tswapreg(env->aregs[6]); - (*regs)[14] = tswapreg(env->dregs[0]); - (*regs)[15] = tswapreg(env->aregs[7]); - (*regs)[16] = tswapreg(env->dregs[0]); /* FIXME: orig_d0 */ - (*regs)[17] = tswapreg(env->sr); - (*regs)[18] = tswapreg(env->pc); - (*regs)[19] = 0; /* FIXME: regs->format | regs->vector */ -} - -#define 
USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 8192 - +#ifndef TARGET_ARCH_HAS_SIGTRAMP_PAGE +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 0 #endif -#ifdef TARGET_ALPHA - -#define ELF_CLASS ELFCLASS64 -#define ELF_ARCH EM_ALPHA - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->pc = infop->entry; - regs->ps = 8; - regs->usp = infop->start_stack; -} - -#define ELF_EXEC_PAGESIZE 8192 - -#endif /* TARGET_ALPHA */ - -#ifdef TARGET_S390X - -#define ELF_CLASS ELFCLASS64 -#define ELF_DATA ELFDATA2MSB -#define ELF_ARCH EM_S390 - -#include "elf.h" - -#define ELF_HWCAP get_elf_hwcap() - -#define GET_FEATURE(_feat, _hwcap) \ - do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0) - -uint32_t get_elf_hwcap(void) -{ - /* - * Let's assume we always have esan3 and zarch. - * 31-bit processes can use 64-bit registers (high gprs). - */ - uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; - - GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); - GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); - GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); - GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); - if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && - s390_has_feat(S390_FEAT_ETF3_ENH)) { - hwcap |= HWCAP_S390_ETF3EH; - } - GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); - GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); - GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); - - return hwcap; -} - -const char *elf_hwcap_str(uint32_t bit) -{ - static const char *hwcap_str[] = { - [HWCAP_S390_NR_ESAN3] = "esan3", - [HWCAP_S390_NR_ZARCH] = "zarch", - [HWCAP_S390_NR_STFLE] = "stfle", - [HWCAP_S390_NR_MSA] = "msa", - [HWCAP_S390_NR_LDISP] = "ldisp", - [HWCAP_S390_NR_EIMM] = "eimm", - [HWCAP_S390_NR_DFP] = "dfp", - [HWCAP_S390_NR_HPAGE] = "edat", - [HWCAP_S390_NR_ETF3EH] = "etf3eh", - [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", - [HWCAP_S390_NR_TE] = "te", - [HWCAP_S390_NR_VXRS] = "vx", - 
[HWCAP_S390_NR_VXRS_BCD] = "vxd", - [HWCAP_S390_NR_VXRS_EXT] = "vxe", - [HWCAP_S390_NR_GS] = "gs", - [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", - [HWCAP_S390_NR_VXRS_PDE] = "vxp", - [HWCAP_S390_NR_SORT] = "sort", - [HWCAP_S390_NR_DFLT] = "dflt", - [HWCAP_S390_NR_NNPA] = "nnpa", - [HWCAP_S390_NR_PCI_MIO] = "pcimio", - [HWCAP_S390_NR_SIE] = "sie", - }; - - return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; -} - -static inline void init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - regs->psw.addr = infop->entry; - regs->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | \ - PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | \ - PSW_MASK_32; - regs->gprs[15] = infop->start_stack; -} +#define ELF_OSABI ELFOSABI_SYSV -/* See linux kernel: arch/s390/include/uapi/asm/ptrace.h (s390_regs). */ -#define ELF_NREG 27 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; +/* from personality.h */ +/* + * Flags for bug emulation. + * + * These occupy the top three bytes. 
+ */ enum { - TARGET_REG_PSWM = 0, - TARGET_REG_PSWA = 1, - TARGET_REG_GPRS = 2, - TARGET_REG_ARS = 18, - TARGET_REG_ORIG_R2 = 26, + ADDR_NO_RANDOMIZE = 0x0040000, /* disable randomization of VA space */ + FDPIC_FUNCPTRS = 0x0080000, /* userspace function ptrs point to + descriptors (signal handling) */ + MMAP_PAGE_ZERO = 0x0100000, + ADDR_COMPAT_LAYOUT = 0x0200000, + READ_IMPLIES_EXEC = 0x0400000, + ADDR_LIMIT_32BIT = 0x0800000, + SHORT_INODE = 0x1000000, + WHOLE_SECONDS = 0x2000000, + STICKY_TIMEOUTS = 0x4000000, + ADDR_LIMIT_3GB = 0x8000000, }; -static void elf_core_copy_regs(target_elf_gregset_t *regs, - const CPUS390XState *env) -{ - int i; - uint32_t *aregs; - - (*regs)[TARGET_REG_PSWM] = tswapreg(env->psw.mask); - (*regs)[TARGET_REG_PSWA] = tswapreg(env->psw.addr); - for (i = 0; i < 16; i++) { - (*regs)[TARGET_REG_GPRS + i] = tswapreg(env->regs[i]); - } - aregs = (uint32_t *)&((*regs)[TARGET_REG_ARS]); - for (i = 0; i < 16; i++) { - aregs[i] = tswap32(env->aregs[i]); - } - (*regs)[TARGET_REG_ORIG_R2] = 0; -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#define VDSO_HEADER "vdso.c.inc" - -#endif /* TARGET_S390X */ - -#ifdef TARGET_RISCV - -#define ELF_ARCH EM_RISCV - -#ifdef TARGET_RISCV32 -#define ELF_CLASS ELFCLASS32 -#define VDSO_HEADER "vdso-32.c.inc" -#else -#define ELF_CLASS ELFCLASS64 -#define VDSO_HEADER "vdso-64.c.inc" -#endif - -#define ELF_HWCAP get_elf_hwcap() - -static uint32_t get_elf_hwcap(void) -{ -#define MISA_BIT(EXT) (1 << (EXT - 'A')) - RISCVCPU *cpu = RISCV_CPU(thread_cpu); - uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') - | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') - | MISA_BIT('V'); - - return cpu->env.misa_ext & mask; -#undef MISA_BIT -} - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->sepc = infop->entry; - regs->sp = infop->start_stack; -} - -#define ELF_EXEC_PAGESIZE 4096 - -#endif /* TARGET_RISCV */ - -#ifdef TARGET_HPPA - -#define ELF_CLASS 
ELFCLASS32 -#define ELF_ARCH EM_PARISC -#define ELF_PLATFORM "PARISC" -#define STACK_GROWS_DOWN 0 -#define STACK_ALIGNMENT 64 - -#define VDSO_HEADER "vdso.c.inc" - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->iaoq[0] = infop->entry | PRIV_USER; - regs->iaoq[1] = regs->iaoq[0] + 4; - regs->gr[23] = 0; - regs->gr[24] = infop->argv; - regs->gr[25] = infop->argc; - /* The top-of-stack contains a linkage buffer. */ - regs->gr[30] = infop->start_stack + 64; - regs->gr[31] = infop->entry; -} - -#define LO_COMMPAGE 0 - -static bool init_guest_commpage(void) -{ - /* If reserved_va, then we have already mapped 0 page on the host. */ - if (!reserved_va) { - void *want, *addr; - - want = g2h_untagged(LO_COMMPAGE); - addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, - MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); - if (addr == MAP_FAILED) { - perror("Allocating guest commpage"); - exit(EXIT_FAILURE); - } - if (addr != want) { - return false; - } - } - - /* - * On Linux, page zero is normally marked execute only + gateway. - * Normal read or write is supposed to fail (thus PROT_NONE above), - * but specific offsets have kernel code mapped to raise permissions - * and implement syscalls. Here, simply mark the page executable. - * Special case the entry points during translation (see do_page_zero). 
- */ - page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, - PAGE_EXEC | PAGE_VALID); - return true; -} - -#endif /* TARGET_HPPA */ - -#ifdef TARGET_XTENSA - -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_XTENSA - -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) -{ - regs->windowbase = 0; - regs->windowstart = 1; - regs->areg[1] = infop->start_stack; - regs->pc = infop->entry; - if (info_is_fdpic(infop)) { - regs->areg[4] = infop->loadmap_addr; - regs->areg[5] = infop->interpreter_loadmap_addr; - if (infop->interpreter_loadmap_addr) { - regs->areg[6] = infop->interpreter_pt_dynamic_addr; - } else { - regs->areg[6] = infop->pt_dynamic_addr; - } - } -} - -/* See linux kernel: arch/xtensa/include/asm/elf.h. */ -#define ELF_NREG 128 -typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG]; - +/* + * Personality types. + * + * These go in the low byte. Avoid using the top bit, it will + * conflict with error returns. + */ enum { - TARGET_REG_PC, - TARGET_REG_PS, - TARGET_REG_LBEG, - TARGET_REG_LEND, - TARGET_REG_LCOUNT, - TARGET_REG_SAR, - TARGET_REG_WINDOWSTART, - TARGET_REG_WINDOWBASE, - TARGET_REG_THREADPTR, - TARGET_REG_AR0 = 64, + PER_LINUX = 0x0000, + PER_LINUX_32BIT = 0x0000 | ADDR_LIMIT_32BIT, + PER_LINUX_FDPIC = 0x0000 | FDPIC_FUNCPTRS, + PER_SVR4 = 0x0001 | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, + PER_SVR3 = 0x0002 | STICKY_TIMEOUTS | SHORT_INODE, + PER_SCOSVR3 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS | SHORT_INODE, + PER_OSR5 = 0x0003 | STICKY_TIMEOUTS | WHOLE_SECONDS, + PER_WYSEV386 = 0x0004 | STICKY_TIMEOUTS | SHORT_INODE, + PER_ISCR4 = 0x0005 | STICKY_TIMEOUTS, + PER_BSD = 0x0006, + PER_SUNOS = 0x0006 | STICKY_TIMEOUTS, + PER_XENIX = 0x0007 | STICKY_TIMEOUTS | SHORT_INODE, + PER_LINUX32 = 0x0008, + PER_LINUX32_3GB = 0x0008 | ADDR_LIMIT_3GB, + PER_IRIX32 = 0x0009 | STICKY_TIMEOUTS,/* IRIX5 32-bit */ + PER_IRIXN32 = 0x000a | STICKY_TIMEOUTS,/* IRIX6 new 32-bit */ + PER_IRIX64 = 0x000b | STICKY_TIMEOUTS,/* 
IRIX6 64-bit */ + PER_RISCOS = 0x000c, + PER_SOLARIS = 0x000d | STICKY_TIMEOUTS, + PER_UW7 = 0x000e | STICKY_TIMEOUTS | MMAP_PAGE_ZERO, + PER_OSF4 = 0x000f, /* OSF/1 v4 */ + PER_HPUX = 0x0010, + PER_MASK = 0x00ff, }; -static void elf_core_copy_regs(target_elf_gregset_t *regs, - const CPUXtensaState *env) -{ - unsigned i; - - (*regs)[TARGET_REG_PC] = tswapreg(env->pc); - (*regs)[TARGET_REG_PS] = tswapreg(env->sregs[PS] & ~PS_EXCM); - (*regs)[TARGET_REG_LBEG] = tswapreg(env->sregs[LBEG]); - (*regs)[TARGET_REG_LEND] = tswapreg(env->sregs[LEND]); - (*regs)[TARGET_REG_LCOUNT] = tswapreg(env->sregs[LCOUNT]); - (*regs)[TARGET_REG_SAR] = tswapreg(env->sregs[SAR]); - (*regs)[TARGET_REG_WINDOWSTART] = tswapreg(env->sregs[WINDOW_START]); - (*regs)[TARGET_REG_WINDOWBASE] = tswapreg(env->sregs[WINDOW_BASE]); - (*regs)[TARGET_REG_THREADPTR] = tswapreg(env->uregs[THREADPTR]); - xtensa_sync_phys_from_window((CPUXtensaState *)env); - for (i = 0; i < env->config->nareg; ++i) { - (*regs)[TARGET_REG_AR0 + i] = tswapreg(env->phys_regs[i]); - } -} - -#define USE_ELF_CORE_DUMP -#define ELF_EXEC_PAGESIZE 4096 - -#endif /* TARGET_XTENSA */ - -#ifdef TARGET_HEXAGON - -#define ELF_CLASS ELFCLASS32 -#define ELF_ARCH EM_HEXAGON +/* + * Return the base personality without flags. 
+ */ +#define personality(pers) (pers & PER_MASK) -static inline void init_thread(struct target_pt_regs *regs, - struct image_info *infop) +int info_is_fdpic(struct image_info *info) { - regs->sepc = infop->entry; - regs->sp = infop->start_stack; + return info->personality == PER_LINUX_FDPIC; } -#endif /* TARGET_HEXAGON */ - -#ifndef ELF_BASE_PLATFORM -#define ELF_BASE_PLATFORM (NULL) -#endif - -#ifndef ELF_PLATFORM -#define ELF_PLATFORM (NULL) +#if TARGET_BIG_ENDIAN +#define ELF_DATA ELFDATA2MSB +#else +#define ELF_DATA ELFDATA2LSB #endif -#ifndef ELF_MACHINE -#define ELF_MACHINE ELF_ARCH +#ifdef USE_UID16 +typedef abi_ushort target_uid_t; +typedef abi_ushort target_gid_t; +#else +typedef abi_uint target_uid_t; +typedef abi_uint target_gid_t; #endif +typedef abi_int target_pid_t; -#ifndef elf_check_arch -#define elf_check_arch(x) ((x) == ELF_ARCH) +#ifndef elf_check_machine +#define elf_check_machine(x) ((x) == ELF_MACHINE) #endif #ifndef elf_check_abi #define elf_check_abi(x) (1) #endif -#ifndef ELF_HWCAP -#define ELF_HWCAP 0 -#endif - #ifndef STACK_GROWS_DOWN #define STACK_GROWS_DOWN 1 #endif @@ -2139,59 +147,36 @@ static inline void init_thread(struct target_pt_regs *regs, #define EXSTACK_DEFAULT false #endif -#include "elf.h" - -/* We must delay the following stanzas until after "elf.h". */ -#if defined(TARGET_AARCH64) - -static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, - const uint32_t *data, - struct image_info *info, - Error **errp) -{ - if (pr_type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) { - if (pr_datasz != sizeof(uint32_t)) { - error_setg(errp, "Ill-formed GNU_PROPERTY_AARCH64_FEATURE_1_AND"); - return false; - } - /* We will extract GNU_PROPERTY_AARCH64_FEATURE_1_BTI later. */ - info->note_flags = *data; - } - return true; -} -#define ARCH_USE_GNU_PROPERTY 1 - -#else +/* + * Provide fallback definitions that the target may omit. 
+ * One way or another, we'll get a link error if the setting of + * HAVE_* doesn't match the implementation. + */ +#ifndef HAVE_ELF_HWCAP +abi_ulong get_elf_hwcap(CPUState *cs) { return 0; } +#endif +#ifndef HAVE_ELF_HWCAP2 +abi_ulong get_elf_hwcap2(CPUState *cs) { g_assert_not_reached(); } +#define HAVE_ELF_HWCAP2 0 +#endif +#ifndef HAVE_ELF_PLATFORM +const char *get_elf_platform(CPUState *cs) { return NULL; } +#endif +#ifndef HAVE_ELF_BASE_PLATFORM +const char *get_elf_base_platform(CPUState *cs) { return NULL; } +#endif -static bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, - const uint32_t *data, - struct image_info *info, - Error **errp) +#ifndef HAVE_ELF_GNU_PROPERTY +bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, + const uint32_t *data, struct image_info *info, + Error **errp) { g_assert_not_reached(); } -#define ARCH_USE_GNU_PROPERTY 0 - +#define HAVE_ELF_GNU_PROPERTY 0 #endif -struct exec -{ - unsigned int a_info; /* Use macros N_MAGIC, etc for access */ - unsigned int a_text; /* length of text, in bytes */ - unsigned int a_data; /* length of data, in bytes */ - unsigned int a_bss; /* length of uninitialized data area, in bytes */ - unsigned int a_syms; /* length of symbol table data in file, in bytes */ - unsigned int a_entry; /* start address */ - unsigned int a_trsize; /* length of relocation info for text, in bytes */ - unsigned int a_drsize; /* length of relocation info for data, in bytes */ -}; - - -#define N_MAGIC(exec) ((exec).a_info & 0xffff) -#define OMAGIC 0407 -#define NMAGIC 0410 -#define ZMAGIC 0413 -#define QMAGIC 0314 +#include "elf.h" #define DLINFO_ITEMS 16 @@ -2286,9 +271,9 @@ static void bswap_mips_abiflags(Mips_elf_abiflags_v0 *abiflags) } #endif -#ifdef USE_ELF_CORE_DUMP +#ifdef HAVE_ELF_CORE_DUMP static int elf_core_dump(int, const CPUArchState *); -#endif /* USE_ELF_CORE_DUMP */ +#endif /* HAVE_ELF_CORE_DUMP */ static void load_symbols(struct elfhdr *hdr, const ImageSource *src, abi_ulong 
load_bias); @@ -2309,7 +294,7 @@ static bool elf_check_ident(struct elfhdr *ehdr) This has to wait until after bswapping the header. */ static bool elf_check_ehdr(struct elfhdr *ehdr) { - return (elf_check_arch(ehdr->e_machine) + return (elf_check_machine(ehdr->e_machine) && elf_check_abi(ehdr->e_flags) && ehdr->e_ehsize == sizeof(struct elfhdr) && ehdr->e_phentsize == sizeof(struct elf_phdr) @@ -2592,7 +577,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, } u_base_platform = 0; - k_base_platform = ELF_BASE_PLATFORM; + k_base_platform = get_elf_base_platform(thread_cpu); if (k_base_platform) { size_t len = strlen(k_base_platform) + 1; if (STACK_GROWS_DOWN) { @@ -2608,7 +593,7 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, } u_platform = 0; - k_platform = ELF_PLATFORM; + k_platform = get_elf_platform(thread_cpu); if (k_platform) { size_t len = strlen(k_platform) + 1; if (STACK_GROWS_DOWN) { @@ -2660,9 +645,9 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, #ifdef DLINFO_ARCH_ITEMS size += DLINFO_ARCH_ITEMS * 2; #endif -#ifdef ELF_HWCAP2 - size += 2; -#endif + if (HAVE_ELF_HWCAP2) { + size += 2; + } info->auxv_len = size * n; size += envc + argc + 2; @@ -2716,16 +701,15 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, NEW_AUX_ENT(AT_EUID, (abi_ulong) geteuid()); NEW_AUX_ENT(AT_GID, (abi_ulong) getgid()); NEW_AUX_ENT(AT_EGID, (abi_ulong) getegid()); - NEW_AUX_ENT(AT_HWCAP, (abi_ulong) ELF_HWCAP); + NEW_AUX_ENT(AT_HWCAP, get_elf_hwcap(thread_cpu)); NEW_AUX_ENT(AT_CLKTCK, (abi_ulong) sysconf(_SC_CLK_TCK)); NEW_AUX_ENT(AT_RANDOM, (abi_ulong) u_rand_bytes); NEW_AUX_ENT(AT_SECURE, (abi_ulong) qemu_getauxval(AT_SECURE)); NEW_AUX_ENT(AT_EXECFN, info->file_string); -#ifdef ELF_HWCAP2 - NEW_AUX_ENT(AT_HWCAP2, (abi_ulong) ELF_HWCAP2); -#endif - + if (HAVE_ELF_HWCAP2) { + NEW_AUX_ENT(AT_HWCAP2, get_elf_hwcap2(thread_cpu)); + } if (u_base_platform) { NEW_AUX_ENT(AT_BASE_PLATFORM,
u_base_platform); } @@ -2771,8 +755,8 @@ static abi_ulong create_elf_tables(abi_ulong p, int argc, int envc, #else #define HI_COMMPAGE 0 #define LO_COMMPAGE -1 -#ifndef INIT_GUEST_COMMPAGE -#define init_guest_commpage() true +#ifndef HAVE_GUEST_COMMPAGE +bool init_guest_commpage(void) { return true; } #endif #endif @@ -3207,7 +1191,7 @@ static bool parse_elf_properties(const ImageSource *src, uint32_t prev_type; /* Unless the arch requires properties, ignore them. */ - if (!ARCH_USE_GNU_PROPERTY) { + if (!HAVE_ELF_GNU_PROPERTY) { return true; } @@ -3630,14 +1614,17 @@ static void load_elf_interp(const char *filename, struct image_info *info, load_elf_image(filename, &src, info, &ehdr, NULL); } -#ifndef vdso_image_info +#ifndef HAVE_VDSO_IMAGE_INFO +const VdsoImageInfo *get_vdso_image_info(uint32_t elf_flags) +{ #ifdef VDSO_HEADER #include VDSO_HEADER -#define vdso_image_info(flags) &vdso_image_info + return &vdso_image_info; #else -#define vdso_image_info(flags) NULL -#endif /* VDSO_HEADER */ -#endif /* vdso_image_info */ + return NULL; +#endif +} +#endif /* HAVE_VDSO_IMAGE_INFO */ static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) { @@ -3672,6 +1659,11 @@ static void load_elf_vdso(struct image_info *info, const VdsoImageInfo *vdso) if (vdso->rt_sigreturn_ofs) { default_rt_sigreturn = load_addr + vdso->rt_sigreturn_ofs; } + if (vdso->sigreturn_region_start_ofs) { + vdso_sigreturn_region_start = + load_addr + vdso->sigreturn_region_start_ofs; + vdso_sigreturn_region_end = load_addr + vdso->sigreturn_region_end_ofs; + } /* Remove write from VDSO segment. */ target_mprotect(info->start_data, info->end_data - info->start_data, @@ -3968,7 +1960,7 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) * Load a vdso if available, which will amongst other things contain the * signal trampolines. Otherwise, allocate a separate page for them. 
*/ - const VdsoImageInfo *vdso = vdso_image_info(info->elf_flags); + const VdsoImageInfo *vdso = get_vdso_image_info(info->elf_flags); if (vdso) { load_elf_vdso(&vdso_info, vdso); info->vdso = vdso_info.load_bias; @@ -3982,6 +1974,8 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) setup_sigtramp(tramp_page); target_mprotect(tramp_page, TARGET_PAGE_SIZE, PROT_READ | PROT_EXEC); + vdso_sigreturn_region_start = tramp_page; + vdso_sigreturn_region_end = tramp_page + TARGET_PAGE_SIZE; } bprm->p = create_elf_tables(bprm->p, bprm->argc, bprm->envc, &ehdr, info, @@ -3999,14 +1993,14 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) g_free(elf_interpreter); } -#ifdef USE_ELF_CORE_DUMP +#ifdef HAVE_ELF_CORE_DUMP bprm->core_dump = &elf_core_dump; #endif return 0; } -#ifdef USE_ELF_CORE_DUMP +#ifdef HAVE_ELF_CORE_DUMP /* * Definitions to generate Intel SVR4-like core files. @@ -4022,23 +2016,18 @@ int load_elf_binary(struct linux_binprm *bprm, struct image_info *info) * Core dump code is copied from linux kernel (fs/binfmt_elf.c). * * Porting ELF coredump for target is (quite) simple process. First you - * define USE_ELF_CORE_DUMP in target ELF code (where init_thread() for + * define HAVE_ELF_CORE_DUMP in target ELF code (where init_thread() for * the target resides): * - * #define USE_ELF_CORE_DUMP + * #define HAVE_ELF_CORE_DUMP * - * Next you define type of register set used for dumping. ELF specification - * says that it needs to be array of elf_greg_t that has size of ELF_NREG. - * - * typedef target_elf_greg_t; - * #define ELF_NREG - * typedef taret_elf_greg_t target_elf_gregset_t[ELF_NREG]; + * Next you define type of register set used for dumping: + * typedef struct target_elf_gregset_t { ... } target_elf_gregset_t; * * Last step is to implement target specific function that copies registers * from given cpu into just specified register set. 
Prototype is: * - * static void elf_core_copy_regs(taret_elf_gregset_t *regs, - * const CPUArchState *env); + * void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUArchState *env); * * Parameters: * regs - copy register values into here (allocated and zeroed by caller) @@ -4138,8 +2127,8 @@ static void bswap_note(struct elf_note *en) */ static size_t vma_dump_size(vaddr start, vaddr end, int flags) { - /* The area must be readable. */ - if (!(flags & PAGE_READ)) { + /* The area must be readable and dumpable. */ + if (!(flags & PAGE_READ) || (flags & PAGE_DONTDUMP)) { return 0; } @@ -4382,7 +2371,7 @@ static int wmr_fill_region_phdr(void *opaque, vaddr start, phdr->p_flags = (flags & PAGE_READ ? PF_R : 0) | (flags & PAGE_WRITE_ORG ? PF_W : 0) | (flags & PAGE_EXEC ? PF_X : 0); - phdr->p_align = ELF_EXEC_PAGESIZE; + phdr->p_align = TARGET_PAGE_SIZE; bswap_phdr(phdr, 1); d->phdr = phdr + 1; @@ -4490,7 +2479,7 @@ static int elf_core_dump(int signr, const CPUArchState *env) offset += size_note("CORE", sizeof(struct target_elf_prpsinfo)); offset += size_note("CORE", sizeof(struct target_elf_prstatus)) * cpus; note_size = offset - note_offset; - data_offset = ROUND_UP(offset, ELF_EXEC_PAGESIZE); + data_offset = TARGET_PAGE_ALIGN(offset); /* Do not dump if the corefile size exceeds the limit. 
*/ if (dumpsize.rlim_cur != RLIM_INFINITY @@ -4569,9 +2558,4 @@ static int elf_core_dump(int signr, const CPUArchState *env) } return ret; } -#endif /* USE_ELF_CORE_DUMP */ - -void do_init_thread(struct target_pt_regs *regs, struct image_info *infop) -{ - init_thread(regs, infop); -} +#endif /* HAVE_ELF_CORE_DUMP */ diff --git a/linux-user/gen-vdso-elfn.c.inc b/linux-user/gen-vdso-elfn.c.inc index b47019e136c45..c2677a146c679 100644 --- a/linux-user/gen-vdso-elfn.c.inc +++ b/linux-user/gen-vdso-elfn.c.inc @@ -84,9 +84,12 @@ static void elfN(search_symtab)(ElfN(Shdr) *shdr, unsigned sym_idx, if (sigreturn_sym && strcmp(sigreturn_sym, name) == 0) { sigreturn_addr = sym.st_value; - } - if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) { + } else if (rt_sigreturn_sym && strcmp(rt_sigreturn_sym, name) == 0) { rt_sigreturn_addr = sym.st_value; + } else if (strcmp("sigreturn_region_start", name) == 0) { + sigreturn_region_start_addr = sym.st_value; + } else if (strcmp("sigreturn_region_end", name) == 0) { + sigreturn_region_end_addr = sym.st_value; } } } diff --git a/linux-user/gen-vdso.c b/linux-user/gen-vdso.c index aeaa927db8fc9..d6a2cdaa835cc 100644 --- a/linux-user/gen-vdso.c +++ b/linux-user/gen-vdso.c @@ -36,6 +36,8 @@ static const char *rt_sigreturn_sym; static unsigned sigreturn_addr; static unsigned rt_sigreturn_addr; +static unsigned sigreturn_region_start_addr; +static unsigned sigreturn_region_end_addr; #define N 32 #define elfN(x) elf32_##x @@ -215,6 +217,10 @@ int main(int argc, char **argv) fprintf(outf, " .reloc_count = ARRAY_SIZE(%s_relocs),\n", prefix); fprintf(outf, " .sigreturn_ofs = 0x%x,\n", sigreturn_addr); fprintf(outf, " .rt_sigreturn_ofs = 0x%x,\n", rt_sigreturn_addr); + fprintf(outf, " .sigreturn_region_start_ofs = 0x%x,\n", + sigreturn_region_start_addr); + fprintf(outf, " .sigreturn_region_end_ofs = 0x%x,\n", + sigreturn_region_end_addr); fprintf(outf, "};\n"); ret = EXIT_SUCCESS; diff --git a/linux-user/hexagon/cpu_loop.c 
b/linux-user/hexagon/cpu_loop.c index e18a0183b5016..1941f4c9c1625 100644 --- a/linux-user/hexagon/cpu_loop.c +++ b/linux-user/hexagon/cpu_loop.c @@ -36,7 +36,7 @@ void cpu_loop(CPUHexagonState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_INTERRUPT: @@ -79,9 +79,11 @@ void cpu_loop(CPUHexagonState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - env->gpr[HEX_REG_PC] = regs->sepc; - env->gpr[HEX_REG_SP] = regs->sp; + CPUArchState *env = cpu_env(cs); + + env->gpr[HEX_REG_PC] = info->entry; + env->gpr[HEX_REG_SP] = info->start_stack; env->gpr[HEX_REG_USR] = 0x56000; } diff --git a/linux-user/hexagon/elfload.c b/linux-user/hexagon/elfload.c new file mode 100644 index 0000000000000..d8b545032ab61 --- /dev/null +++ b/linux-user/hexagon/elfload.c @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + static char buf[32]; + int err; + + /* For now, treat anything newer than v5 as a v73 */ + /* FIXME - Disable instructions that are newer than the specified arch */ + if (eflags == 0x04 || /* v5 */ + eflags == 0x05 || /* v55 */ + eflags == 0x60 || /* v60 */ + eflags == 0x61 || /* v61 */ + eflags == 0x62 || /* v62 */ + eflags == 0x65 || /* v65 */ + eflags == 0x66 || /* v66 */ + eflags == 0x67 || /* v67 */ + eflags == 0x8067 || /* v67t */ + eflags == 0x68 || /* v68 */ + eflags == 0x69 || /* v69 */ + eflags == 0x71 || /* v71 */ + eflags == 0x8071 || /* v71t */ + eflags == 0x73 /* v73 */ + ) { + return "v73"; + } + + err = snprintf(buf, sizeof(buf), "unknown (0x%x)", eflags); + return err >= 0 && err < sizeof(buf) ? 
buf : "unknown"; +} diff --git a/linux-user/hexagon/signal.c b/linux-user/hexagon/signal.c index 492b51f155005..183ecfaa19a8b 100644 --- a/linux-user/hexagon/signal.c +++ b/linux-user/hexagon/signal.c @@ -23,30 +23,32 @@ #include "signal-common.h" #include "linux-user/trace.h" -struct target_sigcontext { - target_ulong r0, r1, r2, r3; - target_ulong r4, r5, r6, r7; - target_ulong r8, r9, r10, r11; - target_ulong r12, r13, r14, r15; - target_ulong r16, r17, r18, r19; - target_ulong r20, r21, r22, r23; - target_ulong r24, r25, r26, r27; - target_ulong r28, r29, r30, r31; - target_ulong sa0; - target_ulong lc0; - target_ulong sa1; - target_ulong lc1; - target_ulong m0; - target_ulong m1; - target_ulong usr; - target_ulong gp; - target_ulong ugp; - target_ulong pc; - target_ulong cause; - target_ulong badva; - target_ulong pred[NUM_PREGS]; +struct target_user_regs_struct { + abi_ulong gpr[32]; + abi_ulong sa0; + abi_ulong lc0; + abi_ulong sa1; + abi_ulong lc1; + abi_ulong m0; + abi_ulong m1; + abi_ulong usr; + abi_ulong p3_0; + abi_ulong gp; + abi_ulong ugp; + abi_ulong pc; + abi_ulong cause; + abi_ulong badva; + abi_ulong cs0; + abi_ulong cs1; + abi_ulong pad1; /* pad to 48 words */ }; +QEMU_BUILD_BUG_ON(sizeof(struct target_user_regs_struct) != 48 * 4); + +struct target_sigcontext { + struct target_user_regs_struct sc_regs; +} QEMU_ALIGNED(8); + struct target_ucontext { unsigned long uc_flags; target_ulong uc_link; /* target pointer */ @@ -76,53 +78,34 @@ static abi_ulong get_sigframe(struct target_sigaction *ka, static void setup_sigcontext(struct target_sigcontext *sc, CPUHexagonState *env) { - __put_user(env->gpr[HEX_REG_R00], &sc->r0); - __put_user(env->gpr[HEX_REG_R01], &sc->r1); - __put_user(env->gpr[HEX_REG_R02], &sc->r2); - __put_user(env->gpr[HEX_REG_R03], &sc->r3); - __put_user(env->gpr[HEX_REG_R04], &sc->r4); - __put_user(env->gpr[HEX_REG_R05], &sc->r5); - __put_user(env->gpr[HEX_REG_R06], &sc->r6); - __put_user(env->gpr[HEX_REG_R07], &sc->r7); - 
__put_user(env->gpr[HEX_REG_R08], &sc->r8); - __put_user(env->gpr[HEX_REG_R09], &sc->r9); - __put_user(env->gpr[HEX_REG_R10], &sc->r10); - __put_user(env->gpr[HEX_REG_R11], &sc->r11); - __put_user(env->gpr[HEX_REG_R12], &sc->r12); - __put_user(env->gpr[HEX_REG_R13], &sc->r13); - __put_user(env->gpr[HEX_REG_R14], &sc->r14); - __put_user(env->gpr[HEX_REG_R15], &sc->r15); - __put_user(env->gpr[HEX_REG_R16], &sc->r16); - __put_user(env->gpr[HEX_REG_R17], &sc->r17); - __put_user(env->gpr[HEX_REG_R18], &sc->r18); - __put_user(env->gpr[HEX_REG_R19], &sc->r19); - __put_user(env->gpr[HEX_REG_R20], &sc->r20); - __put_user(env->gpr[HEX_REG_R21], &sc->r21); - __put_user(env->gpr[HEX_REG_R22], &sc->r22); - __put_user(env->gpr[HEX_REG_R23], &sc->r23); - __put_user(env->gpr[HEX_REG_R24], &sc->r24); - __put_user(env->gpr[HEX_REG_R25], &sc->r25); - __put_user(env->gpr[HEX_REG_R26], &sc->r26); - __put_user(env->gpr[HEX_REG_R27], &sc->r27); - __put_user(env->gpr[HEX_REG_R28], &sc->r28); - __put_user(env->gpr[HEX_REG_R29], &sc->r29); - __put_user(env->gpr[HEX_REG_R30], &sc->r30); - __put_user(env->gpr[HEX_REG_R31], &sc->r31); - __put_user(env->gpr[HEX_REG_SA0], &sc->sa0); - __put_user(env->gpr[HEX_REG_LC0], &sc->lc0); - __put_user(env->gpr[HEX_REG_SA1], &sc->sa1); - __put_user(env->gpr[HEX_REG_LC1], &sc->lc1); - __put_user(env->gpr[HEX_REG_M0], &sc->m0); - __put_user(env->gpr[HEX_REG_M1], &sc->m1); - __put_user(env->gpr[HEX_REG_USR], &sc->usr); - __put_user(env->gpr[HEX_REG_GP], &sc->gp); - __put_user(env->gpr[HEX_REG_UGP], &sc->ugp); - __put_user(env->gpr[HEX_REG_PC], &sc->pc); + abi_ulong preds = 0; - int i; - for (i = 0; i < NUM_PREGS; i++) { - __put_user(env->pred[i], &(sc->pred[i])); + for (int i = 0; i < 32; i++) { + __put_user(env->gpr[HEX_REG_R00 + i], &sc->sc_regs.gpr[i]); + } + __put_user(env->gpr[HEX_REG_SA0], &sc->sc_regs.sa0); + __put_user(env->gpr[HEX_REG_LC0], &sc->sc_regs.lc0); + __put_user(env->gpr[HEX_REG_SA1], &sc->sc_regs.sa1); + __put_user(env->gpr[HEX_REG_LC1], 
&sc->sc_regs.lc1); + __put_user(env->gpr[HEX_REG_M0], &sc->sc_regs.m0); + __put_user(env->gpr[HEX_REG_M1], &sc->sc_regs.m1); + __put_user(env->gpr[HEX_REG_USR], &sc->sc_regs.usr); + __put_user(env->gpr[HEX_REG_GP], &sc->sc_regs.gp); + __put_user(env->gpr[HEX_REG_UGP], &sc->sc_regs.ugp); + __put_user(env->gpr[HEX_REG_PC], &sc->sc_regs.pc); + + /* Consolidate predicates into p3_0 */ + for (int i = 0; i < NUM_PREGS; i++) { + preds |= (env->pred[i] & 0xff) << (i * 8); } + __put_user(preds, &sc->sc_regs.p3_0); + + /* Set cause and badva to 0 - these are set by kernel on exceptions */ + __put_user(0, &sc->sc_regs.cause); + __put_user(0, &sc->sc_regs.badva); + + __put_user(env->gpr[HEX_REG_CS0], &sc->sc_regs.cs0); + __put_user(env->gpr[HEX_REG_CS1], &sc->sc_regs.cs1); } static void setup_ucontext(struct target_ucontext *uc, @@ -192,53 +175,30 @@ void setup_rt_frame(int sig, struct target_sigaction *ka, static void restore_sigcontext(CPUHexagonState *env, struct target_sigcontext *sc) { - __get_user(env->gpr[HEX_REG_R00], &sc->r0); - __get_user(env->gpr[HEX_REG_R01], &sc->r1); - __get_user(env->gpr[HEX_REG_R02], &sc->r2); - __get_user(env->gpr[HEX_REG_R03], &sc->r3); - __get_user(env->gpr[HEX_REG_R04], &sc->r4); - __get_user(env->gpr[HEX_REG_R05], &sc->r5); - __get_user(env->gpr[HEX_REG_R06], &sc->r6); - __get_user(env->gpr[HEX_REG_R07], &sc->r7); - __get_user(env->gpr[HEX_REG_R08], &sc->r8); - __get_user(env->gpr[HEX_REG_R09], &sc->r9); - __get_user(env->gpr[HEX_REG_R10], &sc->r10); - __get_user(env->gpr[HEX_REG_R11], &sc->r11); - __get_user(env->gpr[HEX_REG_R12], &sc->r12); - __get_user(env->gpr[HEX_REG_R13], &sc->r13); - __get_user(env->gpr[HEX_REG_R14], &sc->r14); - __get_user(env->gpr[HEX_REG_R15], &sc->r15); - __get_user(env->gpr[HEX_REG_R16], &sc->r16); - __get_user(env->gpr[HEX_REG_R17], &sc->r17); - __get_user(env->gpr[HEX_REG_R18], &sc->r18); - __get_user(env->gpr[HEX_REG_R19], &sc->r19); - __get_user(env->gpr[HEX_REG_R20], &sc->r20); - 
__get_user(env->gpr[HEX_REG_R21], &sc->r21); - __get_user(env->gpr[HEX_REG_R22], &sc->r22); - __get_user(env->gpr[HEX_REG_R23], &sc->r23); - __get_user(env->gpr[HEX_REG_R24], &sc->r24); - __get_user(env->gpr[HEX_REG_R25], &sc->r25); - __get_user(env->gpr[HEX_REG_R26], &sc->r26); - __get_user(env->gpr[HEX_REG_R27], &sc->r27); - __get_user(env->gpr[HEX_REG_R28], &sc->r28); - __get_user(env->gpr[HEX_REG_R29], &sc->r29); - __get_user(env->gpr[HEX_REG_R30], &sc->r30); - __get_user(env->gpr[HEX_REG_R31], &sc->r31); - __get_user(env->gpr[HEX_REG_SA0], &sc->sa0); - __get_user(env->gpr[HEX_REG_LC0], &sc->lc0); - __get_user(env->gpr[HEX_REG_SA1], &sc->sa1); - __get_user(env->gpr[HEX_REG_LC1], &sc->lc1); - __get_user(env->gpr[HEX_REG_M0], &sc->m0); - __get_user(env->gpr[HEX_REG_M1], &sc->m1); - __get_user(env->gpr[HEX_REG_USR], &sc->usr); - __get_user(env->gpr[HEX_REG_GP], &sc->gp); - __get_user(env->gpr[HEX_REG_UGP], &sc->ugp); - __get_user(env->gpr[HEX_REG_PC], &sc->pc); + abi_ulong preds; - int i; - for (i = 0; i < NUM_PREGS; i++) { - __get_user(env->pred[i], &(sc->pred[i])); + for (int i = 0; i < 32; i++) { + __get_user(env->gpr[HEX_REG_R00 + i], &sc->sc_regs.gpr[i]); } + __get_user(env->gpr[HEX_REG_SA0], &sc->sc_regs.sa0); + __get_user(env->gpr[HEX_REG_LC0], &sc->sc_regs.lc0); + __get_user(env->gpr[HEX_REG_SA1], &sc->sc_regs.sa1); + __get_user(env->gpr[HEX_REG_LC1], &sc->sc_regs.lc1); + __get_user(env->gpr[HEX_REG_M0], &sc->sc_regs.m0); + __get_user(env->gpr[HEX_REG_M1], &sc->sc_regs.m1); + __get_user(env->gpr[HEX_REG_USR], &sc->sc_regs.usr); + __get_user(env->gpr[HEX_REG_GP], &sc->sc_regs.gp); + __get_user(env->gpr[HEX_REG_UGP], &sc->sc_regs.ugp); + __get_user(env->gpr[HEX_REG_PC], &sc->sc_regs.pc); + + /* Restore predicates from p3_0 */ + __get_user(preds, &sc->sc_regs.p3_0); + for (int i = 0; i < NUM_PREGS; i++) { + env->pred[i] = (preds >> (i * 8)) & 0xff; + } + + __get_user(env->gpr[HEX_REG_CS0], &sc->sc_regs.cs0); + __get_user(env->gpr[HEX_REG_CS1], 
&sc->sc_regs.cs1); } static void restore_ucontext(CPUHexagonState *env, struct target_ucontext *uc) diff --git a/linux-user/hexagon/target_elf.h b/linux-user/hexagon/target_elf.h index 36056fc9f016e..f81ae3895a6b9 100644 --- a/linux-user/hexagon/target_elf.h +++ b/linux-user/hexagon/target_elf.h @@ -18,33 +18,7 @@ #ifndef HEXAGON_TARGET_ELF_H #define HEXAGON_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - static char buf[32]; - int err; - - /* For now, treat anything newer than v5 as a v73 */ - /* FIXME - Disable instructions that are newer than the specified arch */ - if (eflags == 0x04 || /* v5 */ - eflags == 0x05 || /* v55 */ - eflags == 0x60 || /* v60 */ - eflags == 0x61 || /* v61 */ - eflags == 0x62 || /* v62 */ - eflags == 0x65 || /* v65 */ - eflags == 0x66 || /* v66 */ - eflags == 0x67 || /* v67 */ - eflags == 0x8067 || /* v67t */ - eflags == 0x68 || /* v68 */ - eflags == 0x69 || /* v69 */ - eflags == 0x71 || /* v71 */ - eflags == 0x8071 || /* v71t */ - eflags == 0x73 /* v73 */ - ) { - return "v73"; - } - - err = snprintf(buf, sizeof(buf), "unknown (0x%x)", eflags); - return err >= 0 && err < sizeof(buf) ? 
buf : "unknown"; -} +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_HEXAGON #endif diff --git a/linux-user/hexagon/target_syscall.h b/linux-user/hexagon/target_syscall.h index 7f91a4abc77a5..d9c94737a5c86 100644 --- a/linux-user/hexagon/target_syscall.h +++ b/linux-user/hexagon/target_syscall.h @@ -18,11 +18,6 @@ #ifndef HEXAGON_TARGET_SYSCALL_H #define HEXAGON_TARGET_SYSCALL_H -struct target_pt_regs { - abi_long sepc; - abi_long sp; -}; - #define UNAME_MACHINE "hexagon" #define UNAME_MINIMUM_RELEASE "4.15.0" diff --git a/linux-user/hppa/cpu_loop.c b/linux-user/hppa/cpu_loop.c index 9abaad5ef81d0..356cb48acc300 100644 --- a/linux-user/hppa/cpu_loop.c +++ b/linux-user/hppa/cpu_loop.c @@ -119,7 +119,7 @@ void cpu_loop(CPUHPPAState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_SYSCALL: @@ -196,12 +196,16 @@ void cpu_loop(CPUHPPAState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; - for (i = 1; i < 32; i++) { - env->gr[i] = regs->gr[i]; - } - env->iaoq_f = regs->iaoq[0]; - env->iaoq_b = regs->iaoq[1]; + CPUArchState *env = cpu_env(cs); + + env->iaoq_f = info->entry | PRIV_USER; + env->iaoq_b = env->iaoq_f + 4; + env->gr[23] = 0; + env->gr[24] = info->argv; + env->gr[25] = info->argc; + /* The top-of-stack contains a linkage buffer. 
*/ + env->gr[30] = info->start_stack + 64; + env->gr[31] = info->entry; } diff --git a/linux-user/hppa/elfload.c b/linux-user/hppa/elfload.c new file mode 100644 index 0000000000000..4600708702077 --- /dev/null +++ b/linux-user/hppa/elfload.c @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "hppa"; +} + +const char *get_elf_platform(CPUState *cs) +{ + return "PARISC"; +} + +bool init_guest_commpage(void) +{ + /* If reserved_va, then we have already mapped 0 page on the host. */ + if (!reserved_va) { + void *want, *addr; + + want = g2h_untagged(LO_COMMPAGE); + addr = mmap(want, TARGET_PAGE_SIZE, PROT_NONE, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED_NOREPLACE, -1, 0); + if (addr == MAP_FAILED) { + perror("Allocating guest commpage"); + exit(EXIT_FAILURE); + } + if (addr != want) { + return false; + } + } + + /* + * On Linux, page zero is normally marked execute only + gateway. + * Normal read or write is supposed to fail (thus PROT_NONE above), + * but specific offsets have kernel code mapped to raise permissions + * and implement syscalls. Here, simply mark the page executable. + * Special case the entry points during translation (see do_page_zero). 
+ */ + page_set_flags(LO_COMMPAGE, LO_COMMPAGE | ~TARGET_PAGE_MASK, + PAGE_EXEC | PAGE_VALID, PAGE_VALID); + return true; +} diff --git a/linux-user/hppa/target_elf.h b/linux-user/hppa/target_elf.h index 19cae8bd65ddc..76930c9369ea6 100644 --- a/linux-user/hppa/target_elf.h +++ b/linux-user/hppa/target_elf.h @@ -7,8 +7,15 @@ #ifndef HPPA_TARGET_ELF_H #define HPPA_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "hppa"; -} + +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_PARISC + +#define HAVE_ELF_PLATFORM 1 + +#define LO_COMMPAGE 0 +#define STACK_GROWS_DOWN 0 +#define STACK_ALIGNMENT 64 +#define VDSO_HEADER "vdso.c.inc" + #endif diff --git a/linux-user/hppa/target_syscall.h b/linux-user/hppa/target_syscall.h index 9a8f8ca628175..4b21e85371330 100644 --- a/linux-user/hppa/target_syscall.h +++ b/linux-user/hppa/target_syscall.h @@ -1,24 +1,6 @@ #ifndef HPPA_TARGET_SYSCALL_H #define HPPA_TARGET_SYSCALL_H -struct target_pt_regs { - target_ulong gr[32]; - uint64_t fr[32]; - target_ulong sr[8]; - target_ulong iasq[2]; - target_ulong iaoq[2]; - target_ulong cr27; - target_ulong __pad0; - target_ulong orig_r28; - target_ulong ksp; - target_ulong kpc; - target_ulong sar; - target_ulong iir; - target_ulong isr; - target_ulong ior; - target_ulong ipsw; -}; - #define UNAME_MACHINE "parisc" #define UNAME_MINIMUM_RELEASE "2.6.32" #define TARGET_CLONE_BACKWARDS diff --git a/linux-user/hppa/vdso.S b/linux-user/hppa/vdso.S index 5be14d2f7009a..a6f8da2981ef9 100644 --- a/linux-user/hppa/vdso.S +++ b/linux-user/hppa/vdso.S @@ -156,8 +156,10 @@ __kernel_sigtramp_rt: ldi 0, %r25 ldi __NR_rt_sigreturn, %r20 +sigreturn_region_start: be,l 0x100(%sr2, %r0), %sr0, %r31 nop +sigreturn_region_end: .cfi_endproc .size __kernel_sigtramp_rt, . 
- __kernel_sigtramp_rt diff --git a/linux-user/hppa/vdso.so b/linux-user/hppa/vdso.so index e1ddd70c37e9e..68baf80fd3943 100755 Binary files a/linux-user/hppa/vdso.so and b/linux-user/hppa/vdso.so differ diff --git a/linux-user/i386/cpu_loop.c b/linux-user/i386/cpu_loop.c index d96d5553fafc9..f3f58576af5c6 100644 --- a/linux-user/i386/cpu_loop.c +++ b/linux-user/i386/cpu_loop.c @@ -214,7 +214,7 @@ void cpu_loop(CPUX86State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch(trapnr) { case 0x80: @@ -331,11 +331,10 @@ static void target_cpu_free(void *obj) g_free(obj); } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cpu, struct image_info *info) { - CPUState *cpu = env_cpu(env); + CPUArchState *env = cpu_env(cpu); bool is64 = (env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM) != 0; - int i; OBJECT(cpu)->free = target_cpu_free; env->cr[0] = CR0_PG_MASK | CR0_WP_MASK | CR0_PE_MASK; @@ -361,28 +360,25 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) /* flags setup : we activate the IRQs by default as in user mode */ env->eflags |= IF_MASK; - /* linux register setup */ -#ifndef TARGET_ABI32 - env->regs[R_EAX] = regs->rax; - env->regs[R_EBX] = regs->rbx; - env->regs[R_ECX] = regs->rcx; - env->regs[R_EDX] = regs->rdx; - env->regs[R_ESI] = regs->rsi; - env->regs[R_EDI] = regs->rdi; - env->regs[R_EBP] = regs->rbp; - env->regs[R_ESP] = regs->rsp; - env->eip = regs->rip; -#else - env->regs[R_EAX] = regs->eax; - env->regs[R_EBX] = regs->ebx; - env->regs[R_ECX] = regs->ecx; - env->regs[R_EDX] = regs->edx; - env->regs[R_ESI] = regs->esi; - env->regs[R_EDI] = regs->edi; - env->regs[R_EBP] = regs->ebp; - env->regs[R_ESP] = regs->esp; - env->eip = regs->eip; -#endif + /* + * Linux register setup. 
+ * + * SVR4/i386 ABI (pages 3-31, 3-32) says that when the program + * starts %edx contains a pointer to a function which might be + * registered using `atexit'. This provides a mean for the + * dynamic linker to call DT_FINI functions for shared libraries + * that have been loaded before the code runs. + * A value of 0 tells we have no such handler. + * + * This applies to x86_64 as well as i386. + * + * That said, the kernel's ELF_PLAT_INIT simply zeros all of the general + * registers. Note that x86_cpu_reset_hold will set %edx to cpuid_version; + * clear all general registers defensively. + */ + memset(env->regs, 0, sizeof(env->regs)); + env->regs[R_ESP] = info->start_stack; + env->eip = info->entry; /* linux interrupt setup */ #ifndef TARGET_ABI32 @@ -394,7 +390,7 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, -1, 0); idt_table = g2h_untagged(env->idt.base); - for (i = 0; i < 20; i++) { + for (int i = 0; i < 20; i++) { set_idt(i, 0, is64); } set_idt(3, 3, is64); diff --git a/linux-user/i386/elfload.c b/linux-user/i386/elfload.c new file mode 100644 index 0000000000000..26b12001a3e0b --- /dev/null +++ b/linux-user/i386/elfload.c @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "max"; +} + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + return cpu_env(cs)->features[FEAT_1_EDX]; +} + +const char *get_elf_platform(CPUState *cs) +{ + static const char elf_platform[4][5] = { "i386", "i486", "i586", "i686" }; + int family = object_property_get_int(OBJECT(cs), "family", NULL); + + family = MAX(MIN(family, 6), 3); + return elf_platform[family - 3]; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUX86State *env) +{ + r->pt.bx = tswapal(env->regs[R_EBX]); + r->pt.cx = tswapal(env->regs[R_ECX]); + r->pt.dx = 
tswapal(env->regs[R_EDX]); + r->pt.si = tswapal(env->regs[R_ESI]); + r->pt.di = tswapal(env->regs[R_EDI]); + r->pt.bp = tswapal(env->regs[R_EBP]); + r->pt.ax = tswapal(env->regs[R_EAX]); + r->pt.ds = tswapal(env->segs[R_DS].selector & 0xffff); + r->pt.es = tswapal(env->segs[R_ES].selector & 0xffff); + r->pt.fs = tswapal(env->segs[R_FS].selector & 0xffff); + r->pt.gs = tswapal(env->segs[R_GS].selector & 0xffff); + r->pt.orig_ax = tswapal(get_task_state(env_cpu_const(env))->orig_ax); + r->pt.ip = tswapal(env->eip); + r->pt.cs = tswapal(env->segs[R_CS].selector & 0xffff); + r->pt.flags = tswapal(env->eflags); + r->pt.sp = tswapal(env->regs[R_ESP]); + r->pt.ss = tswapal(env->segs[R_SS].selector & 0xffff); +} diff --git a/linux-user/i386/target_elf.h b/linux-user/i386/target_elf.h index 238a9aba738a0..eafac8f382179 100644 --- a/linux-user/i386/target_elf.h +++ b/linux-user/i386/target_elf.h @@ -7,8 +7,41 @@ #ifndef I386_TARGET_ELF_H #define I386_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "max"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_386 +#define EXSTACK_DEFAULT true +#define VDSO_HEADER "vdso.c.inc" + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/x86/include/asm/elf.h, where elf_gregset_t + * is mapped to struct user_regs_struct via sizeof. + */ +typedef struct target_elf_gregset_t { + struct target_user_regs_struct pt; +} target_elf_gregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_machine(x) ((x) == EM_386 || (x) == EM_486) + +/* + * i386 is the only target which supplies AT_SYSINFO for the vdso. + * All others only supply AT_SYSINFO_EHDR. 
+ */ +#define DLINFO_ARCH_ITEMS (vdso_info != NULL) +#define ARCH_DLINFO \ + do { \ + if (vdso_info) { \ + NEW_AUX_ENT(AT_SYSINFO, vdso_info->entry); \ + } \ + } while (0) + #endif diff --git a/linux-user/i386/target_ptrace.h b/linux-user/i386/target_ptrace.h new file mode 100644 index 0000000000000..bc57926f2542a --- /dev/null +++ b/linux-user/i386/target_ptrace.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef I386_TARGET_PTRACE_H +#define I386_TARGET_PTRACE_H + +/* + * Note that arch/x86/include/uapi/asm/ptrace.h (struct pt_regs) and + * arch/x86/include/asm/user_32.h (struct user_regs_struct) have the + * same layout, though the exact types differ (int vs long vs unsigned). + * Define user_regs_struct because that's what's actually used. + */ +struct target_user_regs_struct { + abi_ulong bx; + abi_ulong cx; + abi_ulong dx; + abi_ulong si; + abi_ulong di; + abi_ulong bp; + abi_ulong ax; + abi_ulong ds; + abi_ulong es; + abi_ulong fs; + abi_ulong gs; + abi_ulong orig_ax; + abi_ulong ip; + abi_ulong cs; + abi_ulong flags; + abi_ulong sp; + abi_ulong ss; +}; + +#endif /* I386_TARGET_PTRACE_H */ diff --git a/linux-user/i386/target_syscall.h b/linux-user/i386/target_syscall.h index aaade06b136ac..c214a909a6ff0 100644 --- a/linux-user/i386/target_syscall.h +++ b/linux-user/i386/target_syscall.h @@ -5,24 +5,6 @@ #define __USER_CS (0x23) #define __USER_DS (0x2B) -struct target_pt_regs { - long ebx; - long ecx; - long edx; - long esi; - long edi; - long ebp; - long eax; - int xds; - int xes; - long orig_eax; - long eip; - int xcs; - long eflags; - long esp; - int xss; -}; - /* ioctls */ #define TARGET_LDT_ENTRIES 8192 diff --git a/linux-user/i386/vdso.S b/linux-user/i386/vdso.S index e7a1f333a1ee6..8df77b5a948a3 100644 --- a/linux-user/i386/vdso.S +++ b/linux-user/i386/vdso.S @@ -114,6 +114,7 @@ vdso_syscall3 __vdso_getcpu, __NR_gettimeofday */ nop +sigreturn_region_start: __kernel_sigreturn: popl %eax /* pop sig */ .cfi_adjust_cfa_offset -4 
@@ -128,6 +129,7 @@ __kernel_rt_sigreturn: movl $__NR_rt_sigreturn, %eax int $0x80 endf __kernel_rt_sigreturn +sigreturn_region_end: .cfi_endproc diff --git a/linux-user/i386/vdso.so b/linux-user/i386/vdso.so index bdece5dfcf8da..e01c3818d0d2e 100755 Binary files a/linux-user/i386/vdso.so and b/linux-user/i386/vdso.so differ diff --git a/linux-user/linuxload.c b/linux-user/linuxload.c index 37f132be4aff0..85d700953e233 100644 --- a/linux-user/linuxload.c +++ b/linux-user/linuxload.c @@ -139,8 +139,7 @@ abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, } int loader_exec(int fdexec, const char *filename, char **argv, char **envp, - struct target_pt_regs *regs, struct image_info *infop, - struct linux_binprm *bprm) + struct image_info *infop, struct linux_binprm *bprm) { int retval; @@ -175,8 +174,7 @@ int loader_exec(int fdexec, const char *filename, char **argv, char **envp, return retval; } - /* Success. Initialize important registers. */ - do_init_thread(regs, infop); + /* Success. 
*/ return 0; } diff --git a/linux-user/loader.h b/linux-user/loader.h index e102e6f4108f4..da9ad28db5de5 100644 --- a/linux-user/loader.h +++ b/linux-user/loader.h @@ -82,12 +82,10 @@ struct linux_binprm { int (*core_dump)(int, const CPUArchState *); /* coredump routine */ }; -void do_init_thread(struct target_pt_regs *regs, struct image_info *infop); abi_ulong loader_build_argptr(int envc, int argc, abi_ulong sp, abi_ulong stringp, int push_ptr); int loader_exec(int fdexec, const char *filename, char **argv, char **envp, - struct target_pt_regs *regs, struct image_info *infop, - struct linux_binprm *); + struct image_info *infop, struct linux_binprm *); uint32_t get_elf_eflags(int fd); int load_elf_binary(struct linux_binprm *bprm, struct image_info *info); @@ -98,13 +96,37 @@ abi_long memcpy_to_target(abi_ulong dest, const void *src, extern unsigned long guest_stack_size; -#if defined(TARGET_S390X) || defined(TARGET_AARCH64) || defined(TARGET_ARM) -uint32_t get_elf_hwcap(void); +/* Note that Elf32 and Elf64 use uint32_t for e_flags. */ +const char *get_elf_cpu_model(uint32_t eflags); + +abi_ulong get_elf_hwcap(CPUState *cs); +abi_ulong get_elf_hwcap2(CPUState *cs); const char *elf_hwcap_str(uint32_t bit); -#endif -#if defined(TARGET_AARCH64) || defined(TARGET_ARM) -uint64_t get_elf_hwcap2(void); const char *elf_hwcap2_str(uint32_t bit); -#endif +const char *get_elf_platform(CPUState *cs); +const char *get_elf_base_platform(CPUState *cs); +bool init_guest_commpage(void); + +struct target_elf_gregset_t; +void elf_core_copy_regs(struct target_elf_gregset_t *, const CPUArchState *); + +typedef struct { + const uint8_t *image; + const uint32_t *relocs; + unsigned image_size; + unsigned reloc_count; + unsigned sigreturn_ofs; + unsigned rt_sigreturn_ofs; + unsigned sigreturn_region_start_ofs; + unsigned sigreturn_region_end_ofs; +} VdsoImageInfo; + +/* Note that both Elf32_Word and Elf64_Word are uint32_t. 
*/ +const VdsoImageInfo *get_vdso_image_info(uint32_t elf_flags); + +bool arch_parse_elf_property(uint32_t pr_type, uint32_t pr_datasz, + const uint32_t *data, + struct image_info *info, + Error **errp); #endif /* LINUX_USER_LOADER_H */ diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c index ec8a06c88c79e..26a5ce3a936c4 100644 --- a/linux-user/loongarch64/cpu_loop.c +++ b/linux-user/loongarch64/cpu_loop.c @@ -27,7 +27,7 @@ void cpu_loop(CPULoongArchState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_INTERRUPT: @@ -120,13 +120,10 @@ void cpu_loop(CPULoongArchState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; - - for (i = 0; i < 32; i++) { - env->gpr[i] = regs->regs[i]; - } - env->pc = regs->csr.era; + CPUArchState *env = cpu_env(cs); + env->pc = info->entry; + env->gpr[3] = info->start_stack; } diff --git a/linux-user/loongarch64/elfload.c b/linux-user/loongarch64/elfload.c new file mode 100644 index 0000000000000..ce3bd0c6079c9 --- /dev/null +++ b/linux-user/loongarch64/elfload.c @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "la464"; +} + +/* See arch/loongarch/include/uapi/asm/hwcap.h */ +enum { + HWCAP_LOONGARCH_CPUCFG = (1 << 0), + HWCAP_LOONGARCH_LAM = (1 << 1), + HWCAP_LOONGARCH_UAL = (1 << 2), + HWCAP_LOONGARCH_FPU = (1 << 3), + HWCAP_LOONGARCH_LSX = (1 << 4), + HWCAP_LOONGARCH_LASX = (1 << 5), + HWCAP_LOONGARCH_CRC32 = (1 << 6), + HWCAP_LOONGARCH_COMPLEX = (1 << 7), + HWCAP_LOONGARCH_CRYPTO = (1 << 8), + HWCAP_LOONGARCH_LVZ = (1 << 9), + HWCAP_LOONGARCH_LBT_X86 = (1 << 10), + HWCAP_LOONGARCH_LBT_ARM = (1 << 11), + HWCAP_LOONGARCH_LBT_MIPS = 
(1 << 12), +}; + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(cs); + abi_ulong hwcaps = 0; + + hwcaps |= HWCAP_LOONGARCH_CRC32; + + if (FIELD_EX32(cpu->env.cpucfg[1], CPUCFG1, UAL)) { + hwcaps |= HWCAP_LOONGARCH_UAL; + } + + if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, FP)) { + hwcaps |= HWCAP_LOONGARCH_FPU; + } + + if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LAM)) { + hwcaps |= HWCAP_LOONGARCH_LAM; + } + + if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LSX)) { + hwcaps |= HWCAP_LOONGARCH_LSX; + } + + if (FIELD_EX32(cpu->env.cpucfg[2], CPUCFG2, LASX)) { + hwcaps |= HWCAP_LOONGARCH_LASX; + } + + return hwcaps; +} + +const char *get_elf_platform(CPUState *cs) +{ + return "loongarch"; +} + +#define tswapreg(ptr) tswapal(ptr) + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPULoongArchState *env) +{ + r->pt.regs[0] = 0; + + for (int i = 1; i < ARRAY_SIZE(env->gpr); i++) { + r->pt.regs[i] = tswapreg(env->gpr[i]); + } + + r->pt.csr_era = tswapreg(env->pc); + r->pt.csr_badv = tswapreg(env->CSR_BADV); +} diff --git a/linux-user/loongarch64/target_elf.h b/linux-user/loongarch64/target_elf.h index 95c3f05a46d87..3aa8c83958df3 100644 --- a/linux-user/loongarch64/target_elf.h +++ b/linux-user/loongarch64/target_elf.h @@ -5,8 +5,21 @@ #ifndef LOONGARCH_TARGET_ELF_H #define LOONGARCH_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "la464"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS64 +#define ELF_MACHINE EM_LOONGARCH +#define EXSTACK_DEFAULT true +#define VDSO_HEADER "vdso.c.inc" + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* See linux kernel: arch/loongarch/include/asm/elf.h */ +typedef struct target_elf_gregset_t { + struct target_user_pt_regs pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/loongarch64/target_ptrace.h b/linux-user/loongarch64/target_ptrace.h new file mode 100644 index 0000000000000..2578e09207e16 --- 
/dev/null +++ b/linux-user/loongarch64/target_ptrace.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef LOONGARCH64_TARGET_PTRACE_H +#define LOONGARCH64_TARGET_PTRACE_H + +/* See arch/loongarch/include/uapi/asm/ptrace.h. */ +struct target_user_pt_regs { + abi_ulong regs[32]; + abi_ulong orig_a0; + abi_ulong csr_era; + abi_ulong csr_badv; + abi_ulong reserved[10]; +}; + +#endif /* LOONGARCH64_TARGET_PTRACE_H */ diff --git a/linux-user/loongarch64/target_syscall.h b/linux-user/loongarch64/target_syscall.h index 39f229bb9c881..f7ced7b2be354 100644 --- a/linux-user/loongarch64/target_syscall.h +++ b/linux-user/loongarch64/target_syscall.h @@ -8,29 +8,6 @@ #include "qemu/units.h" -/* - * this struct defines the way the registers are stored on the - * stack during a system call. - */ - -struct target_pt_regs { - /* Saved main processor registers. */ - target_ulong regs[32]; - - /* Saved special registers. */ - struct { - target_ulong era; - target_ulong badv; - target_ulong crmd; - target_ulong prmd; - target_ulong euen; - target_ulong ecfg; - target_ulong estat; - } csr; - target_ulong orig_a0; - target_ulong __last[0]; -}; - #define UNAME_MACHINE "loongarch64" #define UNAME_MINIMUM_RELEASE "5.19.0" diff --git a/linux-user/loongarch64/vdso.S b/linux-user/loongarch64/vdso.S index 780a5fda12f3d..2409d95476d3c 100644 --- a/linux-user/loongarch64/vdso.S +++ b/linux-user/loongarch64/vdso.S @@ -125,6 +125,8 @@ vdso_syscall __vdso_getcpu, __NR_getcpu __vdso_rt_sigreturn: li.w $a7, __NR_rt_sigreturn +sigreturn_region_start: syscall 0 +sigreturn_region_end: .cfi_endproc endf __vdso_rt_sigreturn diff --git a/linux-user/loongarch64/vdso.so b/linux-user/loongarch64/vdso.so index 7c2de6c50e706..3704834f0d5d4 100755 Binary files a/linux-user/loongarch64/vdso.so and b/linux-user/loongarch64/vdso.so differ diff --git a/linux-user/m68k/cpu_loop.c b/linux-user/m68k/cpu_loop.c index 5da91b997ae38..2c9f628241f4c 100644 --- a/linux-user/m68k/cpu_loop.c +++ 
b/linux-user/m68k/cpu_loop.c @@ -33,7 +33,7 @@ void cpu_loop(CPUM68KState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch(trapnr) { case EXCP_ILLEGAL: @@ -92,33 +92,11 @@ void cpu_loop(CPUM68KState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - CPUState *cpu = env_cpu(env); - TaskState *ts = get_task_state(cpu); - struct image_info *info = ts->info; + CPUArchState *env = cpu_env(cs); - env->pc = regs->pc; - env->dregs[0] = regs->d0; - env->dregs[1] = regs->d1; - env->dregs[2] = regs->d2; - env->dregs[3] = regs->d3; - env->dregs[4] = regs->d4; - env->dregs[5] = regs->d5; - env->dregs[6] = regs->d6; - env->dregs[7] = regs->d7; - env->aregs[0] = regs->a0; - env->aregs[1] = regs->a1; - env->aregs[2] = regs->a2; - env->aregs[3] = regs->a3; - env->aregs[4] = regs->a4; - env->aregs[5] = regs->a5; - env->aregs[6] = regs->a6; - env->aregs[7] = regs->usp; - env->sr = regs->sr; - - ts->stack_base = info->start_stack; - ts->heap_base = info->brk; - /* This will be filled in on the first SYS_HEAPINFO call. 
*/ - ts->heap_limit = 0; + env->pc = info->entry; + env->aregs[7] = info->start_stack; + env->sr = 0; } diff --git a/linux-user/m68k/elfload.c b/linux-user/m68k/elfload.c new file mode 100644 index 0000000000000..423d1f680af11 --- /dev/null +++ b/linux-user/m68k/elfload.c @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "elf.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + if (eflags == 0 || (eflags & EF_M68K_M68000)) { + /* 680x0 */ + return "m68040"; + } + + /* Coldfire */ + return "any"; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUM68KState *env) +{ + r->d1 = tswapal(env->dregs[1]); + r->d2 = tswapal(env->dregs[2]); + r->d3 = tswapal(env->dregs[3]); + r->d4 = tswapal(env->dregs[4]); + r->d5 = tswapal(env->dregs[5]); + r->d6 = tswapal(env->dregs[6]); + r->d7 = tswapal(env->dregs[7]); + r->a0 = tswapal(env->aregs[0]); + r->a1 = tswapal(env->aregs[1]); + r->a2 = tswapal(env->aregs[2]); + r->a3 = tswapal(env->aregs[3]); + r->a4 = tswapal(env->aregs[4]); + r->a5 = tswapal(env->aregs[5]); + r->a6 = tswapal(env->aregs[6]); + r->d0 = tswapal(env->dregs[0]); + r->usp = tswapal(env->aregs[7]); + r->orig_d0 = tswapal(env->dregs[0]); /* FIXME */ + r->sr = tswapal(env->sr); + r->pc = tswapal(env->pc); + /* FIXME: regs->format | regs->vector */ +} diff --git a/linux-user/m68k/target_elf.h b/linux-user/m68k/target_elf.h index 998fe0fe2f322..b997fa0b6d662 100644 --- a/linux-user/m68k/target_elf.h +++ b/linux-user/m68k/target_elf.h @@ -7,14 +7,32 @@ #ifndef M68K_TARGET_ELF_H #define M68K_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - if (eflags == 0 || (eflags & EF_M68K_M68000)) { - /* 680x0 */ - return "m68040"; - } - /* Coldfire */ - return "any"; -} +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_68K + +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/m68k/include/asm/elf.h, where 
+ * elf_gregset_t is mapped to struct user_regs_struct via sizeof. + * + * Note that user_regs_struct has + * short stkadj, sr; + * ... + * short fmtvec, __fill; + * but ELF_CORE_COPY_REGS writes to unsigned longs. + * Therefore adjust the sr and fmtvec fields to match. + */ +typedef struct target_elf_gregset_t { + abi_ulong d1, d2, d3, d4, d5, d6, d7; + abi_ulong a0, a1, a2, a3, a4, a5, a6; + abi_ulong d0; + abi_ulong usp; + abi_ulong orig_d0; + abi_ulong sr; + abi_ulong pc; + abi_ulong fmtvec; +} target_elf_gregset_t; + #endif diff --git a/linux-user/m68k/target_syscall.h b/linux-user/m68k/target_syscall.h index 8d4ddbd76c8f4..3ca0231c70951 100644 --- a/linux-user/m68k/target_syscall.h +++ b/linux-user/m68k/target_syscall.h @@ -1,22 +1,6 @@ #ifndef M68K_TARGET_SYSCALL_H #define M68K_TARGET_SYSCALL_H -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -struct target_pt_regs { - abi_long d1, d2, d3, d4, d5, d6, d7; - abi_long a0, a1, a2, a3, a4, a5, a6; - abi_ulong d0; - abi_ulong usp; - abi_ulong orig_d0; - int16_t stkadj; - uint16_t sr; - abi_ulong pc; - uint16_t fntvex; - uint16_t __fill; -}; - #define UNAME_MACHINE "m68k" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/main.c b/linux-user/main.c index 68972f00a155a..db751c07576f2 100644 --- a/linux-user/main.c +++ b/linux-user/main.c @@ -49,7 +49,6 @@ #include "qemu/guest-random.h" #include "elf.h" #include "trace/control.h" -#include "target_elf.h" #include "user/cpu_loop.h" #include "crypto/init.h" #include "fd-trans.h" @@ -190,11 +189,6 @@ bool qemu_cpu_is_self(CPUState *cpu) return thread_cpu == cpu; } -void qemu_cpu_kick(CPUState *cpu) -{ - cpu_exit(cpu); -} - void task_settid(TaskState *ts) { if (ts->ts_tid == 0) { @@ -234,6 +228,8 @@ void init_task_state(TaskState *ts) ts->start_boottime += bt.tv_nsec * (uint64_t) ticks_per_sec / NANOSECONDS_PER_SECOND; } + + ts->sys_dispatch_len = -1; } CPUArchState *cpu_copy(CPUArchState *env) @@ -341,16 
+337,6 @@ static void handle_arg_ld_prefix(const char *arg) interp_prefix = strdup(arg); } -static void handle_arg_pagesize(const char *arg) -{ - unsigned size, want = qemu_real_host_page_size(); - - if (qemu_strtoui(arg, NULL, 10, &size) || size != want) { - warn_report("Deprecated page size option cannot " - "change host page size (%u)", want); - } -} - static void handle_arg_seed(const char *arg) { seed_optarg = arg; @@ -523,8 +509,6 @@ static const struct qemu_argument arg_table[] = { "range[,...]","filter logging based on address range"}, {"D", "QEMU_LOG_FILENAME", true, handle_arg_log_filename, "logfile", "write logs to 'logfile' (default stderr)"}, - {"p", "QEMU_PAGESIZE", true, handle_arg_pagesize, - "pagesize", "deprecated change to host page size"}, {"one-insn-per-tb", "QEMU_ONE_INSN_PER_TB", false, handle_arg_one_insn_per_tb, "", "run with one guest instruction per emulated TB"}, @@ -697,7 +681,6 @@ static int parse_args(int argc, char **argv) int main(int argc, char **argv, char **envp) { - struct target_pt_regs regs1, *regs = ®s1; struct image_info info1, *info = &info1; struct linux_binprm bprm; TaskState *ts; @@ -763,9 +746,6 @@ int main(int argc, char **argv, char **envp) trace_init_file(); qemu_plugin_load_list(&plugins, &error_fatal); - /* Zero out regs */ - memset(regs, 0, sizeof(struct target_pt_regs)); - /* Zero out image_info */ memset(info, 0, sizeof(struct image_info)); @@ -809,7 +789,7 @@ int main(int argc, char **argv, char **envp) } if (cpu_model == NULL) { - cpu_model = cpu_get_model(get_elf_eflags(execfd)); + cpu_model = get_elf_cpu_model(get_elf_eflags(execfd)); } cpu_type = parse_cpu_option(cpu_model); @@ -989,8 +969,8 @@ int main(int argc, char **argv, char **envp) fd_trans_init(); - ret = loader_exec(execfd, exec_path, target_argv, target_environ, regs, - info, &bprm); + ret = loader_exec(execfd, exec_path, target_argv, target_environ, + info, &bprm); if (ret != 0) { printf("Error while loading %s: %s\n", exec_path, strerror(-ret)); 
_exit(EXIT_FAILURE); @@ -1042,7 +1022,7 @@ int main(int argc, char **argv, char **envp) the real value of GUEST_BASE into account. */ tcg_prologue_init(); - target_cpu_copy_regs(env, regs); + init_main_thread(cpu, info); if (gdbstub) { gdbserver_start(gdbstub, &error_fatal); diff --git a/linux-user/microblaze/cpu_loop.c b/linux-user/microblaze/cpu_loop.c index 87236c166f2b7..78506ab23d9bb 100644 --- a/linux-user/microblaze/cpu_loop.c +++ b/linux-user/microblaze/cpu_loop.c @@ -32,7 +32,7 @@ void cpu_loop(CPUMBState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_INTERRUPT: @@ -127,39 +127,10 @@ void cpu_loop(CPUMBState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - env->regs[0] = regs->r0; - env->regs[1] = regs->r1; - env->regs[2] = regs->r2; - env->regs[3] = regs->r3; - env->regs[4] = regs->r4; - env->regs[5] = regs->r5; - env->regs[6] = regs->r6; - env->regs[7] = regs->r7; - env->regs[8] = regs->r8; - env->regs[9] = regs->r9; - env->regs[10] = regs->r10; - env->regs[11] = regs->r11; - env->regs[12] = regs->r12; - env->regs[13] = regs->r13; - env->regs[14] = regs->r14; - env->regs[15] = regs->r15; - env->regs[16] = regs->r16; - env->regs[17] = regs->r17; - env->regs[18] = regs->r18; - env->regs[19] = regs->r19; - env->regs[20] = regs->r20; - env->regs[21] = regs->r21; - env->regs[22] = regs->r22; - env->regs[23] = regs->r23; - env->regs[24] = regs->r24; - env->regs[25] = regs->r25; - env->regs[26] = regs->r26; - env->regs[27] = regs->r27; - env->regs[28] = regs->r28; - env->regs[29] = regs->r29; - env->regs[30] = regs->r30; - env->regs[31] = regs->r31; - env->pc = regs->pc; + CPUArchState *env = cpu_env(cs); + + env->pc = info->entry; + env->regs[1] = info->start_stack; } diff --git a/linux-user/microblaze/elfload.c b/linux-user/microblaze/elfload.c new file mode 
100644 index 0000000000000..bdc0a953d593d --- /dev/null +++ b/linux-user/microblaze/elfload.c @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return TARGET_BIG_ENDIAN ? "any,little-endian=off" + : "any,little-endian=on"; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUMBState *env) +{ + for (int i = 0; i < 32; i++) { + r->pt.r[i] = tswapal(env->regs[i]); + } + + r->pt.pc = tswapal(env->pc); + r->pt.msr = tswapal(mb_cpu_read_msr(env)); + r->pt.ear = tswapal(env->ear); + r->pt.esr = tswapal(env->esr); +} diff --git a/linux-user/microblaze/signal.c b/linux-user/microblaze/signal.c index f6d47d76ff616..e874e4def1e93 100644 --- a/linux-user/microblaze/signal.c +++ b/linux-user/microblaze/signal.c @@ -21,6 +21,7 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "target_ptrace.h" struct target_sigcontext { struct target_pt_regs regs; /* needs to be first */ @@ -50,75 +51,17 @@ struct target_rt_sigframe { static void setup_sigcontext(struct target_sigcontext *sc, CPUMBState *env) { - __put_user(env->regs[0], &sc->regs.r0); - __put_user(env->regs[1], &sc->regs.r1); - __put_user(env->regs[2], &sc->regs.r2); - __put_user(env->regs[3], &sc->regs.r3); - __put_user(env->regs[4], &sc->regs.r4); - __put_user(env->regs[5], &sc->regs.r5); - __put_user(env->regs[6], &sc->regs.r6); - __put_user(env->regs[7], &sc->regs.r7); - __put_user(env->regs[8], &sc->regs.r8); - __put_user(env->regs[9], &sc->regs.r9); - __put_user(env->regs[10], &sc->regs.r10); - __put_user(env->regs[11], &sc->regs.r11); - __put_user(env->regs[12], &sc->regs.r12); - __put_user(env->regs[13], &sc->regs.r13); - __put_user(env->regs[14], &sc->regs.r14); - __put_user(env->regs[15], &sc->regs.r15); - __put_user(env->regs[16], &sc->regs.r16); - __put_user(env->regs[17], &sc->regs.r17); - 
__put_user(env->regs[18], &sc->regs.r18); - __put_user(env->regs[19], &sc->regs.r19); - __put_user(env->regs[20], &sc->regs.r20); - __put_user(env->regs[21], &sc->regs.r21); - __put_user(env->regs[22], &sc->regs.r22); - __put_user(env->regs[23], &sc->regs.r23); - __put_user(env->regs[24], &sc->regs.r24); - __put_user(env->regs[25], &sc->regs.r25); - __put_user(env->regs[26], &sc->regs.r26); - __put_user(env->regs[27], &sc->regs.r27); - __put_user(env->regs[28], &sc->regs.r28); - __put_user(env->regs[29], &sc->regs.r29); - __put_user(env->regs[30], &sc->regs.r30); - __put_user(env->regs[31], &sc->regs.r31); + for (int i = 0; i < 32; ++i) { + __put_user(env->regs[i], &sc->regs.r[i]); + } __put_user(env->pc, &sc->regs.pc); } static void restore_sigcontext(struct target_sigcontext *sc, CPUMBState *env) { - __get_user(env->regs[0], &sc->regs.r0); - __get_user(env->regs[1], &sc->regs.r1); - __get_user(env->regs[2], &sc->regs.r2); - __get_user(env->regs[3], &sc->regs.r3); - __get_user(env->regs[4], &sc->regs.r4); - __get_user(env->regs[5], &sc->regs.r5); - __get_user(env->regs[6], &sc->regs.r6); - __get_user(env->regs[7], &sc->regs.r7); - __get_user(env->regs[8], &sc->regs.r8); - __get_user(env->regs[9], &sc->regs.r9); - __get_user(env->regs[10], &sc->regs.r10); - __get_user(env->regs[11], &sc->regs.r11); - __get_user(env->regs[12], &sc->regs.r12); - __get_user(env->regs[13], &sc->regs.r13); - __get_user(env->regs[14], &sc->regs.r14); - __get_user(env->regs[15], &sc->regs.r15); - __get_user(env->regs[16], &sc->regs.r16); - __get_user(env->regs[17], &sc->regs.r17); - __get_user(env->regs[18], &sc->regs.r18); - __get_user(env->regs[19], &sc->regs.r19); - __get_user(env->regs[20], &sc->regs.r20); - __get_user(env->regs[21], &sc->regs.r21); - __get_user(env->regs[22], &sc->regs.r22); - __get_user(env->regs[23], &sc->regs.r23); - __get_user(env->regs[24], &sc->regs.r24); - __get_user(env->regs[25], &sc->regs.r25); - __get_user(env->regs[26], &sc->regs.r26); - 
__get_user(env->regs[27], &sc->regs.r27); - __get_user(env->regs[28], &sc->regs.r28); - __get_user(env->regs[29], &sc->regs.r29); - __get_user(env->regs[30], &sc->regs.r30); - __get_user(env->regs[31], &sc->regs.r31); + for (int i = 0; i < 32; ++i) { + __get_user(env->regs[i], &sc->regs.r[i]); + } __get_user(env->pc, &sc->regs.pc); } diff --git a/linux-user/microblaze/target_elf.h b/linux-user/microblaze/target_elf.h index 8a8f1debff9e7..7b3ef70d23288 100644 --- a/linux-user/microblaze/target_elf.h +++ b/linux-user/microblaze/target_elf.h @@ -7,8 +7,22 @@ #ifndef MICROBLAZE_TARGET_ELF_H #define MICROBLAZE_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "any"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_MICROBLAZE + +#define elf_check_machine(x) ((x) == EM_MICROBLAZE || (x) == EM_MICROBLAZE_OLD) + +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/microblaze/include/asm/elf.h, where + * elf_gregset_t is mapped to struct pt_regs via sizeof. + */ +typedef struct target_elf_gregset_t { + struct target_pt_regs pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/microblaze/target_ptrace.h b/linux-user/microblaze/target_ptrace.h new file mode 100644 index 0000000000000..ead913e5a4521 --- /dev/null +++ b/linux-user/microblaze/target_ptrace.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef MICROBLAZE_TARGET_PTRACE_H +#define MICROBLAZE_TARGET_PTRACE_H + +/* We use microblaze_reg_t to keep things similar to the kernel sources. */ +typedef uint32_t microblaze_reg_t; + +struct target_pt_regs { + /* Note the kernel enumerates all 32 registers. 
*/ + microblaze_reg_t r[32]; + microblaze_reg_t pc; + microblaze_reg_t msr; + microblaze_reg_t ear; + microblaze_reg_t esr; + microblaze_reg_t fsr; + uint32_t kernel_mode; +}; + +#endif /* MICROBLAZE_TARGET_PTRACE_H */ diff --git a/linux-user/microblaze/target_syscall.h b/linux-user/microblaze/target_syscall.h index 43362a1664caf..66f5a9ebe22e6 100644 --- a/linux-user/microblaze/target_syscall.h +++ b/linux-user/microblaze/target_syscall.h @@ -4,50 +4,6 @@ #define UNAME_MACHINE "microblaze" #define UNAME_MINIMUM_RELEASE "2.6.32" -/* We use microblaze_reg_t to keep things similar to the kernel sources. */ -typedef uint32_t microblaze_reg_t; - -struct target_pt_regs { - microblaze_reg_t r0; - microblaze_reg_t r1; - microblaze_reg_t r2; - microblaze_reg_t r3; - microblaze_reg_t r4; - microblaze_reg_t r5; - microblaze_reg_t r6; - microblaze_reg_t r7; - microblaze_reg_t r8; - microblaze_reg_t r9; - microblaze_reg_t r10; - microblaze_reg_t r11; - microblaze_reg_t r12; - microblaze_reg_t r13; - microblaze_reg_t r14; - microblaze_reg_t r15; - microblaze_reg_t r16; - microblaze_reg_t r17; - microblaze_reg_t r18; - microblaze_reg_t r19; - microblaze_reg_t r20; - microblaze_reg_t r21; - microblaze_reg_t r22; - microblaze_reg_t r23; - microblaze_reg_t r24; - microblaze_reg_t r25; - microblaze_reg_t r26; - microblaze_reg_t r27; - microblaze_reg_t r28; - microblaze_reg_t r29; - microblaze_reg_t r30; - microblaze_reg_t r31; - microblaze_reg_t pc; - microblaze_reg_t msr; - microblaze_reg_t ear; - microblaze_reg_t esr; - microblaze_reg_t fsr; - uint32_t kernel_mode; -}; - #define TARGET_CLONE_BACKWARDS #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 diff --git a/linux-user/mips/cpu_loop.c b/linux-user/mips/cpu_loop.c index 6405806eb0267..2365de1de1a2f 100644 --- a/linux-user/mips/cpu_loop.c +++ b/linux-user/mips/cpu_loop.c @@ -74,7 +74,7 @@ void cpu_loop(CPUMIPSState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + 
qemu_process_cpu_events(cs); switch(trapnr) { case EXCP_SYSCALL: @@ -211,12 +211,9 @@ void cpu_loop(CPUMIPSState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - CPUState *cpu = env_cpu(env); - TaskState *ts = get_task_state(cpu); - struct image_info *info = ts->info; - int i; + CPUArchState *env = cpu_env(cs); struct mode_req { bool single; @@ -245,12 +242,11 @@ void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) struct mode_req prog_req; struct mode_req interp_req; + target_ulong entry = info->entry; - for(i = 0; i < 32; i++) { - env->active_tc.gpr[i] = regs->regs[i]; - } - env->active_tc.PC = regs->cp0_epc & ~(target_ulong)1; - if (regs->cp0_epc & 1) { + env->active_tc.gpr[29] = info->start_stack; + env->active_tc.PC = entry & ~(target_ulong)1; + if (entry & 1) { env->hflags |= MIPS_HFLAG_M16; } diff --git a/linux-user/mips/elfload.c b/linux-user/mips/elfload.c new file mode 100644 index 0000000000000..cc5bbf05ab2be --- /dev/null +++ b/linux-user/mips/elfload.c @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "elf.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ +#ifdef TARGET_MIPS64 + switch (eflags & EF_MIPS_MACH) { + case EF_MIPS_MACH_OCTEON: + case EF_MIPS_MACH_OCTEON2: + case EF_MIPS_MACH_OCTEON3: + return "Octeon68XX"; + case EF_MIPS_MACH_LS2E: + return "Loongson-2E"; + case EF_MIPS_MACH_LS2F: + return "Loongson-2F"; + case EF_MIPS_MACH_LS3A: + return "Loongson-3A1000"; + default: + break; + } + switch (eflags & EF_MIPS_ARCH) { + case EF_MIPS_ARCH_64R6: + return "I6400"; + case EF_MIPS_ARCH_64R2: + return "MIPS64R2-generic"; + default: + break; + } + return "5KEf"; +#else + if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) { + return "mips32r6-generic"; + } + if ((eflags & EF_MIPS_ARCH_ASE) == EF_MIPS_ARCH_ASE_MICROMIPS) 
{ + return "M14Kc"; + } + if ((eflags & EF_MIPS_ARCH_ASE) == EF_MIPS_ARCH_ASE_M16) { + return "74Kf"; + } + if (eflags & EF_MIPS_NAN2008) { + return "P5600"; + } + return "24Kf"; +#endif +} + +/* See arch/mips/include/uapi/asm/hwcap.h. */ +enum { + HWCAP_MIPS_R6 = (1 << 0), + HWCAP_MIPS_MSA = (1 << 1), + HWCAP_MIPS_CRC32 = (1 << 2), + HWCAP_MIPS_MIPS16 = (1 << 3), + HWCAP_MIPS_MDMX = (1 << 4), + HWCAP_MIPS_MIPS3D = (1 << 5), + HWCAP_MIPS_SMARTMIPS = (1 << 6), + HWCAP_MIPS_DSP = (1 << 7), + HWCAP_MIPS_DSP2 = (1 << 8), + HWCAP_MIPS_DSP3 = (1 << 9), + HWCAP_MIPS_MIPS16E2 = (1 << 10), + HWCAP_LOONGSON_MMI = (1 << 11), + HWCAP_LOONGSON_EXT = (1 << 12), + HWCAP_LOONGSON_EXT2 = (1 << 13), + HWCAP_LOONGSON_CPUCFG = (1 << 14), +}; + +#define GET_FEATURE_INSN(_flag, _hwcap) \ + do { if (cpu->env.insn_flags & (_flag)) { hwcaps |= _hwcap; } } while (0) + +#define GET_FEATURE_REG_SET(_reg, _mask, _hwcap) \ + do { if (cpu->env._reg & (_mask)) { hwcaps |= _hwcap; } } while (0) + +#define GET_FEATURE_REG_EQU(_reg, _start, _length, _val, _hwcap) \ + do { \ + if (extract32(cpu->env._reg, (_start), (_length)) == (_val)) { \ + hwcaps |= _hwcap; \ + } \ + } while (0) + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + abi_ulong hwcaps = 0; + + GET_FEATURE_REG_EQU(CP0_Config0, CP0C0_AR, CP0C0_AR_LENGTH, + 2, HWCAP_MIPS_R6); + GET_FEATURE_REG_SET(CP0_Config3, 1 << CP0C3_MSAP, HWCAP_MIPS_MSA); + GET_FEATURE_INSN(ASE_LMMI, HWCAP_LOONGSON_MMI); + GET_FEATURE_INSN(ASE_LEXT, HWCAP_LOONGSON_EXT); + + return hwcaps; +} + +#undef GET_FEATURE_REG_EQU +#undef GET_FEATURE_REG_SET +#undef GET_FEATURE_INSN + +#define MATCH_PLATFORM_INSN(_flags, _base_platform) \ + do { if ((cpu->env.insn_flags & (_flags)) == _flags) \ + { return _base_platform; } } while (0) + +const char *get_elf_base_platform(CPUState *cs) +{ + MIPSCPU *cpu = MIPS_CPU(cs); + + /* 64 bit ISAs goes first */ + MATCH_PLATFORM_INSN(CPU_MIPS64R6, "mips64r6"); + MATCH_PLATFORM_INSN(CPU_MIPS64R5, "mips64r5"); + 
MATCH_PLATFORM_INSN(CPU_MIPS64R2, "mips64r2"); + MATCH_PLATFORM_INSN(CPU_MIPS64R1, "mips64"); + MATCH_PLATFORM_INSN(CPU_MIPS5, "mips5"); + MATCH_PLATFORM_INSN(CPU_MIPS4, "mips4"); + MATCH_PLATFORM_INSN(CPU_MIPS3, "mips3"); + + /* 32 bit ISAs */ + MATCH_PLATFORM_INSN(CPU_MIPS32R6, "mips32r6"); + MATCH_PLATFORM_INSN(CPU_MIPS32R5, "mips32r5"); + MATCH_PLATFORM_INSN(CPU_MIPS32R2, "mips32r2"); + MATCH_PLATFORM_INSN(CPU_MIPS32R1, "mips32"); + MATCH_PLATFORM_INSN(CPU_MIPS2, "mips2"); + + /* Fallback */ + return "mips"; +} + +#undef MATCH_PLATFORM_INSN + +/* See linux kernel: arch/mips/kernel/process.c:elf_dump_regs. */ +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUMIPSState *env) +{ + for (int i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { + r->pt.regs[i] = tswapl(env->active_tc.gpr[i]); + } + + r->pt.regs[26] = 0; + r->pt.regs[27] = 0; + r->pt.lo = tswapl(env->active_tc.LO[0]); + r->pt.hi = tswapl(env->active_tc.HI[0]); + r->pt.cp0_epc = tswapl(env->active_tc.PC); + r->pt.cp0_badvaddr = tswapl(env->CP0_BadVAddr); + r->pt.cp0_status = tswapl(env->CP0_Status); + r->pt.cp0_cause = tswapl(env->CP0_Cause); +} diff --git a/linux-user/mips/target_elf.h b/linux-user/mips/target_elf.h index 71a32315a858a..157306f7a0ab4 100644 --- a/linux-user/mips/target_elf.h +++ b/linux-user/mips/target_elf.h @@ -7,14 +7,23 @@ #ifndef MIPS_TARGET_ELF_H #define MIPS_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - if ((eflags & EF_MIPS_ARCH) == EF_MIPS_ARCH_32R6) { - return "mips32r6-generic"; - } - if (eflags & EF_MIPS_NAN2008) { - return "P5600"; - } - return "24Kf"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_MIPS +#define EXSTACK_DEFAULT true + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_BASE_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* See linux kernel: arch/mips/include/asm/elf.h. 
*/ +typedef struct target_elf_gregset_t { + union { + abi_ulong reserved[45]; + struct target_pt_regs pt; + }; +} target_elf_gregset_t; + #endif diff --git a/linux-user/mips/target_ptrace.h b/linux-user/mips/target_ptrace.h new file mode 100644 index 0000000000000..2f63b27ac49f2 --- /dev/null +++ b/linux-user/mips/target_ptrace.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef MIPS_TARGET_PTRACE_H +#define MIPS_TARGET_PTRACE_H + +struct target_pt_regs { + abi_ulong pad0[6]; + abi_ulong regs[32]; + abi_ulong lo; + abi_ulong hi; + abi_ulong cp0_epc; + abi_ulong cp0_badvaddr; + abi_ulong cp0_status; + abi_ulong cp0_cause; +}; + +#endif /* MIPS_TARGET_PTRACE_H */ diff --git a/linux-user/mips/target_syscall.h b/linux-user/mips/target_syscall.h index 08ead678104fb..dfcdf320b7c1e 100644 --- a/linux-user/mips/target_syscall.h +++ b/linux-user/mips/target_syscall.h @@ -1,25 +1,6 @@ #ifndef MIPS_TARGET_SYSCALL_H #define MIPS_TARGET_SYSCALL_H -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -struct target_pt_regs { - /* Pad bytes for argument save space on the stack. */ - abi_ulong pad0[6]; - - /* Saved main processor registers. */ - abi_ulong regs[32]; - - /* Saved special registers. 
*/ - abi_ulong cp0_status; - abi_ulong lo; - abi_ulong hi; - abi_ulong cp0_badvaddr; - abi_ulong cp0_cause; - abi_ulong cp0_epc; -}; - #define UNAME_MACHINE "mips" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/mips64/elfload.c b/linux-user/mips64/elfload.c new file mode 100644 index 0000000000000..b719555e6599a --- /dev/null +++ b/linux-user/mips64/elfload.c @@ -0,0 +1 @@ +#include "../mips/elfload.c" diff --git a/linux-user/mips64/target_elf.h b/linux-user/mips64/target_elf.h index 502af9d2781d5..061471a0f129f 100644 --- a/linux-user/mips64/target_elf.h +++ b/linux-user/mips64/target_elf.h @@ -7,30 +7,29 @@ #ifndef MIPS64_TARGET_ELF_H #define MIPS64_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - switch (eflags & EF_MIPS_MACH) { - case EF_MIPS_MACH_OCTEON: - case EF_MIPS_MACH_OCTEON2: - case EF_MIPS_MACH_OCTEON3: - return "Octeon68XX"; - case EF_MIPS_MACH_LS2E: - return "Loongson-2E"; - case EF_MIPS_MACH_LS2F: - return "Loongson-2F"; - case EF_MIPS_MACH_LS3A: - return "Loongson-3A1000"; - default: - break; - } - switch (eflags & EF_MIPS_ARCH) { - case EF_MIPS_ARCH_64R6: - return "I6400"; - case EF_MIPS_ARCH_64R2: - return "MIPS64R2-generic"; - default: - break; - } - return "5KEf"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS64 +#define ELF_MACHINE EM_MIPS +#define EXSTACK_DEFAULT true + +#ifdef TARGET_ABI_MIPSN32 +#define elf_check_abi(x) ((x) & EF_MIPS_ABI2) +#else +#define elf_check_abi(x) (!((x) & EF_MIPS_ABI2)) +#endif + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_BASE_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* See linux kernel: arch/mips/include/asm/elf.h. 
*/ +typedef struct target_elf_gregset_t { + union { + target_ulong reserved[45]; + struct target_pt_regs pt; + }; +} target_elf_gregset_t; + #endif diff --git a/linux-user/mips64/target_ptrace.h b/linux-user/mips64/target_ptrace.h new file mode 100644 index 0000000000000..41f0bf6c1c253 --- /dev/null +++ b/linux-user/mips64/target_ptrace.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef MIPS64_TARGET_PTRACE_H +#define MIPS64_TARGET_PTRACE_H + +struct target_pt_regs { + target_ulong regs[32]; + target_ulong lo; + target_ulong hi; + target_ulong cp0_epc; + target_ulong cp0_badvaddr; + target_ulong cp0_status; + target_ulong cp0_cause; +}; + +#endif /* MIPS64_TARGET_PTRACE_H */ diff --git a/linux-user/mips64/target_syscall.h b/linux-user/mips64/target_syscall.h index 358dc2d64c998..9135bf5e8b623 100644 --- a/linux-user/mips64/target_syscall.h +++ b/linux-user/mips64/target_syscall.h @@ -1,22 +1,6 @@ #ifndef MIPS64_TARGET_SYSCALL_H #define MIPS64_TARGET_SYSCALL_H -/* this struct defines the way the registers are stored on the - stack during a system call. */ - -struct target_pt_regs { - /* Saved main processor registers. */ - target_ulong regs[32]; - - /* Saved special registers. */ - target_ulong cp0_status; - target_ulong lo; - target_ulong hi; - target_ulong cp0_badvaddr; - target_ulong cp0_cause; - target_ulong cp0_epc; -}; - #define UNAME_MACHINE "mips64" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/mmap.c b/linux-user/mmap.c index 002e1e668e631..423c77856a3aa 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -22,8 +22,6 @@ #include "exec/log.h" #include "exec/page-protection.h" #include "exec/mmap-lock.h" -#include "exec/tb-flush.h" -#include "exec/translation-block.h" #include "qemu.h" #include "user/page-protection.h" #include "user-internals.h" @@ -167,6 +165,13 @@ static int target_to_host_prot(int prot) (prot & PROT_EXEC ? 
PROT_READ : 0); } +/* Target bits to be cleared by mprotect if not present in target_prot. */ +#ifdef TARGET_AARCH64 +#define TARGET_PAGE_NOTSTICKY PAGE_BTI +#else +#define TARGET_PAGE_NOTSTICKY 0 +#endif + /* NOTE: all the constants are the HOST ones, but addresses are target. */ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) { @@ -264,7 +269,7 @@ int target_mprotect(abi_ulong start, abi_ulong len, int target_prot) } } - page_set_flags(start, last, page_flags); + page_set_flags(start, last, page_flags, PAGE_RWX | TARGET_PAGE_NOTSTICKY); ret = 0; error: @@ -563,17 +568,17 @@ static abi_long mmap_end(abi_ulong start, abi_ulong last, if (flags & MAP_ANONYMOUS) { page_flags |= PAGE_ANON; } - page_flags |= PAGE_RESET; if (passthrough_start > passthrough_last) { - page_set_flags(start, last, page_flags); + page_set_flags(start, last, page_flags, PAGE_VALID); } else { if (start < passthrough_start) { - page_set_flags(start, passthrough_start - 1, page_flags); + page_set_flags(start, passthrough_start - 1, + page_flags, PAGE_VALID); } page_set_flags(passthrough_start, passthrough_last, - page_flags | PAGE_PASSTHROUGH); + page_flags | PAGE_PASSTHROUGH, PAGE_VALID); if (passthrough_last < last) { - page_set_flags(passthrough_last + 1, last, page_flags); + page_set_flags(passthrough_last + 1, last, page_flags, PAGE_VALID); } } shm_region_rm_complete(start, last); @@ -1007,11 +1012,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int target_prot, * be atomic with respect to an external process. 
*/ if (ret != -1 && (flags & MAP_TYPE) != MAP_PRIVATE) { - CPUState *cpu = thread_cpu; - if (!tcg_cflags_has(cpu, CF_PARALLEL)) { - tcg_cflags_set(cpu, CF_PARALLEL); - tb_flush(cpu); - } + begin_parallel_context(thread_cpu); } return ret; @@ -1094,7 +1095,7 @@ int target_munmap(abi_ulong start, abi_ulong len) mmap_lock(); ret = mmap_reserve_or_unmap(start, len); if (likely(ret == 0)) { - page_set_flags(start, start + len - 1, 0); + page_set_flags(start, start + len - 1, 0, PAGE_VALID); shm_region_rm_complete(start, start + len - 1); } mmap_unlock(); @@ -1185,10 +1186,10 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size, } else { new_addr = h2g(host_addr); prot = page_get_flags(old_addr); - page_set_flags(old_addr, old_addr + old_size - 1, 0); + page_set_flags(old_addr, old_addr + old_size - 1, 0, PAGE_VALID); shm_region_rm_complete(old_addr, old_addr + old_size - 1); page_set_flags(new_addr, new_addr + new_size - 1, - prot | PAGE_VALID | PAGE_RESET); + prot | PAGE_VALID, PAGE_VALID); shm_region_rm_complete(new_addr, new_addr + new_size - 1); } mmap_unlock(); @@ -1247,6 +1248,12 @@ abi_long target_madvise(abi_ulong start, abi_ulong len_in, int advice) */ mmap_lock(); switch (advice) { + case MADV_DONTDUMP: + page_set_flags(start, start + len - 1, PAGE_DONTDUMP, 0); + break; + case MADV_DODUMP: + page_set_flags(start, start + len - 1, 0, PAGE_DONTDUMP); + break; case MADV_WIPEONFORK: case MADV_KEEPONFORK: ret = -EINVAL; @@ -1434,9 +1441,10 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid, last = shmaddr + m_len - 1; page_set_flags(shmaddr, last, - PAGE_VALID | PAGE_RESET | PAGE_READ | + PAGE_VALID | PAGE_READ | (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) | - (shmflg & SHM_EXEC ? PAGE_EXEC : 0)); + (shmflg & SHM_EXEC ? 
PAGE_EXEC : 0), + PAGE_VALID); shm_region_rm_complete(shmaddr, last); shm_region_add(shmaddr, last); @@ -1448,10 +1456,7 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid, * supported by the host -- anything that requires EXCP_ATOMIC will not * be atomic with respect to an external process. */ - if (!tcg_cflags_has(cpu, CF_PARALLEL)) { - tcg_cflags_set(cpu, CF_PARALLEL); - tb_flush(cpu); - } + begin_parallel_context(cpu); if (qemu_loglevel_mask(CPU_LOG_PAGE)) { FILE *f = qemu_log_trylock(); @@ -1480,7 +1485,7 @@ abi_long target_shmdt(abi_ulong shmaddr) if (rv == 0) { abi_ulong size = last - shmaddr + 1; - page_set_flags(shmaddr, last, 0); + page_set_flags(shmaddr, last, 0, PAGE_VALID); shm_region_rm_complete(shmaddr, last); mmap_reserve_or_unmap(shmaddr, size); } diff --git a/linux-user/openrisc/cpu_loop.c b/linux-user/openrisc/cpu_loop.c index 306b4f8eb4345..2167d880d5551 100644 --- a/linux-user/openrisc/cpu_loop.c +++ b/linux-user/openrisc/cpu_loop.c @@ -33,7 +33,7 @@ void cpu_loop(CPUOpenRISCState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_SYSCALL: @@ -83,13 +83,10 @@ void cpu_loop(CPUOpenRISCState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; + CPUArchState *env = cpu_env(cs); - for (i = 0; i < 32; i++) { - cpu_set_gpr(env, i, regs->gpr[i]); - } - env->pc = regs->pc; - cpu_set_sr(env, regs->sr); + env->pc = info->entry; + cpu_set_gpr(env, 1, info->start_stack); } diff --git a/linux-user/openrisc/elfload.c b/linux-user/openrisc/elfload.c new file mode 100644 index 0000000000000..6bf02bf58d7aa --- /dev/null +++ b/linux-user/openrisc/elfload.c @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char 
*get_elf_cpu_model(uint32_t eflags) +{ + return "any"; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUOpenRISCState *env) +{ + for (int i = 0; i < 32; i++) { + r->pt.gpr[i] = tswapal(cpu_get_gpr(env, i)); + } + r->pt.pc = tswapal(env->pc); + r->pt.sr = tswapal(cpu_get_sr(env)); +} diff --git a/linux-user/openrisc/signal.c b/linux-user/openrisc/signal.c index cb74a9fe5e2bf..40249095f2301 100644 --- a/linux-user/openrisc/signal.c +++ b/linux-user/openrisc/signal.c @@ -21,9 +21,10 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "target_ptrace.h" typedef struct target_sigcontext { - struct target_pt_regs regs; + struct target_user_regs_struct regs; abi_ulong oldmask; } target_sigcontext; diff --git a/linux-user/openrisc/target_elf.h b/linux-user/openrisc/target_elf.h index 265ecd30794d4..e8554f5339e86 100644 --- a/linux-user/openrisc/target_elf.h +++ b/linux-user/openrisc/target_elf.h @@ -7,8 +7,20 @@ #ifndef OPENRISC_TARGET_ELF_H #define OPENRISC_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "any"; -} + +#include "target_ptrace.h" + +#define ELF_MACHINE EM_OPENRISC +#define ELF_CLASS ELFCLASS32 + +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/openrisc/include/uapi/asm/elf.h, where + * elf_gregset_t is mapped to struct user_regs_struct via sizeof. + */ +typedef struct target_elf_gregset_t { + struct target_user_regs_struct pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/openrisc/target_ptrace.h b/linux-user/openrisc/target_ptrace.h new file mode 100644 index 0000000000000..563c64852595a --- /dev/null +++ b/linux-user/openrisc/target_ptrace.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef OPENRISC_TARGET_PTRACE_H +#define OPENRISC_TARGET_PTRACE_H + +/* See arch/openrisc/include/uapi/asm/ptrace.h. 
*/ +struct target_user_regs_struct { + abi_ulong gpr[32]; + abi_ulong pc; + abi_ulong sr; +}; + +#endif /* OPENRISC_TARGET_PTRACE_H */ diff --git a/linux-user/openrisc/target_syscall.h b/linux-user/openrisc/target_syscall.h index 7fe5b73d3bea4..c8394e9dcd7a3 100644 --- a/linux-user/openrisc/target_syscall.h +++ b/linux-user/openrisc/target_syscall.h @@ -1,17 +1,6 @@ #ifndef OPENRISC_TARGET_SYSCALL_H #define OPENRISC_TARGET_SYSCALL_H -/* Note that in linux/arch/openrisc/include/uapi/asm/ptrace.h, - * this is called user_regs_struct. Given that this is what - * is used within struct sigcontext we need this definition. - * However, elfload.c wants this name. - */ -struct target_pt_regs { - abi_ulong gpr[32]; - abi_ulong pc; - abi_ulong sr; -}; - #define UNAME_MACHINE "openrisc" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/ppc/cpu_loop.c b/linux-user/ppc/cpu_loop.c index 2a0efaffcd6a5..b0b0cb14b41d9 100644 --- a/linux-user/ppc/cpu_loop.c +++ b/linux-user/ppc/cpu_loop.c @@ -77,7 +77,7 @@ void cpu_loop(CPUPPCState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); arch_interrupt = true; switch (trapnr) { @@ -378,21 +378,31 @@ void cpu_loop(CPUPPCState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; + CPUArchState *env = cpu_env(cs); + abi_ptr entry = info->entry; + + env->gpr[1] = info->start_stack; + +#ifdef TARGET_PPC64 + if (get_ppc64_abi(info) < 2) { + uint64_t val; + get_user_u64(val, entry + 8); + env->gpr[2] = val + info->load_bias; + get_user_u64(val, entry); + entry = val + info->load_bias; + } else { + env->gpr[12] = entry; /* r12 set to global entry address */ + } -#if defined(TARGET_PPC64) int flag = (env->insns_flags2 & PPC2_BOOKE206) ? 
MSR_CM : MSR_SF; #if defined(TARGET_ABI32) ppc_store_msr(env, env->msr & ~((target_ulong)1 << flag)); #else ppc_store_msr(env, env->msr | (target_ulong)1 << flag); #endif -#endif +#endif /* TARGET_PPC64 */ - env->nip = regs->nip; - for(i = 0; i < 32; i++) { - env->gpr[i] = regs->gpr[i]; - } + env->nip = entry; } diff --git a/linux-user/ppc/elfload.c b/linux-user/ppc/elfload.c new file mode 100644 index 0000000000000..0d54da980396c --- /dev/null +++ b/linux-user/ppc/elfload.c @@ -0,0 +1,146 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ +#ifdef TARGET_PPC64 + return "POWER9"; +#else + return "750"; +#endif +} + +/* + * Feature masks for the Aux Vector Hardware Capabilities (AT_HWCAP). + * See arch/powerpc/include/asm/cputable.h. + */ +enum { + QEMU_PPC_FEATURE_32 = 0x80000000, + QEMU_PPC_FEATURE_64 = 0x40000000, + QEMU_PPC_FEATURE_601_INSTR = 0x20000000, + QEMU_PPC_FEATURE_HAS_ALTIVEC = 0x10000000, + QEMU_PPC_FEATURE_HAS_FPU = 0x08000000, + QEMU_PPC_FEATURE_HAS_MMU = 0x04000000, + QEMU_PPC_FEATURE_HAS_4xxMAC = 0x02000000, + QEMU_PPC_FEATURE_UNIFIED_CACHE = 0x01000000, + QEMU_PPC_FEATURE_HAS_SPE = 0x00800000, + QEMU_PPC_FEATURE_HAS_EFP_SINGLE = 0x00400000, + QEMU_PPC_FEATURE_HAS_EFP_DOUBLE = 0x00200000, + QEMU_PPC_FEATURE_NO_TB = 0x00100000, + QEMU_PPC_FEATURE_POWER4 = 0x00080000, + QEMU_PPC_FEATURE_POWER5 = 0x00040000, + QEMU_PPC_FEATURE_POWER5_PLUS = 0x00020000, + QEMU_PPC_FEATURE_CELL = 0x00010000, + QEMU_PPC_FEATURE_BOOKE = 0x00008000, + QEMU_PPC_FEATURE_SMT = 0x00004000, + QEMU_PPC_FEATURE_ICACHE_SNOOP = 0x00002000, + QEMU_PPC_FEATURE_ARCH_2_05 = 0x00001000, + QEMU_PPC_FEATURE_PA6T = 0x00000800, + QEMU_PPC_FEATURE_HAS_DFP = 0x00000400, + QEMU_PPC_FEATURE_POWER6_EXT = 0x00000200, + QEMU_PPC_FEATURE_ARCH_2_06 = 0x00000100, + QEMU_PPC_FEATURE_HAS_VSX = 0x00000080, + QEMU_PPC_FEATURE_PSERIES_PERFMON_COMPAT = 
0x00000040, + + QEMU_PPC_FEATURE_TRUE_LE = 0x00000002, + QEMU_PPC_FEATURE_PPC_LE = 0x00000001, + + /* Feature definitions in AT_HWCAP2. */ + QEMU_PPC_FEATURE2_ARCH_2_07 = 0x80000000, /* ISA 2.07 */ + QEMU_PPC_FEATURE2_HAS_HTM = 0x40000000, /* Hardware Transactional Memory */ + QEMU_PPC_FEATURE2_HAS_DSCR = 0x20000000, /* Data Stream Control Register */ + QEMU_PPC_FEATURE2_HAS_EBB = 0x10000000, /* Event Base Branching */ + QEMU_PPC_FEATURE2_HAS_ISEL = 0x08000000, /* Integer Select */ + QEMU_PPC_FEATURE2_HAS_TAR = 0x04000000, /* Target Address Register */ + QEMU_PPC_FEATURE2_VEC_CRYPTO = 0x02000000, + QEMU_PPC_FEATURE2_HTM_NOSC = 0x01000000, + QEMU_PPC_FEATURE2_ARCH_3_00 = 0x00800000, /* ISA 3.00 */ + QEMU_PPC_FEATURE2_HAS_IEEE128 = 0x00400000, /* VSX IEEE Bin Float 128-bit */ + QEMU_PPC_FEATURE2_DARN = 0x00200000, /* darn random number insn */ + QEMU_PPC_FEATURE2_SCV = 0x00100000, /* scv syscall */ + QEMU_PPC_FEATURE2_HTM_NO_SUSPEND = 0x00080000, /* TM w/o suspended state */ + QEMU_PPC_FEATURE2_ARCH_3_1 = 0x00040000, /* ISA 3.1 */ + QEMU_PPC_FEATURE2_MMA = 0x00020000, /* Matrix-Multiply Assist */ +}; + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + uint32_t features = 0; + + /* + * We don't have to be terribly complete here; the high points are + * Altivec/FP/SPE support. Anything else is just a bonus. 
+ */ +#define GET_FEATURE(flag, feature) \ + do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) +#define GET_FEATURE2(flags, feature) \ + do { \ + if ((cpu->env.insns_flags2 & flags) == flags) { \ + features |= feature; \ + } \ + } while (0) + GET_FEATURE(PPC_64B, QEMU_PPC_FEATURE_64); + GET_FEATURE(PPC_FLOAT, QEMU_PPC_FEATURE_HAS_FPU); + GET_FEATURE(PPC_ALTIVEC, QEMU_PPC_FEATURE_HAS_ALTIVEC); + GET_FEATURE(PPC_SPE, QEMU_PPC_FEATURE_HAS_SPE); + GET_FEATURE(PPC_SPE_SINGLE, QEMU_PPC_FEATURE_HAS_EFP_SINGLE); + GET_FEATURE(PPC_SPE_DOUBLE, QEMU_PPC_FEATURE_HAS_EFP_DOUBLE); + GET_FEATURE(PPC_BOOKE, QEMU_PPC_FEATURE_BOOKE); + GET_FEATURE(PPC_405_MAC, QEMU_PPC_FEATURE_HAS_4xxMAC); + GET_FEATURE2(PPC2_DFP, QEMU_PPC_FEATURE_HAS_DFP); + GET_FEATURE2(PPC2_VSX, QEMU_PPC_FEATURE_HAS_VSX); + GET_FEATURE2((PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 | PPC2_ATOMIC_ISA206 | + PPC2_FP_CVT_ISA206 | PPC2_FP_TST_ISA206), + QEMU_PPC_FEATURE_ARCH_2_06); + +#undef GET_FEATURE +#undef GET_FEATURE2 + + return features; +} + +abi_ulong get_elf_hwcap2(CPUState *cs) +{ + PowerPCCPU *cpu = POWERPC_CPU(cs); + uint32_t features = 0; + +#define GET_FEATURE(flag, feature) \ + do { if (cpu->env.insns_flags & flag) { features |= feature; } } while (0) +#define GET_FEATURE2(flag, feature) \ + do { if (cpu->env.insns_flags2 & flag) { features |= feature; } } while (0) + + GET_FEATURE(PPC_ISEL, QEMU_PPC_FEATURE2_HAS_ISEL); + GET_FEATURE2(PPC2_BCTAR_ISA207, QEMU_PPC_FEATURE2_HAS_TAR); + GET_FEATURE2((PPC2_BCTAR_ISA207 | PPC2_LSQ_ISA207 | PPC2_ALTIVEC_207 | + PPC2_ISA207S), QEMU_PPC_FEATURE2_ARCH_2_07 | + QEMU_PPC_FEATURE2_VEC_CRYPTO); + GET_FEATURE2(PPC2_ISA300, QEMU_PPC_FEATURE2_ARCH_3_00 | + QEMU_PPC_FEATURE2_DARN | QEMU_PPC_FEATURE2_HAS_IEEE128); + GET_FEATURE2(PPC2_ISA310, QEMU_PPC_FEATURE2_ARCH_3_1 | + QEMU_PPC_FEATURE2_MMA); + +#undef GET_FEATURE +#undef GET_FEATURE2 + + return features; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUPPCState *env) +{ + for (int i = 
0; i < ARRAY_SIZE(env->gpr); i++) { + r->pt.gpr[i] = tswapal(env->gpr[i]); + } + + r->pt.nip = tswapal(env->nip); + r->pt.msr = tswapal(env->msr); + r->pt.ctr = tswapal(env->ctr); + r->pt.link = tswapal(env->lr); + r->pt.xer = tswapal(cpu_read_xer(env)); + r->pt.ccr = tswapal(ppc_get_cr(env)); +} diff --git a/linux-user/ppc/target_elf.h b/linux-user/ppc/target_elf.h index 061661885423f..22854cf52fc4e 100644 --- a/linux-user/ppc/target_elf.h +++ b/linux-user/ppc/target_elf.h @@ -7,12 +7,64 @@ #ifndef PPC_TARGET_ELF_H #define PPC_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ + +#include "target_ptrace.h" + +#define ELF_MACHINE PPC_ELF_MACHINE + #ifdef TARGET_PPC64 - return "POWER9"; +# define ELF_CLASS ELFCLASS64 +#else +# define ELF_CLASS ELFCLASS32 +# define EXSTACK_DEFAULT true +#endif + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_HWCAP2 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* + * The size of 48 words is set in arch/powerpc/include/uapi/asm/elf.h. + * However PPC_ELF_CORE_COPY_REGS in arch/powerpc/include/asm/elf.h + * open-codes a memcpy from struct pt_regs, then zeros the rest. + */ +typedef struct target_elf_gregset_t { + union { + struct target_pt_regs pt; + abi_ulong reserved[48]; + }; +} target_elf_gregset_t; + +#ifndef TARGET_PPC64 +# define VDSO_HEADER "vdso-32.c.inc" +#elif TARGET_BIG_ENDIAN +# define VDSO_HEADER "vdso-64.c.inc" #else - return "750"; +# define VDSO_HEADER "vdso-64le.c.inc" #endif -} + +/* + * The requirements here are: + * - keep the final alignment of sp (sp & 0xf) + * - make sure the 32-bit value at the first 16 byte aligned position of + * AUXV is greater than 16 for glibc compatibility. + * AT_IGNOREPPC is used for that. + * - for compatibility with glibc ARCH_DLINFO must always be defined on PPC, + * even if DLINFO_ARCH_ITEMS goes to zero or is undefined. 
+ */ +#define DLINFO_ARCH_ITEMS 5 +#define ARCH_DLINFO \ + do { \ + PowerPCCPU *cpu = POWERPC_CPU(thread_cpu); \ + /* \ + * Handle glibc compatibility: these magic entries must \ + * be at the lowest addresses in the final auxv. \ + */ \ + NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ + NEW_AUX_ENT(AT_IGNOREPPC, AT_IGNOREPPC); \ + NEW_AUX_ENT(AT_DCACHEBSIZE, cpu->env.dcache_line_size); \ + NEW_AUX_ENT(AT_ICACHEBSIZE, cpu->env.icache_line_size); \ + NEW_AUX_ENT(AT_UCACHEBSIZE, 0); \ + } while (0) + #endif diff --git a/linux-user/ppc/target_ptrace.h b/linux-user/ppc/target_ptrace.h new file mode 100644 index 0000000000000..df77bfde73ee1 --- /dev/null +++ b/linux-user/ppc/target_ptrace.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef PPC_TARGET_PTRACE_H +#define PPC_TARGET_PTRACE_H + +struct target_pt_regs { + abi_ulong gpr[32]; + abi_ulong nip; + abi_ulong msr; + abi_ulong orig_gpr3; /* Used for restarting system calls */ + abi_ulong ctr; + abi_ulong link; + abi_ulong xer; + abi_ulong ccr; +#if defined(TARGET_PPC64) + abi_ulong softe; +#else + abi_ulong mq; /* 601 only (not used at present) */ +#endif + abi_ulong trap; /* Reason for being here */ + abi_ulong dar; /* Fault registers */ + abi_ulong dsisr; + abi_ulong result; /* Result of a system call */ +}; + +#endif /* PPC_TARGET_PTRACE_H */ diff --git a/linux-user/ppc/target_syscall.h b/linux-user/ppc/target_syscall.h index 77b36d0b46e67..976b4bb7e9a2b 100644 --- a/linux-user/ppc/target_syscall.h +++ b/linux-user/ppc/target_syscall.h @@ -20,34 +20,6 @@ #ifndef PPC_TARGET_SYSCALL_H #define PPC_TARGET_SYSCALL_H -/* XXX: ABSOLUTELY BUGGY: - * for now, this is quite just a cut-and-paste from i386 target... 
- */ - -/* default linux values for the selectors */ -#define __USER_DS (1) - -struct target_pt_regs { - abi_ulong gpr[32]; - abi_ulong nip; - abi_ulong msr; - abi_ulong orig_gpr3; /* Used for restarting system calls */ - abi_ulong ctr; - abi_ulong link; - abi_ulong xer; - abi_ulong ccr; -#if defined(TARGET_PPC64) - abi_ulong softe; -#else - abi_ulong mq; /* 601 only (not used at present) */ -#endif - /* Used on APUS to hold IPL value. */ - abi_ulong trap; /* Reason for being here */ - abi_ulong dar; /* Fault registers */ - abi_ulong dsisr; - abi_ulong result; /* Result of a system call */ -}; - /* ioctls */ struct target_revectored_struct { abi_ulong __map[8]; /* 256 bits */ diff --git a/linux-user/ppc/vdso-32.so b/linux-user/ppc/vdso-32.so index 0dc55e0dddff6..03476052fbeb1 100755 Binary files a/linux-user/ppc/vdso-32.so and b/linux-user/ppc/vdso-32.so differ diff --git a/linux-user/ppc/vdso-64.so b/linux-user/ppc/vdso-64.so index ac1ab2582e467..b89f2a0e018a7 100755 Binary files a/linux-user/ppc/vdso-64.so and b/linux-user/ppc/vdso-64.so differ diff --git a/linux-user/ppc/vdso-64le.so b/linux-user/ppc/vdso-64le.so index 424abb4290b7d..22499d2701530 100755 Binary files a/linux-user/ppc/vdso-64le.so and b/linux-user/ppc/vdso-64le.so differ diff --git a/linux-user/ppc/vdso.S b/linux-user/ppc/vdso.S index 2e79ea98086a1..e9256a2deafc9 100644 --- a/linux-user/ppc/vdso.S +++ b/linux-user/ppc/vdso.S @@ -220,6 +220,7 @@ endf __kernel_sync_dicache nop +sigreturn_region_start: __kernel_sigtramp_rt: raw_syscall __NR_rt_sigreturn endf __kernel_sigtramp_rt @@ -235,5 +236,6 @@ __kernel_sigtramp32: raw_syscall __NR_sigreturn endf __kernel_sigtramp32 #endif +sigreturn_region_end: .cfi_endproc diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 0b19fa43e6511..85e68eff7b34a 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -122,12 +122,11 @@ struct TaskState { #ifdef TARGET_M68K abi_ulong tp_value; #endif -#if defined(TARGET_ARM) || defined(TARGET_M68K) || 
defined(TARGET_RISCV) - /* Extra fields for semihosted binaries. */ - abi_ulong heap_base; - abi_ulong heap_limit; +#if defined(TARGET_AARCH64) + vaddr gcs_base; + abi_ulong gcs_size; + abi_ulong gcs_el0_locked; #endif - abi_ulong stack_base; int used; /* non zero if used */ struct image_info *info; struct linux_binprm *bprm; @@ -161,6 +160,11 @@ struct TaskState { /* This thread's sigaltstack, if it has one */ struct target_sigaltstack sigaltstack_used; + /* This thread's SYSCALL_USER_DISPATCH state, len=~0 means disabled */ + vaddr sys_dispatch; + vaddr sys_dispatch_selector; + abi_ulong sys_dispatch_len; + /* Start time of task after system boot in clock ticks */ uint64_t start_boottime; }; @@ -365,4 +369,6 @@ void *lock_user_string(abi_ulong guest_addr); /* Clone cpu state */ CPUArchState *cpu_copy(CPUArchState *env); +void init_main_thread(CPUState *cs, struct image_info *info); + #endif /* QEMU_H */ diff --git a/linux-user/riscv/cpu_loop.c b/linux-user/riscv/cpu_loop.c index 3ac8bbfec1f49..ce542540c28bb 100644 --- a/linux-user/riscv/cpu_loop.c +++ b/linux-user/riscv/cpu_loop.c @@ -36,7 +36,7 @@ void cpu_loop(CPURISCVState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_INTERRUPT: @@ -94,23 +94,16 @@ void cpu_loop(CPURISCVState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - CPUState *cpu = env_cpu(env); - TaskState *ts = get_task_state(cpu); - struct image_info *info = ts->info; + CPUArchState *env = cpu_env(cs); - env->pc = regs->sepc; - env->gpr[xSP] = regs->sp; + env->pc = info->entry; + env->gpr[xSP] = info->start_stack; env->elf_flags = info->elf_flags; if ((env->misa_ext & RVE) && !(env->elf_flags & EF_RISCV_RVE)) { error_report("Incompatible ELF: RVE cpu requires RVE ABI binary"); exit(EXIT_FAILURE); } - - ts->stack_base = info->start_stack; - 
ts->heap_base = info->brk; - /* This will be filled in on the first SYS_HEAPINFO call. */ - ts->heap_limit = 0; } diff --git a/linux-user/riscv/elfload.c b/linux-user/riscv/elfload.c new file mode 100644 index 0000000000000..2e7d622232360 --- /dev/null +++ b/linux-user/riscv/elfload.c @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "max"; +} + +abi_ulong get_elf_hwcap(CPUState *cs) +{ +#define MISA_BIT(EXT) (1 << (EXT - 'A')) + RISCVCPU *cpu = RISCV_CPU(cs); + uint32_t mask = MISA_BIT('I') | MISA_BIT('M') | MISA_BIT('A') + | MISA_BIT('F') | MISA_BIT('D') | MISA_BIT('C') + | MISA_BIT('V'); + + return cpu->env.misa_ext & mask; +#undef MISA_BIT +} diff --git a/linux-user/riscv/target_elf.h b/linux-user/riscv/target_elf.h index dedd5956f3afe..dbbfdf54d39c7 100644 --- a/linux-user/riscv/target_elf.h +++ b/linux-user/riscv/target_elf.h @@ -7,8 +7,17 @@ #ifndef RISCV_TARGET_ELF_H #define RISCV_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "max"; -} + +#define ELF_MACHINE EM_RISCV + +#ifdef TARGET_RISCV32 +#define ELF_CLASS ELFCLASS32 +#define VDSO_HEADER "vdso-32.c.inc" +#else +#define ELF_CLASS ELFCLASS64 +#define VDSO_HEADER "vdso-64.c.inc" +#endif + +#define HAVE_ELF_HWCAP 1 + #endif diff --git a/linux-user/riscv/target_syscall.h b/linux-user/riscv/target_syscall.h index 7601f10c28e72..69a7b753eb467 100644 --- a/linux-user/riscv/target_syscall.h +++ b/linux-user/riscv/target_syscall.h @@ -8,41 +8,6 @@ #ifndef LINUX_USER_RISCV_TARGET_SYSCALL_H #define LINUX_USER_RISCV_TARGET_SYSCALL_H -struct target_pt_regs { - abi_long sepc; - abi_long ra; - abi_long sp; - abi_long gp; - abi_long tp; - abi_long t0; - abi_long t1; - abi_long t2; - abi_long s0; - abi_long s1; - abi_long a0; - abi_long a1; - abi_long a2; - abi_long a3; - abi_long a4; - abi_long a5; - abi_long a6; - abi_long a7; - abi_long 
s2; - abi_long s3; - abi_long s4; - abi_long s5; - abi_long s6; - abi_long s7; - abi_long s8; - abi_long s9; - abi_long s10; - abi_long s11; - abi_long t3; - abi_long t4; - abi_long t5; - abi_long t6; -}; - #ifdef TARGET_RISCV32 #define UNAME_MACHINE "riscv32" #define UNAME_MINIMUM_RELEASE "5.4.0" diff --git a/linux-user/riscv/vdso-32.so b/linux-user/riscv/vdso-32.so index c2ce2a4757900..4818a994f0394 100755 Binary files a/linux-user/riscv/vdso-32.so and b/linux-user/riscv/vdso-32.so differ diff --git a/linux-user/riscv/vdso-64.so b/linux-user/riscv/vdso-64.so index ae49f5b043b59..cc6f7e974be67 100755 Binary files a/linux-user/riscv/vdso-64.so and b/linux-user/riscv/vdso-64.so differ diff --git a/linux-user/riscv/vdso.S b/linux-user/riscv/vdso.S index c37275233a065..1d780db771748 100644 --- a/linux-user/riscv/vdso.S +++ b/linux-user/riscv/vdso.S @@ -181,7 +181,9 @@ endf __vdso_flush_icache nop __vdso_rt_sigreturn: +sigreturn_region_start: raw_syscall __NR_rt_sigreturn +sigreturn_region_end: endf __vdso_rt_sigreturn .cfi_endproc diff --git a/linux-user/s390x/cpu_loop.c b/linux-user/s390x/cpu_loop.c index c9124444ed207..4929b32e1fcce 100644 --- a/linux-user/s390x/cpu_loop.c +++ b/linux-user/s390x/cpu_loop.c @@ -64,7 +64,7 @@ void cpu_loop(CPUS390XState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case EXCP_INTERRUPT: @@ -180,12 +180,13 @@ void cpu_loop(CPUS390XState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; - for (i = 0; i < 16; i++) { - env->regs[i] = regs->gprs[i]; - } - env->psw.mask = regs->psw.mask; - env->psw.addr = regs->psw.addr; + CPUArchState *env = cpu_env(cs); + + env->psw.addr = info->entry; + env->psw.mask = PSW_MASK_DAT | PSW_MASK_IO | PSW_MASK_EXT | + PSW_MASK_MCHECK | PSW_MASK_PSTATE | PSW_MASK_64 | + PSW_MASK_32; + env->regs[15] = 
info->start_stack; } diff --git a/linux-user/s390x/elfload.c b/linux-user/s390x/elfload.c new file mode 100644 index 0000000000000..27109279e28cb --- /dev/null +++ b/linux-user/s390x/elfload.c @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "elf.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "qemu"; +} + +#define GET_FEATURE(_feat, _hwcap) \ + do { if (s390_has_feat(_feat)) { hwcap |= _hwcap; } } while (0) + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + /* + * Let's assume we always have esan3 and zarch. + * 31-bit processes can use 64-bit registers (high gprs). + */ + uint32_t hwcap = HWCAP_S390_ESAN3 | HWCAP_S390_ZARCH | HWCAP_S390_HIGH_GPRS; + + GET_FEATURE(S390_FEAT_STFLE, HWCAP_S390_STFLE); + GET_FEATURE(S390_FEAT_MSA, HWCAP_S390_MSA); + GET_FEATURE(S390_FEAT_LONG_DISPLACEMENT, HWCAP_S390_LDISP); + GET_FEATURE(S390_FEAT_EXTENDED_IMMEDIATE, HWCAP_S390_EIMM); + if (s390_has_feat(S390_FEAT_EXTENDED_TRANSLATION_3) && + s390_has_feat(S390_FEAT_ETF3_ENH)) { + hwcap |= HWCAP_S390_ETF3EH; + } + GET_FEATURE(S390_FEAT_VECTOR, HWCAP_S390_VXRS); + GET_FEATURE(S390_FEAT_VECTOR_ENH, HWCAP_S390_VXRS_EXT); + GET_FEATURE(S390_FEAT_VECTOR_ENH2, HWCAP_S390_VXRS_EXT2); + + return hwcap; +} + +const char *elf_hwcap_str(uint32_t bit) +{ + static const char *hwcap_str[] = { + [HWCAP_S390_NR_ESAN3] = "esan3", + [HWCAP_S390_NR_ZARCH] = "zarch", + [HWCAP_S390_NR_STFLE] = "stfle", + [HWCAP_S390_NR_MSA] = "msa", + [HWCAP_S390_NR_LDISP] = "ldisp", + [HWCAP_S390_NR_EIMM] = "eimm", + [HWCAP_S390_NR_DFP] = "dfp", + [HWCAP_S390_NR_HPAGE] = "edat", + [HWCAP_S390_NR_ETF3EH] = "etf3eh", + [HWCAP_S390_NR_HIGH_GPRS] = "highgprs", + [HWCAP_S390_NR_TE] = "te", + [HWCAP_S390_NR_VXRS] = "vx", + [HWCAP_S390_NR_VXRS_BCD] = "vxd", + [HWCAP_S390_NR_VXRS_EXT] = "vxe", + [HWCAP_S390_NR_GS] = "gs", + [HWCAP_S390_NR_VXRS_EXT2] = "vxe2", + [HWCAP_S390_NR_VXRS_PDE] = "vxp", + 
[HWCAP_S390_NR_SORT] = "sort", + [HWCAP_S390_NR_DFLT] = "dflt", + [HWCAP_S390_NR_NNPA] = "nnpa", + [HWCAP_S390_NR_PCI_MIO] = "pcimio", + [HWCAP_S390_NR_SIE] = "sie", + }; + + return bit < ARRAY_SIZE(hwcap_str) ? hwcap_str[bit] : NULL; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUS390XState *env) +{ + r->pt.psw.mask = tswapal(env->psw.mask); + r->pt.psw.addr = tswapal(env->psw.addr); + for (int i = 0; i < 16; i++) { + r->pt.gprs[i] = tswapal(env->regs[i]); + } + for (int i = 0; i < 16; i++) { + r->pt.acrs[i] = tswap32(env->aregs[i]); + } + r->pt.orig_gpr2 = 0; +} diff --git a/linux-user/s390x/signal.c b/linux-user/s390x/signal.c index df49c2470802b..96d1c8d11ca5d 100644 --- a/linux-user/s390x/signal.c +++ b/linux-user/s390x/signal.c @@ -22,6 +22,7 @@ #include "signal-common.h" #include "linux-user/trace.h" #include "vdso-asmoffset.h" +#include "target_ptrace.h" #define __NUM_GPRS 16 #define __NUM_FPRS 16 diff --git a/linux-user/s390x/target_elf.h b/linux-user/s390x/target_elf.h index 8114b59c1d67a..ef5edbd86099f 100644 --- a/linux-user/s390x/target_elf.h +++ b/linux-user/s390x/target_elf.h @@ -7,8 +7,22 @@ #ifndef S390X_TARGET_ELF_H #define S390X_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "qemu"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS64 +#define ELF_MACHINE EM_S390 +#define VDSO_HEADER "vdso.c.inc" + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/s390/include/asm/elf.h, where + * elf_gregset_t is typedef'd to struct s390_regs. 
+ */ +typedef struct target_elf_gregset_t { + struct target_s390_regs pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/s390x/target_proc.h b/linux-user/s390x/target_proc.h index a4a4821ea5cff..60cc22d3b462f 100644 --- a/linux-user/s390x/target_proc.h +++ b/linux-user/s390x/target_proc.h @@ -48,7 +48,7 @@ static void show_cpu_summary(CPUArchState *cpu_env, int fd) { S390CPUModel *model = env_archcpu(cpu_env)->model; int num_cpus = sysconf(_SC_NPROCESSORS_ONLN); - uint32_t elf_hwcap = get_elf_hwcap(); + uint32_t elf_hwcap = get_elf_hwcap(env_cpu(cpu_env)); const char *hwcap_str; int i; diff --git a/linux-user/s390x/target_ptrace.h b/linux-user/s390x/target_ptrace.h new file mode 100644 index 0000000000000..a5ceb75a747a4 --- /dev/null +++ b/linux-user/s390x/target_ptrace.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef S390X_TARGET_PTRACE_H +#define S390X_TARGET_PTRACE_H + +typedef struct { + abi_ulong mask; + abi_ulong addr; +} target_psw_t; + +struct target_s390_regs { + target_psw_t psw; + abi_ulong gprs[16]; + abi_uint acrs[16]; + abi_ulong orig_gpr2; +}; + +#endif /* S390X_TARGET_PTRACE_H */ diff --git a/linux-user/s390x/target_syscall.h b/linux-user/s390x/target_syscall.h index 4018988a25e1b..f01f9a0baa2a4 100644 --- a/linux-user/s390x/target_syscall.h +++ b/linux-user/s390x/target_syscall.h @@ -1,28 +1,6 @@ #ifndef S390X_TARGET_SYSCALL_H #define S390X_TARGET_SYSCALL_H -/* this typedef defines how a Program Status Word looks like */ -typedef struct { - abi_ulong mask; - abi_ulong addr; -} __attribute__ ((aligned(8))) target_psw_t; - -/* - * The pt_regs struct defines the way the registers are stored on - * the stack during a system call. 
- */ - -#define TARGET_NUM_GPRS 16 - -struct target_pt_regs { - abi_ulong args[1]; - target_psw_t psw; - abi_ulong gprs[TARGET_NUM_GPRS]; - abi_ulong orig_gpr2; - unsigned short ilen; - unsigned short trap; -}; - #define UNAME_MACHINE "s390x" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/s390x/vdso.S b/linux-user/s390x/vdso.S index 3332492477183..c60e9ed086a01 100644 --- a/linux-user/s390x/vdso.S +++ b/linux-user/s390x/vdso.S @@ -52,6 +52,7 @@ vdso_syscall __kernel_getcpu, __NR_getcpu * by all users. Without it we get the fallback signal frame handling. */ +sigreturn_region_start: __kernel_sigreturn: raw_syscall __NR_sigreturn endf __kernel_sigreturn @@ -59,3 +60,4 @@ endf __kernel_sigreturn __kernel_rt_sigreturn: raw_syscall __NR_rt_sigreturn endf __kernel_rt_sigreturn +sigreturn_region_end: diff --git a/linux-user/s390x/vdso.so b/linux-user/s390x/vdso.so index 64130f6f33526..a669a6b7dda7b 100755 Binary files a/linux-user/s390x/vdso.so and b/linux-user/s390x/vdso.so differ diff --git a/linux-user/sh4/cpu_loop.c b/linux-user/sh4/cpu_loop.c index ee9eff3428a92..0c9d7e9c46b5f 100644 --- a/linux-user/sh4/cpu_loop.c +++ b/linux-user/sh4/cpu_loop.c @@ -34,7 +34,7 @@ void cpu_loop(CPUSH4State *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case 0x160: @@ -81,12 +81,10 @@ void cpu_loop(CPUSH4State *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; + CPUArchState *env = cpu_env(cs); - for(i = 0; i < 16; i++) { - env->gregs[i] = regs->regs[i]; - } - env->pc = regs->pc; + env->pc = info->entry; + env->gregs[15] = info->start_stack; } diff --git a/linux-user/sh4/elfload.c b/linux-user/sh4/elfload.c new file mode 100644 index 0000000000000..ddf2aaaed7ca0 --- /dev/null +++ b/linux-user/sh4/elfload.c @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + 
+#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "sh7785"; +} + +enum { + SH_CPU_HAS_FPU = 0x0001, /* Hardware FPU support */ + SH_CPU_HAS_P2_FLUSH_BUG = 0x0002, /* Need to flush the cache in P2 area */ + SH_CPU_HAS_MMU_PAGE_ASSOC = 0x0004, /* SH3: TLB way selection bit support */ + SH_CPU_HAS_DSP = 0x0008, /* SH-DSP: DSP support */ + SH_CPU_HAS_PERF_COUNTER = 0x0010, /* Hardware performance counters */ + SH_CPU_HAS_PTEA = 0x0020, /* PTEA register */ + SH_CPU_HAS_LLSC = 0x0040, /* movli.l/movco.l */ + SH_CPU_HAS_L2_CACHE = 0x0080, /* Secondary cache / URAM */ + SH_CPU_HAS_OP32 = 0x0100, /* 32-bit instruction support */ + SH_CPU_HAS_PTEAEX = 0x0200, /* PTE ASID Extension support */ +}; + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + SuperHCPU *cpu = SUPERH_CPU(cs); + abi_ulong hwcap = 0; + + hwcap |= SH_CPU_HAS_FPU; + + if (cpu->env.features & SH_FEATURE_SH4A) { + hwcap |= SH_CPU_HAS_LLSC; + } + + return hwcap; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUSH4State *env) +{ + for (int i = 0; i < 16; i++) { + r->pt.regs[i] = tswapal(env->gregs[i]); + } + + r->pt.pc = tswapal(env->pc); + r->pt.pr = tswapal(env->pr); + r->pt.sr = tswapal(env->sr); + r->pt.gbr = tswapal(env->gbr); + r->pt.mach = tswapal(env->mach); + r->pt.macl = tswapal(env->macl); +} diff --git a/linux-user/sh4/target_elf.h b/linux-user/sh4/target_elf.h index f485e0cef279b..d9e253d4250e0 100644 --- a/linux-user/sh4/target_elf.h +++ b/linux-user/sh4/target_elf.h @@ -7,8 +7,21 @@ #ifndef SH4_TARGET_ELF_H #define SH4_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "sh7785"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_SH + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/sh/include/asm/elf.h, where + * elf_gregset_t is mapped to struct pt_regs via sizeof. 
+ */ +typedef struct target_elf_gregset_t { + struct target_pt_regs pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/sh4/target_ptrace.h b/linux-user/sh4/target_ptrace.h new file mode 100644 index 0000000000000..b80218526b110 --- /dev/null +++ b/linux-user/sh4/target_ptrace.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef SH4_TARGET_PTRACE_H +#define SH4_TARGET_PTRACE_H + +/* See arch/sh/include/uapi/asm/ptrace_32.h. */ +struct target_pt_regs { + abi_ulong regs[16]; + abi_ulong pc; + abi_ulong pr; + abi_ulong sr; + abi_ulong gbr; + abi_ulong mach; + abi_ulong macl; + abi_long tra; +}; + +#endif /* SH4_TARGET_PTRACE_H */ diff --git a/linux-user/sh4/target_syscall.h b/linux-user/sh4/target_syscall.h index 148398855dfee..2f3557742d305 100644 --- a/linux-user/sh4/target_syscall.h +++ b/linux-user/sh4/target_syscall.h @@ -1,17 +1,6 @@ #ifndef SH4_TARGET_SYSCALL_H #define SH4_TARGET_SYSCALL_H -struct target_pt_regs { - unsigned long regs[16]; - unsigned long pc; - unsigned long pr; - unsigned long sr; - unsigned long gbr; - unsigned long mach; - unsigned long macl; - long tra; -}; - #define UNAME_MACHINE "sh4" #define UNAME_MINIMUM_RELEASE "2.6.32" diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index 196d2406f86ee..8a44714251add 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -25,6 +25,13 @@ /* Fallback addresses into sigtramp page. 
*/ extern abi_ulong default_sigreturn; extern abi_ulong default_rt_sigreturn; +extern abi_ulong vdso_sigreturn_region_start; +extern abi_ulong vdso_sigreturn_region_end; + +static inline bool is_vdso_sigreturn(abi_ulong pc) +{ + return pc >= vdso_sigreturn_region_start && pc < vdso_sigreturn_region_end; +} void setup_sigtramp(abi_ulong tramp_page); diff --git a/linux-user/signal.c b/linux-user/signal.c index cd0e7398aa46a..804096bd4470b 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -50,6 +50,8 @@ static void host_signal_handler(int host_signum, siginfo_t *info, /* Fallback addresses into sigtramp page. */ abi_ulong default_sigreturn; abi_ulong default_rt_sigreturn; +abi_ulong vdso_sigreturn_region_start; +abi_ulong vdso_sigreturn_region_end; /* * System includes define _NSIG as SIGRTMAX + 1, but qemu (like the kernel) diff --git a/linux-user/sparc/cpu_loop.c b/linux-user/sparc/cpu_loop.c index 68f1e8ecd43bf..7391e2add8d4a 100644 --- a/linux-user/sparc/cpu_loop.c +++ b/linux-user/sparc/cpu_loop.c @@ -220,7 +220,7 @@ void cpu_loop (CPUSPARCState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); switch (trapnr) { case TARGET_TT_SYSCALL: @@ -357,14 +357,12 @@ void cpu_loop (CPUSPARCState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; - env->pc = regs->pc; - env->npc = regs->npc; - env->y = regs->y; - for(i = 0; i < 8; i++) - env->gregs[i] = regs->u_regs[i]; - for(i = 0; i < 8; i++) - env->regwptr[i] = regs->u_regs[i + 8]; + CPUArchState *env = cpu_env(cs); + + env->pc = info->entry; + env->npc = env->pc + 4; + env->regwptr[WREG_SP] = (info->start_stack - 16 * sizeof(abi_ulong) + - TARGET_STACK_BIAS); } diff --git a/linux-user/sparc/elfload.c b/linux-user/sparc/elfload.c new file mode 100644 index 0000000000000..32ca1b05b1a3f --- /dev/null +++ b/linux-user/sparc/elfload.c @@ 
-0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ +#ifdef TARGET_SPARC64 + return "TI UltraSparc II"; +#else + return "Fujitsu MB86904"; +#endif +} + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + /* There are not many sparc32 hwcap bits -- we have all of them. */ + uint32_t r = HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | + HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV; + +#ifdef TARGET_SPARC64 + CPUSPARCState *env = cpu_env(cs); + uint32_t features = env->def.features; + + r |= HWCAP_SPARC_V9 | HWCAP_SPARC_V8PLUS; + /* 32x32 multiply and divide are efficient. */ + r |= HWCAP_SPARC_MUL32 | HWCAP_SPARC_DIV32; + /* We don't have an internal feature bit for this. */ + r |= HWCAP_SPARC_POPC; + r |= features & CPU_FEATURE_FSMULD ? HWCAP_SPARC_FSMULD : 0; + r |= features & CPU_FEATURE_VIS1 ? HWCAP_SPARC_VIS : 0; + r |= features & CPU_FEATURE_VIS2 ? HWCAP_SPARC_VIS2 : 0; + r |= features & CPU_FEATURE_FMAF ? HWCAP_SPARC_FMAF : 0; + r |= features & CPU_FEATURE_VIS3 ? HWCAP_SPARC_VIS3 : 0; + r |= features & CPU_FEATURE_IMA ? 
HWCAP_SPARC_IMA : 0; +#endif + + return r; +} diff --git a/linux-user/sparc/signal.c b/linux-user/sparc/signal.c index 8181b8b92c1a5..d339f89928879 100644 --- a/linux-user/sparc/signal.c +++ b/linux-user/sparc/signal.c @@ -21,6 +21,8 @@ #include "user-internals.h" #include "signal-common.h" #include "linux-user/trace.h" +#include "target_ptrace.h" + /* A Sparc register window */ struct target_reg_window { diff --git a/linux-user/sparc/target_elf.h b/linux-user/sparc/target_elf.h index a510ceb6129ce..7827767bcb2b4 100644 --- a/linux-user/sparc/target_elf.h +++ b/linux-user/sparc/target_elf.h @@ -7,12 +7,18 @@ #ifndef SPARC_TARGET_ELF_H #define SPARC_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ -#ifdef TARGET_SPARC64 - return "TI UltraSparc II"; + +#ifndef TARGET_SPARC64 +# define ELF_CLASS ELFCLASS32 +# define ELF_MACHINE EM_SPARC +#elif defined(TARGET_ABI32) +# define ELF_CLASS ELFCLASS32 +# define elf_check_machine(x) ((x) == EM_SPARC32PLUS || (x) == EM_SPARC) #else - return "Fujitsu MB86904"; +# define ELF_CLASS ELFCLASS64 +# define ELF_MACHINE EM_SPARCV9 #endif -} + +#define HAVE_ELF_HWCAP 1 + #endif diff --git a/linux-user/sparc/target_ptrace.h b/linux-user/sparc/target_ptrace.h new file mode 100644 index 0000000000000..a4d5416c1f971 --- /dev/null +++ b/linux-user/sparc/target_ptrace.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef SPARC_TARGET_PTRACE_H +#define SPARC_TARGET_PTRACE_H + +/* See arch/sparc/include/uapi/asm/ptrace.h. 
*/ +struct target_pt_regs { +#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) + abi_ulong u_regs[16]; + abi_ulong tstate; + abi_ulong pc; + abi_ulong npc; + uint32_t y; + uint32_t magic; +#else + abi_ulong psr; + abi_ulong pc; + abi_ulong npc; + abi_ulong y; + abi_ulong u_regs[16]; +#endif +}; + +#endif /* SPARC_TARGET_PTRACE_H */ diff --git a/linux-user/sparc/target_syscall.h b/linux-user/sparc/target_syscall.h index e4211653574ed..a90ed2983a5a8 100644 --- a/linux-user/sparc/target_syscall.h +++ b/linux-user/sparc/target_syscall.h @@ -1,25 +1,6 @@ #ifndef SPARC_TARGET_SYSCALL_H #define SPARC_TARGET_SYSCALL_H -#if defined(TARGET_SPARC64) && !defined(TARGET_ABI32) -struct target_pt_regs { - abi_ulong u_regs[16]; - abi_ulong tstate; - abi_ulong pc; - abi_ulong npc; - uint32_t y; - uint32_t magic; -}; -#else -struct target_pt_regs { - abi_ulong psr; - abi_ulong pc; - abi_ulong npc; - abi_ulong y; - abi_ulong u_regs[16]; -}; -#endif - #ifdef TARGET_SPARC64 # define UNAME_MACHINE "sparc64" #else diff --git a/linux-user/strace.c b/linux-user/strace.c index 3b744ccd4a721..758c5d32b6c0f 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -54,7 +54,7 @@ struct flags { }; /* No 'struct flags' element should have a zero mask. 
*/ -#define FLAG_BASIC(V, M, N) { V, M | QEMU_BUILD_BUG_ON_ZERO(!(M)), N } +#define FLAG_BASIC(V, M, N) { V, M | QEMU_BUILD_BUG_ON_ZERO((M) == 0), N } /* common flags for all architectures */ #define FLAG_GENERIC_MASK(V, M) FLAG_BASIC(V, M, #V) @@ -499,113 +499,119 @@ print_socket_type(int type) static void print_socket_protocol(int domain, int type, int protocol) { - if (domain == AF_PACKET || - (domain == AF_INET && type == TARGET_SOCK_PACKET)) { + const char *name = NULL; + + switch (domain) { + case AF_PACKET: switch (protocol) { - case 0x0003: - qemu_log("ETH_P_ALL"); + case 3: + name = "ETH_P_ALL"; break; - default: - qemu_log("%d", protocol); } - return; - } + break; - if (domain == PF_NETLINK) { + case PF_NETLINK: switch (protocol) { case NETLINK_ROUTE: - qemu_log("NETLINK_ROUTE"); + name = "NETLINK_ROUTE"; break; case NETLINK_UNUSED: - qemu_log("NETLINK_UNUSED"); + name = "NETLINK_UNUSED"; break; case NETLINK_USERSOCK: - qemu_log("NETLINK_USERSOCK"); + name = "NETLINK_USERSOCK"; break; case NETLINK_FIREWALL: - qemu_log("NETLINK_FIREWALL"); + name = "NETLINK_FIREWALL"; break; case NETLINK_SOCK_DIAG: - qemu_log("NETLINK_SOCK_DIAG"); + name = "NETLINK_SOCK_DIAG"; break; case NETLINK_NFLOG: - qemu_log("NETLINK_NFLOG"); + name = "NETLINK_NFLOG"; break; case NETLINK_XFRM: - qemu_log("NETLINK_XFRM"); + name = "NETLINK_XFRM"; break; case NETLINK_SELINUX: - qemu_log("NETLINK_SELINUX"); + name = "NETLINK_SELINUX"; break; case NETLINK_ISCSI: - qemu_log("NETLINK_ISCSI"); + name = "NETLINK_ISCSI"; break; case NETLINK_AUDIT: - qemu_log("NETLINK_AUDIT"); + name = "NETLINK_AUDIT"; break; case NETLINK_FIB_LOOKUP: - qemu_log("NETLINK_FIB_LOOKUP"); + name = "NETLINK_FIB_LOOKUP"; break; case NETLINK_CONNECTOR: - qemu_log("NETLINK_CONNECTOR"); + name = "NETLINK_CONNECTOR"; break; case NETLINK_NETFILTER: - qemu_log("NETLINK_NETFILTER"); + name = "NETLINK_NETFILTER"; break; case NETLINK_IP6_FW: - qemu_log("NETLINK_IP6_FW"); + name = "NETLINK_IP6_FW"; break; case NETLINK_DNRTMSG: 
- qemu_log("NETLINK_DNRTMSG"); + name = "NETLINK_DNRTMSG"; break; case NETLINK_KOBJECT_UEVENT: - qemu_log("NETLINK_KOBJECT_UEVENT"); + name = "NETLINK_KOBJECT_UEVENT"; break; case NETLINK_GENERIC: - qemu_log("NETLINK_GENERIC"); + name = "NETLINK_GENERIC"; break; case NETLINK_SCSITRANSPORT: - qemu_log("NETLINK_SCSITRANSPORT"); + name = "NETLINK_SCSITRANSPORT"; break; case NETLINK_ECRYPTFS: - qemu_log("NETLINK_ECRYPTFS"); + name = "NETLINK_ECRYPTFS"; break; case NETLINK_RDMA: - qemu_log("NETLINK_RDMA"); + name = "NETLINK_RDMA"; break; case NETLINK_CRYPTO: - qemu_log("NETLINK_CRYPTO"); + name = "NETLINK_CRYPTO"; break; case NETLINK_SMC: - qemu_log("NETLINK_SMC"); + name = "NETLINK_SMC"; break; - default: - qemu_log("%d", protocol); + } + break; + + case AF_INET: + case AF_INET6: + switch (protocol) { + case 3: + if (domain == AF_INET && type == TARGET_SOCK_PACKET) { + name = "ETH_P_ALL"; + } + break; + case IPPROTO_IP: + name = "IPPROTO_IP"; + break; + case IPPROTO_TCP: + name = "IPPROTO_TCP"; + break; + case IPPROTO_UDP: + name = "IPPROTO_UDP"; + break; + case IPPROTO_RAW: + name = "IPPROTO_RAW"; break; } - return; + break; } - switch (protocol) { - case IPPROTO_IP: - qemu_log("IPPROTO_IP"); - break; - case IPPROTO_TCP: - qemu_log("IPPROTO_TCP"); - break; - case IPPROTO_UDP: - qemu_log("IPPROTO_UDP"); - break; - case IPPROTO_RAW: - qemu_log("IPPROTO_RAW"); - break; - default: + if (name) { + qemu_log("%s", name); + } else { qemu_log("%d", protocol); - break; } } - #ifdef TARGET_NR__newselect static void print_fdset(int n, abi_ulong target_fds_addr) diff --git a/linux-user/strace.list b/linux-user/strace.list index fdf94ef32ad2e..51b5ead9696c9 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -1716,3 +1716,9 @@ { TARGET_NR_clock_gettime64, "clock_gettime64" , NULL, print_clock_gettime64, print_syscall_ret_clock_gettime64 }, #endif +#ifdef TARGET_NR_riscv_hwprobe +{ TARGET_NR_riscv_hwprobe, "riscv_hwprobe" , "%s(%p,%d,%d,%d,%d,%d)", NULL, NULL }, 
+#endif +#ifdef TARGET_NR_rseq +{ TARGET_NR_rseq, "rseq" , "%s(%p,%u,%d,%#x)", NULL, NULL }, +#endif diff --git a/linux-user/syscall.c b/linux-user/syscall.c index 91360a072c7f4..8546f48a05b05 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -27,8 +27,6 @@ #include "target_mman.h" #include "exec/page-protection.h" #include "exec/mmap-lock.h" -#include "exec/tb-flush.h" -#include "exec/translation-block.h" #include #include #include @@ -6344,6 +6342,10 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) #endif #ifndef PR_SET_SYSCALL_USER_DISPATCH # define PR_SET_SYSCALL_USER_DISPATCH 59 +# define PR_SYS_DISPATCH_OFF 0 +# define PR_SYS_DISPATCH_ON 1 +# define SYSCALL_DISPATCH_FILTER_ALLOW 0 +# define SYSCALL_DISPATCH_FILTER_BLOCK 1 #endif #ifndef PR_SME_SET_VL # define PR_SME_SET_VL 63 @@ -6351,6 +6353,17 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr) # define PR_SME_VL_LEN_MASK 0xffff # define PR_SME_VL_INHERIT (1 << 17) #endif +#ifndef PR_GET_SHADOW_STACK_STATUS +# define PR_GET_SHADOW_STACK_STATUS 74 +# define PR_SET_SHADOW_STACK_STATUS 75 +# define PR_LOCK_SHADOW_STACK_STATUS 76 +#endif +#ifndef SHADOW_STACK_SET_TOKEN +# define SHADOW_STACK_SET_TOKEN (1u << 0) +#endif +#ifndef SHADOW_STACK_SET_MARKER +# define SHADOW_STACK_SET_MARKER (1u << 1) +#endif #include "target_prctl.h" @@ -6397,6 +6410,45 @@ static abi_long do_prctl_inval1(CPUArchState *env, abi_long arg2) #ifndef do_prctl_sme_set_vl #define do_prctl_sme_set_vl do_prctl_inval1 #endif +#ifndef do_prctl_get_shadow_stack_status +#define do_prctl_get_shadow_stack_status do_prctl_inval1 +#endif +#ifndef do_prctl_set_shadow_stack_status +#define do_prctl_set_shadow_stack_status do_prctl_inval1 +#endif +#ifndef do_prctl_lock_shadow_stack_status +#define do_prctl_lock_shadow_stack_status do_prctl_inval1 +#endif + +static abi_long do_prctl_syscall_user_dispatch(CPUArchState *env, + abi_ulong arg2, abi_ulong arg3, + abi_ulong arg4, abi_ulong arg5) +{ + 
CPUState *cpu = env_cpu(env); + TaskState *ts = get_task_state(cpu); + + switch (arg2) { + case PR_SYS_DISPATCH_OFF: + if (arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + ts->sys_dispatch_len = -1; + return 0; + case PR_SYS_DISPATCH_ON: + if (arg3 && arg3 + arg4 <= arg3) { + return -TARGET_EINVAL; + } + if (arg5 && !access_ok(cpu, VERIFY_READ, arg5, 1)) { + return -TARGET_EFAULT; + } + ts->sys_dispatch = arg3; + ts->sys_dispatch_len = arg4; + ts->sys_dispatch_selector = arg5; + return 0; + default: + return -TARGET_EINVAL; + } +} static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) @@ -6467,12 +6519,30 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, return -TARGET_EINVAL; } return do_prctl_get_tagged_addr_ctrl(env); + case PR_GET_SHADOW_STACK_STATUS: + if (arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + return do_prctl_get_shadow_stack_status(env, arg2); + case PR_SET_SHADOW_STACK_STATUS: + if (arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + return do_prctl_set_shadow_stack_status(env, arg2); + case PR_LOCK_SHADOW_STACK_STATUS: + if (arg3 || arg4 || arg5) { + return -TARGET_EINVAL; + } + return do_prctl_lock_shadow_stack_status(env, arg2); case PR_GET_UNALIGN: return do_prctl_get_unalign(env, arg2); case PR_SET_UNALIGN: return do_prctl_set_unalign(env, arg2); + case PR_SET_SYSCALL_USER_DISPATCH: + return do_prctl_syscall_user_dispatch(env, arg2, arg3, arg4, arg5); + case PR_CAP_AMBIENT: case PR_CAPBSET_READ: case PR_CAPBSET_DROP: @@ -6527,7 +6597,6 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, case PR_SET_MM: case PR_GET_SECCOMP: case PR_SET_SECCOMP: - case PR_SET_SYSCALL_USER_DISPATCH: case PR_GET_THP_DISABLE: case PR_SET_THP_DISABLE: case PR_GET_TSC: @@ -6542,6 +6611,54 @@ static abi_long do_prctl(CPUArchState *env, abi_long option, abi_long arg2, } } +#ifdef TARGET_AARCH64 +static abi_long 
do_map_shadow_stack(CPUArchState *env, abi_ulong addr, + abi_ulong size, abi_int flags) +{ + ARMCPU *cpu = env_archcpu(env); + abi_ulong alloc_size; + + if (!cpu_isar_feature(aa64_gcs, cpu)) { + return -TARGET_EOPNOTSUPP; + } + if (flags & ~(SHADOW_STACK_SET_TOKEN | SHADOW_STACK_SET_MARKER)) { + return -TARGET_EINVAL; + } + if (addr & ~TARGET_PAGE_MASK) { + return -TARGET_EINVAL; + } + if (size == 8 || !QEMU_IS_ALIGNED(size, 8)) { + return -TARGET_EINVAL; + } + + alloc_size = TARGET_PAGE_ALIGN(size); + if (alloc_size < size) { + return -TARGET_EOVERFLOW; + } + + mmap_lock(); + addr = gcs_alloc(addr, alloc_size); + if (addr != -1) { + if (flags & SHADOW_STACK_SET_TOKEN) { + abi_ptr cap_ptr = addr + size - 8; + uint64_t cap_val; + + if (flags & SHADOW_STACK_SET_MARKER) { + /* Leave an extra empty frame at top-of-stack. */ + cap_ptr -= 8; + } + cap_val = (cap_ptr & TARGET_PAGE_MASK) | 1; + if (put_user_u64(cap_val, cap_ptr)) { + /* Allocation succeeded above. */ + g_assert_not_reached(); + } + } + } + mmap_unlock(); + return get_errno(addr); +} +#endif + #define NEW_STACK_SIZE 0x40000 @@ -6623,6 +6740,21 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, ts = g_new0(TaskState, 1); init_task_state(ts); +#ifdef TARGET_AARCH64 + /* + * If GCS is enabled in the parent thread, it is also enabled + * in the child thread, but with a newly allocated stack. + */ + abi_long new_gcspr = 0; + if (env->cp15.gcscr_el[0] & GCSCR_PCRSEL) { + new_gcspr = gcs_new_stack(ts); + if (new_gcspr == -1) { + g_free(ts); + return -TARGET_ENOMEM; + } + } +#endif + /* Grab a mutex so that thread setup appears atomic. */ pthread_mutex_lock(&clone_lock); @@ -6631,10 +6763,7 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, * generate code for parallel execution and flush old translations. * Do this now so that the copy gets CF_PARALLEL too. 
*/ - if (!tcg_cflags_has(cpu, CF_PARALLEL)) { - tcg_cflags_set(cpu, CF_PARALLEL); - tb_flush(cpu); - } + begin_parallel_context(cpu); /* we create a new CPU instance. */ new_env = cpu_copy(env); @@ -6647,6 +6776,11 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp, ts->info = parent_ts->info; ts->signal_mask = parent_ts->signal_mask; +#ifdef TARGET_AARCH64 + ts->gcs_el0_locked = parent_ts->gcs_el0_locked; + new_env->cp15.gcspr_el[0] = new_gcspr; +#endif + if (flags & CLONE_CHILD_CLEARTID) { ts->child_tidptr = child_tidptr; } @@ -8992,6 +9126,29 @@ static int do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) #define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) #define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) #define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) +#define RISCV_HWPROBE_EXT_ZIHINTPAUSE (1ULL << 36) +#define RISCV_HWPROBE_EXT_ZVE32X (1ULL << 37) +#define RISCV_HWPROBE_EXT_ZVE32F (1ULL << 38) +#define RISCV_HWPROBE_EXT_ZVE64X (1ULL << 39) +#define RISCV_HWPROBE_EXT_ZVE64F (1ULL << 40) +#define RISCV_HWPROBE_EXT_ZVE64D (1ULL << 41) +#define RISCV_HWPROBE_EXT_ZIMOP (1ULL << 42) +#define RISCV_HWPROBE_EXT_ZCA (1ULL << 43) +#define RISCV_HWPROBE_EXT_ZCB (1ULL << 44) +#define RISCV_HWPROBE_EXT_ZCD (1ULL << 45) +#define RISCV_HWPROBE_EXT_ZCF (1ULL << 46) +#define RISCV_HWPROBE_EXT_ZCMOP (1ULL << 47) +#define RISCV_HWPROBE_EXT_ZAWRS (1ULL << 48) +#define RISCV_HWPROBE_EXT_SUPM (1ULL << 49) +#define RISCV_HWPROBE_EXT_ZICNTR (1ULL << 50) +#define RISCV_HWPROBE_EXT_ZIHPM (1ULL << 51) +#define RISCV_HWPROBE_EXT_ZFBFMIN (1ULL << 52) +#define RISCV_HWPROBE_EXT_ZVFBFMIN (1ULL << 53) +#define RISCV_HWPROBE_EXT_ZVFBFWMA (1ULL << 54) +#define RISCV_HWPROBE_EXT_ZICBOM (1ULL << 55) +#define RISCV_HWPROBE_EXT_ZAAMO (1ULL << 56) +#define RISCV_HWPROBE_EXT_ZALRSC (1ULL << 57) +#define RISCV_HWPROBE_EXT_ZABHA (1ULL << 58) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) @@ -9002,6 +9159,22 @@ static int 
do_getdents64(abi_long dirfd, abi_long arg2, abi_long count) #define RISCV_HWPROBE_MISALIGNED_MASK (7 << 0) #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 +#define RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS 7 +#define RISCV_HWPROBE_KEY_TIME_CSR_FREQ 8 +#define RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF 9 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED 1 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_SCALAR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF 10 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN 0 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW 2 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_FAST 3 +#define RISCV_HWPROBE_MISALIGNED_VECTOR_UNSUPPORTED 4 +#define RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0 11 +#define RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE 12 +#define RISCV_HWPROBE_KEY_VENDOR_EXT_SIFIVE_0 13 struct riscv_hwprobe { abi_llong key; @@ -9110,6 +9283,52 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env, RISCV_HWPROBE_EXT_ZACAS : 0; value |= cfg->ext_zicond ? RISCV_HWPROBE_EXT_ZICOND : 0; + value |= cfg->ext_zihintpause ? + RISCV_HWPROBE_EXT_ZIHINTPAUSE : 0; + value |= cfg->ext_zve32x ? + RISCV_HWPROBE_EXT_ZVE32X : 0; + value |= cfg->ext_zve32f ? + RISCV_HWPROBE_EXT_ZVE32F : 0; + value |= cfg->ext_zve64x ? + RISCV_HWPROBE_EXT_ZVE64X : 0; + value |= cfg->ext_zve64f ? + RISCV_HWPROBE_EXT_ZVE64F : 0; + value |= cfg->ext_zve64d ? + RISCV_HWPROBE_EXT_ZVE64D : 0; + value |= cfg->ext_zimop ? + RISCV_HWPROBE_EXT_ZIMOP : 0; + value |= cfg->ext_zca ? + RISCV_HWPROBE_EXT_ZCA : 0; + value |= cfg->ext_zcb ? + RISCV_HWPROBE_EXT_ZCB : 0; + value |= cfg->ext_zcd ? + RISCV_HWPROBE_EXT_ZCD : 0; + value |= cfg->ext_zcf ? + RISCV_HWPROBE_EXT_ZCF : 0; + value |= cfg->ext_zcmop ? + RISCV_HWPROBE_EXT_ZCMOP : 0; + value |= cfg->ext_zawrs ? + RISCV_HWPROBE_EXT_ZAWRS : 0; + value |= cfg->ext_supm ? 
+ RISCV_HWPROBE_EXT_SUPM : 0; + value |= cfg->ext_zicntr ? + RISCV_HWPROBE_EXT_ZICNTR : 0; + value |= cfg->ext_zihpm ? + RISCV_HWPROBE_EXT_ZIHPM : 0; + value |= cfg->ext_zfbfmin ? + RISCV_HWPROBE_EXT_ZFBFMIN : 0; + value |= cfg->ext_zvfbfmin ? + RISCV_HWPROBE_EXT_ZVFBFMIN : 0; + value |= cfg->ext_zvfbfwma ? + RISCV_HWPROBE_EXT_ZVFBFWMA : 0; + value |= cfg->ext_zicbom ? + RISCV_HWPROBE_EXT_ZICBOM : 0; + value |= cfg->ext_zaamo ? + RISCV_HWPROBE_EXT_ZAAMO : 0; + value |= cfg->ext_zalrsc ? + RISCV_HWPROBE_EXT_ZALRSC : 0; + value |= cfg->ext_zabha ? + RISCV_HWPROBE_EXT_ZABHA : 0; __put_user(value, &pair->value); break; case RISCV_HWPROBE_KEY_CPUPERF_0: @@ -9119,6 +9338,10 @@ static void risc_hwprobe_fill_pairs(CPURISCVState *env, value = cfg->ext_zicboz ? cfg->cboz_blocksize : 0; __put_user(value, &pair->value); break; + case RISCV_HWPROBE_KEY_ZICBOM_BLOCK_SIZE: + value = cfg->ext_zicbom ? cfg->cbom_blocksize : 0; + __put_user(value, &pair->value); + break; default: __put_user(-1, &pair->key); break; @@ -9260,6 +9483,12 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, FUTEX_WAKE, INT_MAX, NULL, NULL, 0); } +#ifdef TARGET_AARCH64 + if (ts->gcs_base) { + target_munmap(ts->gcs_base, ts->gcs_size); + } +#endif + object_unparent(OBJECT(cpu)); object_unref(OBJECT(cpu)); /* @@ -13890,6 +14119,11 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return do_riscv_hwprobe(cpu_env, arg1, arg2, arg3, arg4, arg5); #endif +#ifdef TARGET_AARCH64 + case TARGET_NR_map_shadow_stack: + return do_map_shadow_stack(cpu_env, arg1, arg2, arg3); +#endif + default: qemu_log_mask(LOG_UNIMP, "Unsupported syscall: %d\n", num); return -TARGET_ENOSYS; @@ -13897,12 +14131,46 @@ static abi_long do_syscall1(CPUArchState *cpu_env, int num, abi_long arg1, return ret; } +static bool sys_dispatch(CPUState *cpu, TaskState *ts) +{ + abi_ptr pc; + + if (likely(ts->sys_dispatch_len == -1)) { + return false; + } + + pc = cpu->cc->get_pc(cpu); + if 
(likely(pc - ts->sys_dispatch < ts->sys_dispatch_len)) { + return false; + } + if (unlikely(is_vdso_sigreturn(pc))) { + return false; + } + if (likely(ts->sys_dispatch_selector)) { + uint8_t sb; + if (get_user_u8(sb, ts->sys_dispatch_selector)) { + force_sig(TARGET_SIGSEGV); + return true; + } + if (likely(sb == SYSCALL_DISPATCH_FILTER_ALLOW)) { + return false; + } + if (unlikely(sb != SYSCALL_DISPATCH_FILTER_BLOCK)) { + force_sig(TARGET_SIGSYS); + return true; + } + } + force_sig_fault(TARGET_SIGSYS, TARGET_SYS_USER_DISPATCH, pc); + return true; +} + abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5, abi_long arg6, abi_long arg7, abi_long arg8) { CPUState *cpu = env_cpu(cpu_env); + TaskState *ts = get_task_state(cpu); abi_long ret; #ifdef DEBUG_ERESTARTSYS @@ -13919,6 +14187,10 @@ abi_long do_syscall(CPUArchState *cpu_env, int num, abi_long arg1, } #endif + if (sys_dispatch(cpu, ts)) { + return -QEMU_ESIGRETURN; + } + record_syscall_start(cpu, num, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8); diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index df26a2d28f255..cd9ff709b876b 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -689,6 +689,12 @@ typedef struct target_siginfo { #define TARGET_TRAP_HWBKPT (4) /* hardware breakpoint/watchpoint */ #define TARGET_TRAP_UNK (5) /* undiagnosed trap */ +/* + * SIGSYS si_codes + */ +#define TARGET_SYS_SECCOMP (1) /* seccomp triggered */ +#define TARGET_SYS_USER_DISPATCH (2) /* syscall user dispatch triggered */ + /* * SIGEMT si_codes */ diff --git a/linux-user/user-internals.h b/linux-user/user-internals.h index 691b9a1775f1e..7099349ec8190 100644 --- a/linux-user/user-internals.h +++ b/linux-user/user-internals.h @@ -20,6 +20,8 @@ #include "user/thunk.h" #include "qemu/log.h" +#include "exec/tb-flush.h" +#include "exec/translation-block.h" extern char *exec_path; void init_task_state(TaskState *ts); @@ 
-172,6 +174,20 @@ static inline int regpairs_aligned(CPUArchState *cpu_env, int num) { return 0; } */ void preexit_cleanup(CPUArchState *env, int code); +/** + * begin_parallel_context + * @cs: the CPU context + * + * Called when starting the second vcpu, or joining shared memory. + */ +static inline void begin_parallel_context(CPUState *cs) +{ + if (!tcg_cflags_has(cs, CF_PARALLEL)) { + tb_flush__exclusive_or_serial(); + tcg_cflags_set(cs, CF_PARALLEL); + } +} + /* * Include target-specific struct and function definitions; * they may need access to the target-independent structures diff --git a/linux-user/x86_64/elfload.c b/linux-user/x86_64/elfload.c new file mode 100644 index 0000000000000..5914f76e83353 --- /dev/null +++ b/linux-user/x86_64/elfload.c @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return "max"; +} + +abi_ulong get_elf_hwcap(CPUState *cs) +{ + return cpu_env(cs)->features[FEAT_1_EDX]; +} + +const char *get_elf_platform(CPUState *cs) +{ + return "x86_64"; +} + +bool init_guest_commpage(void) +{ + /* + * The vsyscall page is at a high negative address aka kernel space, + * which means that we cannot actually allocate it with target_mmap. + * We still should be able to use page_set_flags, unless the user + * has specified -R reserved_va, which would trigger an assert(). 
+ */ + if (reserved_va != 0 && + TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) { + error_report("Cannot allocate vsyscall page"); + exit(EXIT_FAILURE); + } + page_set_flags(TARGET_VSYSCALL_PAGE, + TARGET_VSYSCALL_PAGE | ~TARGET_PAGE_MASK, + PAGE_EXEC | PAGE_VALID, PAGE_VALID); + return true; +} + +void elf_core_copy_regs(target_elf_gregset_t *r, const CPUX86State *env) +{ + r->pt.r15 = tswapal(env->regs[15]); + r->pt.r14 = tswapal(env->regs[14]); + r->pt.r13 = tswapal(env->regs[13]); + r->pt.r12 = tswapal(env->regs[12]); + r->pt.bp = tswapal(env->regs[R_EBP]); + r->pt.bx = tswapal(env->regs[R_EBX]); + r->pt.r11 = tswapal(env->regs[11]); + r->pt.r10 = tswapal(env->regs[10]); + r->pt.r9 = tswapal(env->regs[9]); + r->pt.r8 = tswapal(env->regs[8]); + r->pt.ax = tswapal(env->regs[R_EAX]); + r->pt.cx = tswapal(env->regs[R_ECX]); + r->pt.dx = tswapal(env->regs[R_EDX]); + r->pt.si = tswapal(env->regs[R_ESI]); + r->pt.di = tswapal(env->regs[R_EDI]); + r->pt.orig_ax = tswapal(get_task_state(env_cpu_const(env))->orig_ax); + r->pt.ip = tswapal(env->eip); + r->pt.cs = tswapal(env->segs[R_CS].selector & 0xffff); + r->pt.flags = tswapal(env->eflags); + r->pt.sp = tswapal(env->regs[R_ESP]); + r->pt.ss = tswapal(env->segs[R_SS].selector & 0xffff); + r->pt.fs_base = tswapal(env->segs[R_FS].base); + r->pt.gs_base = tswapal(env->segs[R_GS].base); + r->pt.ds = tswapal(env->segs[R_DS].selector & 0xffff); + r->pt.es = tswapal(env->segs[R_ES].selector & 0xffff); + r->pt.fs = tswapal(env->segs[R_FS].selector & 0xffff); + r->pt.gs = tswapal(env->segs[R_GS].selector & 0xffff); +} diff --git a/linux-user/x86_64/target_elf.h b/linux-user/x86_64/target_elf.h index 3f628f8d66197..840bddf5ec60f 100644 --- a/linux-user/x86_64/target_elf.h +++ b/linux-user/x86_64/target_elf.h @@ -7,8 +7,24 @@ #ifndef X86_64_TARGET_ELF_H #define X86_64_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return "max"; -} + +#include "target_ptrace.h" + +#define ELF_CLASS 
ELFCLASS64 +#define ELF_MACHINE EM_X86_64 +#define VDSO_HEADER "vdso.c.inc" + +#define HAVE_ELF_HWCAP 1 +#define HAVE_ELF_PLATFORM 1 +#define HAVE_ELF_CORE_DUMP 1 +#define HAVE_GUEST_COMMPAGE 1 + +/* + * See linux kernel: arch/x86/include/asm/elf.h, where + * elf_gregset_t is mapped to struct user_regs_struct via sizeof. + */ +typedef struct target_elf_gregset_t { + struct target_user_regs_struct pt; +} target_elf_gregset_t; + #endif diff --git a/linux-user/x86_64/target_ptrace.h b/linux-user/x86_64/target_ptrace.h new file mode 100644 index 0000000000000..33527127cb200 --- /dev/null +++ b/linux-user/x86_64/target_ptrace.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef X86_64_TARGET_PTRACE_H +#define X86_64_TARGET_PTRACE_H + +/* + * The struct pt_regs in arch/x86/include/uapi/asm/ptrace.h has missing + * register values and is not used. See arch/x86/include/asm/user_64.h. + */ +struct target_user_regs_struct { + abi_ulong r15; + abi_ulong r14; + abi_ulong r13; + abi_ulong r12; + abi_ulong bp; + abi_ulong bx; + abi_ulong r11; + abi_ulong r10; + abi_ulong r9; + abi_ulong r8; + abi_ulong ax; + abi_ulong cx; + abi_ulong dx; + abi_ulong si; + abi_ulong di; + abi_ulong orig_ax; + abi_ulong ip; + abi_ulong cs; + abi_ulong flags; + abi_ulong sp; + abi_ulong ss; + abi_ulong fs_base; + abi_ulong gs_base; + abi_ulong ds; + abi_ulong es; + abi_ulong fs; + abi_ulong gs; +}; + +#endif /* X86_64_TARGET_PTRACE_H */ diff --git a/linux-user/x86_64/target_syscall.h b/linux-user/x86_64/target_syscall.h index fb558345d30bc..68f55f8e7b4f6 100644 --- a/linux-user/x86_64/target_syscall.h +++ b/linux-user/x86_64/target_syscall.h @@ -4,34 +4,6 @@ #define __USER_CS (0x33) #define __USER_DS (0x2B) -struct target_pt_regs { - abi_ulong r15; - abi_ulong r14; - abi_ulong r13; - abi_ulong r12; - abi_ulong rbp; - abi_ulong rbx; -/* arguments: non interrupts/non tracing syscalls only save up to here */ - abi_ulong r11; - abi_ulong r10; - abi_ulong r9; - abi_ulong r8; - 
abi_ulong rax; - abi_ulong rcx; - abi_ulong rdx; - abi_ulong rsi; - abi_ulong rdi; - abi_ulong orig_rax; -/* end of arguments */ -/* cpu exception frame or undefined */ - abi_ulong rip; - abi_ulong cs; - abi_ulong eflags; - abi_ulong rsp; - abi_ulong ss; -/* top of stack page */ -}; - /* Maximum number of LDT entries supported. */ #define TARGET_LDT_ENTRIES 8192 /* The size of each LDT entry. */ diff --git a/linux-user/xtensa/cpu_loop.c b/linux-user/xtensa/cpu_loop.c index c0fcf743e7013..a0ff10eff82f2 100644 --- a/linux-user/xtensa/cpu_loop.c +++ b/linux-user/xtensa/cpu_loop.c @@ -133,7 +133,7 @@ void cpu_loop(CPUXtensaState *env) cpu_exec_start(cs); trapnr = cpu_exec(cs); cpu_exec_end(cs); - process_queued_cpu_work(cs); + qemu_process_cpu_events(cs); env->sregs[PS] &= ~PS_EXCM; switch (trapnr) { @@ -238,12 +238,22 @@ void cpu_loop(CPUXtensaState *env) } } -void target_cpu_copy_regs(CPUArchState *env, target_pt_regs *regs) +void init_main_thread(CPUState *cs, struct image_info *info) { - int i; - for (i = 0; i < 16; ++i) { - env->regs[i] = regs->areg[i]; + CPUArchState *env = cpu_env(cs); + + env->sregs[WINDOW_BASE] = 0; + env->sregs[WINDOW_START] = 1; + env->regs[1] = info->start_stack; + env->pc = info->entry; + + if (info_is_fdpic(info)) { + env->regs[4] = info->loadmap_addr; + env->regs[5] = info->interpreter_loadmap_addr; + if (info->interpreter_loadmap_addr) { + env->regs[6] = info->interpreter_pt_dynamic_addr; + } else { + env->regs[6] = info->pt_dynamic_addr; + } } - env->sregs[WINDOW_START] = regs->windowstart; - env->pc = regs->pc; } diff --git a/linux-user/xtensa/elfload.c b/linux-user/xtensa/elfload.c new file mode 100644 index 0000000000000..68aeed855f991 --- /dev/null +++ b/linux-user/xtensa/elfload.c @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "loader.h" +#include "target_elf.h" + + +const char *get_elf_cpu_model(uint32_t eflags) +{ + return XTENSA_DEFAULT_CPU_MODEL; +} + 
+void elf_core_copy_regs(target_elf_gregset_t *r, const CPUXtensaState *env) +{ + r->pt.pc = tswap32(env->pc); + r->pt.ps = tswap32(env->sregs[PS] & ~PS_EXCM); + r->pt.lbeg = tswap32(env->sregs[LBEG]); + r->pt.lend = tswap32(env->sregs[LEND]); + r->pt.lcount = tswap32(env->sregs[LCOUNT]); + r->pt.sar = tswap32(env->sregs[SAR]); + r->pt.windowstart = tswap32(env->sregs[WINDOW_START]); + r->pt.windowbase = tswap32(env->sregs[WINDOW_BASE]); + r->pt.threadptr = tswap32(env->uregs[THREADPTR]); + + xtensa_sync_phys_from_window((CPUXtensaState *)env); + + for (unsigned i = 0; i < env->config->nareg; ++i) { + r->pt.a[i] = tswap32(env->phys_regs[i]); + } +} diff --git a/linux-user/xtensa/target_elf.h b/linux-user/xtensa/target_elf.h index a9a3fabd89bea..1bf8f2a14a4da 100644 --- a/linux-user/xtensa/target_elf.h +++ b/linux-user/xtensa/target_elf.h @@ -8,9 +8,19 @@ #ifndef XTENSA_TARGET_ELF_H #define XTENSA_TARGET_ELF_H -static inline const char *cpu_get_model(uint32_t eflags) -{ - return XTENSA_DEFAULT_CPU_MODEL; -} +#include "target_ptrace.h" + +#define ELF_CLASS ELFCLASS32 +#define ELF_MACHINE EM_XTENSA + +#define HAVE_ELF_CORE_DUMP 1 + +/* + * See linux kernel: arch/xtensa/include/asm/elf.h, where elf_gregset_t + * is mapped to struct user_pt_regs via typedef and sizeof. + */ +typedef struct target_elf_gregset_t { + struct target_user_pt_regs pt; +} target_elf_gregset_t; #endif diff --git a/linux-user/xtensa/target_ptrace.h b/linux-user/xtensa/target_ptrace.h new file mode 100644 index 0000000000000..32443d0deeced --- /dev/null +++ b/linux-user/xtensa/target_ptrace.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef XTENSA_TARGET_PTRACE_H +#define XTENSA_TARGET_PTRACE_H + +/* See arch/xtensa/include/uapi/asm/ptrace.h. 
*/ +struct target_user_pt_regs { + uint32_t pc; + uint32_t ps; + uint32_t lbeg; + uint32_t lend; + uint32_t lcount; + uint32_t sar; + uint32_t windowstart; + uint32_t windowbase; + uint32_t threadptr; + uint32_t syscall; + uint32_t reserved[6 + 48]; + uint32_t a[64]; +}; + +#endif /* XTENSA_TARGET_PTRACE_H */ diff --git a/linux-user/xtensa/target_syscall.h b/linux-user/xtensa/target_syscall.h index afc86a153fc04..5d4352a4d1931 100644 --- a/linux-user/xtensa/target_syscall.h +++ b/linux-user/xtensa/target_syscall.h @@ -8,41 +8,6 @@ #define MMAP_SHIFT TARGET_PAGE_BITS -typedef uint32_t xtensa_reg_t; -typedef struct { -} xtregs_opt_t; /* TODO */ - -struct target_pt_regs { - xtensa_reg_t pc; /* 4 */ - xtensa_reg_t ps; /* 8 */ - xtensa_reg_t depc; /* 12 */ - xtensa_reg_t exccause; /* 16 */ - xtensa_reg_t excvaddr; /* 20 */ - xtensa_reg_t debugcause; /* 24 */ - xtensa_reg_t wmask; /* 28 */ - xtensa_reg_t lbeg; /* 32 */ - xtensa_reg_t lend; /* 36 */ - xtensa_reg_t lcount; /* 40 */ - xtensa_reg_t sar; /* 44 */ - xtensa_reg_t windowbase; /* 48 */ - xtensa_reg_t windowstart; /* 52 */ - xtensa_reg_t syscall; /* 56 */ - xtensa_reg_t icountlevel; /* 60 */ - xtensa_reg_t scompare1; /* 64 */ - xtensa_reg_t threadptr; /* 68 */ - - /* Additional configurable registers that are used by the compiler. */ - xtregs_opt_t xtregs_opt; - - /* Make sure the areg field is 16 bytes aligned. */ - int align[0] __attribute__ ((aligned(16))); - - /* current register frame. - * Note: The ESF for kernel exceptions ends after 16 registers! 
- */ - xtensa_reg_t areg[16]; -}; - #define TARGET_MCL_CURRENT 1 #define TARGET_MCL_FUTURE 2 #define TARGET_MCL_ONFAULT 4 diff --git a/meson.build b/meson.build index c2bc3eeedce91..c5710a6a47c2b 100644 --- a/meson.build +++ b/meson.build @@ -94,12 +94,12 @@ have_rust = have_rust and add_languages('rust', native: true, required: get_option('rust').disable_auto_if(not have_system)) if have_rust rustc = meson.get_compiler('rust') - if rustc.version().version_compare('<1.77.0') + if rustc.version().version_compare('<1.83.0') if get_option('rust').enabled() - error('rustc version ' + rustc.version() + ' is unsupported. Please upgrade to at least 1.77.0') + error('rustc version ' + rustc.version() + ' is unsupported. Please upgrade to at least 1.83.0') else warning('rustc version ' + rustc.version() + ' is unsupported, disabling Rust compilation.') - message('Please upgrade to at least 1.77.0 to use Rust.') + message('Please upgrade to at least 1.83.0 to use Rust.') have_rust = false endif endif @@ -295,8 +295,6 @@ elif cpu == 'ppc' kvm_targets = ['ppc-softmmu'] elif cpu == 'ppc64' kvm_targets = ['ppc-softmmu', 'ppc64-softmmu'] -elif cpu == 'mips' - kvm_targets = ['mips-softmmu', 'mipsel-softmmu'] elif cpu == 'mips64' kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu'] elif cpu == 'riscv32' @@ -334,6 +332,7 @@ elif cpu == 'x86_64' 'CONFIG_HVF': ['x86_64-softmmu'], 'CONFIG_NVMM': ['i386-softmmu', 'x86_64-softmmu'], 'CONFIG_WHPX': ['i386-softmmu', 'x86_64-softmmu'], + 'CONFIG_MSHV': ['x86_64-softmmu'], } endif @@ -681,6 +680,9 @@ if get_option('cfi') error('-fsanitize-cfi-icall-generalize-pointers is not supported by the compiler') endif if get_option('cfi_debug') + if get_option('safe_stack') + error('cfi_debug is not compatible with safe_stack') + endif if cc.compiles('int main () { return 0; }', name: '-fno-sanitize-trap=cfi-icall', args: ['-flto', '-fsanitize=cfi-icall', @@ -709,11 +711,7 @@ hardening_flags = [ # # NB: Clang 17 is 
broken and SEGVs # https://github.com/llvm/llvm-project/issues/75168 -# -# NB2: This clashes with the "retguard" extension of OpenBSD's Clang -# https://gitlab.com/qemu-project/qemu/-/issues/2278 -if host_os != 'openbsd' and \ - cc.compiles('extern struct { void (*cb)(void); } s; void f(void) { s.cb(); }', +if cc.compiles('extern struct { void (*cb)(void); } s; void f(void) { s.cb(); }', name: '-fzero-call-used-regs=used-gpr', args: ['-O2', '-fzero-call-used-regs=used-gpr']) hardening_flags += '-fzero-call-used-regs=used-gpr' @@ -884,6 +882,14 @@ accelerators = [] if get_option('kvm').allowed() and host_os == 'linux' accelerators += 'CONFIG_KVM' endif + +if get_option('mshv').allowed() and host_os == 'linux' + if get_option('mshv').enabled() and host_machine.cpu() != 'x86_64' + error('mshv accelerator requires x64_64 host') + endif + accelerators += 'CONFIG_MSHV' +endif + if get_option('whpx').allowed() and host_os == 'windows' if get_option('whpx').enabled() and host_machine.cpu() != 'x86_64' error('WHPX requires 64-bit host') @@ -953,6 +959,9 @@ endif if 'CONFIG_WHPX' not in accelerators and get_option('whpx').enabled() error('WHPX not available on this platform') endif +if 'CONFIG_MSHV' not in accelerators and get_option('mshv').enabled() + error('mshv not available on this platform') +endif xen = not_found if get_option('xen').enabled() or (get_option('xen').auto() and have_system) @@ -1090,9 +1099,6 @@ glib = declare_dependency(dependencies: [glib_pc, gmodule], # TODO: remove this check and the corresponding workaround (qtree) when # the minimum supported glib is >= 2.75.3 glib_has_gslice = glib.version().version_compare('<2.75.3') -# Check whether glib has the aligned_alloc family of functions. 
-# -glib_has_aligned_alloc = glib.version().version_compare('>=2.72.0') # override glib dep to include the above refinements meson.override_dependency('glib-2.0', glib) @@ -1287,6 +1293,7 @@ endif enable_passt = get_option('passt') \ .require(host_os == 'linux', error_message: 'passt is supported only on Linux') \ + .require(gio.found(), error_message: 'passt requires gio') \ .allowed() vde = not_found @@ -1356,7 +1363,13 @@ if get_option('spice') \ endif spice_headers = spice.partial_dependency(compile_args: true, includes: true) -rt = cc.find_library('rt', required: false) +rt = not_found +if host_os != 'windows' + have_shm_open = cc.has_function('shm_open') + if not have_shm_open + rt = cc.find_library('rt', required: true) + endif +endif libiscsi = not_found if not get_option('libiscsi').auto() or have_block @@ -1586,9 +1599,11 @@ if not get_option('brlapi').auto() or have_system brlapi = cc.find_library('brlapi', has_headers: ['brlapi.h'], required: get_option('brlapi')) if brlapi.found() and not cc.links(''' - #include - #include - int main(void) { return brlapi__openConnection (NULL, NULL, NULL); }''', dependencies: brlapi) + #include + #include + int main(void) { + return brlapi__openConnection(NULL, NULL, NULL) == BRLAPI_INVALID_FILE_DESCRIPTOR; + }''', dependencies: brlapi) brlapi = not_found if get_option('brlapi').enabled() error('could not link brlapi') @@ -1809,6 +1824,7 @@ endif gnutls = not_found gnutls_crypto = not_found +gnutls_bug1717_workaround = false if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_system) # For general TLS support our min gnutls matches # that implied by our platform support matrix @@ -1834,6 +1850,12 @@ if get_option('gnutls').enabled() or (get_option('gnutls').auto() and have_syste method: 'pkg-config', required: get_option('gnutls')) endif + + #if gnutls.found() and not get_option('gnutls-bug1717-workaround').disabled() + # XXX: when bug 1717 is resolved, add logic to probe for + # the GNUTLS 
fixed version number to handle the 'auto' case + # gnutls_bug1717_workaround = true + #endif endif # We prefer use of gnutls for crypto, unless the options @@ -2382,11 +2404,11 @@ dbus_display = get_option('dbus_display') \ .allowed() have_virtfs = get_option('virtfs') \ - .require(host_os == 'linux' or host_os == 'darwin', - error_message: 'virtio-9p (virtfs) requires Linux or macOS') \ - .require(host_os == 'linux' or cc.has_function('pthread_fchdir_np'), + .require(host_os == 'linux' or host_os == 'darwin' or host_os == 'freebsd', + error_message: 'virtio-9p (virtfs) requires Linux or macOS or FreeBSD') \ + .require(host_os != 'darwin' or cc.has_function('pthread_fchdir_np'), error_message: 'virtio-9p (virtfs) on macOS requires the presence of pthread_fchdir_np') \ - .require(host_os == 'darwin' or libattr.found(), + .require(host_os != 'linux' or libattr.found(), error_message: 'virtio-9p (virtfs) on Linux requires libattr-devel') \ .disable_auto_if(not have_tools and not have_system) \ .allowed() @@ -2585,6 +2607,7 @@ config_host_data.set('CONFIG_KEYUTILS', keyutils.found()) config_host_data.set('CONFIG_GETTID', has_gettid) config_host_data.set('CONFIG_GNUTLS', gnutls.found()) config_host_data.set('CONFIG_GNUTLS_CRYPTO', gnutls_crypto.found()) +config_host_data.set('CONFIG_GNUTLS_BUG1717_WORKAROUND', gnutls_bug1717_workaround) config_host_data.set('CONFIG_TASN1', tasn1.found()) config_host_data.set('CONFIG_GCRYPT', gcrypt.found()) config_host_data.set('CONFIG_NETTLE', nettle.found()) @@ -2696,7 +2719,6 @@ config_host_data.set('CONFIG_GETLOADAVG', cc.has_function('getloadavg')) config_host_data.set('HAVE_COPY_FILE_RANGE', cc.has_function('copy_file_range')) config_host_data.set('HAVE_GETIFADDRS', cc.has_function('getifaddrs')) config_host_data.set('HAVE_GLIB_WITH_SLICE_ALLOCATOR', glib_has_gslice) -config_host_data.set('HAVE_GLIB_WITH_ALIGNED_ALLOC', glib_has_aligned_alloc) config_host_data.set('HAVE_OPENPTY', cc.has_function('openpty', dependencies: util)) 
config_host_data.set('HAVE_STRCHRNUL', cc.has_function('strchrnul', prefix: osdep_prefix)) config_host_data.set('HAVE_SYSTEM_FUNCTION', cc.has_function('system', prefix: '#include ')) @@ -3644,6 +3666,7 @@ if have_system trace_events_subdirs += [ 'accel/hvf', 'accel/kvm', + 'accel/mshv', 'audio', 'backends', 'backends/tpm', @@ -3809,14 +3832,10 @@ util_ss = util_ss.apply({}) libqemuutil = static_library('qemuutil', build_by_default: false, sources: util_ss.sources() + stub_ss.sources() + genh, - dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc]) -qemuutil_deps = [event_loop_base] -if host_os != 'windows' - qemuutil_deps += [rt] -endif + dependencies: [util_ss.dependencies(), libm, threads, glib, socket, malloc, rt]) qemuutil = declare_dependency(link_with: libqemuutil, sources: genh + version_res, - dependencies: qemuutil_deps) + dependencies: [event_loop_base]) if have_system or have_user decodetree = generator(find_program('scripts/decodetree.py'), @@ -3866,7 +3885,8 @@ if have_block endif common_ss.add(files('cpu-common.c')) -specific_ss.add(files('cpu-target.c')) +user_ss.add(files('cpu-target.c')) +system_ss.add(files('cpu-target.c')) subdir('system') @@ -3888,7 +3908,7 @@ if get_option('b_lto') pagevary = declare_dependency(link_with: pagevary) endif common_ss.add(pagevary) -specific_ss.add(files('page-target.c', 'page-vary-target.c')) +specific_ss.add(files('page-vary-target.c')) common_ss.add(files('target-info.c')) specific_ss.add(files('target-info-stub.c')) @@ -4229,8 +4249,8 @@ if have_rust '--no-layout-tests', '--no-prepend-enum-name', '--allowlist-file', meson.project_source_root() + '/include/.*', - '--allowlist-file', meson.project_source_root() + '/.*', - '--allowlist-file', meson.project_build_root() + '/.*' + '--allowlist-file', meson.project_build_root() + '/.*', + '--blocklist-file', glib_pc.get_variable('includedir') + '/glib-2.0/.*', ] if not rustfmt.found() if bindgen.version().version_compare('<0.65.0') @@ -4248,6 
+4268,10 @@ if have_rust else bindgen_args_common += ['--merge-extern-blocks'] endif + bindgen_c_args = [] + if host_arch == 'wasm32' + bindgen_c_args += ['-fvisibility=default'] + endif subdir('rust') endif @@ -4317,7 +4341,11 @@ foreach target : target_dirs ) if 'CONFIG_LINUX_USER' in config_target dir = base_dir / abi - arch_srcs += files(dir / 'signal.c', dir / 'cpu_loop.c') + arch_srcs += files( + dir / 'cpu_loop.c', + dir / 'elfload.c', + dir / 'signal.c', + ) if config_target.has_key('TARGET_SYSTBL_ABI') arch_srcs += \ syscall_nr_generators[abi].process(base_dir / abi / config_target['TARGET_SYSTBL'], @@ -4810,6 +4838,7 @@ if have_system summary_info += {'HVF support': config_all_accel.has_key('CONFIG_HVF')} summary_info += {'WHPX support': config_all_accel.has_key('CONFIG_WHPX')} summary_info += {'NVMM support': config_all_accel.has_key('CONFIG_NVMM')} + summary_info += {'MSHV support': config_all_accel.has_key('CONFIG_MSHV')} summary_info += {'Xen support': xen.found()} if xen.found() summary_info += {'xen ctrl version': xen.version()} @@ -4869,6 +4898,7 @@ summary_info += {'TLS priority': get_option('tls_priority')} summary_info += {'GNUTLS support': gnutls} if gnutls.found() summary_info += {' GNUTLS crypto': gnutls_crypto.found()} + summary_info += {' GNUTLS bug 1717 workaround': gnutls_bug1717_workaround } endif summary_info += {'libgcrypt': gcrypt} summary_info += {'nettle': nettle} @@ -5027,6 +5057,14 @@ elif host_long_bits < 64 message() message('Support for 32-bit CPU host architecture ' + cpu + ' is going') message('to be dropped in a future QEMU release.') +elif host_arch == 'mips' + message() + warning('DEPRECATED HOST CPU') + message() + message('Support for CPU host architecture ' + cpu + ' is going to be') + message('dropped as soon as the QEMU project stops supporting Debian 12') + message('("Bookworm"). 
Going forward, the QEMU project will not guarantee') + message('that QEMU will compile or work on this host CPU.') endif if not supported_oses.contains(host_os) diff --git a/meson_options.txt b/meson_options.txt index fff1521e580de..2836156257af6 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -36,6 +36,8 @@ option('trace_file', type: 'string', value: 'trace', option('coroutine_backend', type: 'combo', choices: ['ucontext', 'sigaltstack', 'windows', 'wasm', 'auto'], value: 'auto', description: 'coroutine backend to use') +option('gdb', type: 'string', value: '', + description: 'Path to GDB') # Everything else can be set via --enable/--disable-* option # on the configure script command line. After adding an option @@ -71,6 +73,8 @@ option('malloc', type : 'combo', choices : ['system', 'tcmalloc', 'jemalloc'], option('kvm', type: 'feature', value: 'auto', description: 'KVM acceleration support') +option('mshv', type: 'feature', value: 'auto', + description: 'MSHV acceleration support') option('whpx', type: 'feature', value: 'auto', description: 'WHPX acceleration support') option('hvf', type: 'feature', value: 'auto', diff --git a/migration/colo.c b/migration/colo.c index e0f713c837f5d..db783f6fa7750 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -686,11 +686,10 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, bql_lock(); cpu_synchronize_all_states(); - ret = qemu_loadvm_state_main(mis->from_src_file, mis); + ret = qemu_loadvm_state_main(mis->from_src_file, mis, errp); bql_unlock(); if (ret < 0) { - error_setg(errp, "Load VM's live state (ram) error"); return; } @@ -729,9 +728,8 @@ static void colo_incoming_process_checkpoint(MigrationIncomingState *mis, bql_lock(); vmstate_loading = true; colo_flush_ram_cache(); - ret = qemu_load_device_state(fb); + ret = qemu_load_device_state(fb, errp); if (ret < 0) { - error_setg(errp, "COLO: load device state failed"); vmstate_loading = false; bql_unlock(); return; @@ -849,17 
+847,16 @@ static void *colo_process_incoming_thread(void *opaque) failover_init_state(); mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); - if (!mis->to_src_file) { - error_report("COLO incoming thread: Open QEMUFile to_src_file failed"); - goto out; - } /* * Note: the communication between Primary side and Secondary side * should be sequential, we set the fd to unblocked in migration incoming * coroutine, and here we are in the COLO incoming thread, so it is ok to * set the fd back to blocked. */ - qemu_file_set_blocking(mis->from_src_file, true); + if (!qemu_file_set_blocking(mis->from_src_file, true, &local_err)) { + error_report_err(local_err); + goto out; + } colo_incoming_start_dirty_log(); diff --git a/migration/cpr-exec.c b/migration/cpr-exec.c new file mode 100644 index 0000000000000..d57714bc5da89 --- /dev/null +++ b/migration/cpr-exec.c @@ -0,0 +1,194 @@ +/* + * Copyright (c) 2021-2025 Oracle and/or its affiliates. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "qemu/memfd.h" +#include "qapi/error.h" +#include "qapi/type-helpers.h" +#include "io/channel-file.h" +#include "io/channel-socket.h" +#include "block/block-global-state.h" +#include "qemu/main-loop.h" +#include "migration/cpr.h" +#include "migration/qemu-file.h" +#include "migration/migration.h" +#include "migration/misc.h" +#include "migration/vmstate.h" +#include "system/runstate.h" +#include "trace.h" + +#define CPR_EXEC_STATE_NAME "QEMU_CPR_EXEC_STATE" + +static QEMUFile *qemu_file_new_fd_input(int fd, const char *name) +{ + g_autoptr(QIOChannelFile) fioc = qio_channel_file_new_fd(fd); + QIOChannel *ioc = QIO_CHANNEL(fioc); + qio_channel_set_name(ioc, name); + return qemu_file_new_input(ioc); +} + +static QEMUFile *qemu_file_new_fd_output(int fd, const char *name) +{ + g_autoptr(QIOChannelFile) fioc = qio_channel_file_new_fd(fd); + QIOChannel *ioc = QIO_CHANNEL(fioc); + 
qio_channel_set_name(ioc, name); + return qemu_file_new_output(ioc); +} + +void cpr_exec_persist_state(QEMUFile *f) +{ + QIOChannelFile *fioc = QIO_CHANNEL_FILE(qemu_file_get_ioc(f)); + int mfd = dup(fioc->fd); + char val[16]; + + /* Remember mfd in environment for post-exec load */ + qemu_clear_cloexec(mfd); + snprintf(val, sizeof(val), "%d", mfd); + g_setenv(CPR_EXEC_STATE_NAME, val, 1); +} + +static int cpr_exec_find_state(void) +{ + const char *val = g_getenv(CPR_EXEC_STATE_NAME); + int mfd; + + assert(val); + g_unsetenv(CPR_EXEC_STATE_NAME); + assert(!qemu_strtoi(val, NULL, 10, &mfd)); + return mfd; +} + +bool cpr_exec_has_state(void) +{ + return g_getenv(CPR_EXEC_STATE_NAME) != NULL; +} + +void cpr_exec_unpersist_state(void) +{ + int mfd; + const char *val = g_getenv(CPR_EXEC_STATE_NAME); + + g_unsetenv(CPR_EXEC_STATE_NAME); + assert(val); + assert(!qemu_strtoi(val, NULL, 10, &mfd)); + close(mfd); +} + +QEMUFile *cpr_exec_output(Error **errp) +{ + int mfd; + +#ifdef CONFIG_LINUX + mfd = qemu_memfd_create(CPR_EXEC_STATE_NAME, 0, false, 0, 0, errp); +#else + mfd = -1; +#endif + + if (mfd < 0) { + return NULL; + } + + return qemu_file_new_fd_output(mfd, CPR_EXEC_STATE_NAME); +} + +QEMUFile *cpr_exec_input(Error **errp) +{ + int mfd = cpr_exec_find_state(); + + lseek(mfd, 0, SEEK_SET); + return qemu_file_new_fd_input(mfd, CPR_EXEC_STATE_NAME); +} + +static bool preserve_fd(int fd) +{ + qemu_clear_cloexec(fd); + return true; +} + +static bool unpreserve_fd(int fd) +{ + qemu_set_cloexec(fd); + return true; +} + +static void cpr_exec_preserve_fds(void) +{ + cpr_walk_fd(preserve_fd); +} + +void cpr_exec_unpreserve_fds(void) +{ + cpr_walk_fd(unpreserve_fd); +} + +static void cpr_exec_cb(void *opaque) +{ + MigrationState *s = migrate_get_current(); + char **argv = strv_from_str_list(s->parameters.cpr_exec_command); + Error *err = NULL; + + /* + * Clear the close-on-exec flag for all preserved fd's. 
We cannot do so + * earlier because they should not persist across miscellaneous fork and + * exec calls that are performed during normal operation. + */ + cpr_exec_preserve_fds(); + + trace_cpr_exec(); + execvp(argv[0], argv); + + /* + * exec should only fail if argv[0] is bogus, or has a permissions problem, + * or the system is very short on resources. + */ + g_strfreev(argv); + cpr_exec_unpreserve_fds(); + + error_setg_errno(&err, errno, "execvp %s failed", argv[0]); + error_report_err(error_copy(err)); + migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED); + migrate_set_error(s, err); + + /* Note, we can go from state COMPLETED to FAILED */ + migration_call_notifiers(s, MIG_EVENT_PRECOPY_FAILED, NULL); + + err = NULL; + if (!migration_block_activate(&err)) { + /* error was already reported */ + error_free(err); + return; + } + + if (runstate_is_live(s->vm_old_state)) { + vm_start(); + } +} + +static int cpr_exec_notifier(NotifierWithReturn *notifier, MigrationEvent *e, + Error **errp) +{ + MigrationState *s = migrate_get_current(); + + if (e->type == MIG_EVENT_PRECOPY_DONE) { + QEMUBH *cpr_exec_bh = qemu_bh_new(cpr_exec_cb, NULL); + assert(s->state == MIGRATION_STATUS_COMPLETED); + qemu_bh_schedule(cpr_exec_bh); + qemu_notify_event(); + } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { + cpr_exec_unpersist_state(); + } + return 0; +} + +void cpr_exec_init(void) +{ + static NotifierWithReturn exec_notifier; + + migration_add_notifier_mode(&exec_notifier, cpr_exec_notifier, + MIG_MODE_CPR_EXEC); +} diff --git a/migration/cpr.c b/migration/cpr.c index 42ad0b0d500e5..22dbac7c728c3 100644 --- a/migration/cpr.c +++ b/migration/cpr.c @@ -6,7 +6,9 @@ */ #include "qemu/osdep.h" +#include "qemu/error-report.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/vfio/vfio-device.h" #include "migration/cpr.h" #include "migration/misc.h" @@ -100,10 +102,10 @@ void cpr_resave_fd(const char *name, int id, int fd) if (old_fd < 0) { 
cpr_save_fd(name, id, fd); } else if (old_fd != fd) { - error_setg(&error_fatal, - "internal error: cpr fd '%s' id %d value %d " - "already saved with a different value %d", - name, id, fd, old_fd); + error_report("internal error: cpr fd '%s' id %d value %d " + "already saved with a different value %d", + name, id, fd, old_fd); + g_assert_not_reached(); } } @@ -121,6 +123,19 @@ int cpr_open_fd(const char *path, int flags, const char *name, int id, return fd; } +bool cpr_walk_fd(cpr_walk_fd_cb cb) +{ + CprFd *elem; + + QLIST_FOREACH(elem, &cpr_state.fds, next) { + g_assert(elem->fd >= 0); + if (!cb(elem->fd)) { + return false; + } + } + return true; +} + /*************************************************************************/ static const VMStateDescription vmstate_cpr_state = { .name = CPR_STATE, @@ -172,6 +187,8 @@ int cpr_state_save(MigrationChannel *channel, Error **errp) if (mode == MIG_MODE_CPR_TRANSFER) { g_assert(channel); f = cpr_transfer_output(channel, errp); + } else if (mode == MIG_MODE_CPR_EXEC) { + f = cpr_exec_output(errp); } else { return 0; } @@ -182,13 +199,16 @@ int cpr_state_save(MigrationChannel *channel, Error **errp) qemu_put_be32(f, QEMU_CPR_FILE_MAGIC); qemu_put_be32(f, QEMU_CPR_FILE_VERSION); - ret = vmstate_save_state(f, &vmstate_cpr_state, &cpr_state, 0); + ret = vmstate_save_state(f, &vmstate_cpr_state, &cpr_state, 0, errp); if (ret) { - error_setg(errp, "vmstate_save_state error %d", ret); qemu_fclose(f); return ret; } + if (migrate_mode() == MIG_MODE_CPR_EXEC) { + cpr_exec_persist_state(f); + } + /* * Close the socket only partially so we can later detect when the other * end closes by getting a HUP event. 
@@ -207,7 +227,13 @@ int cpr_state_load(MigrationChannel *channel, Error **errp) QEMUFile *f; MigMode mode = 0; - if (channel) { + if (cpr_exec_has_state()) { + mode = MIG_MODE_CPR_EXEC; + f = cpr_exec_input(errp); + if (channel) { + warn_report("ignoring cpr channel for migration mode cpr-exec"); + } + } else if (channel) { mode = MIG_MODE_CPR_TRANSFER; cpr_set_incoming_mode(mode); f = cpr_transfer_input(channel, errp); @@ -219,6 +245,7 @@ int cpr_state_load(MigrationChannel *channel, Error **errp) } trace_cpr_state_load(MigMode_str(mode)); + cpr_set_incoming_mode(mode); v = qemu_get_be32(f); if (v != QEMU_CPR_FILE_MAGIC) { @@ -233,13 +260,17 @@ int cpr_state_load(MigrationChannel *channel, Error **errp) return -ENOTSUP; } - ret = vmstate_load_state(f, &vmstate_cpr_state, &cpr_state, 1); + ret = vmstate_load_state(f, &vmstate_cpr_state, &cpr_state, 1, errp); if (ret) { - error_setg(errp, "vmstate_load_state error %d", ret); qemu_fclose(f); return ret; } + if (migrate_mode() == MIG_MODE_CPR_EXEC) { + /* Set cloexec to prevent fd leaks from fork until the next cpr-exec */ + cpr_exec_unpreserve_fds(); + } + /* * Let the caller decide when to close the socket (and generate a HUP event * for the sending side). 
@@ -260,7 +291,7 @@ void cpr_state_close(void) bool cpr_incoming_needed(void *opaque) { MigMode mode = migrate_mode(); - return mode == MIG_MODE_CPR_TRANSFER; + return mode == MIG_MODE_CPR_TRANSFER || mode == MIG_MODE_CPR_EXEC; } /* diff --git a/migration/meson.build b/migration/meson.build index 9aa48b290e2a5..16909d54c5110 100644 --- a/migration/meson.build +++ b/migration/meson.build @@ -16,6 +16,7 @@ system_ss.add(files( 'channel-block.c', 'cpr.c', 'cpr-transfer.c', + 'cpr-exec.c', 'cpu-throttle.c', 'dirtyrate.c', 'exec.c', @@ -31,6 +32,7 @@ system_ss.add(files( 'multifd-zero-page.c', 'options.c', 'postcopy-ram.c', + 'ram.c', 'savevm.c', 'socket.c', 'tls.c', @@ -48,7 +50,6 @@ system_ss.add(when: zstd, if_true: files('multifd-zstd.c')) system_ss.add(when: qpl, if_true: files('multifd-qpl.c')) system_ss.add(when: uadk, if_true: files('multifd-uadk.c')) system_ss.add(when: qatzip, if_true: files('multifd-qatzip.c')) - -specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', - if_true: files('ram.c', - 'target.c')) +system_ss.add(when: 'CONFIG_VFIO', + if_true: files('vfio.c'), + if_false: files('vfio-stub.c')) diff --git a/migration/migration-hmp-cmds.c b/migration/migration-hmp-cmds.c index cef5608210e83..847d18faaacd0 100644 --- a/migration/migration-hmp-cmds.c +++ b/migration/migration-hmp-cmds.c @@ -57,11 +57,9 @@ static const gchar *format_time_str(uint64_t us) const char *units[] = {"us", "ms", "sec"}; int index = 0; - while (us > 1000) { + while (us >= 1000 && index + 1 < ARRAY_SIZE(units)) { us /= 1000; - if (++index >= (sizeof(units) - 1)) { - break; - } + index++; } return g_strdup_printf("%"PRIu64" %s", us, units[index]); @@ -153,7 +151,9 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) if (info->has_status) { monitor_printf(mon, "Status: \t\t%s", MigrationStatus_str(info->status)); - if (info->status == MIGRATION_STATUS_FAILED && info->error_desc) { + if ((info->status == MIGRATION_STATUS_FAILED || + info->status == MIGRATION_STATUS_POSTCOPY_PAUSED) && + 
info->error_desc) { monitor_printf(mon, " (%s)\n", info->error_desc); } else { monitor_printf(mon, "\n"); @@ -306,6 +306,18 @@ void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict) qapi_free_MigrationCapabilityStatusList(caps); } +static void monitor_print_cpr_exec_command(Monitor *mon, strList *args) +{ + monitor_printf(mon, "%s:", + MigrationParameter_str(MIGRATION_PARAMETER_CPR_EXEC_COMMAND)); + + while (args) { + monitor_printf(mon, " %s", args->value); + args = args->next; + } + monitor_printf(mon, "\n"); +} + void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) { MigrationParameters *params; @@ -353,6 +365,10 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s: '%s'\n", MigrationParameter_str(MIGRATION_PARAMETER_TLS_HOSTNAME), params->tls_hostname); + assert(params->tls_authz); + monitor_printf(mon, "%s: '%s'\n", + MigrationParameter_str(MIGRATION_PARAMETER_TLS_AUTHZ), + params->tls_authz); assert(params->has_max_bandwidth); monitor_printf(mon, "%s: %" PRIu64 " bytes/second\n", MigrationParameter_str(MIGRATION_PARAMETER_MAX_BANDWIDTH), @@ -361,6 +377,10 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s: %" PRIu64 " bytes/second\n", MigrationParameter_str(MIGRATION_PARAMETER_AVAIL_SWITCHOVER_BANDWIDTH), params->avail_switchover_bandwidth); + assert(params->has_max_postcopy_bandwidth); + monitor_printf(mon, "%s: %" PRIu64 " bytes/second\n", + MigrationParameter_str(MIGRATION_PARAMETER_MAX_POSTCOPY_BANDWIDTH), + params->max_postcopy_bandwidth); assert(params->has_downtime_limit); monitor_printf(mon, "%s: %" PRIu64 " ms\n", MigrationParameter_str(MIGRATION_PARAMETER_DOWNTIME_LIMIT), @@ -383,12 +403,6 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) monitor_printf(mon, "%s: %" PRIu64 " bytes\n", MigrationParameter_str(MIGRATION_PARAMETER_XBZRLE_CACHE_SIZE), params->xbzrle_cache_size); - monitor_printf(mon, "%s: %" PRIu64 "\n", - 
MigrationParameter_str(MIGRATION_PARAMETER_MAX_POSTCOPY_BANDWIDTH), - params->max_postcopy_bandwidth); - monitor_printf(mon, "%s: '%s'\n", - MigrationParameter_str(MIGRATION_PARAMETER_TLS_AUTHZ), - params->tls_authz); if (params->has_block_bitmap_mapping) { const BitmapMigrationNodeAliasList *bmnal; @@ -435,6 +449,9 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) MIGRATION_PARAMETER_DIRECT_IO), params->direct_io ? "on" : "off"); } + + assert(params->has_cpr_exec_command); + monitor_print_cpr_exec_command(mon, params->cpr_exec_command); } qapi_free_MigrationParameters(params); @@ -716,6 +733,21 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) p->has_direct_io = true; visit_type_bool(v, param, &p->direct_io, &err); break; + case MIGRATION_PARAMETER_CPR_EXEC_COMMAND: { + g_autofree char **strv = NULL; + g_autoptr(GError) gerr = NULL; + strList **tail = &p->cpr_exec_command; + + if (!g_shell_parse_argv(valuestr, NULL, &strv, &gerr)) { + error_setg(&err, "%s", gerr->message); + break; + } + for (int i = 0; strv[i]; i++) { + QAPI_LIST_APPEND(tail, strv[i]); + } + p->has_cpr_exec_command = true; + break; + } default: g_assert_not_reached(); } diff --git a/migration/migration.c b/migration/migration.c index 10c216d25dec0..a63b46bbef90f 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -74,11 +74,7 @@ #define INMIGRATE_DEFAULT_EXIT_ON_ERROR true -static NotifierWithReturnList migration_state_notifiers[] = { - NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_NORMAL), - NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_REBOOT), - NOTIFIER_ELEM_INIT(migration_state_notifiers, MIG_MODE_CPR_TRANSFER), -}; +static GSList *migration_state_notifiers[MIG_MODE__MAX]; /* Messages sent on the return path from destination to source */ enum mig_rp_message_type { @@ -337,6 +333,7 @@ void migration_object_init(void) ram_mig_init(); dirty_bitmap_mig_init(); + cpr_exec_init(); /* Initialize cpu throttle timers */ 
cpu_throttle_init(); @@ -623,22 +620,22 @@ void migration_incoming_disable_colo(void) migration_colo_enabled = false; } -int migration_incoming_enable_colo(void) +int migration_incoming_enable_colo(Error **errp) { #ifndef CONFIG_REPLICATION - error_report("ENABLE_COLO command come in migration stream, but the " - "replication module is not built in"); + error_setg(errp, "ENABLE_COLO command come in migration stream, but the " + "replication module is not built in"); return -ENOTSUP; #endif if (!migrate_colo()) { - error_report("ENABLE_COLO command come in migration stream, but x-colo " - "capability is not set"); + error_setg(errp, "ENABLE_COLO command come in migration stream" + ", but x-colo capability is not set"); return -EINVAL; } if (ram_block_discard_disable(true)) { - error_report("COLO: cannot disable RAM discard"); + error_setg(errp, "COLO: cannot disable RAM discard"); return -EBUSY; } migration_colo_enabled = true; @@ -881,7 +878,7 @@ process_incoming_migration_co(void *opaque) MIGRATION_STATUS_ACTIVE); mis->loadvm_co = qemu_coroutine_self(); - ret = qemu_loadvm_state(mis->from_src_file); + ret = qemu_loadvm_state(mis->from_src_file, &local_err); mis->loadvm_co = NULL; trace_vmstate_downtime_checkpoint("dst-precopy-loadvm-completed"); @@ -908,7 +905,8 @@ process_incoming_migration_co(void *opaque) } if (ret < 0) { - error_setg(&local_err, "load of migration failed: %s", strerror(-ret)); + error_prepend(&local_err, "load of migration failed: %s: ", + strerror(-ret)); goto fail; } @@ -935,6 +933,15 @@ process_incoming_migration_co(void *opaque) } exit(EXIT_FAILURE); + } else { + /* + * Report the error here in case that QEMU abruptly exits + * when postcopy is enabled. 
+ */ + WITH_QEMU_LOCK_GUARD(&s->error_mutex) { + error_report_err(s->error); + s->error = NULL; + } } out: /* Pairs with the refcount taken in qmp_migrate_incoming() */ @@ -951,7 +958,7 @@ static void migration_incoming_setup(QEMUFile *f) assert(!mis->from_src_file); mis->from_src_file = f; - qemu_file_set_blocking(f, false); + qemu_file_set_blocking(f, false, &error_abort); } void migration_incoming_process(void) @@ -971,7 +978,7 @@ static bool postcopy_try_recover(void) /* This should be set already in migration_incoming_setup() */ assert(mis->from_src_file); /* Postcopy has standalone thread to do vm load */ - qemu_file_set_blocking(mis->from_src_file, true); + qemu_file_set_blocking(mis->from_src_file, true, &error_abort); /* Re-configure the return path */ mis->to_src_file = qemu_file_get_return_path(mis->from_src_file); @@ -1665,23 +1672,51 @@ void migration_cancel(void) } } +static int get_modes(MigMode mode, va_list ap); + +static void add_notifiers(NotifierWithReturn *notify, int modes) +{ + for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { + if (modes & BIT(mode)) { + migration_state_notifiers[mode] = + g_slist_prepend(migration_state_notifiers[mode], notify); + } + } +} + +void migration_add_notifier_modes(NotifierWithReturn *notify, + MigrationNotifyFunc func, MigMode mode, ...) 
+{ + int modes; + va_list ap; + + va_start(ap, mode); + modes = get_modes(mode, ap); + va_end(ap); + + notify->notify = (NotifierWithReturnFunc)func; + add_notifiers(notify, modes); +} + void migration_add_notifier_mode(NotifierWithReturn *notify, MigrationNotifyFunc func, MigMode mode) { - notify->notify = (NotifierWithReturnFunc)func; - notifier_with_return_list_add(&migration_state_notifiers[mode], notify); + migration_add_notifier_modes(notify, func, mode, -1); } void migration_add_notifier(NotifierWithReturn *notify, MigrationNotifyFunc func) { - migration_add_notifier_mode(notify, func, MIG_MODE_NORMAL); + migration_add_notifier_modes(notify, func, MIG_MODE_NORMAL, -1); } void migration_remove_notifier(NotifierWithReturn *notify) { if (notify->notify) { - notifier_with_return_remove(notify); + for (MigMode mode = 0; mode < MIG_MODE__MAX; mode++) { + migration_blockers[mode] = + g_slist_remove(migration_state_notifiers[mode], notify); + } notify->notify = NULL; } } @@ -1691,18 +1726,29 @@ int migration_call_notifiers(MigrationState *s, MigrationEventType type, { MigMode mode = s->parameters.mode; MigrationEvent e; + NotifierWithReturn *notifier; + GSList *elem, *next; int ret; e.type = type; - ret = notifier_with_return_list_notify(&migration_state_notifiers[mode], - &e, errp); - assert(!ret || type == MIG_EVENT_PRECOPY_SETUP); - return ret; + + for (elem = migration_state_notifiers[mode]; elem; elem = next) { + next = elem->next; + notifier = (NotifierWithReturn *)elem->data; + ret = notifier->notify(notifier, &e, errp); + if (ret) { + assert(type == MIG_EVENT_PRECOPY_SETUP); + return ret; + } + } + + return 0; } bool migration_has_failed(MigrationState *s) { - return (s->state == MIGRATION_STATUS_CANCELLED || + return (s->state == MIGRATION_STATUS_CANCELLING || + s->state == MIGRATION_STATUS_CANCELLED || s->state == MIGRATION_STATUS_FAILED); } @@ -1762,7 +1808,8 @@ bool migrate_mode_is_cpr(MigrationState *s) { MigMode mode = s->parameters.mode; return mode 
== MIG_MODE_CPR_REBOOT || - mode == MIG_MODE_CPR_TRANSFER; + mode == MIG_MODE_CPR_TRANSFER || + mode == MIG_MODE_CPR_EXEC; } int migrate_init(MigrationState *s, Error **errp) @@ -2111,6 +2158,12 @@ static bool migrate_prepare(MigrationState *s, bool resume, Error **errp) return false; } + if (migrate_mode() == MIG_MODE_CPR_EXEC && + !s->parameters.has_cpr_exec_command) { + error_setg(errp, "cpr-exec mode requires setting cpr-exec-command"); + return false; + } + if (migration_is_blocked(errp)) { return false; } @@ -2646,12 +2699,9 @@ static void *source_return_path_thread(void *opaque) return NULL; } -static int open_return_path_on_source(MigrationState *ms) +static void open_return_path_on_source(MigrationState *ms) { ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file); - if (!ms->rp_state.from_dst_file) { - return -1; - } trace_open_return_path_on_source(); @@ -2660,8 +2710,6 @@ static int open_return_path_on_source(MigrationState *ms) ms->rp_state.rp_thread_created = true; trace_open_return_path_on_source_continue(); - - return 0; } /* Return true if error detected, or false otherwise */ @@ -2872,8 +2920,9 @@ static int postcopy_start(MigrationState *ms, Error **errp) fail_closefb: qemu_fclose(fb); fail: - migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, - MIGRATION_STATUS_FAILED); + if (ms->state != MIGRATION_STATUS_CANCELLING) { + migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED); + } migration_block_activate(NULL); migration_call_notifiers(ms, MIG_EVENT_PRECOPY_FAILED, NULL); bql_unlock(); @@ -4002,7 +4051,9 @@ void migration_connect(MigrationState *s, Error *error_in) } migration_rate_set(rate_limit); - qemu_file_set_blocking(s->to_dst_file, true); + if (!qemu_file_set_blocking(s->to_dst_file, true, &local_err)) { + goto fail; + } /* * Open the return path. For postcopy, it is used exclusively. For @@ -4010,10 +4061,7 @@ void migration_connect(MigrationState *s, Error *error_in) * QEMU uses the return path. 
*/ if (migrate_postcopy_ram() || migrate_return_path()) { - if (open_return_path_on_source(s)) { - error_setg(&local_err, "Unable to open return-path for postcopy"); - goto fail; - } + open_return_path_on_source(s); } /* diff --git a/migration/multifd.c b/migration/multifd.c index b2557788555a7..98873cee74f3c 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -439,6 +439,39 @@ static void multifd_send_set_error(Error *err) } } +/* + * Gracefully shutdown IOChannels. Only needed for successful migrations on + * top of TLS channels. Otherwise it is same to qio_channel_shutdown(). + * + * A successful migration also guarantees multifd sender threads are + * properly flushed and halted. It is only safe to send BYE in the + * migration thread here when we know there's no other thread writting to + * the channel, because GnuTLS doesn't support concurrent writers. + */ +static void migration_ioc_shutdown_gracefully(QIOChannel *ioc) +{ + g_autoptr(Error) local_err = NULL; + + if (!migration_has_failed(migrate_get_current()) && + object_dynamic_cast((Object *)ioc, TYPE_QIO_CHANNEL_TLS)) { + + /* + * The destination expects the TLS session to always be properly + * terminated. This helps to detect a premature termination in the + * middle of the stream. Note that older QEMUs always break the + * connection on the source and the destination always sees + * GNUTLS_E_PREMATURE_TERMINATION. 
+ */ + migration_tls_channel_end(ioc, &local_err); + if (local_err) { + warn_report("Failed to gracefully terminate TLS connection: %s", + error_get_pretty(local_err)); + } + } + + qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); +} + static void multifd_send_terminate_threads(void) { int i; @@ -460,7 +493,7 @@ static void multifd_send_terminate_threads(void) qemu_sem_post(&p->sem); if (p->c) { - qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); + migration_ioc_shutdown_gracefully(p->c); } } @@ -547,36 +580,6 @@ void multifd_send_shutdown(void) return; } - for (i = 0; i < migrate_multifd_channels(); i++) { - MultiFDSendParams *p = &multifd_send_state->params[i]; - - /* thread_created implies the TLS handshake has succeeded */ - if (p->tls_thread_created && p->thread_created) { - Error *local_err = NULL; - /* - * The destination expects the TLS session to always be - * properly terminated. This helps to detect a premature - * termination in the middle of the stream. Note that - * older QEMUs always break the connection on the source - * and the destination always sees - * GNUTLS_E_PREMATURE_TERMINATION. - */ - migration_tls_channel_end(p->c, &local_err); - - /* - * The above can return an error in case the migration has - * already failed. If the migration succeeded, errors are - * not expected but there's no need to kill the source. 
- */ - if (local_err && !migration_has_failed(migrate_get_current())) { - warn_report( - "multifd_send_%d: Failed to terminate TLS connection: %s", - p->id, error_get_pretty(local_err)); - break; - } - } - } - multifd_send_terminate_threads(); for (i = 0; i < migrate_multifd_channels(); i++) { diff --git a/migration/options.c b/migration/options.c index 4e923a2e072a7..51831127754ae 100644 --- a/migration/options.c +++ b/migration/options.c @@ -959,6 +959,9 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp) params->zero_page_detection = s->parameters.zero_page_detection; params->has_direct_io = true; params->direct_io = s->parameters.direct_io; + params->has_cpr_exec_command = true; + params->cpr_exec_command = QAPI_CLONE(strList, + s->parameters.cpr_exec_command); return params; } @@ -993,6 +996,7 @@ void migrate_params_init(MigrationParameters *params) params->has_mode = true; params->has_zero_page_detection = true; params->has_direct_io = true; + params->has_cpr_exec_command = true; } /* @@ -1297,6 +1301,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params, if (params->has_direct_io) { dest->direct_io = params->direct_io; } + + if (params->has_cpr_exec_command) { + dest->cpr_exec_command = params->cpr_exec_command; + } } static void migrate_params_apply(MigrateSetParameters *params, Error **errp) @@ -1429,6 +1437,12 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp) if (params->has_direct_io) { s->parameters.direct_io = params->direct_io; } + + if (params->has_cpr_exec_command) { + qapi_free_strList(s->parameters.cpr_exec_command); + s->parameters.cpr_exec_command = + QAPI_CLONE(strList, params->cpr_exec_command); + } } void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp) diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 45af9a361e8ea..5471efb4f0135 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -681,6 +681,7 @@ bool 
postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp) */ static int init_range(RAMBlock *rb, void *opaque) { + Error **errp = opaque; const char *block_name = qemu_ram_get_idstr(rb); void *host_addr = qemu_ram_get_host_addr(rb); ram_addr_t offset = qemu_ram_get_offset(rb); @@ -701,6 +702,8 @@ static int init_range(RAMBlock *rb, void *opaque) * (Precopy will just overwrite this data, so doesn't need the discard) */ if (ram_discard_range(block_name, 0, length)) { + error_setg(errp, "failed to discard RAM block %s len=%zu", + block_name, length); return -1; } @@ -749,9 +752,9 @@ static int cleanup_range(RAMBlock *rb, void *opaque) * postcopy later; must be called prior to any precopy. * called from arch_init's similarly named ram_postcopy_incoming_init */ -int postcopy_ram_incoming_init(MigrationIncomingState *mis) +int postcopy_ram_incoming_init(MigrationIncomingState *mis, Error **errp) { - if (foreach_not_ignored_block(init_range, NULL)) { + if (foreach_not_ignored_block(init_range, errp)) { return -1; } @@ -1703,7 +1706,7 @@ bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp) return false; } -int postcopy_ram_incoming_init(MigrationIncomingState *mis) +int postcopy_ram_incoming_init(MigrationIncomingState *mis, Error **errp) { error_report("postcopy_ram_incoming_init: No OS support"); return -1; @@ -1909,7 +1912,7 @@ void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) * The new loading channel has its own threads, so it needs to be * blocked too. It's by default true, just be explicit. 
*/ - qemu_file_set_blocking(file, true); + qemu_file_set_blocking(file, true, &error_abort); mis->postcopy_qemufile_dst = file; qemu_sem_post(&mis->postcopy_qemufile_dst_done); trace_postcopy_preempt_new_channel(); diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h index 3852141d7e37a..ca19433b24689 100644 --- a/migration/postcopy-ram.h +++ b/migration/postcopy-ram.h @@ -30,7 +30,7 @@ int postcopy_ram_incoming_setup(MigrationIncomingState *mis); * postcopy later; must be called prior to any precopy. * called from ram.c's similarly named ram_postcopy_incoming_init */ -int postcopy_ram_incoming_init(MigrationIncomingState *mis); +int postcopy_ram_incoming_init(MigrationIncomingState *mis, Error **errp); /* * At the end of a migration where postcopy_ram_incoming_init was called. diff --git a/migration/qemu-file.c b/migration/qemu-file.c index b6ac190034f77..2d4ce174a5ff6 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -125,7 +125,6 @@ static QEMUFile *qemu_file_new_impl(QIOChannel *ioc, bool is_writable) /* * Result: QEMUFile* for a 'return path' for comms in the opposite direction - * NULL if not available */ QEMUFile *qemu_file_get_return_path(QEMUFile *f) { @@ -340,7 +339,8 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) do { struct iovec iov = { f->buf + pending, IO_BUF_SIZE - pending }; - len = qio_channel_readv_full(f->ioc, &iov, 1, pfds, pnfd, 0, + len = qio_channel_readv_full(f->ioc, &iov, 1, pfds, pnfd, + QIO_CHANNEL_READ_FLAG_FD_PRESERVE_BLOCKING, &local_error); if (len == QIO_CHANNEL_ERR_BLOCK) { if (qemu_in_coroutine()) { @@ -348,17 +348,13 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f) } else { qio_channel_wait(f->ioc, G_IO_IN); } - } else if (len < 0) { - len = -EIO; } } while (len == QIO_CHANNEL_ERR_BLOCK); if (len > 0) { f->buf_size += len; - } else if (len == 0) { - qemu_file_set_error_obj(f, -EIO, local_error); } else { - qemu_file_set_error_obj(f, len, local_error); + 
qemu_file_set_error_obj(f, -EIO, local_error); } for (int i = 0; i < nfd; i++) { @@ -887,9 +883,9 @@ void qemu_put_counted_string(QEMUFile *f, const char *str) * both directions, and thus changing the blocking on the main * QEMUFile can also affect the return path. */ -void qemu_file_set_blocking(QEMUFile *f, bool block) +bool qemu_file_set_blocking(QEMUFile *f, bool block, Error **errp) { - qio_channel_set_blocking(f->ioc, block, NULL); + return qio_channel_set_blocking(f->ioc, block, errp); } /* diff --git a/migration/qemu-file.h b/migration/qemu-file.h index f5b9f430e04b1..c13c96716702c 100644 --- a/migration/qemu-file.h +++ b/migration/qemu-file.h @@ -71,7 +71,7 @@ void qemu_file_set_error(QEMUFile *f, int ret); int qemu_file_shutdown(QEMUFile *f); QEMUFile *qemu_file_get_return_path(QEMUFile *f); int qemu_fflush(QEMUFile *f); -void qemu_file_set_blocking(QEMUFile *f, bool block); +bool qemu_file_set_blocking(QEMUFile *f, bool block, Error **errp); int qemu_file_get_to_fd(QEMUFile *f, int fd, size_t size); void qemu_set_offset(QEMUFile *f, off_t off, int whence); off_t qemu_get_offset(QEMUFile *f); diff --git a/migration/ram.c b/migration/ram.c index 7208bc114fb5c..5eef2efc781f1 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -53,6 +53,8 @@ #include "qemu/rcu_queue.h" #include "migration/colo.h" #include "system/cpu-throttle.h" +#include "system/physmem.h" +#include "system/ramblock.h" #include "savevm.h" #include "qemu/iov.h" #include "multifd.h" @@ -228,6 +230,7 @@ bool migrate_ram_is_ignored(RAMBlock *block) MigMode mode = migrate_mode(); return !qemu_ram_is_migratable(block) || mode == MIG_MODE_CPR_TRANSFER || + mode == MIG_MODE_CPR_EXEC || (migrate_ignore_shared() && qemu_ram_is_shared(block) && qemu_ram_is_named_file(block)); } @@ -934,11 +937,86 @@ bool ramblock_page_is_discarded(RAMBlock *rb, ram_addr_t start) return false; } +/* Called with RCU critical section */ +static uint64_t physical_memory_sync_dirty_bitmap(RAMBlock *rb, + ram_addr_t 
start, + ram_addr_t length) +{ + ram_addr_t addr; + unsigned long word = BIT_WORD((start + rb->offset) >> TARGET_PAGE_BITS); + uint64_t num_dirty = 0; + unsigned long *dest = rb->bmap; + + /* start address and length is aligned at the start of a word? */ + if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) == + (start + rb->offset) && + !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) { + int k; + int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS); + unsigned long * const *src; + unsigned long idx = (word * BITS_PER_LONG) / DIRTY_MEMORY_BLOCK_SIZE; + unsigned long offset = BIT_WORD((word * BITS_PER_LONG) % + DIRTY_MEMORY_BLOCK_SIZE); + unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); + + src = qatomic_rcu_read( + &ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks; + + for (k = page; k < page + nr; k++) { + if (src[idx][offset]) { + unsigned long bits = qatomic_xchg(&src[idx][offset], 0); + unsigned long new_dirty; + new_dirty = ~dest[k]; + dest[k] |= bits; + new_dirty &= bits; + num_dirty += ctpopl(new_dirty); + } + + if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) { + offset = 0; + idx++; + } + } + if (num_dirty) { + physical_memory_dirty_bits_cleared(start, length); + } + + if (rb->clear_bmap) { + /* + * Postpone the dirty bitmap clear to the point before we + * really send the pages, also we will split the clear + * dirty procedure into smaller chunks. 
+ */ + clear_bmap_set(rb, start >> TARGET_PAGE_BITS, + length >> TARGET_PAGE_BITS); + } else { + /* Slow path - still do that in a huge chunk */ + memory_region_clear_dirty_bitmap(rb->mr, start, length); + } + } else { + ram_addr_t offset = rb->offset; + + for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) { + if (physical_memory_test_and_clear_dirty( + start + addr + offset, + TARGET_PAGE_SIZE, + DIRTY_MEMORY_MIGRATION)) { + long k = (start + addr) >> TARGET_PAGE_BITS; + if (!test_and_set_bit(k, dest)) { + num_dirty++; + } + } + } + } + + return num_dirty; +} + /* Called with RCU critical section */ static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb) { uint64_t new_dirty_pages = - cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length); + physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length); rs->migration_dirty_pages += new_dirty_pages; rs->num_dirty_pages_period += new_dirty_pages; @@ -3575,8 +3653,10 @@ static void colo_init_ram_state(void) * colo cache: this is for secondary VM, we cache the whole * memory of the secondary VM, it is need to hold the global lock * to call this helper. + * + * Returns zero to indicate success or -1 on error. 
*/ -int colo_init_ram_cache(void) +int colo_init_ram_cache(Error **errp) { RAMBlock *block; @@ -3585,16 +3665,16 @@ int colo_init_ram_cache(void) block->colo_cache = qemu_anon_ram_alloc(block->used_length, NULL, false, false); if (!block->colo_cache) { - error_report("%s: Can't alloc memory for COLO cache of block %s," - "size 0x" RAM_ADDR_FMT, __func__, block->idstr, - block->used_length); + error_setg(errp, "Can't alloc memory for COLO cache of " + "block %s, size 0x" RAM_ADDR_FMT, + block->idstr, block->used_length); RAMBLOCK_FOREACH_NOT_IGNORED(block) { if (block->colo_cache) { qemu_anon_ram_free(block->colo_cache, block->used_length); block->colo_cache = NULL; } } - return -errno; + return -1; } if (!machine_dump_guest_core(current_machine)) { qemu_madvise(block->colo_cache, block->used_length, @@ -3716,9 +3796,9 @@ static int ram_load_cleanup(void *opaque) * postcopy-ram. postcopy-ram's similarly names * postcopy_ram_incoming_init does the work. */ -int ram_postcopy_incoming_init(MigrationIncomingState *mis) +int ram_postcopy_incoming_init(MigrationIncomingState *mis, Error **errp) { - return postcopy_ram_incoming_init(mis); + return postcopy_ram_incoming_init(mis, errp); } /** @@ -4367,7 +4447,7 @@ static bool ram_has_postcopy(void *opaque) { RAMBlock *rb; RAMBLOCK_FOREACH_NOT_IGNORED(rb) { - if (ramblock_is_pmem(rb)) { + if (ram_block_is_pmem(rb)) { info_report("Block: %s, host: %p is a nvdimm memory, postcopy" "is not supported now!", rb->idstr, rb->host); return false; diff --git a/migration/ram.h b/migration/ram.h index 921c39a2c5c45..24cd0bf585762 100644 --- a/migration/ram.h +++ b/migration/ram.h @@ -86,7 +86,7 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms); void ram_postcopy_send_discard_bitmap(MigrationState *ms); /* For incoming postcopy discard */ int ram_discard_range(const char *block_name, uint64_t start, size_t length); -int ram_postcopy_incoming_init(MigrationIncomingState *mis); +int 
ram_postcopy_incoming_init(MigrationIncomingState *mis, Error **errp); int ram_load_postcopy(QEMUFile *f, int channel); void ram_handle_zero(void *host, uint64_t size); @@ -109,7 +109,7 @@ void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set); /* ram cache */ -int colo_init_ram_cache(void); +int colo_init_ram_cache(Error **errp); void colo_flush_ram_cache(void); void colo_release_ram_cache(void); void colo_incoming_start_dirty_log(void); diff --git a/migration/savevm.c b/migration/savevm.c index fabbeb296ae98..7b35ec4dd007c 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -963,13 +963,20 @@ void vmstate_unregister(VMStateIf *obj, const VMStateDescription *vmsd, } } -static int vmstate_load(QEMUFile *f, SaveStateEntry *se) +static int vmstate_load(QEMUFile *f, SaveStateEntry *se, Error **errp) { + int ret; trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)"); if (!se->vmsd) { /* Old style */ - return se->ops->load_state(f, se->opaque, se->load_version_id); + ret = se->ops->load_state(f, se->opaque, se->load_version_id); + if (ret < 0) { + error_setg(errp, "Failed to load vmstate version_id: %d, ret: %d", + se->load_version_id, ret); + } + return ret; } - return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id); + return vmstate_load_state(f, se->vmsd, se->opaque, se->load_version_id, + errp); } static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se, @@ -1049,8 +1056,8 @@ static int vmstate_save(QEMUFile *f, SaveStateEntry *se, JSONWriter *vmdesc, if (!se->vmsd) { vmstate_save_old_style(f, se, vmdesc); } else { - ret = vmstate_save_state_with_err(f, se->vmsd, se->opaque, vmdesc, - errp); + ret = vmstate_save_state(f, se->vmsd, se->opaque, vmdesc, + errp); if (ret) { return ret; } @@ -1278,6 +1285,7 @@ void qemu_savevm_state_header(QEMUFile *f) { MigrationState *s = migrate_get_current(); JSONWriter *vmdesc = s->vmdesc; + Error *local_err = NULL; trace_savevm_state_header(); 
qemu_put_be32(f, QEMU_VM_FILE_MAGIC); @@ -1296,7 +1304,11 @@ void qemu_savevm_state_header(QEMUFile *f) json_writer_start_object(vmdesc, "configuration"); } - vmstate_save_state(f, &vmstate_configuration, &savevm_state, vmdesc); + vmstate_save_state(f, &vmstate_configuration, &savevm_state, + vmdesc, &local_err); + if (local_err) { + error_report_err(local_err); + } if (vmdesc) { json_writer_end_object(vmdesc); @@ -1905,39 +1917,39 @@ enum LoadVMExitCodes { * quickly. */ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, - uint16_t len) + uint16_t len, Error **errp) { PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_ADVISE); uint64_t remote_pagesize_summary, local_pagesize_summary, remote_tps; size_t page_size = qemu_target_page_size(); - Error *local_err = NULL; trace_loadvm_postcopy_handle_advise(); if (ps != POSTCOPY_INCOMING_NONE) { - error_report("CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", ps); + error_setg(errp, "CMD_POSTCOPY_ADVISE in wrong postcopy state (%d)", + ps); return -1; } switch (len) { case 0: if (migrate_postcopy_ram()) { - error_report("RAM postcopy is enabled but have 0 byte advise"); + error_setg(errp, "RAM postcopy is enabled but have 0 byte advise"); return -EINVAL; } return 0; case 8 + 8: if (!migrate_postcopy_ram()) { - error_report("RAM postcopy is disabled but have 16 byte advise"); + error_setg(errp, + "RAM postcopy is disabled but have 16 byte advise"); return -EINVAL; } break; default: - error_report("CMD_POSTCOPY_ADVISE invalid length (%d)", len); + error_setg(errp, "CMD_POSTCOPY_ADVISE invalid length (%d)", len); return -EINVAL; } - if (!postcopy_ram_supported_by_host(mis, &local_err)) { - error_report_err(local_err); + if (!postcopy_ram_supported_by_host(mis, errp)) { postcopy_state_set(POSTCOPY_INCOMING_NONE); return -1; } @@ -1960,9 +1972,10 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, * also fails when passed to an older qemu that doesn't * do huge pages. 
*/ - error_report("Postcopy needs matching RAM page sizes (s=%" PRIx64 - " d=%" PRIx64 ")", - remote_pagesize_summary, local_pagesize_summary); + error_setg(errp, + "Postcopy needs matching RAM page sizes " + "(s=%" PRIx64 " d=%" PRIx64 ")", + remote_pagesize_summary, local_pagesize_summary); return -1; } @@ -1972,17 +1985,18 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, * Again, some differences could be dealt with, but for now keep it * simple. */ - error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", - (int)remote_tps, page_size); + error_setg(errp, + "Postcopy needs matching target page sizes (s=%d d=%zd)", + (int)remote_tps, page_size); return -1; } - if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, &local_err)) { - error_report_err(local_err); + if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_ADVISE, errp)) { return -1; } - if (ram_postcopy_incoming_init(mis)) { + if (ram_postcopy_incoming_init(mis, errp) < 0) { + error_prepend(errp, "Postcopy RAM incoming init failed: "); return -1; } @@ -1995,7 +2009,7 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis, * There can be 0..many of these messages, each encoding multiple pages. 
*/ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, - uint16_t len) + uint16_t len, Error **errp) { int tmp; char ramid[256]; @@ -2008,6 +2022,7 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, /* 1st discard */ tmp = postcopy_ram_prepare_discard(mis); if (tmp) { + error_setg(errp, "Failed to prepare for RAM discard: %d", tmp); return tmp; } break; @@ -2017,8 +2032,9 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, break; default: - error_report("CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)", - ps); + error_setg(errp, + "CMD_POSTCOPY_RAM_DISCARD in wrong postcopy state (%d)", + ps); return -1; } /* We're expecting a @@ -2027,29 +2043,30 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, * then at least 1 16 byte chunk */ if (len < (1 + 1 + 1 + 1 + 2 * 8)) { - error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); + error_setg(errp, "CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); return -1; } tmp = qemu_get_byte(mis->from_src_file); if (tmp != postcopy_ram_discard_version) { - error_report("CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp); + error_setg(errp, "CMD_POSTCOPY_RAM_DISCARD invalid version (%d)", tmp); return -1; } if (!qemu_get_counted_string(mis->from_src_file, ramid)) { - error_report("CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID"); + error_setg(errp, + "CMD_POSTCOPY_RAM_DISCARD Failed to read RAMBlock ID"); return -1; } tmp = qemu_get_byte(mis->from_src_file); if (tmp != 0) { - error_report("CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp); + error_setg(errp, "CMD_POSTCOPY_RAM_DISCARD missing nil (%d)", tmp); return -1; } len -= 3 + strlen(ramid); if (len % 16) { - error_report("CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); + error_setg(errp, "CMD_POSTCOPY_RAM_DISCARD invalid length (%d)", len); return -1; } trace_loadvm_postcopy_ram_handle_discard_header(ramid, len); @@ -2061,6 +2078,7 @@ static 
int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, len -= 16; int ret = ram_discard_range(ramid, start_addr, block_length); if (ret) { + error_setg(errp, "Failed to discard RAM range %s: %d", ramid, ret); return ret; } } @@ -2082,6 +2100,7 @@ static void *postcopy_ram_listen_thread(void *opaque) QEMUFile *f = mis->from_src_file; int load_res; MigrationState *migr = migrate_get_current(); + Error *local_err = NULL; object_ref(OBJECT(migr)); @@ -2095,10 +2114,10 @@ static void *postcopy_ram_listen_thread(void *opaque) * Because we're a thread and not a coroutine we can't yield * in qemu_file, and thus we must be blocking now. */ - qemu_file_set_blocking(f, true); + qemu_file_set_blocking(f, true, &error_fatal); /* TODO: sanity check that only postcopiable data will be loaded here */ - load_res = qemu_loadvm_state_main(f, mis); + load_res = qemu_loadvm_state_main(f, mis, &local_err); /* * This is tricky, but, mis->from_src_file can change after it @@ -2108,7 +2127,7 @@ static void *postcopy_ram_listen_thread(void *opaque) f = mis->from_src_file; /* And non-blocking again so we don't block in any cleanup */ - qemu_file_set_blocking(f, false); + qemu_file_set_blocking(f, false, &error_fatal); trace_postcopy_ram_listen_thread_exit(); if (load_res < 0) { @@ -2124,7 +2143,10 @@ static void *postcopy_ram_listen_thread(void *opaque) __func__, load_res); load_res = 0; /* prevent further exit() */ } else { - error_report("%s: loadvm failed: %d", __func__, load_res); + error_prepend(&local_err, + "loadvm failed during postcopy: %d: ", load_res); + migrate_set_error(migr, local_err); + error_report_err(local_err); migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_ACTIVE, MIGRATION_STATUS_FAILED); } @@ -2172,15 +2194,16 @@ static void *postcopy_ram_listen_thread(void *opaque) } /* After this message we must be able to immediately receive postcopy data */ -static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) +static int 
loadvm_postcopy_handle_listen(MigrationIncomingState *mis, + Error **errp) { PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_LISTENING); - Error *local_err = NULL; trace_loadvm_postcopy_handle_listen("enter"); if (ps != POSTCOPY_INCOMING_ADVISE && ps != POSTCOPY_INCOMING_DISCARD) { - error_report("CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps); + error_setg(errp, + "CMD_POSTCOPY_LISTEN in wrong postcopy state (%d)", ps); return -1; } if (ps == POSTCOPY_INCOMING_ADVISE) { @@ -2203,14 +2226,14 @@ static int loadvm_postcopy_handle_listen(MigrationIncomingState *mis) if (migrate_postcopy_ram()) { if (postcopy_ram_incoming_setup(mis)) { postcopy_ram_incoming_cleanup(mis); + error_setg(errp, "Failed to setup incoming postcopy RAM blocks"); return -1; } } trace_loadvm_postcopy_handle_listen("after uffd"); - if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, &local_err)) { - error_report_err(local_err); + if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_LISTEN, errp)) { return -1; } @@ -2263,13 +2286,13 @@ static void loadvm_postcopy_handle_run_bh(void *opaque) } /* After all discards we can start running and asking for pages */ -static int loadvm_postcopy_handle_run(MigrationIncomingState *mis) +static int loadvm_postcopy_handle_run(MigrationIncomingState *mis, Error **errp) { PostcopyState ps = postcopy_state_get(); trace_loadvm_postcopy_handle_run(); if (ps != POSTCOPY_INCOMING_LISTENING) { - error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); + error_setg(errp, "CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps); return -1; } @@ -2327,12 +2350,12 @@ static void migrate_send_rp_req_pages_pending(MigrationIncomingState *mis) } } -static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) +static void loadvm_postcopy_handle_resume(MigrationIncomingState *mis) { if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) { - error_report("%s: illegal resume received", __func__); + warn_report("%s: illegal resume received", __func__); /* 
Don't fail the load, only for this. */ - return 0; + return; } /* @@ -2384,8 +2407,6 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) /* Kick the fast ram load thread too */ qemu_sem_post(&mis->postcopy_pause_sem_fast_load); } - - return 0; } /** @@ -2398,7 +2419,7 @@ static int loadvm_postcopy_handle_resume(MigrationIncomingState *mis) * Returns: Negative values on error * */ -static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) +static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis, Error **errp) { int ret; size_t length; @@ -2408,7 +2429,7 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) trace_loadvm_handle_cmd_packaged(length); if (length > MAX_VM_CMD_PACKAGED_SIZE) { - error_report("Unreasonably large packaged state: %zu", length); + error_setg(errp, "Unreasonably large packaged state: %zu", length); return -1; } @@ -2419,8 +2440,8 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) length); if (ret != length) { object_unref(OBJECT(bioc)); - error_report("CMD_PACKAGED: Buffer receive fail ret=%d length=%zu", - ret, length); + error_setg(errp, "CMD_PACKAGED: Buffer receive fail ret=%d length=%zu", + ret, length); return (ret < 0) ? 
ret : -EAGAIN; } bioc->usage += length; @@ -2449,7 +2470,7 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) qemu_coroutine_yield(); } while (1); - ret = qemu_loadvm_state_main(packf, mis); + ret = qemu_loadvm_state_main(packf, mis, errp); trace_loadvm_handle_cmd_packaged_main(ret); qemu_fclose(packf); object_unref(OBJECT(bioc)); @@ -2464,32 +2485,35 @@ static int loadvm_handle_cmd_packaged(MigrationIncomingState *mis) * len (1 byte) + ramblock_name (<255 bytes) */ static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis, - uint16_t len) + uint16_t len, Error **errp) { QEMUFile *file = mis->from_src_file; RAMBlock *rb; char block_name[256]; size_t cnt; + int ret; cnt = qemu_get_counted_string(file, block_name); if (!cnt) { - error_report("%s: failed to read block name", __func__); + error_setg(errp, "failed to read block name"); return -EINVAL; } /* Validate before using the data */ - if (qemu_file_get_error(file)) { - return qemu_file_get_error(file); + ret = qemu_file_get_error(file); + if (ret < 0) { + error_setg(errp, "loadvm failed: stream error: %d", ret); + return ret; } if (len != cnt + 1) { - error_report("%s: invalid payload length (%d)", __func__, len); + error_setg(errp, "invalid payload length (%d)", len); return -EINVAL; } rb = qemu_ram_block_by_name(block_name); if (!rb) { - error_report("%s: block '%s' not found", __func__, block_name); + error_setg(errp, "block '%s' not found", block_name); return -EINVAL; } @@ -2500,20 +2524,26 @@ static int loadvm_handle_recv_bitmap(MigrationIncomingState *mis, return 0; } -static int loadvm_process_enable_colo(MigrationIncomingState *mis) +static int loadvm_process_enable_colo(MigrationIncomingState *mis, + Error **errp) { - int ret = migration_incoming_enable_colo(); + ERRP_GUARD(); + int ret; - if (!ret) { - ret = colo_init_ram_cache(); - if (ret) { - migration_incoming_disable_colo(); - } + ret = migration_incoming_enable_colo(errp); + if (ret < 0) { + return ret; + } + + ret = 
colo_init_ram_cache(errp); + if (ret) { + error_prepend(errp, "failed to init colo RAM cache: %d: ", ret); + migration_incoming_disable_colo(); } return ret; } -static int loadvm_postcopy_handle_switchover_start(void) +static int loadvm_postcopy_handle_switchover_start(Error **errp) { SaveStateEntry *se; @@ -2526,6 +2556,7 @@ static int loadvm_postcopy_handle_switchover_start(void) ret = se->ops->switchover_start(se->opaque); if (ret < 0) { + error_setg(errp, "Switchover start failed: %d", ret); return ret; } } @@ -2539,32 +2570,37 @@ static int loadvm_postcopy_handle_switchover_start(void) * LOADVM_QUIT All good, but exit the loop * <0 Error */ -static int loadvm_process_command(QEMUFile *f) +static int loadvm_process_command(QEMUFile *f, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); uint16_t cmd; uint16_t len; uint32_t tmp32; + int ret; cmd = qemu_get_be16(f); len = qemu_get_be16(f); /* Check validity before continue processing of cmds */ - if (qemu_file_get_error(f)) { - return qemu_file_get_error(f); + ret = qemu_file_get_error(f); + if (ret) { + error_setg(errp, + "Failed to load VM process command: stream error: %d", + ret); + return ret; } if (cmd >= MIG_CMD_MAX || cmd == MIG_CMD_INVALID) { - error_report("MIG_CMD 0x%x unknown (len 0x%x)", cmd, len); + error_setg(errp, "MIG_CMD 0x%x unknown (len 0x%x)", cmd, len); return -EINVAL; } trace_loadvm_process_command(mig_cmd_args[cmd].name, len); if (mig_cmd_args[cmd].len != -1 && mig_cmd_args[cmd].len != len) { - error_report("%s received with bad length - expecting %zu, got %d", - mig_cmd_args[cmd].name, - (size_t)mig_cmd_args[cmd].len, len); + error_setg(errp, "%s received with bad length - expecting %zu, got %d", + mig_cmd_args[cmd].name, + (size_t)mig_cmd_args[cmd].len, len); return -ERANGE; } @@ -2576,10 +2612,6 @@ static int loadvm_process_command(QEMUFile *f) return 0; } mis->to_src_file = qemu_file_get_return_path(f); - if (!mis->to_src_file) { - 
error_report("CMD_OPEN_RETURN_PATH failed"); - return -1; - } /* * Switchover ack is enabled but no device uses it, so send an ACK to @@ -2587,11 +2619,10 @@ static int loadvm_process_command(QEMUFile *f) * been created. */ if (migrate_switchover_ack() && !mis->switchover_ack_pending_num) { - int ret = migrate_send_rp_switchover_ack(mis); + ret = migrate_send_rp_switchover_ack(mis); if (ret) { - error_report( - "Could not send switchover ack RP MSG, err %d (%s)", ret, - strerror(-ret)); + error_setg_errno(errp, -ret, + "Could not send switchover ack RP MSG"); return ret; } } @@ -2601,39 +2632,40 @@ static int loadvm_process_command(QEMUFile *f) tmp32 = qemu_get_be32(f); trace_loadvm_process_command_ping(tmp32); if (!mis->to_src_file) { - error_report("CMD_PING (0x%x) received with no return path", - tmp32); + error_setg(errp, "CMD_PING (0x%x) received with no return path", + tmp32); return -1; } migrate_send_rp_pong(mis, tmp32); break; case MIG_CMD_PACKAGED: - return loadvm_handle_cmd_packaged(mis); + return loadvm_handle_cmd_packaged(mis, errp); case MIG_CMD_POSTCOPY_ADVISE: - return loadvm_postcopy_handle_advise(mis, len); + return loadvm_postcopy_handle_advise(mis, len, errp); case MIG_CMD_POSTCOPY_LISTEN: - return loadvm_postcopy_handle_listen(mis); + return loadvm_postcopy_handle_listen(mis, errp); case MIG_CMD_POSTCOPY_RUN: - return loadvm_postcopy_handle_run(mis); + return loadvm_postcopy_handle_run(mis, errp); case MIG_CMD_POSTCOPY_RAM_DISCARD: - return loadvm_postcopy_ram_handle_discard(mis, len); + return loadvm_postcopy_ram_handle_discard(mis, len, errp); case MIG_CMD_POSTCOPY_RESUME: - return loadvm_postcopy_handle_resume(mis); + loadvm_postcopy_handle_resume(mis); + return 0; case MIG_CMD_RECV_BITMAP: - return loadvm_handle_recv_bitmap(mis, len); + return loadvm_handle_recv_bitmap(mis, len, errp); case MIG_CMD_ENABLE_COLO: - return loadvm_process_enable_colo(mis); + return loadvm_process_enable_colo(mis, errp); case MIG_CMD_SWITCHOVER_START: - return 
loadvm_postcopy_handle_switchover_start(); + return loadvm_postcopy_handle_switchover_start(errp); } return 0; @@ -2683,8 +2715,9 @@ static bool check_section_footer(QEMUFile *f, SaveStateEntry *se) } static int -qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) +qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type, Error **errp) { + ERRP_GUARD(); bool trace_downtime = (type == QEMU_VM_SECTION_FULL); uint32_t instance_id, version_id, section_id; int64_t start_ts, end_ts; @@ -2695,8 +2728,8 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) /* Read section start */ section_id = qemu_get_be32(f); if (!qemu_get_counted_string(f, idstr)) { - error_report("Unable to read ID string for section %u", - section_id); + error_setg(errp, "Unable to read ID string for section %u", + section_id); return -EINVAL; } instance_id = qemu_get_be32(f); @@ -2704,8 +2737,7 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) ret = qemu_file_get_error(f); if (ret) { - error_report("%s: Failed to read instance/version ID: %d", - __func__, ret); + error_setg(errp, "Failed to read instance/version ID: %d", ret); return ret; } @@ -2714,17 +2746,17 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) /* Find savevm section */ se = find_se(idstr, instance_id); if (se == NULL) { - error_report("Unknown savevm section or instance '%s' %"PRIu32". " - "Make sure that your current VM setup matches your " - "saved VM setup, including any hotplugged devices", - idstr, instance_id); + error_setg(errp, "Unknown section or instance '%s' %"PRIu32". 
" + "Make sure that your current VM setup matches your " + "saved VM setup, including any hotplugged devices", + idstr, instance_id); return -EINVAL; } /* Validate version */ if (version_id > se->version_id) { - error_report("savevm: unsupported version %d for '%s' v%d", - version_id, idstr, se->version_id); + error_setg(errp, "unsupported version %d for '%s' v%d", + version_id, idstr, se->version_id); return -EINVAL; } se->load_version_id = version_id; @@ -2732,7 +2764,7 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) /* Validate if it is a device's state */ if (xen_enabled() && se->is_ram) { - error_report("loadvm: %s RAM loading not allowed on Xen", idstr); + error_setg(errp, "loadvm: %s RAM loading not allowed on Xen", idstr); return -EINVAL; } @@ -2740,10 +2772,11 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME); } - ret = vmstate_load(f, se); + ret = vmstate_load(f, se, errp); if (ret < 0) { - error_report("error while loading state for instance 0x%"PRIx32" of" - " device '%s'", instance_id, idstr); + error_prepend(errp, + "error while loading state for instance 0x%"PRIx32" of" + " device '%s': ", instance_id, idstr); return ret; } @@ -2754,6 +2787,8 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) } if (!check_section_footer(f, se)) { + error_setg(errp, "Section footer error, section_id: %d", + section_id); return -EINVAL; } @@ -2761,7 +2796,7 @@ qemu_loadvm_section_start_full(QEMUFile *f, uint8_t type) } static int -qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type) +qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type, Error **errp) { bool trace_downtime = (type == QEMU_VM_SECTION_END); int64_t start_ts, end_ts; @@ -2773,8 +2808,7 @@ qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type) ret = qemu_file_get_error(f); if (ret) { - error_report("%s: Failed to read section ID: %d", - __func__, ret); + error_setg(errp, "Failed to read section ID: %d", ret); return 
ret; } @@ -2785,7 +2819,7 @@ qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type) } } if (se == NULL) { - error_report("Unknown savevm section %d", section_id); + error_setg(errp, "Unknown section %d", section_id); return -EINVAL; } @@ -2793,10 +2827,8 @@ qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type) start_ts = qemu_clock_get_us(QEMU_CLOCK_REALTIME); } - ret = vmstate_load(f, se); + ret = vmstate_load(f, se, errp); if (ret < 0) { - error_report("error while loading state section id %d(%s)", - section_id, se->idstr); return ret; } @@ -2807,40 +2839,50 @@ qemu_loadvm_section_part_end(QEMUFile *f, uint8_t type) } if (!check_section_footer(f, se)) { + error_setg(errp, "Section footer error, section_id: %d", + section_id); return -EINVAL; } return 0; } -static int qemu_loadvm_state_header(QEMUFile *f) +static int qemu_loadvm_state_header(QEMUFile *f, Error **errp) { unsigned int v; int ret; v = qemu_get_be32(f); if (v != QEMU_VM_FILE_MAGIC) { - error_report("Not a migration stream"); + error_setg(errp, "Not a migration stream, magic: %x != %x", + v, QEMU_VM_FILE_MAGIC); return -EINVAL; } v = qemu_get_be32(f); if (v == QEMU_VM_FILE_VERSION_COMPAT) { - error_report("SaveVM v2 format is obsolete and don't work anymore"); + error_setg(errp, + "SaveVM v2 format is obsolete and no longer supported"); + return -ENOTSUP; } if (v != QEMU_VM_FILE_VERSION) { - error_report("Unsupported migration stream version"); + error_setg(errp, "Unsupported migration stream version, " + "file version %x != %x", + v, QEMU_VM_FILE_VERSION); return -ENOTSUP; } if (migrate_get_current()->send_configuration) { - if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) { - error_report("Configuration section missing"); + v = qemu_get_byte(f); + if (v != QEMU_VM_CONFIGURATION) { + error_setg(errp, "Configuration section missing, %x != %x", + v, QEMU_VM_CONFIGURATION); return -EINVAL; } - ret = vmstate_load_state(f, &vmstate_configuration, &savevm_state, 0); + ret = vmstate_load_state(f, 
&vmstate_configuration, &savevm_state, 0, + errp); if (ret) { return ret; } @@ -3028,8 +3070,10 @@ static bool postcopy_pause_incoming(MigrationIncomingState *mis) return true; } -int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) +int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis, + Error **errp) { + ERRP_GUARD(); uint8_t section_type; int ret = 0; @@ -3037,8 +3081,11 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) while (true) { section_type = qemu_get_byte(f); - ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, NULL); + ret = qemu_file_get_error_obj_any(f, mis->postcopy_qemufile_dst, errp); if (ret) { + error_prepend(errp, + "Failed to load section ID: stream error: %d: ", + ret); break; } @@ -3046,20 +3093,20 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) switch (section_type) { case QEMU_VM_SECTION_START: case QEMU_VM_SECTION_FULL: - ret = qemu_loadvm_section_start_full(f, section_type); + ret = qemu_loadvm_section_start_full(f, section_type, errp); if (ret < 0) { goto out; } break; case QEMU_VM_SECTION_PART: case QEMU_VM_SECTION_END: - ret = qemu_loadvm_section_part_end(f, section_type); + ret = qemu_loadvm_section_part_end(f, section_type, errp); if (ret < 0) { goto out; } break; case QEMU_VM_COMMAND: - ret = loadvm_process_command(f); + ret = loadvm_process_command(f, errp); trace_qemu_loadvm_state_section_command(ret); if ((ret < 0) || (ret == LOADVM_QUIT)) { goto out; @@ -3069,7 +3116,7 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) /* This is the end of migration */ goto out; default: - error_report("Unknown savevm section type %d", section_type); + error_setg(errp, "Unknown section type %d", section_type); ret = -EINVAL; goto out; } @@ -3097,33 +3144,31 @@ int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis) migrate_postcopy_ram() && postcopy_pause_incoming(mis)) { /* Reset f to point to the newly created channel 
*/ f = mis->from_src_file; + error_free_or_abort(errp); goto retry; } } return ret; } -int qemu_loadvm_state(QEMUFile *f) +int qemu_loadvm_state(QEMUFile *f, Error **errp) { MigrationState *s = migrate_get_current(); MigrationIncomingState *mis = migration_incoming_get_current(); - Error *local_err = NULL; int ret; - if (qemu_savevm_state_blocked(&local_err)) { - error_report_err(local_err); + if (qemu_savevm_state_blocked(errp)) { return -EINVAL; } qemu_loadvm_thread_pool_create(mis); - ret = qemu_loadvm_state_header(f); + ret = qemu_loadvm_state_header(f, errp); if (ret) { return ret; } - if (qemu_loadvm_state_setup(f, &local_err) != 0) { - error_report_err(local_err); + if (qemu_loadvm_state_setup(f, errp) != 0) { return -EINVAL; } @@ -3133,7 +3178,7 @@ int qemu_loadvm_state(QEMUFile *f) cpu_synchronize_all_pre_loadvm(); - ret = qemu_loadvm_state_main(f, mis); + ret = qemu_loadvm_state_main(f, mis, errp); qemu_event_set(&mis->main_thread_load_event); trace_qemu_loadvm_state_post_main(ret); @@ -3151,8 +3196,15 @@ int qemu_loadvm_state(QEMUFile *f) if (migrate_has_error(migrate_get_current()) || !qemu_loadvm_thread_pool_wait(s, mis)) { ret = -EINVAL; + error_setg(errp, + "Error while loading vmstate"); } else { ret = qemu_file_get_error(f); + if (ret < 0) { + error_setg(errp, + "Error while loading vmstate: stream error: %d", + ret); + } } } /* @@ -3201,15 +3253,14 @@ int qemu_loadvm_state(QEMUFile *f) return ret; } -int qemu_load_device_state(QEMUFile *f) +int qemu_load_device_state(QEMUFile *f, Error **errp) { MigrationIncomingState *mis = migration_incoming_get_current(); int ret; /* Load QEMU_VM_SECTION_FULL section */ - ret = qemu_loadvm_state_main(f, mis); + ret = qemu_loadvm_state_main(f, mis, errp); if (ret < 0) { - error_report("Failed to load device state: %d", ret); return ret; } @@ -3417,6 +3468,7 @@ void qmp_xen_save_devices_state(const char *filename, bool has_live, bool live, void qmp_xen_load_devices_state(const char *filename, Error **errp) { + 
ERRP_GUARD(); QEMUFile *f; QIOChannelFile *ioc; int ret; @@ -3438,10 +3490,10 @@ void qmp_xen_load_devices_state(const char *filename, Error **errp) f = qemu_file_new_input(QIO_CHANNEL(ioc)); object_unref(OBJECT(ioc)); - ret = qemu_loadvm_state(f); + ret = qemu_loadvm_state(f, errp); qemu_fclose(f); if (ret < 0) { - error_setg(errp, "loading Xen device state failed"); + error_prepend(errp, "loading Xen device state failed: "); } migration_incoming_state_destroy(); } @@ -3512,13 +3564,12 @@ bool load_snapshot(const char *name, const char *vmstate, ret = -EINVAL; goto err_drain; } - ret = qemu_loadvm_state(f); + ret = qemu_loadvm_state(f, errp); migration_incoming_state_destroy(); bdrv_drain_all_end(); if (ret < 0) { - error_setg(errp, "Error %d while loading VM state", ret); return false; } diff --git a/migration/savevm.h b/migration/savevm.h index 2d5e9c716686f..c337e3e3d111a 100644 --- a/migration/savevm.h +++ b/migration/savevm.h @@ -64,10 +64,11 @@ void qemu_savevm_send_colo_enable(QEMUFile *f); void qemu_savevm_live_state(QEMUFile *f); int qemu_save_device_state(QEMUFile *f); -int qemu_loadvm_state(QEMUFile *f); +int qemu_loadvm_state(QEMUFile *f, Error **errp); void qemu_loadvm_state_cleanup(MigrationIncomingState *mis); -int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis); -int qemu_load_device_state(QEMUFile *f); +int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis, + Error **errp); +int qemu_load_device_state(QEMUFile *f, Error **errp); int qemu_loadvm_approve_switchover(void); int qemu_savevm_state_complete_precopy_non_iterable(QEMUFile *f, bool in_postcopy); diff --git a/migration/tls.c b/migration/tls.c index 5cbf952383691..284a6194b2bf7 100644 --- a/migration/tls.c +++ b/migration/tls.c @@ -90,6 +90,10 @@ void migration_tls_channel_process_incoming(MigrationState *s, trace_migration_tls_incoming_handshake_start(); qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-incoming"); + if (migrate_postcopy_ram() || 
migrate_return_path()) { + qio_channel_set_feature(QIO_CHANNEL(tioc), + QIO_CHANNEL_FEATURE_CONCURRENT_IO); + } qio_channel_tls_handshake(tioc, migration_tls_incoming_handshake, NULL, @@ -149,6 +153,11 @@ void migration_tls_channel_connect(MigrationState *s, s->hostname = g_strdup(hostname); trace_migration_tls_outgoing_handshake_start(hostname); qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-outgoing"); + + if (migrate_postcopy_ram() || migrate_return_path()) { + qio_channel_set_feature(QIO_CHANNEL(tioc), + QIO_CHANNEL_FEATURE_CONCURRENT_IO); + } qio_channel_tls_handshake(tioc, migration_tls_outgoing_handshake, s, diff --git a/migration/trace-events b/migration/trace-events index 706db97def9c5..e8edd1fbbadf4 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -354,6 +354,7 @@ cpr_state_save(const char *mode) "%s mode" cpr_state_load(const char *mode) "%s mode" cpr_transfer_input(const char *path) "%s" cpr_transfer_output(const char *path) "%s" +cpr_exec(void) "" # block-dirty-bitmap.c send_bitmap_header_enter(void) "" diff --git a/migration/vfio-stub.c b/migration/vfio-stub.c new file mode 100644 index 0000000000000..f59ebe075dc73 --- /dev/null +++ b/migration/vfio-stub.c @@ -0,0 +1,16 @@ +/* + * QEMU live migration - stubs for VFIO + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "migration.h" + +void migration_populate_vfio_info(MigrationInfo *info) +{ +} + +void migration_reset_vfio_bytes_transferred(void) +{ +} diff --git a/migration/target.c b/migration/vfio.c similarity index 67% rename from migration/target.c rename to migration/vfio.c index 12fd399f0c521..af6ae2c1e19e6 100644 --- a/migration/target.c +++ b/migration/vfio.c @@ -1,5 +1,5 @@ /* - * QEMU live migration - functions that need to be compiled target-specific + * QEMU live migration - VFIO * * This work is licensed under the terms of the GNU GPL, version 2 * or (at your option) any later version. 
@@ -8,13 +8,8 @@ #include "qemu/osdep.h" #include "qapi/qapi-types-migration.h" #include "migration.h" -#include CONFIG_DEVICES - -#ifdef CONFIG_VFIO #include "hw/vfio/vfio-migration.h" -#endif -#ifdef CONFIG_VFIO void migration_populate_vfio_info(MigrationInfo *info) { if (vfio_migration_active()) { @@ -27,12 +22,3 @@ void migration_reset_vfio_bytes_transferred(void) { vfio_migration_reset_bytes_transferred(); } -#else -void migration_populate_vfio_info(MigrationInfo *info) -{ -} - -void migration_reset_vfio_bytes_transferred(void) -{ -} -#endif diff --git a/migration/vmstate-types.c b/migration/vmstate-types.c index 741a588b7e18c..4b01dc19c277c 100644 --- a/migration/vmstate-types.c +++ b/migration/vmstate-types.c @@ -19,6 +19,7 @@ #include "qemu/error-report.h" #include "qemu/queue.h" #include "trace.h" +#include "qapi/error.h" /* bool */ @@ -321,6 +322,10 @@ static int get_fd(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { int32_t *v = pv; + if (migrate_mode() == MIG_MODE_CPR_EXEC) { + qemu_get_sbe32s(f, v); + return 0; + } *v = qemu_file_get_fd(f); return 0; } @@ -329,6 +334,10 @@ static int put_fd(QEMUFile *f, void *pv, size_t size, const VMStateField *field, JSONWriter *vmdesc) { int32_t *v = pv; + if (migrate_mode() == MIG_MODE_CPR_EXEC) { + qemu_put_sbe32s(f, v); + return 0; + } return qemu_file_put_fd(f, *v); } @@ -543,13 +552,17 @@ static int get_tmp(QEMUFile *f, void *pv, size_t size, const VMStateField *field) { int ret; + Error *local_err = NULL; const VMStateDescription *vmsd = field->vmsd; int version_id = field->version_id; void *tmp = g_malloc(size); /* Writes the parent field which is at the start of the tmp */ *(void **)tmp = pv; - ret = vmstate_load_state(f, vmsd, tmp, version_id); + ret = vmstate_load_state(f, vmsd, tmp, version_id, &local_err); + if (ret < 0) { + error_report_err(local_err); + } g_free(tmp); return ret; } @@ -560,10 +573,14 @@ static int put_tmp(QEMUFile *f, void *pv, size_t size, const VMStateDescription 
*vmsd = field->vmsd; void *tmp = g_malloc(size); int ret; + Error *local_err = NULL; /* Writes the parent field which is at the start of the tmp */ *(void **)tmp = pv; - ret = vmstate_save_state(f, vmsd, tmp, vmdesc); + ret = vmstate_save_state(f, vmsd, tmp, vmdesc, &local_err); + if (ret) { + error_report_err(local_err); + } g_free(tmp); return ret; @@ -626,6 +643,7 @@ static int get_qtailq(QEMUFile *f, void *pv, size_t unused_size, const VMStateField *field) { int ret = 0; + Error *local_err = NULL; const VMStateDescription *vmsd = field->vmsd; /* size of a QTAILQ element */ size_t size = field->size; @@ -649,8 +667,9 @@ static int get_qtailq(QEMUFile *f, void *pv, size_t unused_size, while (qemu_get_byte(f)) { elm = g_malloc(size); - ret = vmstate_load_state(f, vmsd, elm, version_id); + ret = vmstate_load_state(f, vmsd, elm, version_id, &local_err); if (ret) { + error_report_err(local_err); return ret; } QTAILQ_RAW_INSERT_TAIL(pv, elm, entry_offset); @@ -669,13 +688,15 @@ static int put_qtailq(QEMUFile *f, void *pv, size_t unused_size, size_t entry_offset = field->start; void *elm; int ret; + Error *local_err = NULL; trace_put_qtailq(vmsd->name, vmsd->version_id); QTAILQ_RAW_FOREACH(elm, pv, entry_offset) { qemu_put_byte(f, true); - ret = vmstate_save_state(f, vmsd, elm, vmdesc); + ret = vmstate_save_state(f, vmsd, elm, vmdesc, &local_err); if (ret) { + error_report_err(local_err); return ret; } } @@ -704,6 +725,7 @@ static gboolean put_gtree_elem(gpointer key, gpointer value, gpointer data) struct put_gtree_data *capsule = (struct put_gtree_data *)data; QEMUFile *f = capsule->f; int ret; + Error *local_err = NULL; qemu_put_byte(f, true); @@ -711,16 +733,20 @@ static gboolean put_gtree_elem(gpointer key, gpointer value, gpointer data) if (!capsule->key_vmsd) { qemu_put_be64(f, (uint64_t)(uintptr_t)(key)); /* direct key */ } else { - ret = vmstate_save_state(f, capsule->key_vmsd, key, capsule->vmdesc); + ret = vmstate_save_state(f, capsule->key_vmsd, key, 
capsule->vmdesc, + &local_err); if (ret) { + error_report_err(local_err); capsule->ret = ret; return true; } } /* put the data */ - ret = vmstate_save_state(f, capsule->val_vmsd, value, capsule->vmdesc); + ret = vmstate_save_state(f, capsule->val_vmsd, value, capsule->vmdesc, + &local_err); if (ret) { + error_report_err(local_err); capsule->ret = ret; return true; } @@ -772,6 +798,7 @@ static int get_gtree(QEMUFile *f, void *pv, size_t unused_size, GTree *tree = *pval; void *key, *val; int ret = 0; + Error *local_err = NULL; /* in case of direct key, the key vmsd can be {}, ie. check fields */ if (!direct_key && version_id > key_vmsd->version_id) { @@ -803,18 +830,16 @@ static int get_gtree(QEMUFile *f, void *pv, size_t unused_size, key = (void *)(uintptr_t)qemu_get_be64(f); } else { key = g_malloc0(key_size); - ret = vmstate_load_state(f, key_vmsd, key, version_id); + ret = vmstate_load_state(f, key_vmsd, key, version_id, &local_err); if (ret) { - error_report("%s : failed to load %s (%d)", - field->name, key_vmsd->name, ret); + error_report_err(local_err); goto key_error; } } val = g_malloc0(val_size); - ret = vmstate_load_state(f, val_vmsd, val, version_id); + ret = vmstate_load_state(f, val_vmsd, val, version_id, &local_err); if (ret) { - error_report("%s : failed to load %s (%d)", - field->name, val_vmsd->name, ret); + error_report_err(local_err); goto val_error; } g_tree_insert(tree, key, val); @@ -851,14 +876,14 @@ static int put_qlist(QEMUFile *f, void *pv, size_t unused_size, size_t entry_offset = field->start; void *elm; int ret; + Error *local_err = NULL; trace_put_qlist(field->name, vmsd->name, vmsd->version_id); QLIST_RAW_FOREACH(elm, pv, entry_offset) { qemu_put_byte(f, true); - ret = vmstate_save_state(f, vmsd, elm, vmdesc); + ret = vmstate_save_state(f, vmsd, elm, vmdesc, &local_err); if (ret) { - error_report("%s: failed to save %s (%d)", field->name, - vmsd->name, ret); + error_report_err(local_err); return ret; } } @@ -872,6 +897,7 @@ static int 
get_qlist(QEMUFile *f, void *pv, size_t unused_size, const VMStateField *field) { int ret = 0; + Error *local_err = NULL; const VMStateDescription *vmsd = field->vmsd; /* size of a QLIST element */ size_t size = field->size; @@ -892,10 +918,9 @@ static int get_qlist(QEMUFile *f, void *pv, size_t unused_size, while (qemu_get_byte(f)) { elm = g_malloc(size); - ret = vmstate_load_state(f, vmsd, elm, version_id); + ret = vmstate_load_state(f, vmsd, elm, version_id, &local_err); if (ret) { - error_report("%s: failed to load %s (%d)", field->name, - vmsd->name, ret); + error_report_err(local_err); g_free(elm); return ret; } diff --git a/migration/vmstate.c b/migration/vmstate.c index 5feaa3244d259..81eadde553dd2 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -25,7 +25,7 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc, Error **errp); static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque); + void *opaque, Error **errp); /* Whether this field should exist for either save or load the VM? 
*/ static bool @@ -132,29 +132,43 @@ static void vmstate_handle_alloc(void *ptr, const VMStateField *field, } int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque, int version_id) + void *opaque, int version_id, Error **errp) { + ERRP_GUARD(); const VMStateField *field = vmsd->fields; int ret = 0; trace_vmstate_load_state(vmsd->name, version_id); if (version_id > vmsd->version_id) { - error_report("%s: incoming version_id %d is too new " - "for local version_id %d", - vmsd->name, version_id, vmsd->version_id); + error_setg(errp, "%s: incoming version_id %d is too new " + "for local version_id %d", + vmsd->name, version_id, vmsd->version_id); trace_vmstate_load_state_end(vmsd->name, "too new", -EINVAL); return -EINVAL; } if (version_id < vmsd->minimum_version_id) { - error_report("%s: incoming version_id %d is too old " - "for local minimum version_id %d", - vmsd->name, version_id, vmsd->minimum_version_id); + error_setg(errp, "%s: incoming version_id %d is too old " + "for local minimum version_id %d", + vmsd->name, version_id, vmsd->minimum_version_id); trace_vmstate_load_state_end(vmsd->name, "too old", -EINVAL); return -EINVAL; } - if (vmsd->pre_load) { + if (vmsd->pre_load_errp) { + ret = vmsd->pre_load_errp(opaque, errp); + if (ret < 0) { + error_prepend(errp, "pre load hook failed for: '%s', " + "version_id: %d, minimum version_id: %d, " + "ret: %d: ", vmsd->name, vmsd->version_id, + vmsd->minimum_version_id, ret); + return ret; + } + } else if (vmsd->pre_load) { ret = vmsd->pre_load(opaque); if (ret) { + error_setg(errp, "pre load hook failed for: '%s', " + "version_id: %d, minimum version_id: %d, ret: %d", + vmsd->name, vmsd->version_id, vmsd->minimum_version_id, + ret); return ret; } } @@ -192,13 +206,21 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, if (inner_field->flags & VMS_STRUCT) { ret = vmstate_load_state(f, inner_field->vmsd, curr_elem, - inner_field->vmsd->version_id); + 
inner_field->vmsd->version_id, + errp); } else if (inner_field->flags & VMS_VSTRUCT) { ret = vmstate_load_state(f, inner_field->vmsd, curr_elem, - inner_field->struct_version_id); + inner_field->struct_version_id, + errp); } else { ret = inner_field->info->get(f, curr_elem, size, inner_field); + if (ret < 0) { + error_setg(errp, + "Failed to load element of type %s for %s: " + "%d", inner_field->info->name, + inner_field->name, ret); + } } /* If we used a fake temp field.. free it now */ @@ -208,30 +230,47 @@ int vmstate_load_state(QEMUFile *f, const VMStateDescription *vmsd, if (ret >= 0) { ret = qemu_file_get_error(f); + if (ret < 0) { + error_setg(errp, + "Failed to load %s state: stream error: %d", + vmsd->name, ret); + } } if (ret < 0) { qemu_file_set_error(f, ret); - error_report("Failed to load %s:%s", vmsd->name, - field->name); trace_vmstate_load_field_error(field->name, ret); return ret; } } } else if (field->flags & VMS_MUST_EXIST) { - error_report("Input validation failed: %s/%s", - vmsd->name, field->name); + error_setg(errp, "Input validation failed: %s/%s version_id: %d", + vmsd->name, field->name, vmsd->version_id); return -1; } field++; } assert(field->flags == VMS_END); - ret = vmstate_subsection_load(f, vmsd, opaque); + ret = vmstate_subsection_load(f, vmsd, opaque, errp); if (ret != 0) { qemu_file_set_error(f, ret); return ret; } - if (vmsd->post_load) { + if (vmsd->post_load_errp) { + ret = vmsd->post_load_errp(opaque, version_id, errp); + if (ret < 0) { + error_prepend(errp, "post load hook failed for: %s, version_id: " + "%d, minimum_version: %d, ret: %d: ", vmsd->name, + vmsd->version_id, vmsd->minimum_version_id, ret); + } + } else if (vmsd->post_load) { ret = vmsd->post_load(opaque, version_id); + if (ret < 0) { + error_setg(errp, + "post load hook failed for: %s, version_id: %d, " + "minimum_version: %d, ret: %d", + vmsd->name, vmsd->version_id, vmsd->minimum_version_id, + ret); + } } trace_vmstate_load_state_end(vmsd->name, "end", ret); 
return ret; @@ -384,12 +423,6 @@ bool vmstate_section_needed(const VMStateDescription *vmsd, void *opaque) int vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque, JSONWriter *vmdesc_id) -{ - return vmstate_save_state_v(f, vmsd, opaque, vmdesc_id, vmsd->version_id, NULL); -} - -int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc_id, Error **errp) { return vmstate_save_state_v(f, vmsd, opaque, vmdesc_id, vmsd->version_id, errp); @@ -398,12 +431,20 @@ int vmstate_save_state_with_err(QEMUFile *f, const VMStateDescription *vmsd, int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, void *opaque, JSONWriter *vmdesc, int version_id, Error **errp) { + ERRP_GUARD(); int ret = 0; const VMStateField *field = vmsd->fields; trace_vmstate_save_state_top(vmsd->name); - if (vmsd->pre_save) { + if (vmsd->pre_save_errp) { + ret = vmsd->pre_save_errp(opaque, errp); + trace_vmstate_save_state_pre_save_res(vmsd->name, ret); + if (ret < 0) { + error_prepend(errp, "pre-save for %s failed, ret: %d: ", + vmsd->name, ret); + } + } else if (vmsd->pre_save) { ret = vmsd->pre_save(opaque); trace_vmstate_save_state_pre_save_res(vmsd->name, ret); if (ret) { @@ -490,7 +531,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd, if (inner_field->flags & VMS_STRUCT) { ret = vmstate_save_state(f, inner_field->vmsd, - curr_elem, vmdesc_loop); + curr_elem, vmdesc_loop, errp); } else if (inner_field->flags & VMS_VSTRUCT) { ret = vmstate_save_state_v(f, inner_field->vmsd, curr_elem, vmdesc_loop, @@ -566,8 +607,9 @@ vmstate_get_subsection(const VMStateDescription * const *sub, } static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, - void *opaque) + void *opaque, Error **errp) { + ERRP_GUARD(); trace_vmstate_subsection_load(vmsd->name); while (qemu_peek_byte(f, 0) == QEMU_VM_SUBSECTION) { @@ -598,6 +640,8 @@ static int vmstate_subsection_load(QEMUFile *f, 
const VMStateDescription *vmsd, sub_vmsd = vmstate_get_subsection(vmsd->subsections, idstr); if (sub_vmsd == NULL) { trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(lookup)"); + error_setg(errp, "VM subsection '%s' in '%s' does not exist", + idstr, vmsd->name); return -ENOENT; } qemu_file_skip(f, 1); /* subsection */ @@ -605,9 +649,12 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, qemu_file_skip(f, len); /* idstr */ version_id = qemu_get_be32(f); - ret = vmstate_load_state(f, sub_vmsd, opaque, version_id); + ret = vmstate_load_state(f, sub_vmsd, opaque, version_id, errp); if (ret) { trace_vmstate_subsection_load_bad(vmsd->name, idstr, "(child)"); + error_prepend(errp, + "Loading VM subsection '%s' in '%s' failed: %d: ", + idstr, vmsd->name, ret); return ret; } } @@ -646,7 +693,7 @@ static int vmstate_subsection_save(QEMUFile *f, const VMStateDescription *vmsd, qemu_put_byte(f, len); qemu_put_buffer(f, (uint8_t *)vmsdsub->name, len); qemu_put_be32(f, vmsdsub->version_id); - ret = vmstate_save_state_with_err(f, vmsdsub, opaque, vmdesc, errp); + ret = vmstate_save_state(f, vmsdsub, opaque, vmdesc, errp); if (ret) { return ret; } diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index 74a0f56566e71..33a88ce205a82 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -280,14 +280,15 @@ void hmp_log(Monitor *mon, const QDict *qdict) void hmp_gdbserver(Monitor *mon, const QDict *qdict) { + Error *err = NULL; const char *device = qdict_get_try_str(qdict, "device"); + if (!device) { device = "tcp::" DEFAULT_GDBSTUB_PORT; } - if (!gdbserver_start(device, &error_warn)) { - monitor_printf(mon, "Could not open gdbserver on device '%s'\n", - device); + if (!gdbserver_start(device, &err)) { + error_report_err(err); } else if (strcmp(device, "none") == 0) { monitor_printf(mon, "Disabled gdbserver\n"); } else { diff --git a/monitor/qemu-config-qmp.c b/monitor/qemu-config-qmp.c index 9a3b183602dea..8bd28fc232880 100644 --- 
a/monitor/qemu-config-qmp.c +++ b/monitor/qemu-config-qmp.c @@ -1,5 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ #include "qemu/osdep.h" +#include "qemu/target-info.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc.h" #include "qobject/qlist.h" @@ -128,7 +129,7 @@ static CommandLineParameterInfoList *query_all_machine_properties(void) ObjectProperty *prop; bool is_new; - machines = object_class_get_list(TYPE_MACHINE, false); + machines = object_class_get_list(target_machine_typename(), false); assert(machines); /* Loop over all machine classes */ diff --git a/nbd/server.c b/nbd/server.c index d242be9811520..acec0487a8b13 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -1411,7 +1411,9 @@ static coroutine_fn int nbd_negotiate(NBDClient *client, Error **errp) ....options sent, ending in NBD_OPT_EXPORT_NAME or NBD_OPT_GO.... */ - qio_channel_set_blocking(client->ioc, false, NULL); + if (!qio_channel_set_blocking(client->ioc, false, errp)) { + return -EINVAL; + } qio_channel_set_follow_coroutine_ctx(client->ioc, true); trace_nbd_negotiate_begin(); diff --git a/net/dgram.c b/net/dgram.c index 48f653bceb257..baa126d514b06 100644 --- a/net/dgram.c +++ b/net/dgram.c @@ -226,7 +226,10 @@ static int net_dgram_mcast_create(struct sockaddr_in *mcastaddr, } } - qemu_socket_set_nonblock(fd); + if (!qemu_set_blocking(fd, false, errp)) { + goto fail; + } + return fd; fail: if (fd >= 0) { @@ -284,7 +287,7 @@ static int net_dgram_mcast_init(NetClientState *peer, Error **errp) { NetDgramState *s; - int fd, ret; + int fd; struct sockaddr_in *saddr; if (remote->type != SOCKET_ADDRESS_TYPE_INET) { @@ -332,11 +335,8 @@ static int net_dgram_mcast_init(NetClientState *peer, g_free(saddr); return -1; } - ret = qemu_socket_try_set_nonblock(fd); - if (ret < 0) { + if (!qemu_set_blocking(fd, false, errp)) { g_free(saddr); - error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", - name, fd); return -1; } @@ -504,7 +504,11 @@ int net_init_dgram(const Netdev 
*netdev, const char *name, close(fd); return -1; } - qemu_socket_set_nonblock(fd); + + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } dest_len = sizeof(raddr_in); dest_addr = g_malloc(dest_len); @@ -551,7 +555,10 @@ int net_init_dgram(const Netdev *netdev, const char *name, close(fd); return -1; } - qemu_socket_set_nonblock(fd); + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } dest_len = sizeof(raddr_un); dest_addr = g_malloc(dest_len); @@ -562,10 +569,7 @@ int net_init_dgram(const Netdev *netdev, const char *name, if (fd == -1) { return -1; } - ret = qemu_socket_try_set_nonblock(fd); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", - name, fd); + if (!qemu_set_blocking(fd, false, errp)) { return -1; } dest_addr = NULL; diff --git a/net/l2tpv3.c b/net/l2tpv3.c index b5547cb917af9..cdfc641aa6fe9 100644 --- a/net/l2tpv3.c +++ b/net/l2tpv3.c @@ -648,6 +648,9 @@ int net_init_l2tpv3(const Netdev *netdev, error_setg(errp, "could not bind socket err=%i", errno); goto outerr; } + if (!qemu_set_blocking(fd, false, errp)) { + goto outerr; + } freeaddrinfo(result); @@ -709,8 +712,6 @@ int net_init_l2tpv3(const Netdev *netdev, s->vec = g_new(struct iovec, MAX_L2TPV3_IOVCNT); s->header_buf = g_malloc(s->header_size); - qemu_socket_set_nonblock(fd); - s->fd = fd; s->counter = 0; diff --git a/net/net.c b/net/net.c index da275db86ecf7..27e0d27807166 100644 --- a/net/net.c +++ b/net/net.c @@ -522,6 +522,15 @@ bool qemu_has_uso(NetClientState *nc) return nc->info->has_uso(nc); } +bool qemu_has_tunnel(NetClientState *nc) +{ + if (!nc || !nc->info->has_tunnel) { + return false; + } + + return nc->info->has_tunnel(nc); +} + bool qemu_has_vnet_hdr(NetClientState *nc) { if (!nc || !nc->info->has_vnet_hdr) { @@ -540,14 +549,13 @@ bool qemu_has_vnet_hdr_len(NetClientState *nc, int len) return nc->info->has_vnet_hdr_len(nc, len); } -void qemu_set_offload(NetClientState *nc, int csum, int tso4, int tso6, - 
int ecn, int ufo, int uso4, int uso6) +void qemu_set_offload(NetClientState *nc, const NetOffloads *ol) { if (!nc || !nc->info->set_offload) { return; } - nc->info->set_offload(nc, csum, tso4, tso6, ecn, ufo, uso4, uso6); + nc->info->set_offload(nc, ol); } int qemu_get_vnet_hdr_len(NetClientState *nc) @@ -567,7 +575,8 @@ void qemu_set_vnet_hdr_len(NetClientState *nc, int len) assert(len == sizeof(struct virtio_net_hdr_mrg_rxbuf) || len == sizeof(struct virtio_net_hdr) || - len == sizeof(struct virtio_net_hdr_v1_hash)); + len == sizeof(struct virtio_net_hdr_v1_hash) || + len == sizeof(struct virtio_net_hdr_v1_hash_tunnel)); nc->vnet_hdr_len = len; nc->info->set_vnet_hdr_len(nc, len); diff --git a/net/netmap.c b/net/netmap.c index 297510e19088e..6cd8f2bdc5f30 100644 --- a/net/netmap.c +++ b/net/netmap.c @@ -366,8 +366,7 @@ static void netmap_set_vnet_hdr_len(NetClientState *nc, int len) } } -static void netmap_set_offload(NetClientState *nc, int csum, int tso4, int tso6, - int ecn, int ufo, int uso4, int uso6) +static void netmap_set_offload(NetClientState *nc, const NetOffloads *ol) { NetmapState *s = DO_UPCAST(NetmapState, nc, nc); diff --git a/net/passt.c b/net/passt.c index 6f616ba3c2583..32ecffb763b48 100644 --- a/net/passt.c +++ b/net/passt.c @@ -103,7 +103,10 @@ static void net_passt_cleanup(NetClientState *nc) #endif kill(s->pid, SIGTERM); - g_remove(s->pidfile); + if (g_remove(s->pidfile) != 0) { + warn_report("Failed to remove passt pidfile %s: %s", + s->pidfile, strerror(errno)); + } g_free(s->pidfile); g_ptr_array_free(s->args, TRUE); } @@ -121,7 +124,7 @@ static gboolean net_passt_send(QIOChannel *ioc, GIOCondition condition, { if (net_stream_data_send(ioc, condition, data) == G_SOURCE_REMOVE) { NetPasstState *s = DO_UPCAST(NetPasstState, data, data); - Error *error; + Error *error = NULL; /* we need to restart passt */ kill(s->pid, SIGTERM); @@ -375,7 +378,8 @@ static int passt_vhost_user_start(NetPasstState *s, VhostUserState *be) net = 
vhost_net_init(&options); if (!net) { error_report("failed to init passt vhost_net"); - goto err; + passt_vhost_user_stop(s); + return -1; } if (s->vhost_net) { @@ -385,19 +389,11 @@ static int passt_vhost_user_start(NetPasstState *s, VhostUserState *be) s->vhost_net = net; return 0; -err: - if (net) { - vhost_net_cleanup(net); - g_free(net); - } - passt_vhost_user_stop(s); - return -1; } static void passt_vhost_user_event(void *opaque, QEMUChrEvent event) { NetPasstState *s = opaque; - Error *err = NULL; switch (event) { case CHR_EVENT_OPENED: @@ -428,10 +424,6 @@ static void passt_vhost_user_event(void *opaque, QEMUChrEvent event) /* Ignore */ break; } - - if (err) { - error_report_err(err); - } } static int net_passt_vhost_user_init(NetPasstState *s, Error **errp) diff --git a/net/slirp.c b/net/slirp.c index 9657e86a8415b..c627a9dd24b99 100644 --- a/net/slirp.c +++ b/net/slirp.c @@ -258,11 +258,13 @@ static void net_slirp_register_poll_sock(slirp_os_socket fd, void *opaque) { #ifdef WIN32 AioContext *ctxt = qemu_get_aio_context(); + g_autofree char *msg = NULL; if (WSAEventSelect(fd, event_notifier_get_handle(&ctxt->notifier), FD_READ | FD_ACCEPT | FD_CLOSE | FD_CONNECT | FD_WRITE | FD_OOB) != 0) { - error_setg_win32(&error_warn, WSAGetLastError(), "failed to WSAEventSelect()"); + msg = g_win32_error_message(WSAGetLastError()); + warn_report("failed to WSAEventSelect(): %s", msg); } #endif } @@ -270,8 +272,11 @@ static void net_slirp_register_poll_sock(slirp_os_socket fd, void *opaque) static void net_slirp_unregister_poll_sock(slirp_os_socket fd, void *opaque) { #ifdef WIN32 + g_autofree char *msg = NULL; + if (WSAEventSelect(fd, NULL, 0) != 0) { - error_setg_win32(&error_warn, WSAGetLastError(), "failed to WSAEventSelect()"); + msg = g_win32_error_message(WSAGetLastError()); + warn_report("failed to WSAEventSelect(): %s", msg); } #endif } @@ -727,6 +732,7 @@ static SlirpState *slirp_lookup(Monitor *mon, const char *id) void hmp_hostfwd_remove(Monitor *mon, 
const QDict *qdict) { + /* TODO: support removing unix fwd */ struct sockaddr_in host_addr = { .sin_family = AF_INET, .sin_addr = { @@ -795,12 +801,13 @@ void hmp_hostfwd_remove(Monitor *mon, const QDict *qdict) static int slirp_hostfwd(SlirpState *s, const char *redir_str, Error **errp) { - struct sockaddr_in host_addr = { - .sin_family = AF_INET, - .sin_addr = { - .s_addr = INADDR_ANY, - }, - }; + union { + struct sockaddr_in in; +#if !defined(WIN32) && SLIRP_CHECK_VERSION(4, 7, 0) + struct sockaddr_un un; +#endif + } host_addr = {0}; + struct sockaddr_in guest_addr = { .sin_family = AF_INET, .sin_addr = { @@ -811,9 +818,13 @@ static int slirp_hostfwd(SlirpState *s, const char *redir_str, Error **errp) int host_port, guest_port; const char *p; char buf[256]; - int is_udp; + int is_udp = 0; +#if !defined(WIN32) && SLIRP_CHECK_VERSION(4, 7, 0) + int is_unix = 0; +#endif const char *end; const char *fail_reason = "Unknown reason"; + socklen_t host_addr_size; p = redir_str; if (!p || get_str_sep(buf, sizeof(buf), &p, ':') < 0) { @@ -824,30 +835,83 @@ static int slirp_hostfwd(SlirpState *s, const char *redir_str, Error **errp) is_udp = 0; } else if (!strcmp(buf, "udp")) { is_udp = 1; - } else { - fail_reason = "Bad protocol name"; - goto fail_syntax; } - - if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { - fail_reason = "Missing : separator"; - goto fail_syntax; +#if !defined(WIN32) && SLIRP_CHECK_VERSION(4, 7, 0) + else if (!strcmp(buf, "unix")) { + is_unix = 1; } - if (buf[0] != '\0' && !inet_aton(buf, &host_addr.sin_addr)) { - fail_reason = "Bad host address"; +#endif + else { + fail_reason = "Bad protocol name"; goto fail_syntax; } - if (get_str_sep(buf, sizeof(buf), &p, '-') < 0) { - fail_reason = "Bad host port separator"; - goto fail_syntax; - } - err = qemu_strtoi(buf, &end, 0, &host_port); - if (err || host_port < 0 || host_port > 65535) { - fail_reason = "Bad host port"; - goto fail_syntax; +#if !defined(WIN32) && SLIRP_CHECK_VERSION(4, 7, 0) + if (is_unix) 
{ + if (get_str_sep(buf, sizeof(buf), &p, '-') < 0) { + fail_reason = "Missing - separator"; + goto fail_syntax; + } + if (buf[0] == '\0') { + fail_reason = "Missing unix socket path"; + goto fail_syntax; + } + if (buf[0] != '/') { + fail_reason = "unix socket path must be absolute"; + goto fail_syntax; + } + + size_t path_len = strlen(buf); + if (path_len > sizeof(host_addr.un.sun_path) - 1) { + fail_reason = "Unix socket path is too long"; + goto fail_syntax; + } + + struct stat st; + if (stat(buf, &st) == 0) { + if (!S_ISSOCK(st.st_mode)) { + fail_reason = "file exists and it's not unix socket"; + goto fail_syntax; + } + + if (unlink(buf) < 0) { + error_setg_errno(errp, errno, "Failed to unlink '%s'", buf); + goto fail_syntax; + } + } + host_addr.un.sun_family = AF_UNIX; + memcpy(host_addr.un.sun_path, buf, path_len); + host_addr_size = sizeof(host_addr.un); + } else +#endif + { + host_addr.in.sin_family = AF_INET; + host_addr.in.sin_addr.s_addr = INADDR_ANY; + + if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { + fail_reason = "Missing : separator"; + goto fail_syntax; + } + + if (buf[0] != '\0' && !inet_aton(buf, &host_addr.in.sin_addr)) { + fail_reason = "Bad host address"; + goto fail_syntax; + } + + if (get_str_sep(buf, sizeof(buf), &p, '-') < 0) { + fail_reason = "Bad host port separator"; + goto fail_syntax; + } + + err = qemu_strtoi(buf, &end, 0, &host_port); + if (err || host_port < 0 || host_port > 65535) { + fail_reason = "Bad host port"; + goto fail_syntax; + } + + host_addr.in.sin_port = htons(host_port); + host_addr_size = sizeof(host_addr.in); } - host_addr.sin_port = htons(host_port); if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) { fail_reason = "Missing guest address"; @@ -867,12 +931,13 @@ static int slirp_hostfwd(SlirpState *s, const char *redir_str, Error **errp) #if SLIRP_CHECK_VERSION(4, 5, 0) err = slirp_add_hostxfwd(s->slirp, - (struct sockaddr *) &host_addr, sizeof(host_addr), + (struct sockaddr *) &host_addr, host_addr_size, (struct 
sockaddr *) &guest_addr, sizeof(guest_addr), is_udp ? SLIRP_HOSTFWD_UDP : 0); #else + (void) host_addr_size; err = slirp_add_hostfwd(s->slirp, is_udp, - host_addr.sin_addr, host_port, + host_addr.in.sin_addr, host_port, guest_addr.sin_addr, guest_port); #endif diff --git a/net/socket.c b/net/socket.c index 784dda686f520..1ad03fc9d4558 100644 --- a/net/socket.c +++ b/net/socket.c @@ -295,7 +295,10 @@ static int net_socket_mcast_create(struct sockaddr_in *mcastaddr, } } - qemu_socket_set_nonblock(fd); + if (!qemu_set_blocking(fd, false, errp)) { + goto fail; + } + return fd; fail: if (fd >= 0) @@ -508,7 +511,10 @@ static int net_socket_listen_init(NetClientState *peer, error_setg_errno(errp, errno, "can't create stream socket"); return -1; } - qemu_socket_set_nonblock(fd); + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } socket_set_fast_reuse(fd); @@ -556,7 +562,10 @@ static int net_socket_connect_init(NetClientState *peer, error_setg_errno(errp, errno, "can't create stream socket"); return -1; } - qemu_socket_set_nonblock(fd); + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } connected = 0; for(;;) { @@ -671,7 +680,10 @@ static int net_socket_udp_init(NetClientState *peer, close(fd); return -1; } - qemu_socket_set_nonblock(fd); + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } s = net_socket_fd_init_dgram(peer, model, name, fd, 0, NULL, errp); if (!s) { @@ -706,7 +718,7 @@ int net_init_socket(const Netdev *netdev, const char *name, } if (sock->fd) { - int fd, ret, so_type; + int fd, so_type; fd = monitor_fd_param(monitor_cur(), sock->fd, errp); if (fd == -1) { @@ -716,10 +728,7 @@ int net_init_socket(const Netdev *netdev, const char *name, if (so_type < 0) { return -1; } - ret = qemu_socket_try_set_nonblock(fd); - if (ret < 0) { - error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", - name, fd); + if (!qemu_set_blocking(fd, false, errp)) { return -1; } switch (so_type) { diff 
--git a/net/stream.c b/net/stream.c index d893f02cabe3d..94f823a2a7c2b 100644 --- a/net/stream.c +++ b/net/stream.c @@ -138,7 +138,6 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) NetStreamData *d = opaque; QIOChannelSocket *listen_sioc = QIO_CHANNEL_SOCKET(d->listen_ioc); SocketAddress *addr; - int ret; Error *err = NULL; if (qio_task_propagate_error(task, &err)) { @@ -149,13 +148,11 @@ static void net_stream_server_listening(QIOTask *task, gpointer opaque) addr = qio_channel_socket_get_local_address(listen_sioc, NULL); g_assert(addr != NULL); - ret = qemu_socket_try_set_nonblock(listen_sioc->fd); - if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { - qemu_set_info_str(&d->nc, "can't use file descriptor %s (errno %d)", - addr->u.fd.str, -ret); + if (!qemu_set_blocking(listen_sioc->fd, false, &err)) { + qemu_set_info_str(&d->nc, "error: %s", error_get_pretty(err)); + error_free(err); return; } - g_assert(ret == 0); qapi_free_SocketAddress(addr); d->nc.link_down = true; diff --git a/net/stream_data.c b/net/stream_data.c index 5af27e0d1d6ad..03740e9f73e72 100644 --- a/net/stream_data.c +++ b/net/stream_data.c @@ -12,6 +12,7 @@ #include "net/net.h" #include "io/channel.h" #include "io/net-listener.h" +#include "qemu/sockets.h" #include "stream_data.h" @@ -154,7 +155,6 @@ int net_stream_data_client_connected(QIOTask *task, NetStreamData *d) { QIOChannelSocket *sioc = QIO_CHANNEL_SOCKET(d->ioc); SocketAddress *addr; - int ret; Error *err = NULL; if (qio_task_propagate_error(task, &err)) { @@ -166,14 +166,12 @@ int net_stream_data_client_connected(QIOTask *task, NetStreamData *d) addr = qio_channel_socket_get_remote_address(sioc, NULL); g_assert(addr != NULL); - ret = qemu_socket_try_set_nonblock(sioc->fd); - if (addr->type == SOCKET_ADDRESS_TYPE_FD && ret < 0) { - qemu_set_info_str(&d->nc, "can't use file descriptor %s (errno %d)", - addr->u.fd.str, -ret); + if (!qemu_set_blocking(sioc->fd, false, &err)) { + qemu_set_info_str(&d->nc, 
"error: %s", error_get_pretty(err)); + error_free(err); qapi_free_SocketAddress(addr); goto error; } - g_assert(ret == 0); qapi_free_SocketAddress(addr); net_socket_rs_init(&d->rs, net_stream_data_rs_finalize, false); diff --git a/net/tap-bsd.c b/net/tap-bsd.c index b4c84441ba8b7..bbf84d1828d34 100644 --- a/net/tap-bsd.c +++ b/net/tap-bsd.c @@ -98,7 +98,12 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, return -1; } } - g_unix_set_fd_nonblocking(fd, true, NULL); + + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } + return fd; } @@ -189,7 +194,10 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, goto error; } - g_unix_set_fd_nonblocking(fd, true, NULL); + if (!qemu_set_blocking(fd, false, errp)) { + goto error; + } + return fd; error: @@ -217,6 +225,11 @@ int tap_probe_has_uso(int fd) return 0; } +bool tap_probe_has_tunnel(int fd) +{ + return false; +} + void tap_fd_set_vnet_hdr_len(int fd, int len) { } @@ -231,8 +244,7 @@ int tap_fd_set_vnet_be(int fd, int is_be) return -EINVAL; } -void tap_fd_set_offload(int fd, int csum, int tso4, - int tso6, int ecn, int ufo, int uso4, int uso6) +void tap_fd_set_offload(int fd, const NetOffloads *ol) { } diff --git a/net/tap-linux.c b/net/tap-linux.c index 22ec2f45d2b7a..2a90b584678e9 100644 --- a/net/tap-linux.c +++ b/net/tap-linux.c @@ -124,7 +124,12 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, return -1; } pstrcpy(ifname, ifname_size, ifr.ifr_name); - g_unix_set_fd_nonblocking(fd, true, NULL); + + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } + return fd; } @@ -196,6 +201,17 @@ int tap_probe_has_uso(int fd) return 1; } +bool tap_probe_has_tunnel(int fd) +{ + unsigned offload; + + offload = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_UDP_TUNNEL_GSO; + if (ioctl(fd, TUNSETOFFLOAD, offload) < 0) { + return false; + } + return true; +} + void tap_fd_set_vnet_hdr_len(int fd, int len) { if (ioctl(fd, TUNSETVNETHDRSZ, &len) == -1) { @@ -239,8 +255,7 
@@ int tap_fd_set_vnet_be(int fd, int is_be) abort(); } -void tap_fd_set_offload(int fd, int csum, int tso4, - int tso6, int ecn, int ufo, int uso4, int uso6) +void tap_fd_set_offload(int fd, const NetOffloads *ol) { unsigned int offload = 0; @@ -249,22 +264,32 @@ void tap_fd_set_offload(int fd, int csum, int tso4, return; } - if (csum) { + if (ol->csum) { offload |= TUN_F_CSUM; - if (tso4) + if (ol->tso4) { offload |= TUN_F_TSO4; - if (tso6) + } + if (ol->tso6) { offload |= TUN_F_TSO6; - if ((tso4 || tso6) && ecn) + } + if ((ol->tso4 || ol->tso6) && ol->ecn) { offload |= TUN_F_TSO_ECN; - if (ufo) + } + if (ol->ufo) { offload |= TUN_F_UFO; - if (uso4) { + } + if (ol->uso4) { offload |= TUN_F_USO4; } - if (uso6) { + if (ol->uso6) { offload |= TUN_F_USO6; } + if (ol->tnl) { + offload |= TUN_F_UDP_TUNNEL_GSO; + } + if (ol->tnl_csum) { + offload |= TUN_F_UDP_TUNNEL_GSO_CSUM; + } } if (ioctl(fd, TUNSETOFFLOAD, offload) != 0) { diff --git a/net/tap-linux.h b/net/tap-linux.h index 9a58cecb7f479..8cd6b5874b988 100644 --- a/net/tap-linux.h +++ b/net/tap-linux.h @@ -53,4 +53,13 @@ #define TUN_F_USO4 0x20 /* I can handle USO for IPv4 packets */ #define TUN_F_USO6 0x40 /* I can handle USO for IPv6 packets */ +/* I can handle TSO/USO for UDP tunneled packets */ +#define TUN_F_UDP_TUNNEL_GSO 0x080 + +/* + * I can handle TSO/USO for UDP tunneled packets requiring csum offload for + * the outer header + */ +#define TUN_F_UDP_TUNNEL_GSO_CSUM 0x100 + #endif /* QEMU_TAP_LINUX_H */ diff --git a/net/tap-solaris.c b/net/tap-solaris.c index 51b7830bef1d4..75397e6c5447b 100644 --- a/net/tap-solaris.c +++ b/net/tap-solaris.c @@ -27,6 +27,7 @@ #include "tap_int.h" #include "qemu/ctype.h" #include "qemu/cutils.h" +#include "net/net.h" #include #include @@ -198,7 +199,12 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, return -1; } } - g_unix_set_fd_nonblocking(fd, true, NULL); + + if (!qemu_set_blocking(fd, false, errp)) { + close(fd); + return -1; + } + return fd; } @@ -221,6 
+227,11 @@ int tap_probe_has_uso(int fd) return 0; } +bool tap_probe_has_tunnel(int fd) +{ + return false; +} + void tap_fd_set_vnet_hdr_len(int fd, int len) { } @@ -235,8 +246,7 @@ int tap_fd_set_vnet_be(int fd, int is_be) return -EINVAL; } -void tap_fd_set_offload(int fd, int csum, int tso4, - int tso6, int ecn, int ufo, int uso4, int uso6) +void tap_fd_set_offload(int fd, const NetOffloads *ol) { } diff --git a/net/tap-stub.c b/net/tap-stub.c index 38673434cbd60..f7a5e0c1632e4 100644 --- a/net/tap-stub.c +++ b/net/tap-stub.c @@ -52,6 +52,11 @@ int tap_probe_has_uso(int fd) return 0; } +bool tap_probe_has_tunnel(int fd) +{ + return false; +} + void tap_fd_set_vnet_hdr_len(int fd, int len) { } @@ -66,8 +71,7 @@ int tap_fd_set_vnet_be(int fd, int is_be) return -EINVAL; } -void tap_fd_set_offload(int fd, int csum, int tso4, - int tso6, int ecn, int ufo, int uso4, int uso6) +void tap_fd_set_offload(int fd, const NetOffloads *ol) { } diff --git a/net/tap.c b/net/tap.c index 23536c09b4652..abe3b2d0369b4 100644 --- a/net/tap.c +++ b/net/tap.c @@ -62,6 +62,8 @@ static const int kernel_feature_bits[] = { VIRTIO_F_NOTIFICATION_DATA, VIRTIO_NET_F_RSC_EXT, VIRTIO_NET_F_HASH_REPORT, + VIRTIO_NET_F_GUEST_UDP_TUNNEL_GSO, + VIRTIO_NET_F_HOST_UDP_TUNNEL_GSO, VHOST_INVALID_FEATURE_BIT }; @@ -76,6 +78,7 @@ typedef struct TAPState { bool using_vnet_hdr; bool has_ufo; bool has_uso; + bool has_tunnel; bool enabled; VHostNetState *vhost_net; unsigned host_vnet_hdr_len; @@ -190,6 +193,11 @@ static void tap_send(void *opaque) break; } + if (s->host_vnet_hdr_len && size <= s->host_vnet_hdr_len) { + /* Invalid packet */ + break; + } + if (s->host_vnet_hdr_len && !s->using_vnet_hdr) { buf += s->host_vnet_hdr_len; size -= s->host_vnet_hdr_len; @@ -241,6 +249,14 @@ static bool tap_has_uso(NetClientState *nc) return s->has_uso; } +static bool tap_has_tunnel(NetClientState *nc) +{ + TAPState *s = DO_UPCAST(TAPState, nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_TAP); + return 
s->has_tunnel; +} + static bool tap_has_vnet_hdr(NetClientState *nc) { TAPState *s = DO_UPCAST(TAPState, nc, nc); @@ -280,15 +296,14 @@ static int tap_set_vnet_be(NetClientState *nc, bool is_be) return tap_fd_set_vnet_be(s->fd, is_be); } -static void tap_set_offload(NetClientState *nc, int csum, int tso4, - int tso6, int ecn, int ufo, int uso4, int uso6) +static void tap_set_offload(NetClientState *nc, const NetOffloads *ol) { TAPState *s = DO_UPCAST(TAPState, nc, nc); if (s->fd < 0) { return; } - tap_fd_set_offload(s->fd, csum, tso4, tso6, ecn, ufo, uso4, uso6); + tap_fd_set_offload(s->fd, ol); } static void tap_exit_notify(Notifier *notifier, void *data) @@ -370,6 +385,7 @@ static NetClientInfo net_tap_info = { .cleanup = tap_cleanup, .has_ufo = tap_has_ufo, .has_uso = tap_has_uso, + .has_tunnel = tap_has_tunnel, .has_vnet_hdr = tap_has_vnet_hdr, .has_vnet_hdr_len = tap_has_vnet_hdr_len, .set_offload = tap_set_offload, @@ -386,6 +402,7 @@ static TAPState *net_tap_fd_init(NetClientState *peer, int fd, int vnet_hdr) { + NetOffloads ol = {}; NetClientState *nc; TAPState *s; @@ -398,8 +415,9 @@ static TAPState *net_tap_fd_init(NetClientState *peer, s->using_vnet_hdr = false; s->has_ufo = tap_probe_has_ufo(s->fd); s->has_uso = tap_probe_has_uso(s->fd); + s->has_tunnel = tap_probe_has_tunnel(s->fd); s->enabled = true; - tap_set_offload(&s->nc, 0, 0, 0, 0, 0, 0, 0); + tap_set_offload(&s->nc, &ol); /* * Make sure host header length is set correctly in tap: * it might have been modified by another instance of qemu. 
@@ -622,8 +640,7 @@ int net_init_bridge(const Netdev *netdev, const char *name, return -1; } - if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(fd, false, errp)) { return -1; } vnet_hdr = tap_probe_vnet_hdr(fd, errp); @@ -724,9 +741,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, error_propagate(errp, err); goto failed; } - if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { - error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", - name, fd); + if (!qemu_set_blocking(vhostfd, false, errp)) { goto failed; } } else { @@ -736,8 +751,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, "tap: open vhost char device failed"); goto failed; } - if (!g_unix_set_fd_nonblocking(vhostfd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(vhostfd, false, errp)) { goto failed; } } @@ -834,9 +848,7 @@ int net_init_tap(const Netdev *netdev, const char *name, return -1; } - if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { - error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", - name, fd); + if (!qemu_set_blocking(fd, false, errp)) { close(fd); return -1; } @@ -890,10 +902,8 @@ int net_init_tap(const Netdev *netdev, const char *name, goto free_fail; } - ret = g_unix_set_fd_nonblocking(fd, true, NULL); - if (!ret) { - error_setg_errno(errp, errno, "%s: Can't use file descriptor %d", - name, fd); + if (!qemu_set_blocking(fd, false, errp)) { + ret = -1; goto free_fail; } @@ -946,8 +956,7 @@ int net_init_tap(const Netdev *netdev, const char *name, return -1; } - if (!g_unix_set_fd_nonblocking(fd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(fd, false, errp)) { return -1; } vnet_hdr = tap_probe_vnet_hdr(fd, errp); diff --git a/net/tap_int.h b/net/tap_int.h index 8857ff299d224..b76a05044bc5a 
100644 --- a/net/tap_int.h +++ b/net/tap_int.h @@ -27,6 +27,7 @@ #define NET_TAP_INT_H #include "qapi/qapi-types-net.h" +#include "net/net.h" int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required, int mq_required, Error **errp); @@ -37,8 +38,8 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp); int tap_probe_vnet_hdr(int fd, Error **errp); int tap_probe_has_ufo(int fd); int tap_probe_has_uso(int fd); -void tap_fd_set_offload(int fd, int csum, int tso4, int tso6, int ecn, int ufo, - int uso4, int uso6); +bool tap_probe_has_tunnel(int fd); +void tap_fd_set_offload(int fd, const NetOffloads *ol); void tap_fd_set_vnet_hdr_len(int fd, int len); int tap_fd_set_vnet_le(int fd, int vnet_is_le); int tap_fd_set_vnet_be(int fd, int vnet_is_be); diff --git a/net/vhost-user.c b/net/vhost-user.c index 1c3b8b36f3518..8b96157145a7a 100644 --- a/net/vhost-user.c +++ b/net/vhost-user.c @@ -298,7 +298,6 @@ static void chr_closed_bh(void *opaque) const char *name = opaque; NetClientState *ncs[MAX_QUEUE_NUM]; NetVhostUserState *s; - Error *err = NULL; int queues, i; queues = qemu_find_net_clients_except(name, ncs, @@ -317,9 +316,6 @@ static void chr_closed_bh(void *opaque) qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event, NULL, opaque, NULL, true); - if (err) { - error_report_err(err); - } qapi_event_send_netdev_vhost_user_disconnected(name); } @@ -329,7 +325,6 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event) NetClientState *ncs[MAX_QUEUE_NUM]; NetVhostUserState *s; Chardev *chr; - Error *err = NULL; int queues; queues = qemu_find_net_clients_except(name, ncs, @@ -375,10 +370,6 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event) /* Ignore */ break; } - - if (err) { - error_report_err(err); - } } static int net_vhost_user_init(NetClientState *peer, const char *device, diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 6a30a44d2bc80..74d26a9497257 100644 --- a/net/vhost-vdpa.c 
+++ b/net/vhost-vdpa.c @@ -1840,9 +1840,8 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, &has_cvq, errp); - if (queue_pairs < 0) { - qemu_close(vdpa_device_fd); - return queue_pairs; + if (queue_pairs <= 0) { + goto err; } r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); diff --git a/page-target.c b/page-target.c deleted file mode 100644 index 8fcd5443b526b..0000000000000 --- a/page-target.c +++ /dev/null @@ -1,21 +0,0 @@ -/* - * QEMU page values getters (target independent) - * - * Copyright (c) 2003 Fabrice Bellard - * - * SPDX-License-Identifier: LGPL-2.1-or-later - */ - -#include "qemu/osdep.h" -#include "exec/target_page.h" - -/* Convert target pages to MiB (2**20). */ -size_t qemu_target_pages_to_MiB(size_t pages) -{ - int page_bits = TARGET_PAGE_BITS; - - /* So far, the largest (non-huge) page size is 64k, i.e. 16 bits. */ - g_assert(page_bits < 20); - - return pages >> (20 - page_bits); -} diff --git a/pc-bios/ast27x0_bootrom.bin b/pc-bios/ast27x0_bootrom.bin index 0b9b3a2360e37..a4c94d64da55a 100644 Binary files a/pc-bios/ast27x0_bootrom.bin and b/pc-bios/ast27x0_bootrom.bin differ diff --git a/pc-bios/multiboot_dma.bin b/pc-bios/multiboot_dma.bin index c0e2c3102a335..e6d0c97093b4f 100644 Binary files a/pc-bios/multiboot_dma.bin and b/pc-bios/multiboot_dma.bin differ diff --git a/pc-bios/npcm7xx_bootrom.bin b/pc-bios/npcm7xx_bootrom.bin index 903f126636f9e..92282892b70db 100644 Binary files a/pc-bios/npcm7xx_bootrom.bin and b/pc-bios/npcm7xx_bootrom.bin differ diff --git a/pc-bios/npcm8xx_bootrom.bin b/pc-bios/npcm8xx_bootrom.bin index 6370d6475635c..45fb40fb5987a 100644 Binary files a/pc-bios/npcm8xx_bootrom.bin and b/pc-bios/npcm8xx_bootrom.bin differ diff --git a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin index b2e740010b268..02be3a72a880b 100644 Binary files 
a/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv32-generic-fw_dynamic.bin differ diff --git a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin index 018b4731a710d..cce35c65c2981 100644 Binary files a/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin and b/pc-bios/opensbi-riscv64-generic-fw_dynamic.bin differ diff --git a/pc-bios/optionrom/multiboot.S b/pc-bios/optionrom/multiboot.S index 181a4b03a3f26..c95e35c9cb628 100644 --- a/pc-bios/optionrom/multiboot.S +++ b/pc-bios/optionrom/multiboot.S @@ -208,7 +208,7 @@ ljmp2: prot_jump: .long prot_mode .short 8 -.align 4, 0 +.align 8, 0 gdt: /* 0x00 */ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 diff --git a/plugins/core.c b/plugins/core.c index c6e9ef14784ae..ead09fd2f1eec 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -248,7 +248,7 @@ static void plugin_grow_scoreboards__locked(CPUState *cpu) } plugin.scoreboard_alloc_size = scoreboard_size; /* force all tb to be flushed, as scoreboard pointers were changed. */ - tb_flush(cpu); + tb_flush__exclusive_or_serial(); } end_exclusive(); } @@ -684,8 +684,6 @@ void qemu_plugin_user_exit(void) * with the one in fork_start(). That is: * - start_exclusive(), which acquires qemu_cpu_list_lock, * must be called before acquiring plugin.lock. - * - tb_flush(), which acquires mmap_lock(), must be called - * while plugin.lock is not held. 
*/ start_exclusive(); @@ -705,7 +703,7 @@ void qemu_plugin_user_exit(void) } qemu_rec_mutex_unlock(&plugin.lock); - tb_flush(current_cpu); + tb_flush__exclusive_or_serial(); end_exclusive(); /* now it's safe to handle the exit case */ diff --git a/plugins/loader.c b/plugins/loader.c index 8f0d75c9049fe..ba10ebac99357 100644 --- a/plugins/loader.c +++ b/plugins/loader.c @@ -377,8 +377,7 @@ static void plugin_flush_destroy(CPUState *cpu, run_on_cpu_data arg) { struct qemu_plugin_reset_data *data = arg.host_ptr; - g_assert(cpu_in_exclusive_context(cpu)); - tb_flush(cpu); + tb_flush__exclusive_or_serial(); plugin_reset_destroy(data); } diff --git a/python/qemu/machine/README.rst b/python/qemu/machine/README.rst index 8de2c3d77222e..6554c69320137 100644 --- a/python/qemu/machine/README.rst +++ b/python/qemu/machine/README.rst @@ -2,7 +2,7 @@ qemu.machine package ==================== This package provides core utilities used for testing and debugging -QEMU. It is used by the iotests, vm tests, avocado tests, and several +QEMU. It is used by the iotests, vm tests, functional tests, and several other utilities in the ./scripts directory. It is not a fully-fledged SDK and it is subject to change at any time. diff --git a/python/qemu/machine/qtest.py b/python/qemu/machine/qtest.py index 4f5ede85b2372..781f674ffafd6 100644 --- a/python/qemu/machine/qtest.py +++ b/python/qemu/machine/qtest.py @@ -177,6 +177,8 @@ def _post_shutdown(self) -> None: self._qtest_sock_pair[0].close() self._qtest_sock_pair[1].close() self._qtest_sock_pair = None + if self._qtest is not None: + self._qtest.close() super()._post_shutdown() def qtest(self, cmd: str) -> str: diff --git a/python/qemu/qmp/__init__.py b/python/qemu/qmp/__init__.py index 69190d057a5b2..058139dc3cac5 100644 --- a/python/qemu/qmp/__init__.py +++ b/python/qemu/qmp/__init__.py @@ -39,7 +39,8 @@ logging.getLogger('qemu.qmp').addHandler(logging.NullHandler()) -# The order of these fields impact the Sphinx documentation order. 
+# IMPORTANT: When modifying this list, update the Sphinx overview docs. +# Anything visible in the qemu.qmp namespace should be on the overview page. __all__ = ( # Classes, most to least important 'QMPClient', diff --git a/python/qemu/qmp/error.py b/python/qemu/qmp/error.py index 24ba4d505410b..c87b078f620ad 100644 --- a/python/qemu/qmp/error.py +++ b/python/qemu/qmp/error.py @@ -44,7 +44,10 @@ class ProtocolError(QMPError): :param error_message: Human-readable string describing the error. """ - def __init__(self, error_message: str): - super().__init__(error_message) + def __init__(self, error_message: str, *args: object): + super().__init__(error_message, *args) #: Human-readable error message, without any prefix. self.error_message: str = error_message + + def __str__(self) -> str: + return self.error_message diff --git a/python/qemu/qmp/events.py b/python/qemu/qmp/events.py index 6199776cc6647..cfb5f0ac621f2 100644 --- a/python/qemu/qmp/events.py +++ b/python/qemu/qmp/events.py @@ -12,7 +12,14 @@ ---------------------- In all of the following examples, we assume that we have a `QMPClient` -instantiated named ``qmp`` that is already connected. +instantiated named ``qmp`` that is already connected. For example: + +.. code:: python + + from qemu.qmp import QMPClient + + qmp = QMPClient('example-vm') + await qmp.connect('127.0.0.1', 1234) `listener()` context blocks with one name @@ -87,7 +94,9 @@ event = listener.get() print(f"Event arrived: {event['event']}") -This event stream will never end, so these blocks will never terminate. +This event stream will never end, so these blocks will never +terminate. Even if the QMP connection errors out prematurely, this +listener will go silent without raising an error. Using asyncio.Task to concurrently retrieve events @@ -227,16 +236,20 @@ async def print_events(listener): .. 
code:: python await qmp.execute('stop') - qmp.events.clear() + discarded = qmp.events.clear() await qmp.execute('cont') event = await qmp.events.get() assert event['event'] == 'RESUME' + assert discarded[0]['event'] == 'STOP' `EventListener` objects are FIFO queues. If events are not consumed, they will remain in the queue until they are witnessed or discarded via `clear()`. FIFO queues will be drained automatically upon leaving a context block, or when calling `remove_listener()`. +Any events removed from the queue in this fashion will be returned by +the clear call. + Accessing listener history ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -350,6 +363,12 @@ def filter(event: Message) -> bool: break +Note that in the above example, we explicitly wait on jobA to conclude +first, and then wait for jobB to do the same. All we have guaranteed is +that the code that waits for jobA will not accidentally consume the +event intended for the jobB waiter. + + Extending the `EventListener` class ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -407,13 +426,13 @@ def accept(self, event) -> bool: These interfaces are not ones I am sure I will keep or otherwise modify heavily. -qmp.listener()’s type signature +qmp.listen()’s type signature ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -`listener()` does not return anything, because it was assumed the caller +`listen()` does not return anything, because it was assumed the caller already had a handle to the listener. However, for -``qmp.listener(EventListener())`` forms, the caller will not have saved -a handle to the listener. +``qmp.listen(EventListener())`` forms, the caller will not have saved a +handle to the listener. Because this function can accept *many* listeners, I found it hard to accurately type in a way where it could be used in both “one” or “many” @@ -497,6 +516,21 @@ def __init__( #: Optional, secondary event filter. 
         self.event_filter: Optional[EventFilter] = event_filter
 
+    def __repr__(self) -> str:
+        args: List[str] = []
+        if self.names:
+            args.append(f"names={self.names!r}")
+        if self.event_filter:
+            args.append(f"event_filter={self.event_filter!r}")
+
+        if self._queue.qsize():
+            state = f"<pending={self._queue.qsize()}>"
+        else:
+            state = ''
+
+        argstr = ", ".join(args)
+        return f"{type(self).__name__}{state}({argstr})"
+
     @property
     def history(self) -> Tuple[Message, ...]:
         """
@@ -618,7 +652,7 @@ class Events:
     def __init__(self) -> None:
         self._listeners: List[EventListener] = []
 
-        #: Default, all-events `EventListener`.
+        #: Default, all-events `EventListener`. See `qmp.events` for more info.
         self.events: EventListener = EventListener()
 
         self.register_listener(self.events)
diff --git a/python/qemu/qmp/legacy.py b/python/qemu/qmp/legacy.py
index 22a2b5616efa5..060ed0eb9d453 100644
--- a/python/qemu/qmp/legacy.py
+++ b/python/qemu/qmp/legacy.py
@@ -38,6 +38,7 @@
 from .error import QMPError
 from .protocol import Runstate, SocketAddrT
 from .qmp_client import QMPClient
+from .util import get_or_create_event_loop
 
 
 #: QMPMessage is an entire QMP message of any kind.
@@ -86,10 +87,13 @@ def __init__(self,
                 "server argument should be False when passing a socket")
 
         self._qmp = QMPClient(nickname)
-        self._aloop = asyncio.get_event_loop()
         self._address = address
         self._timeout: Optional[float] = None
 
+        # This is a sync shim intended for use in fully synchronous
+        # programs. Create and set an event loop if necessary.
+        self._aloop = get_or_create_event_loop()
+
         if server:
             assert not isinstance(self._address, socket.socket)
             self._sync(self._qmp.start_server(self._address))
@@ -231,6 +235,9 @@ def pull_event(self,
 
         :return: The first available QMP event, or None.
         """
+        # Kick the event loop to allow events to accumulate
+        self._sync(asyncio.sleep(0))
+
         if not wait:
             # wait is False/0: "do not wait, do not except."
if self._qmp.events.empty(): @@ -286,8 +293,8 @@ def settimeout(self, timeout: Optional[float]) -> None: """ Set the timeout for QMP RPC execution. - This timeout affects the `cmd`, `cmd_obj`, and `command` methods. - The `accept`, `pull_event` and `get_event` methods have their + This timeout affects the `cmd`, `cmd_obj`, and `cmd_raw` methods. + The `accept`, `pull_event` and `get_events` methods have their own configurable timeouts. :param timeout: @@ -303,17 +310,30 @@ def send_fd_scm(self, fd: int) -> None: self._qmp.send_fd_scm(fd) def __del__(self) -> None: - if self._qmp.runstate == Runstate.IDLE: - return + if self._qmp.runstate != Runstate.IDLE: + self._qmp.logger.warning( + "QEMUMonitorProtocol object garbage collected without a prior " + "call to close()" + ) if not self._aloop.is_running(): - self.close() - else: - # Garbage collection ran while the event loop was running. - # Nothing we can do about it now, but if we don't raise our - # own error, the user will be treated to a lot of traceback - # they might not understand. + if self._qmp.runstate != Runstate.IDLE: + # If the user neglected to close the QMP session and we + # are not currently running in an asyncio context, we + # have the opportunity to close the QMP session. If we + # do not do this, the error messages presented over + # dangling async resources may not make any sense to the + # user. + self.close() + + if self._qmp.runstate != Runstate.IDLE: + # If QMP is still not quiesced, it means that the garbage + # collector ran from a context within the event loop and we + # are simply too late to take any corrective action. Raise + # our own error to give meaningful feedback to the user in + # order to prevent pages of asyncio stacktrace jargon. 
raise QMPError( - "QEMUMonitorProtocol.close()" - " was not called before object was garbage collected" + "QEMUMonitorProtocol.close() was not called before object was " + "garbage collected, and could not be closed due to GC running " + "in the event loop" ) diff --git a/python/qemu/qmp/message.py b/python/qemu/qmp/message.py index f76ccc9074670..dabb8ec360eef 100644 --- a/python/qemu/qmp/message.py +++ b/python/qemu/qmp/message.py @@ -28,7 +28,8 @@ class Message(MutableMapping[str, object]): be instantiated from either another mapping (like a `dict`), or from raw `bytes` that still need to be deserialized. - Once instantiated, it may be treated like any other MutableMapping:: + Once instantiated, it may be treated like any other + :py:obj:`~collections.abc.MutableMapping`:: >>> msg = Message(b'{"hello": "world"}') >>> assert msg['hello'] == 'world' @@ -50,12 +51,19 @@ class Message(MutableMapping[str, object]): >>> dict(msg) {'hello': 'world'} + Or pretty-printed:: + + >>> print(str(msg)) + { + "hello": "world" + } :param value: Initial value, if any. :param eager: When `True`, attempt to serialize or deserialize the initial value immediately, so that conversion exceptions are raised during the call to ``__init__()``. + """ # pylint: disable=too-many-ancestors @@ -178,15 +186,15 @@ class DeserializationError(ProtocolError): :param raw: The raw `bytes` that prompted the failure. """ def __init__(self, error_message: str, raw: bytes): - super().__init__(error_message) + super().__init__(error_message, raw) #: The raw `bytes` that were not understood as JSON. self.raw: bytes = raw def __str__(self) -> str: - return "\n".join([ + return "\n".join(( super().__str__(), f" raw bytes were: {str(self.raw)}", - ]) + )) class UnexpectedTypeError(ProtocolError): @@ -197,13 +205,13 @@ class UnexpectedTypeError(ProtocolError): :param value: The deserialized JSON value that wasn't an object. 
""" def __init__(self, error_message: str, value: object): - super().__init__(error_message) + super().__init__(error_message, value) #: The JSON value that was expected to be an object. self.value: object = value def __str__(self) -> str: strval = json.dumps(self.value, indent=2) - return "\n".join([ + return "\n".join(( super().__str__(), f" json value was: {strval}", - ]) + )) diff --git a/python/qemu/qmp/models.py b/python/qemu/qmp/models.py index da52848d5a737..7e0d0baf0386f 100644 --- a/python/qemu/qmp/models.py +++ b/python/qemu/qmp/models.py @@ -54,7 +54,7 @@ def __repr__(self) -> str: class Greeting(Model): """ - Defined in qmp-spec.rst, section "Server Greeting". + Defined in `interop/qmp-spec`, "Server Greeting" section. :param raw: The raw Greeting object. :raise KeyError: If any required fields are absent. @@ -82,7 +82,7 @@ def _asdict(self) -> Dict[str, object]: class QMPGreeting(Model): """ - Defined in qmp-spec.rst, section "Server Greeting". + Defined in `interop/qmp-spec`, "Server Greeting" section. :param raw: The raw QMPGreeting object. :raise KeyError: If any required fields are absent. @@ -104,7 +104,7 @@ def __init__(self, raw: Mapping[str, Any]): class ErrorResponse(Model): """ - Defined in qmp-spec.rst, section "Error". + Defined in `interop/qmp-spec`, "Error" section. :param raw: The raw ErrorResponse object. :raise KeyError: If any required fields are absent. @@ -126,7 +126,7 @@ def __init__(self, raw: Mapping[str, Any]): class ErrorInfo(Model): """ - Defined in qmp-spec.rst, section "Error". + Defined in `interop/qmp-spec`, "Error" section. :param raw: The raw ErrorInfo object. :raise KeyError: If any required fields are absent. 
diff --git a/python/qemu/qmp/protocol.py b/python/qemu/qmp/protocol.py index a4ffdfad51bc9..219d092a79200 100644 --- a/python/qemu/qmp/protocol.py +++ b/python/qemu/qmp/protocol.py @@ -15,13 +15,16 @@ import asyncio from asyncio import StreamReader, StreamWriter +from contextlib import asynccontextmanager from enum import Enum from functools import wraps +from inspect import iscoroutinefunction import logging import socket from ssl import SSLContext from typing import ( Any, + AsyncGenerator, Awaitable, Callable, Generic, @@ -36,13 +39,10 @@ from .error import QMPError from .util import ( bottom_half, - create_task, exception_summary, flush, - is_closing, pretty_traceback, upper_half, - wait_closed, ) @@ -54,6 +54,9 @@ UnixAddrT = str SocketAddrT = Union[UnixAddrT, InternetAddrT] +# Maximum allowable size of read buffer, default +_DEFAULT_READBUFLEN = 64 * 1024 + class Runstate(Enum): """Protocol session runstate.""" @@ -76,11 +79,17 @@ class ConnectError(QMPError): This Exception always wraps a "root cause" exception that can be interrogated for additional information. + For example, when connecting to a non-existent socket:: + + await qmp.connect('not_found.sock') + # ConnectError: Failed to establish connection: + # [Errno 2] No such file or directory + :param error_message: Human-readable string describing the error. :param exc: The root-cause exception. """ def __init__(self, error_message: str, exc: Exception): - super().__init__(error_message) + super().__init__(error_message, exc) #: Human-readable error string self.error_message: str = error_message #: Wrapped root cause exception @@ -99,8 +108,8 @@ class StateError(QMPError): An API command (connect, execute, etc) was issued at an inappropriate time. This error is raised when a command like - :py:meth:`~AsyncProtocol.connect()` is issued at an inappropriate - time. + :py:meth:`~AsyncProtocol.connect()` is called when the client is + already connected. 
:param error_message: Human-readable string describing the state violation. :param state: The actual `Runstate` seen at the time of the violation. @@ -108,11 +117,14 @@ class StateError(QMPError): """ def __init__(self, error_message: str, state: Runstate, required: Runstate): - super().__init__(error_message) + super().__init__(error_message, state, required) self.error_message = error_message self.state = state self.required = required + def __str__(self) -> str: + return self.error_message + F = TypeVar('F', bound=Callable[..., Any]) # pylint: disable=invalid-name @@ -125,6 +137,25 @@ def require(required_state: Runstate) -> Callable[[F], F]: :param required_state: The `Runstate` required to invoke this method. :raise StateError: When the required `Runstate` is not met. """ + def _check(proto: 'AsyncProtocol[Any]') -> None: + name = type(proto).__name__ + if proto.runstate == required_state: + return + + if proto.runstate == Runstate.CONNECTING: + emsg = f"{name} is currently connecting." + elif proto.runstate == Runstate.DISCONNECTING: + emsg = (f"{name} is disconnecting." + " Call disconnect() to return to IDLE state.") + elif proto.runstate == Runstate.RUNNING: + emsg = f"{name} is already connected and running." + elif proto.runstate == Runstate.IDLE: + emsg = f"{name} is disconnected and idle." + else: + assert False + + raise StateError(emsg, proto.runstate, required_state) + def _decorator(func: F) -> F: # _decorator is the decorator that is built by calling the # require() decorator factory; e.g.: @@ -135,29 +166,20 @@ def _decorator(func: F) -> F: @wraps(func) def _wrapper(proto: 'AsyncProtocol[Any]', *args: Any, **kwargs: Any) -> Any: - # _wrapper is the function that gets executed prior to the - # decorated method. - - name = type(proto).__name__ - - if proto.runstate != required_state: - if proto.runstate == Runstate.CONNECTING: - emsg = f"{name} is currently connecting." 
- elif proto.runstate == Runstate.DISCONNECTING: - emsg = (f"{name} is disconnecting." - " Call disconnect() to return to IDLE state.") - elif proto.runstate == Runstate.RUNNING: - emsg = f"{name} is already connected and running." - elif proto.runstate == Runstate.IDLE: - emsg = f"{name} is disconnected and idle." - else: - assert False - raise StateError(emsg, proto.runstate, required_state) - # No StateError, so call the wrapped method. + _check(proto) return func(proto, *args, **kwargs) - # Return the decorated method; - # Transforming Func to Decorated[Func]. + @wraps(func) + async def _async_wrapper(proto: 'AsyncProtocol[Any]', + *args: Any, **kwargs: Any) -> Any: + _check(proto) + return await func(proto, *args, **kwargs) + + # Return the decorated method; F => Decorated[F] + # Use an async version when applicable, which + # preserves async signature generation in sphinx. + if iscoroutinefunction(func): + return cast(F, _async_wrapper) return cast(F, _wrapper) # Return the decorator instance from the decorator factory. Phew! @@ -200,24 +222,26 @@ class AsyncProtocol(Generic[T]): will log to 'qemu.qmp.protocol', but each individual connection can be given its own logger by giving it a name; messages will then log to 'qemu.qmp.protocol.${name}'. + :param readbuflen: + The maximum read buffer length of the underlying StreamReader + instance. """ # pylint: disable=too-many-instance-attributes #: Logger object for debugging messages from this connection. logger = logging.getLogger(__name__) - # Maximum allowable size of read buffer - _limit = 64 * 1024 - # ------------------------- # Section: Public interface # ------------------------- - def __init__(self, name: Optional[str] = None) -> None: - #: The nickname for this connection, if any. 
- self.name: Optional[str] = name - if self.name is not None: - self.logger = self.logger.getChild(self.name) + def __init__( + self, name: Optional[str] = None, + readbuflen: int = _DEFAULT_READBUFLEN + ) -> None: + self._name: Optional[str] + self.name = name + self.readbuflen = readbuflen # stream I/O self._reader: Optional[StreamReader] = None @@ -254,6 +278,24 @@ def __repr__(self) -> str: tokens.append(f"runstate={self.runstate.name}") return f"<{cls_name} {' '.join(tokens)}>" + @property + def name(self) -> Optional[str]: + """ + The nickname for this connection, if any. + + This name is used for differentiating instances in debug output. + """ + return self._name + + @name.setter + def name(self, name: Optional[str]) -> None: + logger = logging.getLogger(__name__) + if name: + self.logger = logger.getChild(name) + else: + self.logger = logger + self._name = name + @property # @upper_half def runstate(self) -> Runstate: """The current `Runstate` of the connection.""" @@ -262,7 +304,7 @@ def runstate(self) -> Runstate: @upper_half async def runstate_changed(self) -> Runstate: """ - Wait for the `runstate` to change, then return that runstate. + Wait for the `runstate` to change, then return that `Runstate`. """ await self._runstate_event.wait() return self.runstate @@ -276,9 +318,9 @@ async def start_server_and_accept( """ Accept a connection and begin processing message queues. - If this call fails, `runstate` is guaranteed to be set back to `IDLE`. - This method is precisely equivalent to calling `start_server()` - followed by `accept()`. + If this call fails, `runstate` is guaranteed to be set back to + `IDLE`. This method is precisely equivalent to calling + `start_server()` followed by :py:meth:`~AsyncProtocol.accept()`. :param address: Address to listen on; UNIX socket path or TCP address/port. @@ -291,7 +333,8 @@ async def start_server_and_accept( This exception will wrap a more concrete one. 
In most cases, the wrapped exception will be `OSError` or `EOFError`. If a protocol-level failure occurs while establishing a new - session, the wrapped error may also be an `QMPError`. + session, the wrapped error may also be a `QMPError`. + """ await self.start_server(address, ssl) await self.accept() @@ -307,8 +350,8 @@ async def start_server(self, address: SocketAddrT, This method starts listening for an incoming connection, but does not block waiting for a peer. This call will return immediately after binding and listening on a socket. A later - call to `accept()` must be made in order to finalize the - incoming connection. + call to :py:meth:`~AsyncProtocol.accept()` must be made in order + to finalize the incoming connection. :param address: Address to listen on; UNIX socket path or TCP address/port. @@ -321,9 +364,8 @@ async def start_server(self, address: SocketAddrT, This exception will wrap a more concrete one. In most cases, the wrapped exception will be `OSError`. """ - await self._session_guard( - self._do_start_server(address, ssl), - 'Failed to establish connection') + async with self._session_guard('Failed to establish connection'): + await self._do_start_server(address, ssl) assert self.runstate == Runstate.CONNECTING @upper_half @@ -332,10 +374,12 @@ async def accept(self) -> None: """ Accept an incoming connection and begin processing message queues. - If this call fails, `runstate` is guaranteed to be set back to `IDLE`. + Used after a previous call to `start_server()` to accept an + incoming connection. If this call fails, `runstate` is + guaranteed to be set back to `IDLE`. :raise StateError: When the `Runstate` is not `CONNECTING`. - :raise QMPError: When `start_server()` was not called yet. + :raise QMPError: When `start_server()` was not called first. :raise ConnectError: When a connection or session cannot be established. 
@@ -346,12 +390,10 @@ async def accept(self) -> None: """ if self._accepted is None: raise QMPError("Cannot call accept() before start_server().") - await self._session_guard( - self._do_accept(), - 'Failed to establish connection') - await self._session_guard( - self._establish_session(), - 'Failed to establish session') + async with self._session_guard('Failed to establish connection'): + await self._do_accept() + async with self._session_guard('Failed to establish session'): + await self._establish_session() assert self.runstate == Runstate.RUNNING @upper_half @@ -376,12 +418,10 @@ async def connect(self, address: Union[SocketAddrT, socket.socket], protocol-level failure occurs while establishing a new session, the wrapped error may also be an `QMPError`. """ - await self._session_guard( - self._do_connect(address, ssl), - 'Failed to establish connection') - await self._session_guard( - self._establish_session(), - 'Failed to establish session') + async with self._session_guard('Failed to establish connection'): + await self._do_connect(address, ssl) + async with self._session_guard('Failed to establish session'): + await self._establish_session() assert self.runstate == Runstate.RUNNING @upper_half @@ -392,7 +432,11 @@ async def disconnect(self) -> None: If there was an exception that caused the reader/writers to terminate prematurely, it will be raised here. - :raise Exception: When the reader or writer terminate unexpectedly. + :raise Exception: + When the reader or writer terminate unexpectedly. You can + expect to see `EOFError` if the server hangs up, or + `OSError` for connection-related issues. If there was a QMP + protocol-level problem, `ProtocolError` will be seen. 
""" self.logger.debug("disconnect() called.") self._schedule_disconnect() @@ -402,7 +446,8 @@ async def disconnect(self) -> None: # Section: Session machinery # -------------------------- - async def _session_guard(self, coro: Awaitable[None], emsg: str) -> None: + @asynccontextmanager + async def _session_guard(self, emsg: str) -> AsyncGenerator[None, None]: """ Async guard function used to roll back to `IDLE` on any error. @@ -419,10 +464,9 @@ async def _session_guard(self, coro: Awaitable[None], emsg: str) -> None: :raise ConnectError: When any other error is encountered in the guarded block. """ - # Note: After Python 3.6 support is removed, this should be an - # @asynccontextmanager instead of accepting a callback. try: - await coro + # Caller's code runs here. + yield except BaseException as err: self.logger.error("%s: %s", emsg, exception_summary(err)) self.logger.debug("%s:\n%s\n", emsg, pretty_traceback()) @@ -561,7 +605,7 @@ async def _do_start_server(self, address: SocketAddrT, port=address[1], ssl=ssl, backlog=1, - limit=self._limit, + limit=self.readbuflen, ) else: coro = asyncio.start_unix_server( @@ -569,7 +613,7 @@ async def _do_start_server(self, address: SocketAddrT, path=address, ssl=ssl, backlog=1, - limit=self._limit, + limit=self.readbuflen, ) # Allow runstate watchers to witness 'CONNECTING' state; some @@ -624,7 +668,7 @@ async def _do_connect(self, address: Union[SocketAddrT, socket.socket], "fd=%d, family=%r, type=%r", address.fileno(), address.family, address.type) connect = asyncio.open_connection( - limit=self._limit, + limit=self.readbuflen, ssl=ssl, sock=address, ) @@ -634,14 +678,14 @@ async def _do_connect(self, address: Union[SocketAddrT, socket.socket], address[0], address[1], ssl=ssl, - limit=self._limit, + limit=self.readbuflen, ) else: self.logger.debug("Connecting to file://%s ...", address) connect = asyncio.open_unix_connection( path=address, ssl=ssl, - limit=self._limit, + limit=self.readbuflen, ) self._reader, self._writer 
= await connect @@ -663,8 +707,8 @@ async def _establish_session(self) -> None: reader_coro = self._bh_loop_forever(self._bh_recv_message, 'Reader') writer_coro = self._bh_loop_forever(self._bh_send_message, 'Writer') - self._reader_task = create_task(reader_coro) - self._writer_task = create_task(writer_coro) + self._reader_task = asyncio.create_task(reader_coro) + self._writer_task = asyncio.create_task(writer_coro) self._bh_tasks = asyncio.gather( self._reader_task, @@ -689,7 +733,7 @@ def _schedule_disconnect(self) -> None: if not self._dc_task: self._set_state(Runstate.DISCONNECTING) self.logger.debug("Scheduling disconnect.") - self._dc_task = create_task(self._bh_disconnect()) + self._dc_task = asyncio.create_task(self._bh_disconnect()) @upper_half async def _wait_disconnect(self) -> None: @@ -825,13 +869,13 @@ async def _bh_close_stream(self, error_pathway: bool = False) -> None: if not self._writer: return - if not is_closing(self._writer): + if not self._writer.is_closing(): self.logger.debug("Closing StreamWriter.") self._writer.close() self.logger.debug("Waiting for StreamWriter to close ...") try: - await wait_closed(self._writer) + await self._writer.wait_closed() except Exception: # pylint: disable=broad-except # It's hard to tell if the Stream is already closed or # not. Even if one of the tasks has failed, it may have diff --git a/python/qemu/qmp/qmp_client.py b/python/qemu/qmp/qmp_client.py index 2a817f9db33b4..8beccfe29d347 100644 --- a/python/qemu/qmp/qmp_client.py +++ b/python/qemu/qmp/qmp_client.py @@ -41,7 +41,7 @@ class _WrappedProtocolError(ProtocolError): :param exc: The root-cause exception. """ def __init__(self, error_message: str, exc: Exception): - super().__init__(error_message) + super().__init__(error_message, exc) self.exc = exc def __str__(self) -> str: @@ -70,21 +70,38 @@ class ExecuteError(QMPError): """ Exception raised by `QMPClient.execute()` on RPC failure. 
+ This exception is raised when the server received, interpreted, and + replied to a command successfully; but the command itself returned a + failure status. + + For example:: + + await qmp.execute('block-dirty-bitmap-add', + {'node': 'foo', 'name': 'my_bitmap'}) + # qemu.qmp.qmp_client.ExecuteError: + # Cannot find device='foo' nor node-name='foo' + :param error_response: The RPC error response object. :param sent: The sent RPC message that caused the failure. :param received: The raw RPC error reply received. """ def __init__(self, error_response: ErrorResponse, sent: Message, received: Message): - super().__init__(error_response.error.desc) + super().__init__(error_response, sent, received) #: The sent `Message` that caused the failure self.sent: Message = sent #: The received `Message` that indicated failure self.received: Message = received #: The parsed error response self.error: ErrorResponse = error_response - #: The QMP error class - self.error_class: str = error_response.error.class_ + + @property + def error_class(self) -> str: + """The QMP error class""" + return self.error.error.class_ + + def __str__(self) -> str: + return self.error.error.desc class ExecInterruptedError(QMPError): @@ -93,9 +110,22 @@ class ExecInterruptedError(QMPError): This error is raised when an `execute()` statement could not be completed. This can occur because the connection itself was - terminated before a reply was received. + terminated before a reply was received. The true cause of the + interruption will be available via `disconnect()`. - The true cause of the interruption will be available via `disconnect()`. + The QMP protocol does not make it possible to know if a command + succeeded or failed after such an event; the client will need to + query the server to determine the state of the server on a + case-by-case basis. 
+ + For example, ECONNRESET might look like this:: + + try: + await qmp.execute('query-block') + # ExecInterruptedError: Disconnected + except ExecInterruptedError: + await qmp.disconnect() + # ConnectionResetError: [Errno 104] Connection reset by peer """ @@ -110,8 +140,8 @@ class _MsgProtocolError(ProtocolError): :param error_message: Human-readable string describing the error. :param msg: The QMP `Message` that caused the error. """ - def __init__(self, error_message: str, msg: Message): - super().__init__(error_message) + def __init__(self, error_message: str, msg: Message, *args: object): + super().__init__(error_message, msg, *args) #: The received `Message` that caused the error. self.msg: Message = msg @@ -150,30 +180,44 @@ class BadReplyError(_MsgProtocolError): :param sent: The message that was sent that prompted the error. """ def __init__(self, error_message: str, msg: Message, sent: Message): - super().__init__(error_message, msg) + super().__init__(error_message, msg, sent) #: The sent `Message` that caused the failure self.sent = sent class QMPClient(AsyncProtocol[Message], Events): - """ - Implements a QMP client connection. + """Implements a QMP client connection. + + `QMPClient` can be used to either connect or listen to a QMP server, + but always acts as the QMP client. - QMP can be used to establish a connection as either the transport - client or server, though this class always acts as the QMP client. + :param name: + Optional nickname for the connection, used to differentiate + instances when logging. - :param name: Optional nickname for the connection, used for logging. + :param readbuflen: + The maximum buffer length for reads and writes to and from the QMP + server, in bytes. Default is 10MB. If `QMPClient` is used to + connect to a guest agent to transfer files via ``guest-file-read``/ + ``guest-file-write``, increasing this value may be required. 
Basic script-style usage looks like this:: - qmp = QMPClient('my_virtual_machine_name') - await qmp.connect(('127.0.0.1', 1234)) - ... - res = await qmp.execute('block-query') - ... - await qmp.disconnect() + import asyncio + from qemu.qmp import QMPClient + + async def main(): + qmp = QMPClient('my_virtual_machine_name') + await qmp.connect(('127.0.0.1', 1234)) + ... + res = await qmp.execute('query-block') + ... + await qmp.disconnect() - Basic async client-style usage looks like this:: + asyncio.run(main()) + + A more advanced example that starts to take advantage of asyncio + might look like this:: class Client: def __init__(self, name: str): @@ -193,25 +237,32 @@ async def run(self, address='/tmp/qemu.socket'): await self.disconnect() See `qmp.events` for more detail on event handling patterns. + """ #: Logger object used for debugging messages. logger = logging.getLogger(__name__) - # Read buffer limit; 10MB like libvirt default - _limit = 10 * 1024 * 1024 + # Read buffer default limit; 10MB like libvirt default + _readbuflen = 10 * 1024 * 1024 # Type alias for pending execute() result items _PendingT = Union[Message, ExecInterruptedError] - def __init__(self, name: Optional[str] = None) -> None: - super().__init__(name) + def __init__( + self, + name: Optional[str] = None, + readbuflen: int = _readbuflen + ) -> None: + super().__init__(name, readbuflen) Events.__init__(self) #: Whether or not to await a greeting after establishing a connection. + #: Defaults to True; QGA servers expect this to be False. self.await_greeting: bool = True - #: Whether or not to perform capabilities negotiation upon connection. - #: Implies `await_greeting`. + #: Whether or not to perform capabilities negotiation upon + #: connection. Implies `await_greeting`. Defaults to True; QGA + #: servers expect this to be False. self.negotiate: bool = True # Cached Greeting, if one was awaited. 
@@ -228,7 +279,13 @@ def __init__(self, name: Optional[str] = None) -> None: @property def greeting(self) -> Optional[Greeting]: - """The `Greeting` from the QMP server, if any.""" + """ + The `Greeting` from the QMP server, if any. + + Defaults to ``None``, and will be set after a greeting is + received during the connection process. It is reset at the start + of each connection attempt. + """ return self._greeting @upper_half @@ -369,7 +426,7 @@ async def _on_message(self, msg: Message) -> None: # This is very likely a server parsing error. # It doesn't inherently belong to any pending execution. # Instead of performing clever recovery, just terminate. - # See "NOTE" in qmp-spec.rst, section "Error". + # See "NOTE" in interop/qmp-spec, "Error" section. raise ServerParseError( ("Server sent an error response without an ID, " "but there are no ID-less executions pending. " @@ -377,7 +434,7 @@ async def _on_message(self, msg: Message) -> None: msg ) - # qmp-spec.rst, section "Commands Responses": + # qmp-spec.rst, "Commands Responses" section: # 'Clients should drop all the responses # that have an unknown "id" field.' self.logger.log( @@ -550,7 +607,7 @@ async def _raw( @require(Runstate.RUNNING) async def execute_msg(self, msg: Message) -> object: """ - Execute a QMP command and return its value. + Execute a QMP command on the server and return its value. :param msg: The QMP `Message` to execute. @@ -562,7 +619,9 @@ async def execute_msg(self, msg: Message) -> object: If the QMP `Message` does not have either the 'execute' or 'exec-oob' fields set. :raise ExecuteError: When the server returns an error response. - :raise ExecInterruptedError: if the connection was terminated early. + :raise ExecInterruptedError: + If the connection was disrupted before + receiving a reply from the server. 
""" if not ('execute' in msg or 'exec-oob' in msg): raise ValueError("Requires 'execute' or 'exec-oob' message") @@ -601,9 +660,11 @@ def make_execute_msg(cls, cmd: str, :param cmd: QMP command name. :param arguments: Arguments (if any). Must be JSON-serializable. - :param oob: If `True`, execute "out of band". + :param oob: + If `True`, execute "out of band". See `interop/qmp-spec` + section "Out-of-band execution". - :return: An executable QMP `Message`. + :return: A QMP `Message` that can be executed with `execute_msg()`. """ msg = Message({'exec-oob' if oob else 'execute': cmd}) if arguments is not None: @@ -615,18 +676,22 @@ async def execute(self, cmd: str, arguments: Optional[Mapping[str, object]] = None, oob: bool = False) -> object: """ - Execute a QMP command and return its value. + Execute a QMP command on the server and return its value. :param cmd: QMP command name. :param arguments: Arguments (if any). Must be JSON-serializable. - :param oob: If `True`, execute "out of band". + :param oob: + If `True`, execute "out of band". See `interop/qmp-spec` + section "Out-of-band execution". :return: The command execution return value from the server. The type of object returned depends on the command that was issued, though most in QEMU return a `dict`. :raise ExecuteError: When the server returns an error response. - :raise ExecInterruptedError: if the connection was terminated early. + :raise ExecInterruptedError: + If the connection was disrupted before + receiving a reply from the server. """ msg = self.make_execute_msg(cmd, arguments, oob=oob) return await self.execute_msg(msg) @@ -634,8 +699,20 @@ async def execute(self, cmd: str, @upper_half @require(Runstate.RUNNING) def send_fd_scm(self, fd: int) -> None: - """ - Send a file descriptor to the remote via SCM_RIGHTS. + """Send a file descriptor to the remote via SCM_RIGHTS. + + This method does not close the file descriptor. + + :param fd: The file descriptor to send to QEMU. 
+ + This is an advanced feature of QEMU where file descriptors can + be passed from client to server. This is usually used as a + security measure to isolate the QEMU process from being able to + open its own files. See the QMP commands ``getfd`` and + ``add-fd`` for more information. + + See `socket.socket.sendmsg` for more information on the Python + implementation for sending file descriptors over a UNIX socket. """ assert self._writer is not None sock = self._writer.transport.get_extra_info('socket') diff --git a/python/qemu/qmp/qmp_shell.py b/python/qemu/qmp/qmp_shell.py index 98e684e9e8a64..f818800568567 100644 --- a/python/qemu/qmp/qmp_shell.py +++ b/python/qemu/qmp/qmp_shell.py @@ -10,9 +10,15 @@ # """ -Low-level QEMU shell on top of QMP. +qmp-shell - An interactive QEMU shell powered by QMP -usage: qmp-shell [-h] [-H] [-N] [-v] [-p] qmp_server +qmp-shell offers a simple shell with a convenient shorthand syntax as an +alternative to typing JSON by hand. This syntax is not standardized and +is not meant to be used as a scriptable interface. This shorthand *may* +change incompatibly in the future, and it is strongly encouraged to use +the QMP library to provide API-stable scripting when needed. + +usage: qmp-shell [-h] [-H] [-v] [-p] [-l LOGFILE] [-N] qmp_server positional arguments: qmp_server < UNIX socket path | TCP address:port > @@ -20,41 +26,52 @@ optional arguments: -h, --help show this help message and exit -H, --hmp Use HMP interface - -N, --skip-negotiation - Skip negotiate (for qemu-ga) -v, --verbose Verbose (echo commands sent and received) -p, --pretty Pretty-print JSON + -l LOGFILE, --logfile LOGFILE + Save log of all QMP messages to PATH + -N, --skip-negotiation + Skip negotiate (for qemu-ga) + +Usage +----- +First, start QEMU with:: -Start QEMU with: + > qemu [...] -qmp unix:./qmp-sock,server=on[,wait=off] -# qemu [...] 
-qmp unix:./qmp-sock,server +Then run the shell, passing the address of the socket:: -Run the shell: + > qmp-shell ./qmp-sock -$ qmp-shell ./qmp-sock +Syntax +------ -Commands have the following format: +Commands have the following format:: - < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] + < command-name > [ arg-name1=arg1 ] ... [ arg-nameN=argN ] -For example: +For example, to add a network device:: -(QEMU) device_add driver=e1000 id=net1 -{'return': {}} -(QEMU) + (QEMU) device_add driver=e1000 id=net1 + {'return': {}} + (QEMU) -key=value pairs also support Python or JSON object literal subset notations, -without spaces. Dictionaries/objects {} are supported as are arrays []. +key=value pairs support either Python or JSON object literal notations, +**without spaces**. Dictionaries/objects ``{}`` are supported, as are +arrays ``[]``:: - example-command arg-name1={'key':'value','obj'={'prop':"value"}} + example-command arg-name1={'key':'value','obj'={'prop':"value"}} -Both JSON and Python formatting should work, including both styles of -string literal quotes. Both paradigms of literal values should work, -including null/true/false for JSON and None/True/False for Python. +Either JSON or Python formatting for compound values works, including +both styles of string literal quotes (either single or double +quotes). Both paradigms of literal values are accepted, including +``null/true/false`` for JSON and ``None/True/False`` for Python. +Transactions +------------ -Transactions have the following multi-line format: +Transactions have the following multi-line format:: transaction( action-name1 [ arg-name1=arg1 ] ... [arg-nameN=argN ] @@ -62,11 +79,11 @@ action-nameN [ arg-name1=arg1 ] ... [arg-nameN=argN ] ) -One line transactions are also supported: +One line transactions are also supported:: transaction( action-name1 ... 
) -For example: +For example:: (QEMU) transaction( TRANS> block-dirty-bitmap-add node=drive0 name=bitmap1 @@ -75,9 +92,35 @@ {"return": {}} (QEMU) -Use the -v and -p options to activate the verbose and pretty-print options, -which will echo back the properly formatted JSON-compliant QMP that is being -sent to QEMU, which is useful for debugging and documentation generation. +Commands +-------- + +Autocomplete of command names using <tab> is supported. Pressing <tab> +at a blank CLI prompt will show you a list of all available commands +that the connected QEMU instance supports. + +For documentation on QMP commands and their arguments, please see +`qmp ref`. + +Events +------ + +qmp-shell will display events received from the server, but this version +does not do so asynchronously. To check for new events from the server, +press <enter> on a blank line:: + + (QEMU) ⏎ + {'timestamp': {'seconds': 1660071944, 'microseconds': 184667}, + 'event': 'STOP'} + +Display options +--------------- + +Use the -v and -p options to activate the verbose and pretty-print +options, which will echo back the properly formatted JSON-compliant QMP +that is being sent to QEMU. This is useful for debugging to see the +wire-level QMP data being exchanged, and generating output for use in +writing documentation for QEMU. """ import argparse @@ -514,21 +557,29 @@ def die(msg: str) -> NoReturn: sys.exit(1) -def main() -> None: - """ - qmp-shell entry point: parse command line arguments and start the REPL. 
- """ +def common_parser() -> argparse.ArgumentParser: + """Build common parsing options used by qmp-shell and qmp-shell-wrap.""" parser = argparse.ArgumentParser() parser.add_argument('-H', '--hmp', action='store_true', help='Use HMP interface') - parser.add_argument('-N', '--skip-negotiation', action='store_true', - help='Skip negotiate (for qemu-ga)') parser.add_argument('-v', '--verbose', action='store_true', help='Verbose (echo commands sent and received)') parser.add_argument('-p', '--pretty', action='store_true', help='Pretty-print JSON') parser.add_argument('-l', '--logfile', help='Save log of all QMP messages to PATH') + # NOTE: When changing arguments, update both this module docstring + # and the manpage synopsis in docs/man/qmp_shell.rst. + return parser + + +def main() -> None: + """ + qmp-shell entry point: parse command line arguments and start the REPL. + """ + parser = common_parser() + parser.add_argument('-N', '--skip-negotiation', action='store_true', + help='Skip negotiate (for qemu-ga)') default_server = os.environ.get('QMP_SOCKET') parser.add_argument('qmp_server', action='store', @@ -561,19 +612,37 @@ def main() -> None: def main_wrap() -> None: """ - qmp-shell-wrap entry point: parse command line arguments and - start the REPL. - """ - parser = argparse.ArgumentParser() - parser.add_argument('-H', '--hmp', action='store_true', - help='Use HMP interface') - parser.add_argument('-v', '--verbose', action='store_true', - help='Verbose (echo commands sent and received)') - parser.add_argument('-p', '--pretty', action='store_true', - help='Pretty-print JSON') - parser.add_argument('-l', '--logfile', - help='Save log of all QMP messages to PATH') + qmp-shell-wrap - QEMU + qmp-shell launcher utility + + Launch QEMU and connect to it with `qmp-shell` in a single command. + CLI arguments will be forwarded to qemu, with additional arguments + added to allow `qmp-shell` to then connect to the recently launched + QEMU instance. 
+ + usage: qmp-shell-wrap [-h] [-H] [-v] [-p] [-l LOGFILE] ... + positional arguments: + command QEMU command line to invoke + + optional arguments: + -h, --help show this help message and exit + -H, --hmp Use HMP interface + -v, --verbose Verbose (echo commands sent and received) + -p, --pretty Pretty-print JSON + -l LOGFILE, --logfile LOGFILE + Save log of all QMP messages to PATH + + Usage + ----- + + Prepend "qmp-shell-wrap" to your usual QEMU command line:: + + > qmp-shell-wrap qemu-system-x86_64 -M q35 -m 4096 -display none + Welcome to the QMP low-level shell! + Connected + (QEMU) + """ + parser = common_parser() parser.add_argument('command', nargs=argparse.REMAINDER, help='QEMU command line to invoke') @@ -610,6 +679,8 @@ def main_wrap() -> None: for _ in qemu.repl(): pass + except FileNotFoundError: + sys.stderr.write(f"ERROR: QEMU executable '{cmd[0]}' not found.\n") finally: os.unlink(sockpath) diff --git a/python/qemu/qmp/qmp_tui.py b/python/qemu/qmp/qmp_tui.py index 2d9ebbd20bc7f..d946c205131aa 100644 --- a/python/qemu/qmp/qmp_tui.py +++ b/python/qemu/qmp/qmp_tui.py @@ -21,6 +21,7 @@ import logging from logging import Handler, LogRecord import signal +import sys from typing import ( List, Optional, @@ -30,17 +31,27 @@ cast, ) -from pygments import lexers -from pygments import token as Token -import urwid -import urwid_readline + +try: + from pygments import lexers + from pygments import token as Token + import urwid + import urwid_readline +except ModuleNotFoundError as exc: + print( + f"Module '{exc.name}' not found.", + "You need the optional 'tui' group: pip install qemu.qmp[tui]", + sep='\n', + file=sys.stderr, + ) + sys.exit(1) from .error import ProtocolError from .legacy import QEMUMonitorProtocol, QMPBadPortError from .message import DeserializationError, Message, UnexpectedTypeError from .protocol import ConnectError, Runstate from .qmp_client import ExecInterruptedError, QMPClient -from .util import create_task, pretty_traceback +from .util 
import get_or_create_event_loop, pretty_traceback # The name of the signal that is used to update the history list @@ -225,7 +236,7 @@ def cb_send_to_server(self, raw_msg: str) -> None: """ try: msg = Message(bytes(raw_msg, encoding='utf-8')) - create_task(self._send_to_server(msg)) + asyncio.create_task(self._send_to_server(msg)) except (DeserializationError, UnexpectedTypeError) as err: raw_msg = format_json(raw_msg) logging.info('Invalid message: %s', err.error_message) @@ -246,7 +257,7 @@ def kill_app(self) -> None: Initiates killing of app. A bridge between asynchronous and synchronous code. """ - create_task(self._kill_app()) + asyncio.create_task(self._kill_app()) async def _kill_app(self) -> None: """ @@ -376,8 +387,7 @@ def run(self, debug: bool = False) -> None: """ screen = urwid.raw_display.Screen() screen.set_terminal_properties(256) - - self.aloop = asyncio.get_event_loop() + self.aloop = get_or_create_event_loop() self.aloop.set_debug(debug) # Gracefully handle SIGTERM and SIGINT signals @@ -393,7 +403,7 @@ def run(self, debug: bool = False) -> None: handle_mouse=True, event_loop=event_loop) - create_task(self.manage_connection(), self.aloop) + self.aloop.create_task(self.manage_connection()) try: main_loop.run() except Exception as err: diff --git a/python/qemu/qmp/util.py b/python/qemu/qmp/util.py index ca6225e9cda04..a8229e5524560 100644 --- a/python/qemu/qmp/util.py +++ b/python/qemu/qmp/util.py @@ -1,25 +1,16 @@ """ Miscellaneous Utilities -This module provides asyncio utilities and compatibility wrappers for -Python 3.6 to provide some features that otherwise become available in -Python 3.7+. - -Various logging and debugging utilities are also provided, such as -`exception_summary()` and `pretty_traceback()`, used primarily for -adding information into the logging stream. 
+This module provides asyncio and various logging and debugging +utilities, such as `exception_summary()` and `pretty_traceback()`, used +primarily for adding information into the logging stream. """ import asyncio import sys import traceback -from typing import ( - Any, - Coroutine, - Optional, - TypeVar, - cast, -) +from typing import TypeVar, cast +import warnings T = TypeVar('T') @@ -30,9 +21,35 @@ # -------------------------- +def get_or_create_event_loop() -> asyncio.AbstractEventLoop: + """ + Return this thread's current event loop, or create a new one. + + This function behaves similarly to asyncio.get_event_loop() in + Python<=3.13, where if there is no event loop currently associated + with the current context, it will create and register one. It should + generally not be used in any asyncio-native applications. + """ + try: + with warnings.catch_warnings(): + # Python <= 3.13 will trigger deprecation warnings if no + # event loop is set, but will create and set a new loop. + warnings.simplefilter("ignore") + loop = asyncio.get_event_loop() + except RuntimeError: + # Python 3.14+: No event loop set for this thread, + # create and set one. + loop = asyncio.new_event_loop() + # Set this loop as the current thread's loop, to be returned + # by calls to get_event_loop() in the future. + asyncio.set_event_loop(loop) + + return loop + + async def flush(writer: asyncio.StreamWriter) -> None: """ - Utility function to ensure a StreamWriter is *fully* drained. + Utility function to ensure an `asyncio.StreamWriter` is *fully* drained. `asyncio.StreamWriter.drain` only promises we will return to below the "high-water mark". This function ensures we flush the entire @@ -72,102 +89,13 @@ def bottom_half(func: T) -> T: These methods do not, in general, have the ability to directly report information to a caller’s context and will usually be - collected as a Task result instead. + collected as an `asyncio.Task` result instead. 
They must not call upper-half functions directly. """ return func -# ------------------------------- -# Section: Compatibility Wrappers -# ------------------------------- - - -def create_task(coro: Coroutine[Any, Any, T], - loop: Optional[asyncio.AbstractEventLoop] = None - ) -> 'asyncio.Future[T]': - """ - Python 3.6-compatible `asyncio.create_task` wrapper. - - :param coro: The coroutine to execute in a task. - :param loop: Optionally, the loop to create the task in. - - :return: An `asyncio.Future` object. - """ - if sys.version_info >= (3, 7): - if loop is not None: - return loop.create_task(coro) - return asyncio.create_task(coro) # pylint: disable=no-member - - # Python 3.6: - return asyncio.ensure_future(coro, loop=loop) - - -def is_closing(writer: asyncio.StreamWriter) -> bool: - """ - Python 3.6-compatible `asyncio.StreamWriter.is_closing` wrapper. - - :param writer: The `asyncio.StreamWriter` object. - :return: `True` if the writer is closing, or closed. - """ - if sys.version_info >= (3, 7): - return writer.is_closing() - - # Python 3.6: - transport = writer.transport - assert isinstance(transport, asyncio.WriteTransport) - return transport.is_closing() - - -async def wait_closed(writer: asyncio.StreamWriter) -> None: - """ - Python 3.6-compatible `asyncio.StreamWriter.wait_closed` wrapper. - - :param writer: The `asyncio.StreamWriter` to wait on. - """ - if sys.version_info >= (3, 7): - await writer.wait_closed() - return - - # Python 3.6 - transport = writer.transport - assert isinstance(transport, asyncio.WriteTransport) - - while not transport.is_closing(): - await asyncio.sleep(0) - - # This is an ugly workaround, but it's the best I can come up with. - sock = transport.get_extra_info('socket') - - if sock is None: - # Our transport doesn't have a socket? ... - # Nothing we can reasonably do. 
- return - - while sock.fileno() != -1: - await asyncio.sleep(0) - - -def asyncio_run(coro: Coroutine[Any, Any, T], *, debug: bool = False) -> T: - """ - Python 3.6-compatible `asyncio.run` wrapper. - - :param coro: A coroutine to execute now. - :return: The return value from the coroutine. - """ - if sys.version_info >= (3, 7): - return asyncio.run(coro, debug=debug) - - # Python 3.6 - loop = asyncio.get_event_loop() - loop.set_debug(debug) - ret = loop.run_until_complete(coro) - loop.close() - - return ret - - # ---------------------------- # Section: Logging & Debugging # ---------------------------- @@ -177,8 +105,11 @@ def exception_summary(exc: BaseException) -> str: """ Return a summary string of an arbitrary exception. - It will be of the form "ExceptionType: Error Message", if the error + It will be of the form "ExceptionType: Error Message" if the error string is non-empty, and just "ExceptionType" otherwise. + + This code is based on CPython's implementation of + `traceback.TracebackException.format_exception_only`. """ name = type(exc).__qualname__ smod = type(exc).__module__ diff --git a/python/qemu/utils/README.rst b/python/qemu/utils/README.rst index d5f2da14540bc..5027f0b5f1188 100644 --- a/python/qemu/utils/README.rst +++ b/python/qemu/utils/README.rst @@ -2,6 +2,6 @@ qemu.utils package ================== This package provides miscellaneous utilities used for testing and -debugging QEMU. It is used primarily by the vm and avocado tests. +debugging QEMU. It is used primarily by the vm and functional tests. See the documentation in ``__init__.py`` for more information. diff --git a/python/scripts/mkvenv.py b/python/scripts/mkvenv.py index 8ac5b0b2a05c9..9aed266df1ba8 100644 --- a/python/scripts/mkvenv.py +++ b/python/scripts/mkvenv.py @@ -84,6 +84,7 @@ Sequence, Tuple, Union, + cast, ) import venv @@ -94,17 +95,39 @@ HAVE_DISTLIB = True try: import distlib.scripts - import distlib.version except ImportError: try: # Reach into pip's cookie jar. 
pylint and flake8 don't understand # that these imports will be used via distlib.xxx. from pip._vendor import distlib import pip._vendor.distlib.scripts # noqa, pylint: disable=unused-import - import pip._vendor.distlib.version # noqa, pylint: disable=unused-import except ImportError: HAVE_DISTLIB = False +# pip 25.2 does not vendor distlib.version, but it uses vendored +# packaging.version +HAVE_DISTLIB_VERSION = True +try: + import distlib.version # pylint: disable=ungrouped-imports +except ImportError: + try: + # pylint: disable=unused-import,ungrouped-imports + import pip._vendor.distlib.version # noqa + except ImportError: + HAVE_DISTLIB_VERSION = False + +HAVE_PACKAGING_VERSION = True +try: + # Do not bother importing non-vendored packaging, because it is not + # in stdlib. + from pip._vendor import packaging + # pylint: disable=unused-import + import pip._vendor.packaging.requirements # noqa + import pip._vendor.packaging.version # noqa +except ImportError: + HAVE_PACKAGING_VERSION = False + + # Try to load tomllib, with a fallback to tomli. # HAVE_TOMLLIB is checked below, just-in-time, so that mkvenv does not fail # outside the venv or before a potential call to ensurepip in checkpip(). 
@@ -133,6 +156,43 @@ class Ouch(RuntimeError): """An Exception class we can't confuse with a builtin.""" +class Matcher: + """Compatibility appliance for version/requirement string parsing.""" + def __init__(self, name_and_constraint: str): + """Create a matcher from a requirement-like string.""" + if HAVE_DISTLIB_VERSION: + self._m = distlib.version.LegacyMatcher(name_and_constraint) + elif HAVE_PACKAGING_VERSION: + self._m = packaging.requirements.Requirement(name_and_constraint) + else: + raise Ouch("found neither distlib.version nor packaging.version") + self.name = self._m.name + + def match(self, version_str: str) -> bool: + """Return True if `version` satisfies the stored constraint.""" + if HAVE_DISTLIB_VERSION: + return cast( + bool, + self._m.match(distlib.version.LegacyVersion(version_str)) + ) + + assert HAVE_PACKAGING_VERSION + return cast( + bool, + self._m.specifier.contains( + packaging.version.Version(version_str), prereleases=True + ) + ) + + def __str__(self) -> str: + """String representation delegated to the backend.""" + return str(self._m) + + def __repr__(self) -> str: + """Stable debug representation delegated to the backend.""" + return repr(self._m) + + class QemuEnvBuilder(venv.EnvBuilder): """ An extension of venv.EnvBuilder for building QEMU's configure-time venv. 
@@ -669,7 +729,7 @@ def _do_ensure( canary = None for name, info in group.items(): constraint = _make_version_constraint(info, False) - matcher = distlib.version.LegacyMatcher(name + constraint) + matcher = Matcher(name + constraint) print(f"mkvenv: checking for {matcher}", file=sys.stderr) dist: Optional[Distribution] = None @@ -683,7 +743,7 @@ def _do_ensure( # Always pass installed package to pip, so that they can be # updated if the requested version changes or not _is_system_package(dist) - or not matcher.match(distlib.version.LegacyVersion(dist.version)) + or not matcher.match(dist.version) ): absent.append(name + _make_version_constraint(info, True)) if len(absent) == 1: diff --git a/python/scripts/vendor.py b/python/scripts/vendor.py index b47db00743a5b..33ac7a45de0fd 100755 --- a/python/scripts/vendor.py +++ b/python/scripts/vendor.py @@ -41,8 +41,8 @@ def main() -> int: parser.parse_args() packages = { - "meson==1.8.1": - "374bbf71247e629475fc10b0bd2ef66fc418c2d8f4890572f74de0f97d0d42da", + "meson==1.9.0": + "45e51ddc41e37d961582d06e78c48e0f9039011587f3495c4d6b0781dad92357", } vendor_dir = Path(__file__, "..", "..", "wheels").resolve() diff --git a/python/tests/protocol.py b/python/tests/protocol.py index 56c4d441f9c4a..e565802516d22 100644 --- a/python/tests/protocol.py +++ b/python/tests/protocol.py @@ -8,7 +8,6 @@ from qemu.qmp import ConnectError, Runstate from qemu.qmp.protocol import AsyncProtocol, StateError -from qemu.qmp.util import asyncio_run, create_task class NullProtocol(AsyncProtocol[None]): @@ -124,7 +123,7 @@ async def _runner(): if allow_cancellation: return raise - return create_task(_runner()) + return asyncio.create_task(_runner()) @contextmanager @@ -228,7 +227,7 @@ def async_test(async_test_method): Decorator; adds SetUp and TearDown to async tests. 
""" async def _wrapper(self, *args, **kwargs): - loop = asyncio.get_event_loop() + loop = asyncio.get_running_loop() loop.set_debug(True) await self._asyncSetUp() @@ -271,7 +270,7 @@ async def _watcher(): msg=f"Expected state '{state.name}'", ) - self.runstate_watcher = create_task(_watcher()) + self.runstate_watcher = asyncio.create_task(_watcher()) # Kick the loop and force the task to block on the event. await asyncio.sleep(0) @@ -589,7 +588,8 @@ async def _asyncTearDown(self): async def testSmoke(self): with TemporaryDirectory(suffix='.qmp') as tmpdir: sock = os.path.join(tmpdir, type(self.proto).__name__ + ".sock") - server_task = create_task(self.server.start_server_and_accept(sock)) + server_task = asyncio.create_task( + self.server.start_server_and_accept(sock)) # give the server a chance to start listening [...] await asyncio.sleep(0) diff --git a/python/wheels/meson-1.8.1-py3-none-any.whl b/python/wheels/meson-1.8.1-py3-none-any.whl deleted file mode 100644 index a885f0e18cea1..0000000000000 Binary files a/python/wheels/meson-1.8.1-py3-none-any.whl and /dev/null differ diff --git a/python/wheels/meson-1.9.0-py3-none-any.whl b/python/wheels/meson-1.9.0-py3-none-any.whl new file mode 100644 index 0000000000000..57cc75cb13866 Binary files /dev/null and b/python/wheels/meson-1.9.0-py3-none-any.whl differ diff --git a/pythondeps.toml b/pythondeps.toml index b2eec940ce5af..98e99e79005b2 100644 --- a/pythondeps.toml +++ b/pythondeps.toml @@ -19,9 +19,13 @@ [meson] # The install key should match the version in python/wheels/ -meson = { accepted = ">=1.5.0", installed = "1.8.1", canary = "meson" } +meson = { accepted = ">=1.5.0", installed = "1.9.0", canary = "meson" } pycotap = { accepted = ">=1.1.0", installed = "1.3.1" } +[meson-rust] +# The install key should match the version in python/wheels/ +meson = { accepted = ">=1.9.0", installed = "1.9.0", canary = "meson" } + [docs] # Please keep the installed versions in sync with docs/requirements.txt sphinx = { 
accepted = ">=3.4.3", installed = "6.2.1", canary = "sphinx-build" } @@ -29,3 +33,4 @@ sphinx_rtd_theme = { accepted = ">=0.5", installed = "1.2.2" } [testdeps] qemu.qmp = { accepted = ">=0.0.3", installed = "0.0.3" } +pygdbmi = { accepted = ">=0.11.0.0", installed = "0.11.0.0" } diff --git a/qapi/accelerator.json b/qapi/accelerator.json index 28d5ff4c493a1..2b920608847cc 100644 --- a/qapi/accelerator.json +++ b/qapi/accelerator.json @@ -4,7 +4,9 @@ # SPDX-License-Identifier: GPL-2.0-or-later ## -# = Accelerators +# ************ +# Accelerators +# ************ ## { 'include': 'common.json' } @@ -52,3 +54,57 @@ { 'command': 'x-accel-stats', 'returns': 'HumanReadableText', 'features': [ 'unstable' ] } + +## +# @Accelerator: +# +# Information about support for MSHV acceleration +# +# @hvf: Apple Hypervisor.framework +# +# @kvm: KVM +# +# @mshv: Hyper-V +# +# @nvmm: NetBSD NVMM +# +# @qtest: QTest (dummy accelerator) +# +# @tcg: TCG (dynamic translation) +# +# @whpx: Windows Hypervisor Platform +# +# @xen: Xen +# +# Since: 10.2.0 +## +{ 'enum': 'Accelerator', 'data': ['hvf', 'kvm', 'mshv', 'nvmm', 'qtest', 'tcg', 'whpx', 'xen'] } + +## +# @AcceleratorInfo: +# +# Information about support for various accelerators +# +# @enabled: the accelerator that is in use +# +# @present: the list of accelerators that are built into this executable +# +# Since: 10.2.0 +## +{ 'struct': 'AcceleratorInfo', 'data': {'enabled': 'Accelerator', 'present': ['Accelerator']} } + +## +# @query-accelerators: +# +# Return information about accelerators +# +# Returns: @AcceleratorInfo +# +# Since: 10.2.0 +# +# .. 
qmp-example:: +# +# -> { "execute": "query-accelerators" } +# <- { "return": { "enabled": "mshv", "present": ["kvm", "mshv", "qtest", "tcg"] } } +## +{ 'command': 'query-accelerators', 'returns': 'AcceleratorInfo' } diff --git a/qapi/acpi-hest.json b/qapi/acpi-hest.json new file mode 100644 index 0000000000000..28af1266a7772 --- /dev/null +++ b/qapi/acpi-hest.json @@ -0,0 +1,36 @@ +# -*- Mode: Python -*- +# vim: filetype=python +# SPDX-License-Identifier: GPL-2.0-or-later + +## +# == GHESv2 CPER Error Injection +# +# Defined since ACPI Specification 6.1, +# section 18.3.2.8 Generic Hardware Error Source version 2. See: +# +# https://uefi.org/sites/default/files/resources/ACPI_6_1.pdf +## + + +## +# @inject-ghes-v2-error: +# +# Inject an error with additional ACPI 6.1 GHESv2 error information +# +# @cper: contains a base64 encoded string with raw data for a single +# CPER record with Generic Error Status Block, Generic Error Data +# Entry and generic error data payload, as described at +# https://uefi.org/specs/UEFI/2.10/Apx_N_Common_Platform_Error_Record.html#format +# +# Features: +# +# @unstable: This command is experimental. +# +# Since: 10.2 +## +{ 'command': 'inject-ghes-v2-error', + 'data': { + 'cper': 'str' + }, + 'features': [ 'unstable' ] +} diff --git a/qapi/block-core.json b/qapi/block-core.json index ebbe95b3d82b0..dc6eb4ae23dd1 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -159,7 +159,14 @@ ## # @ImageInfoSpecificRbd: # -# @encryption-format: Image encryption format +# @encryption-format: Image encryption format. If encryption is enabled for the +# image (see encrypted in BlockNodeInfo), this is the actual format in which the +# image is accessed. If encryption is not enabled, this is the result of +# probing when the image was opened, to give a suggestion which encryption +# format could be enabled. 
Note that probing results can be changed by the +# guest by writing a (possibly partial) encryption format header to the +# image, so don't treat this information as trusted if the guest is not +# trusted. # # Since: 6.1 ## diff --git a/qapi/dump.json b/qapi/dump.json index 32c8c1f06e403..726b5208703c0 100644 --- a/qapi/dump.json +++ b/qapi/dump.json @@ -79,7 +79,7 @@ # # @detach: if true, QMP will return immediately rather than waiting # for the dump to finish. The user can track progress using -# "query-dump". (since 2.6). +# `query-dump`. (since 2.6). # # @begin: if specified, the starting physical address. # diff --git a/qapi/machine-s390x.json b/qapi/machine-s390x.json index 966dbd61d2e0b..8412668b67123 100644 --- a/qapi/machine-s390x.json +++ b/qapi/machine-s390x.json @@ -119,3 +119,24 @@ { 'command': 'query-s390x-cpu-polarization', 'returns': 'CpuPolarizationInfo', 'features': [ 'unstable' ] } + +## +# @SCLP_CPI_INFO_AVAILABLE: +# +# Emitted when the Control-Program Identification data is available +# in the QOM tree. +# +# Features: +# +# @unstable: This event is experimental. +# +# Since: 10.2 +# +# .. qmp-example:: +# +# <- { "event": "SCLP_CPI_INFO_AVAILABLE", +# "timestamp": { "seconds": 1401385907, "microseconds": 422329 } } +## +{ 'event': 'SCLP_CPI_INFO_AVAILABLE', + 'features': [ 'unstable' ] +} diff --git a/qapi/machine.json b/qapi/machine.json index 6f59f70ca61f1..c6dc6fe69b5cd 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -1839,6 +1839,35 @@ 'returns': 'HumanReadableText', 'features': [ 'unstable' ]} +## +# @FirmwareLog: +# +# @version: Firmware version. +# +# @log: Firmware debug log, in base64 encoding. First and last log +# line might be incomplete. +# +# Since: 10.2 +## +{ 'struct': 'FirmwareLog', + 'data': { '*version': 'str', + 'log': 'str' } } + +## +# @query-firmware-log: +# +# Find firmware memory log buffer in guest memory, return content. +# +# @max-size: limit the amount of log data returned. 
Up to 1 MiB of +# log data is allowed. In case the amount of log data is +# larger than @max-size the tail of the log is returned. +# +# Since: 10.2 +## +{ 'command': 'query-firmware-log', + 'data': { '*max-size': 'size' }, + 'returns': 'FirmwareLog' } + ## # @dump-skeys: # @@ -2087,7 +2116,7 @@ # # @deprecated-props: an optional list of properties that are flagged as # deprecated by the CPU vendor. The list depends on the -# CpuModelExpansionType: "static" properties are a subset of the +# `CpuModelExpansionType`: "static" properties are a subset of the # enabled-properties for the expanded model; "full" properties are # a set of properties that are deprecated across all models for # the architecture. (since: 10.1 -- since 9.1 on s390x --). diff --git a/qapi/meson.build b/qapi/meson.build index ca6b61a608d03..a46269b5a0c92 100644 --- a/qapi/meson.build +++ b/qapi/meson.build @@ -59,6 +59,7 @@ if have_system qapi_all_modules += [ 'accelerator', 'acpi', + 'acpi-hest', 'audio', 'cryptodev', 'qdev', diff --git a/qapi/migration.json b/qapi/migration.json index e08a99bb82672..be0f3fcc12a9c 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -641,7 +641,7 @@ # # This mode supports VFIO devices provided the user first puts the # guest in the suspended runstate, such as by issuing -# guest-suspend-ram to the QEMU guest agent. +# `guest-suspend-ram` to the QEMU guest agent. # # Best performance is achieved when the memory backend is shared # and the @x-ignore-shared migration capability is set, but this @@ -694,9 +694,32 @@ # until you issue the `migrate-incoming` command. # # (since 10.0) +# +# @cpr-exec: The migrate command stops the VM, saves state to the +# migration channel, directly exec's a new version of QEMU on the +# same host, replacing the original process while retaining its +# PID, and loads state from the channel. Guest RAM is preserved +# in place. Devices and their pinned pages are also preserved for +# VFIO and IOMMUFD. 
+# +# Old QEMU starts new QEMU by exec'ing the command specified by +# the @cpr-exec-command parameter. The command may be a direct +# invocation of new QEMU, or may be a wrapper that exec's the new +# QEMU binary. +# +# Because old QEMU terminates when new QEMU starts, one cannot +# stream data between the two, so the channel must be a type, +# such as a file, that accepts all data before old QEMU exits. +# Otherwise, old QEMU may quietly block writing to the channel. +# +# Memory-backend objects must have the share=on attribute, but +# memory-backend-epc is not supported. The VM must be started +# with the '-machine aux-ram-share=on' option. +# +# (since 10.2) ## { 'enum': 'MigMode', - 'data': [ 'normal', 'cpr-reboot', 'cpr-transfer' ] } + 'data': [ 'normal', 'cpr-reboot', 'cpr-transfer', 'cpr-exec' ] } ## # @ZeroPageDetection: @@ -924,6 +947,10 @@ # only has effect if the @mapped-ram capability is enabled. # (Since 9.1) # +# @cpr-exec-command: Command to start the new QEMU process when @mode +# is @cpr-exec. The first list element is the program's filename, +# the remainder its arguments. (Since 10.2) +# # Features: # # @unstable: Members @x-checkpoint-delay and @@ -950,7 +977,8 @@ 'vcpu-dirty-limit', 'mode', 'zero-page-detection', - 'direct-io'] } + 'direct-io', + 'cpr-exec-command'] } ## # @MigrateSetParameters: @@ -1105,6 +1133,10 @@ # only has effect if the @mapped-ram capability is enabled. # (Since 9.1) # +# @cpr-exec-command: Command to start the new QEMU process when @mode +# is @cpr-exec. The first list element is the program's filename, +# the remainder its arguments. 
(Since 10.2) +# # Features: # # @unstable: Members @x-checkpoint-delay and @@ -1146,7 +1178,8 @@ '*vcpu-dirty-limit': 'uint64', '*mode': 'MigMode', '*zero-page-detection': 'ZeroPageDetection', - '*direct-io': 'bool' } } + '*direct-io': 'bool', + '*cpr-exec-command': [ 'str' ]} } ## # @migrate-set-parameters: @@ -1315,6 +1348,10 @@ # only has effect if the @mapped-ram capability is enabled. # (Since 9.1) # +# @cpr-exec-command: Command to start the new QEMU process when @mode +# is @cpr-exec. The first list element is the program's filename, +# the remainder its arguments. (Since 10.2) +# # Features: # # @unstable: Members @x-checkpoint-delay and @@ -1353,7 +1390,8 @@ '*vcpu-dirty-limit': 'uint64', '*mode': 'MigMode', '*zero-page-detection': 'ZeroPageDetection', - '*direct-io': 'bool' } } + '*direct-io': 'bool', + '*cpr-exec-command': [ 'str' ]} } ## # @query-migrate-parameters: @@ -1704,7 +1742,7 @@ # # .. admonition:: Notes # -# 1. The 'query-migrate' command should be used to check +# 1. The `query-migrate` command should be used to check # migration's progress and final result (this information is # provided by the 'status' member). # diff --git a/qapi/misc-i386.json b/qapi/misc-i386.json index c8c91a241cc01..d1ce8caf253b6 100644 --- a/qapi/misc-i386.json +++ b/qapi/misc-i386.json @@ -8,7 +8,7 @@ # # Reset the RTC interrupt reinjection backlog. Can be used if another # mechanism to synchronize guest time is in effect, for example QEMU -# guest agent's guest-set-time command. +# guest agent's `guest-set-time` command. 
# # Use of this command is only applicable for x86 machines with an RTC, # and on other machines will silently return without performing any diff --git a/qapi/net.json b/qapi/net.json index 78bcc9871e0c1..60d196afe5ce8 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -281,7 +281,7 @@ # # @smbserver: IP address of the built-in SMB server # -# @hostfwd: redirect incoming TCP or UDP host connections to guest +# @hostfwd: redirect incoming TCP, UDP or UNIX host connections to guest # endpoints # # @guestfwd: forward guest TCP connections diff --git a/qapi/qapi-schema.json b/qapi/qapi-schema.json index 82f111ba063cb..b93dd68d94c63 100644 --- a/qapi/qapi-schema.json +++ b/qapi/qapi-schema.json @@ -68,6 +68,7 @@ { 'include': 'misc-i386.json' } { 'include': 'audio.json' } { 'include': 'acpi.json' } +{ 'include': 'acpi-hest.json' } { 'include': 'pci.json' } { 'include': 'stats.json' } { 'include': 'virtio.json' } diff --git a/qapi/run-state.json b/qapi/run-state.json index 54ba5c9a3f51c..4757947ca6bfc 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -20,7 +20,7 @@ # @inmigrate: guest is paused waiting for an incoming migration. Note # that this state does not tell whether the machine will start at # the end of the migration. This depends on the command-line -S -# option and any invocation of 'stop' or 'cont' that has happened +# option and any invocation of `stop` or `cont` that has happened # since QEMU was started. # # @internal-error: An internal error that prevents further guest diff --git a/qapi/sockets.json b/qapi/sockets.json index 82046b0b3ab89..32fac5172887b 100644 --- a/qapi/sockets.json +++ b/qapi/sockets.json @@ -143,7 +143,7 @@ # # @str: decimal is for file descriptor number, otherwise it's a file # descriptor name. Named file descriptors are permitted in -# monitor commands, in combination with the 'getfd' command. +# monitor commands, in combination with the `getfd` command. 
# Decimal file descriptors are permitted at startup or other # contexts where no monitor context is active. # diff --git a/qapi/virtio.json b/qapi/virtio.json index 9d652fe4a8c5d..05295ab66559e 100644 --- a/qapi/virtio.json +++ b/qapi/virtio.json @@ -247,6 +247,7 @@ # }, # "host-features": { # "unknown-dev-features": 1073741824, +# "unknown-dev-features2": 0, # "dev-features": [], # "transports": [ # "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled", @@ -490,14 +491,18 @@ # unique features) # # @unknown-dev-features: Virtio device features bitmap that have not -# been decoded +# been decoded (bits 0-63) +# +# @unknown-dev-features2: Virtio device features bitmap that have not +# been decoded (bits 64-127) (since 10.2) # # Since: 7.2 ## { 'struct': 'VirtioDeviceFeatures', 'data': { 'transports': [ 'str' ], '*dev-features': [ 'str' ], - '*unknown-dev-features': 'uint64' } } + '*unknown-dev-features': 'uint64', + '*unknown-dev-features2': 'uint64' } } ## # @VirtQueueStatus: diff --git a/qemu-options.hx b/qemu-options.hx index ab23f14d21782..0223ceffeb24d 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -28,7 +28,7 @@ DEF("machine", HAS_ARG, QEMU_OPTION_machine, \ "-machine [type=]name[,prop[=value][,...]]\n" " selects emulated machine ('-machine help' for list)\n" " property accel=accel1[:accel2[:...]] selects accelerator\n" - " supported accelerators are kvm, xen, hvf, nvmm, whpx or tcg (default: tcg)\n" + " supported accelerators are kvm, xen, hvf, nvmm, whpx, mshv or tcg (default: tcg)\n" " vmport=on|off|auto controls emulation of vmport (default: auto)\n" " dump-guest-core=on|off include guest memory in a core dump (default=on)\n" " mem-merge=on|off controls memory merge support (default: on)\n" @@ -66,10 +66,10 @@ SRST ``accel=accels1[:accels2[:...]]`` This is used to enable an accelerator. Depending on the target - architecture, kvm, xen, hvf, nvmm, whpx or tcg can be available. - By default, tcg is used. 
If there is more than one accelerator - specified, the next one is used if the previous one fails to - initialize. + architecture, kvm, xen, hvf, nvmm, whpx, mshv or tcg can be + available. By default, tcg is used. If there is more than one + accelerator specified, the next one is used if the previous one + fails to initialize. ``vmport=on|off|auto`` Enables emulation of VMWare IO port, for vmmouse etc. auto says @@ -226,7 +226,7 @@ ERST DEF("accel", HAS_ARG, QEMU_OPTION_accel, "-accel [accel=]accelerator[,prop[=value][,...]]\n" - " select accelerator (kvm, xen, hvf, nvmm, whpx or tcg; use 'help' for a list)\n" + " select accelerator (kvm, xen, hvf, nvmm, whpx, mshv or tcg; use 'help' for a list)\n" " igd-passthru=on|off (enable Xen integrated Intel graphics passthrough, default=off)\n" " kernel-irqchip=on|off|split controls accelerated irqchip support (default=on)\n" " kvm-shadow-mem=size of KVM shadow MMU in bytes\n" @@ -241,8 +241,8 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel, SRST ``-accel name[,prop=value[,...]]`` This is used to enable an accelerator. Depending on the target - architecture, kvm, xen, hvf, nvmm, whpx or tcg can be available. By - default, tcg is used. If there is more than one accelerator + architecture, kvm, xen, hvf, nvmm, whpx, mshv or tcg can be available. + By default, tcg is used. If there is more than one accelerator specified, the next one is used if the previous one fails to initialize. @@ -1231,6 +1231,36 @@ SRST ``aw-bits=val`` (val between 32 and 64, default depends on machine) This decides the address width of the IOVA address space. +``-device arm-smmuv3,primary-bus=id`` + This is only supported by ``-machine virt`` (ARM). + + ``primary-bus=id`` + Accepts either the default root complex (pcie.0) or a + pxb-pcie based root complex. + +``-device amd-iommu[,option=...]`` + Enables emulation of an AMD-Vi I/O Memory Management Unit (IOMMU). 
+ Only available with ``-machine q35``, it supports the following options: + + ``dma-remap=on|off`` (default: off) + Support for DMA address translation and access permission checking for + guests attaching passthrough devices to paging domains, using the AMD v1 + I/O Page Table format. This enables ``-device vfio-pci,...`` to work + correctly with a guest using the DMA remapping feature of the vIOMMU. + + ``intremap=on|off`` (default: auto) + Generic x86 IOMMU functionality implemented by ``amd-iommu`` device. + Enables interrupt remapping feature in guests, which is also required to + enable x2apic support. + Currently only available with ``kernel-irqchip=off|split``, it is + automatically enabled when either of those modes is in use, and disabled + with ``kernel-irqchip=on``. + + ``xtsup=on|off`` (default: off) + Interrupt remapping table supports x2apic mode, enabling the use of + 128-bit IRTE format with 32-bit destination field by the guest. Required + to support routing interrupts to vCPUs with APIC IDs larger than 0xff. + ERST DEF("name", HAS_ARG, QEMU_OPTION_name, @@ -2693,7 +2723,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, "-smbios file=binary\n" " load SMBIOS entry from binary file\n" "-smbios type=0[,vendor=str][,version=str][,date=str][,release=%d.%d]\n" - " [,uefi=on|off]\n" + " [,uefi=on|off][,vm=on|off]\n" " specify SMBIOS type 0 fields\n" "-smbios type=1[,manufacturer=str][,product=str][,version=str][,serial=str]\n" " [,uuid=uuid][,sku=str][,family=str]\n" @@ -3317,8 +3347,8 @@ SRST Note that a SAMBA server must be installed on the host OS. - ``hostfwd=[tcp|udp]:[hostaddr]:hostport-[guestaddr]:guestport`` - Redirect incoming TCP or UDP connections to the host port + ``hostfwd=[tcp|udp|unix]:[[hostaddr]:hostport|hostpath]-[guestaddr]:guestport`` + Redirect incoming TCP, UDP or UNIX connections to the host port hostport to the guest IP address guestaddr on guest port guestport. 
If guestaddr is not specified, its value is x.x.x.15 (default first address given by the built-in DHCP server). By @@ -3348,6 +3378,13 @@ SRST Then when you use on the host ``telnet localhost 5555``, you connect to the guest telnet server. + To redirect host unix socket /tmp/vm to guest tcp socket 23 use + following: + + .. parsed-literal:: + # on the host + |qemu_system| -nic user,hostfwd=unix:/tmp/vm-:23 + ``guestfwd=[tcp]:server:port-dev``; \ ``guestfwd=[tcp]:server:port-cmd:command`` Forward guest TCP connections to the IP address server on port port to the character device dev or to a program executed by @@ -5347,13 +5384,6 @@ SRST specified, the former is passed to semihosting as it always takes precedence. ERST -DEF("old-param", 0, QEMU_OPTION_old_param, - "-old-param old param mode\n", QEMU_ARCH_ARM) -SRST -``-old-param`` - Old param mode (ARM only). -ERST - DEF("sandbox", HAS_ARG, QEMU_OPTION_sandbox, \ "-sandbox on[,obsolete=allow|deny][,elevateprivileges=allow|deny|children]\n" \ " [,spawn=allow|deny][,resourcecontrol=allow|deny]\n" \ diff --git a/qga/channel-posix.c b/qga/channel-posix.c index 465d688ecb886..9ccc8b7bd1449 100644 --- a/qga/channel-posix.c +++ b/qga/channel-posix.c @@ -28,6 +28,7 @@ static gboolean ga_channel_listen_accept(GIOChannel *channel, GAChannel *c = data; int ret, client_fd; bool accepted = false; + Error *err = NULL; g_assert(channel != NULL); @@ -36,7 +37,11 @@ static gboolean ga_channel_listen_accept(GIOChannel *channel, g_warning("error converting fd to gsocket: %s", strerror(errno)); goto out; } - qemu_socket_set_nonblock(client_fd); + if (!qemu_set_blocking(client_fd, false, &err)) { + g_warning("%s", error_get_pretty(err)); + error_free(err); + goto out; + } ret = ga_channel_client_add(c, client_fd); if (ret) { g_warning("error setting up connection"); diff --git a/qga/commands-linux.c b/qga/commands-linux.c index 9e8a934b9a610..4a09ddc760cc7 100644 --- a/qga/commands-linux.c +++ b/qga/commands-linux.c @@ -400,10 +400,10 
@@ static bool build_guest_fsinfo_for_pci_dev(char const *syspath, Error **errp) { unsigned int pci[4], host, hosts[8], tgt[3]; - int i, nhosts = 0, pcilen; + int i, offset, nhosts = 0, pcilen; GuestPCIAddress *pciaddr = disk->pci_controller; bool has_ata = false, has_host = false, has_tgt = false; - char *p, *q, *driver = NULL; + char *p, *driver = NULL; bool ret = false; p = strstr(syspath, "/devices/pci"); @@ -445,13 +445,13 @@ static bool build_guest_fsinfo_for_pci_dev(char const *syspath, p = strstr(syspath, "/ata"); if (p) { - q = p + 4; + offset = 4; has_ata = true; } else { p = strstr(syspath, "/host"); - q = p + 5; + offset = 5; } - if (p && sscanf(q, "%u", &host) == 1) { + if (p && sscanf(p + offset, "%u", &host) == 1) { has_host = true; nhosts = build_hosts(syspath, p, has_ata, hosts, ARRAY_SIZE(hosts), errp); @@ -1400,20 +1400,22 @@ static bool linux_sys_state_supports_mode(SuspendMode mode, Error **errp) static void linux_sys_state_suspend(SuspendMode mode, Error **errp) { - g_autoptr(GError) local_gerr = NULL; const char *sysfile_strs[3] = {"disk", "mem", NULL}; const char *sysfile_str = sysfile_strs[mode]; + int fd; if (!sysfile_str) { error_setg(errp, "unknown guest suspend mode"); return; } - if (!g_file_set_contents(LINUX_SYS_STATE_FILE, sysfile_str, - -1, &local_gerr)) { - error_setg(errp, "suspend: cannot write to '%s': %s", - LINUX_SYS_STATE_FILE, local_gerr->message); - return; + fd = open(LINUX_SYS_STATE_FILE, O_WRONLY); + if (fd < 0 || write(fd, sysfile_str, strlen(sysfile_str)) < 0) { + error_setg(errp, "suspend: cannot write to '%s': %m", + LINUX_SYS_STATE_FILE); + } + if (fd >= 0) { + close(fd); } } diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 12bc086d79eac..5070f27d7586f 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -503,9 +503,8 @@ int64_t qmp_guest_file_open(const char *path, const char *mode, /* set fd non-blocking to avoid common use cases (like reading from a * named pipe) from hanging the agent 
*/ - if (!g_unix_set_fd_nonblocking(fileno(fh), true, NULL)) { + if (!qemu_set_blocking(fileno(fh), false, errp)) { fclose(fh); - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); return -1; } diff --git a/qga/commands.c b/qga/commands.c index 5a5fad31f89ee..5f20af25d3476 100644 --- a/qga/commands.c +++ b/qga/commands.c @@ -205,13 +205,15 @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **errp) #endif if (gei->out.length > 0) { ges->out_data = g_base64_encode(gei->out.data, gei->out.length); - ges->has_out_truncated = gei->out.truncated; + ges->has_out_truncated = true; + ges->out_truncated = gei->out.truncated; } g_free(gei->out.data); if (gei->err.length > 0) { ges->err_data = g_base64_encode(gei->err.data, gei->err.length); - ges->has_err_truncated = gei->err.truncated; + ges->has_err_truncated = true; + ges->err_truncated = gei->err.truncated; } g_free(gei->err.data); diff --git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs index df572adb4ad53..32b8308728dcb 100644 --- a/qga/installer/qemu-ga.wxs +++ b/qga/installer/qemu-ga.wxs @@ -151,6 +151,14 @@ Return="check" > + + @@ -174,8 +182,19 @@ - Installed - NOT REMOVE + + + + + + + NOT REMOVE + + + NOT REMOVE + + + Installed diff --git a/qga/main.c b/qga/main.c index 6c02f3ec386b3..dd1c216f9a13a 100644 --- a/qga/main.c +++ b/qga/main.c @@ -1512,8 +1512,12 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) if (!channel_init(s, s->config->method, s->config->channel_path, s->socket_activation ? 
FIRST_SOCKET_ACTIVATION_FD : -1)) { - g_critical("failed to initialize guest agent channel"); - return NULL; + if (s->config->retry_path) { + g_info("failed to initialize guest agent channel, will retry"); + } else { + g_critical("failed to initialize guest agent channel"); + return NULL; + } } if (config->daemonize) { @@ -1563,7 +1567,7 @@ static void cleanup_agent(GAState *s) static int run_agent_once(GAState *s) { if (!s->channel && - channel_init(s, s->config->method, s->config->channel_path, + !channel_init(s, s->config->method, s->config->channel_path, s->socket_activation ? FIRST_SOCKET_ACTIVATION_FD : -1)) { g_critical("failed to initialize guest agent channel"); return EXIT_FAILURE; diff --git a/qga/qapi-schema.json b/qga/qapi-schema.json index 6d770f7b8e0a5..8162d888bb3e5 100644 --- a/qga/qapi-schema.json +++ b/qga/qapi-schema.json @@ -96,11 +96,11 @@ # In cases where a partial stale response was previously received by # the client, this cannot always be done reliably. One particular # scenario being if qemu-ga responses are fed character-by-character -# into a JSON parser. In these situations, using guest-sync-delimited +# into a JSON parser. In these situations, using `guest-sync-delimited` # may be optimal. # # For clients that fetch responses line by line and convert them to -# JSON objects, guest-sync should be sufficient, but note that in +# JSON objects, `guest-sync` should be sufficient, but note that in # cases where the channel is dirty some attempts at parsing the # response may result in a parser error. # @@ -202,8 +202,6 @@ # # Get some information about the guest agent. # -# Returns: @GuestAgentInfo -# # Since: 0.15.0 ## { 'command': 'guest-info', @@ -219,7 +217,7 @@ # # This command does NOT return a response on success. 
Success # condition is indicated by the VM exiting with a zero exit status or, -# when running with --no-shutdown, by issuing the query-status QMP +# when running with --no-shutdown, by issuing the `query-status` QMP # command to confirm the VM status is "shutdown". # # Since: 0.15.0 @@ -249,7 +247,7 @@ # # Close an open file in the guest # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # Since: 0.15.0 ## @@ -280,13 +278,11 @@ # As this command is just for limited, ad-hoc debugging, such as log # file access, the number of bytes to read is limited to 48 MB. # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # @count: maximum number of bytes to read (default is 4KB, maximum is # 48MB) # -# Returns: @GuestFileRead -# # Since: 0.15.0 ## { 'command': 'guest-file-read', @@ -313,15 +309,13 @@ # # Write to an open file in the guest. # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # @buf-b64: base64-encoded string representing data to be written # # @count: bytes to write (actual bytes, after base64-decode), default # is all content in buf-b64 buffer after base64 decoding # -# Returns: @GuestFileWrite -# # Since: 0.15.0 ## { 'command': 'guest-file-write', @@ -346,7 +340,7 @@ ## # @QGASeek: # -# Symbolic names for use in @guest-file-seek +# Symbolic names for use in `guest-file-seek` # # @set: Set to the specified offset (same effect as 'whence':0) # @@ -361,7 +355,7 @@ ## # @GuestFileWhence: # -# Controls the meaning of offset to @guest-file-seek. +# Controls the meaning of offset to `guest-file-seek`. # # @value: Integral value (0 for set, 1 for cur, 2 for end), available # for historical reasons, and might differ from the host's or @@ -381,14 +375,12 @@ # current file position afterward. Also encapsulates ftell()'s # functionality, with offset=0 and whence=1. 
# -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # @offset: bytes to skip over in the file stream # # @whence: Symbolic or numeric code for interpreting offset # -# Returns: @GuestFileSeek -# # Since: 0.15.0 ## { 'command': 'guest-file-seek', @@ -401,7 +393,7 @@ # # Write file changes buffered in userspace to disk/kernel buffers # -# @handle: filehandle returned by guest-file-open +# @handle: filehandle returned by `guest-file-open` # # Since: 0.15.0 ## @@ -428,9 +420,6 @@ # # Get guest fsfreeze state. # -# Returns: GuestFsfreezeStatus ("thawed", "frozen", etc., as defined -# below) -# # .. note:: This may fail to properly report the current state as a # result of some other guest processes having issued an fs # freeze/thaw. @@ -445,12 +434,12 @@ # @guest-fsfreeze-freeze: # # Sync and freeze all freezable, local guest filesystems. If this -# command succeeded, you may call @guest-fsfreeze-thaw later to +# command succeeded, you may call `guest-fsfreeze-thaw` later to # unfreeze. # # On error, all filesystems will be thawed. If no filesystems are -# frozen as a result of this call, then @guest-fsfreeze-status will -# remain "thawed" and calling @guest-fsfreeze-thaw is not necessary. +# frozen as a result of this call, then `guest-fsfreeze-status` will +# remain "thawed" and calling `guest-fsfreeze-thaw` is not necessary. # # Returns: Number of file systems currently frozen. # @@ -468,7 +457,7 @@ # @guest-fsfreeze-freeze-list: # # Sync and freeze specified guest filesystems. See also -# @guest-fsfreeze-freeze. +# `guest-fsfreeze-freeze`. # # On error, all filesystems will be thawed. # @@ -493,7 +482,7 @@ # Returns: Number of file systems thawed by this call # # .. 
note:: If the return value does not match the previous call to -# guest-fsfreeze-freeze, this likely means some freezable filesystems +# `guest-fsfreeze-freeze`, this likely means some freezable filesystems # were unfrozen before this call, and that the filesystem state may # have changed before issuing this command. # @@ -524,7 +513,7 @@ ## # @GuestFilesystemTrimResponse: # -# @paths: list of @GuestFilesystemTrimResult per path that was trimmed +# @paths: list of `GuestFilesystemTrimResult` per path that was trimmed # # Since: 2.4 ## @@ -545,8 +534,7 @@ # discarded. The default value is zero, meaning "discard every # free block". # -# Returns: A @GuestFilesystemTrimResponse which contains the status of -# all trimmed paths. (since 2.4) +# Returns: status of all trimmed paths. (since 2.4) # # Since: 1.2 ## @@ -569,7 +557,7 @@ # # This command does NOT return a response on success. There is a high # chance the command succeeded if the VM exits with a zero exit status -# or, when running with --no-shutdown, by issuing the query-status QMP +# or, when running with --no-shutdown, by issuing the `query-status` QMP # command to to confirm the VM status is "shutdown". However, the VM # could also exit (or set its status to "shutdown") due to other # reasons. @@ -577,7 +565,7 @@ # Errors: # - If suspend to disk is not supported, Unsupported # -# .. note:: It's strongly recommended to issue the guest-sync command +# .. note:: It's strongly recommended to issue the `guest-sync` command # before sending commands when the guest resumes. # # Since: 1.1 @@ -597,8 +585,8 @@ # - pm-utils (via pm-hibernate) # - manual write into sysfs # -# IMPORTANT: guest-suspend-ram requires working wakeup support in -# QEMU. You should check QMP command query-current-machine returns +# IMPORTANT: `guest-suspend-ram` requires working wakeup support in +# QEMU. You should check QMP command `query-current-machine` returns # wakeup-suspend-support: true before issuing this command. 
Failure # in doing so can result in a suspended guest that QEMU will not be # able to awaken, forcing the user to power cycle the guest to bring @@ -607,14 +595,14 @@ # This command does NOT return a response on success. There are two # options to check for success: # -# 1. Wait for the SUSPEND QMP event from QEMU -# 2. Issue the query-status QMP command to confirm the VM status is +# 1. Wait for the `SUSPEND` QMP event from QEMU +# 2. Issue the `query-status` QMP command to confirm the VM status is # "suspended" # # Errors: # - If suspend to ram is not supported, Unsupported # -# .. note:: It's strongly recommended to issue the guest-sync command +# .. note:: It's strongly recommended to issue the `guest-sync` command # before sending commands when the guest resumes. # # Since: 1.1 @@ -633,8 +621,8 @@ # - systemd hybrid-sleep # - pm-utils (via pm-suspend-hybrid) # -# IMPORTANT: guest-suspend-hybrid requires working wakeup support in -# QEMU. You should check QMP command query-current-machine returns +# IMPORTANT: `guest-suspend-hybrid` requires working wakeup support in +# QEMU. You should check QMP command `query-current-machine` returns # wakeup-suspend-support: true before issuing this command. Failure # in doing so can result in a suspended guest that QEMU will not be # able to awaken, forcing the user to power cycle the guest to bring @@ -643,14 +631,14 @@ # This command does NOT return a response on success. There are two # options to check for success: # -# 1. Wait for the SUSPEND QMP event from QEMU -# 2. Issue the query-status QMP command to confirm the VM status is +# 1. Wait for the `SUSPEND` QMP event from QEMU +# 2. Issue the `query-status` QMP command to confirm the VM status is # "suspended" # # Errors: # - If hybrid suspend is not supported, Unsupported # -# .. note:: It's strongly recommended to issue the guest-sync command +# .. note:: It's strongly recommended to issue the `guest-sync` command # before sending commands when the guest resumes. 
# # Since: 1.1 @@ -749,8 +737,6 @@ # # Get list of guest IP addresses, MAC addresses and netmasks. # -# Returns: List of GuestNetworkInterface -# # Since: 1.1 ## { 'command': 'guest-network-get-interfaces', @@ -807,7 +793,7 @@ # There's no restriction on list length or on repeating the same # @logical-id (with possibly different @online field). Preferably # the input list should describe a modified subset of -# @guest-get-vcpus' return value. +# `guest-get-vcpus`' return value. # # Returns: The length of the initial sublist that has been # successfully processed. The guest agent maximizes this value. @@ -1083,7 +1069,7 @@ # # Returns: The list of filesystems information mounted in the guest. # The returned mountpoints may be specified to -# @guest-fsfreeze-freeze-list. Network filesystems (such as CIFS +# `guest-fsfreeze-freeze-list`. Network filesystems (such as CIFS # and NFS) are not listed. # # Since: 2.2 @@ -1185,7 +1171,7 @@ ## # @GuestMemoryBlockResponse: # -# @phys-index: same with the 'phys-index' member of @GuestMemoryBlock. +# @phys-index: same with the 'phys-index' member of `GuestMemoryBlock`. # # @response: the result of memory block operation. # @@ -1215,11 +1201,11 @@ # guest-supported identifiers. There's no restriction on list # length or on repeating the same @phys-index (with possibly # different @online field). Preferably the input list should -# describe a modified subset of @guest-get-memory-blocks' return +# describe a modified subset of `guest-get-memory-blocks`' return # value. # # Returns: The operation results, it is a list of -# @GuestMemoryBlockResponse, which is corresponding to the input +# `GuestMemoryBlockResponse`, which is corresponding to the input # list. # # Note: it will return an empty list if the @mem-blks list was @@ -1251,8 +1237,6 @@ # # Get information relating to guest memory blocks. 
# -# Returns: @GuestMemoryBlockInfo -# # Since: 2.3 ## { 'command': 'guest-get-memory-block-info', @@ -1274,7 +1258,7 @@ # # @err-data: base64-encoded stderr of the process. Note: @out-data # and @err-data are present only if 'capture-output' was specified -# for 'guest-exec'. This field will only be populated after the +# for `guest-exec`. This field will only be populated after the # process exits. # # @out-truncated: true if stdout was not fully captured due to size @@ -1293,12 +1277,10 @@ # @guest-exec-status: # # Check status of process associated with PID retrieved via -# guest-exec. Reap the process and associated metadata if it has +# `guest-exec`. Reap the process and associated metadata if it has # exited. # -# @pid: pid returned from guest-exec -# -# Returns: GuestExecStatus +# @pid: pid returned from `guest-exec` # # Since: 2.5 ## @@ -1319,7 +1301,7 @@ ## # @GuestExecCaptureOutputMode: # -# An enumeration of guest-exec capture modes. +# An enumeration of `guest-exec` capture modes. # # @none: do not capture any output # @@ -1328,7 +1310,7 @@ # @stderr: only capture stderr # # @separated: capture both stdout and stderr, but separated into -# GuestExecStatus out-data and err-data, respectively +# `GuestExecStatus` out-data and err-data, respectively # # @merged: capture both stdout and stderr, but merge together into # out-data. Not effective on windows guests. @@ -1342,10 +1324,10 @@ ## # @GuestExecCaptureOutput: # -# Controls what guest-exec output gets captures. +# Controls what `guest-exec` output gets captures. # # @flag: captures both stdout and stderr if true. Equivalent to -# GuestExecCaptureOutputMode::all. (since 2.5) +# `GuestExecCaptureOutputMode`::all. (since 2.5) # # @mode: capture mode; preferred interface # @@ -1458,8 +1440,6 @@ # # Retrieves the timezone information from the guest. # -# Returns: A GuestTimezone dictionary. 
-# # Since: 2.10 ## { 'command': 'guest-get-timezone', @@ -1533,8 +1513,6 @@ # # Retrieve guest operating system information # -# Returns: @GuestOSInfo -# # Since: 2.10 ## { 'command': 'guest-get-osinfo', @@ -1604,8 +1582,6 @@ # # Retrieve information about device drivers in Windows guest # -# Returns: @GuestDeviceInfo -# # Since: 5.2 ## { 'command': 'guest-get-devices', @@ -1633,8 +1609,6 @@ # # @username: the user account to add the authorized keys # -# Returns: @GuestAuthorizedKeys -# # Since: 5.2 ## { 'command': 'guest-ssh-get-authorized-keys', @@ -1966,6 +1940,7 @@ # @guest-network-get-route: # # Retrieve information about route of network. +# # Returns: List of route info of guest. # # Since: 9.1 diff --git a/qga/vss-win32.c b/qga/vss-win32.c index f444a25a70e40..b272bfc7829c4 100644 --- a/qga/vss-win32.c +++ b/qga/vss-win32.c @@ -157,6 +157,8 @@ void qga_vss_fsfreeze(int *nr_volume, bool freeze, .errp = errp, }; + *nr_volume = 0; + g_assert(errp); /* requester.cpp requires it */ func = (QGAVSSRequesterFunc)GetProcAddress(provider_lib, func_name); if (!func) { diff --git a/qga/vss-win32/meson.build b/qga/vss-win32/meson.build index 0ac918910b427..a6b810f12a570 100644 --- a/qga/vss-win32/meson.build +++ b/qga/vss-win32/meson.build @@ -13,13 +13,11 @@ qga_vss = shared_module( link_args: link_args, vs_module_defs: 'qga-vss.def', dependencies: [ - glib, socket, cc.find_library('ole32'), cc.find_library('oleaut32'), cc.find_library('shlwapi'), - cc.find_library('uuid'), - cc.find_library('intl') + cc.find_library('uuid') ] ) diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp index 4401d55e3a41b..5615955b6f3e8 100644 --- a/qga/vss-win32/requester.cpp +++ b/qga/vss-win32/requester.cpp @@ -28,8 +28,9 @@ #define err_set(e, err, fmt, ...) 
{ \ (e)->error_setg_win32_wrapper((e)->errp, __FILE__, __LINE__, __func__, \ - err, fmt, ## __VA_ARGS__); \ - qga_debug(fmt, ## __VA_ARGS__); \ + err, fmt ": Windows error 0x%lx", \ + ## __VA_ARGS__, err); \ + qga_debug(fmt ": Windows error 0x%lx", ## __VA_ARGS__, err); \ } /* Bad idea, works only when (e)->errp != NULL: */ #define err_is_set(e) ((e)->errp && *(e)->errp) @@ -347,7 +348,12 @@ void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) goto out; } - assert(pCreateVssBackupComponents != NULL); + if (!pCreateVssBackupComponents) { + err_set(errset, (HRESULT)ERROR_PROC_NOT_FOUND, + "CreateVssBackupComponents proc address absent. Did you call requester_init()?"); + goto out; + } + hr = pCreateVssBackupComponents(&vss_ctx.pVssbc); if (FAILED(hr)) { err_set(errset, hr, "failed to create VSS backup components"); @@ -579,8 +585,16 @@ void requester_thaw(int *num_vols, void *mountpints, ErrorSet *errset) /* Tell the provider that the snapshot is finished. */ SetEvent(vss_ctx.hEventThaw); - assert(vss_ctx.pVssbc); - assert(vss_ctx.pAsyncSnapshot); + if (!vss_ctx.pVssbc) { + err_set(errset, (HRESULT)VSS_E_BAD_STATE, + "CreateVssBackupComponents is missing. Did you freeze the volumes?"); + return; + } + if (!vss_ctx.pAsyncSnapshot) { + err_set(errset, (HRESULT)VSS_E_BAD_STATE, + "AsyncSnapshot set is missing. Did you freeze the volumes?"); + return; + } HRESULT hr = WaitForAsync(vss_ctx.pAsyncSnapshot); switch (hr) { diff --git a/qobject/qdict.c b/qobject/qdict.c index a90ac9ae2f89e..0dafe6d421793 100644 --- a/qobject/qdict.c +++ b/qobject/qdict.c @@ -209,6 +209,19 @@ int64_t qdict_get_int(const QDict *qdict, const char *key) return qnum_get_int(qobject_to(QNum, qdict_get(qdict, key))); } +/** + * qdict_get_uint(): Get an unsigned integer mapped by 'key' + * + * This function assumes that 'key' exists and it stores a + * QNum representable as uint. + * + * Return unsigned integer mapped by 'key'. 
+ */ +uint64_t qdict_get_uint(const QDict *qdict, const char *key) +{ + return qnum_get_uint(qobject_to(QNum, qdict_get(qdict, key))); +} + /** * qdict_get_bool(): Get a bool mapped by 'key' * diff --git a/replay/replay-events.c b/replay/replay-events.c index 8959da9f1fab7..a96e47e774036 100644 --- a/replay/replay-events.c +++ b/replay/replay-events.c @@ -118,7 +118,8 @@ void replay_add_event(ReplayAsyncEventKind event_kind, g_assert(replay_mutex_locked()); QTAILQ_INSERT_TAIL(&events_list, event, events); - qemu_cpu_kick(first_cpu); + /* Kick the TCG thread out of tcg_cpu_exec(). */ + cpu_exit(first_cpu); } void replay_bh_schedule_event(QEMUBH *bh) diff --git a/replay/replay.c b/replay/replay.c index a3e24c967ae9f..b2121788c1d72 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -263,6 +263,8 @@ bool replay_has_interrupt(void) void replay_shutdown_request(ShutdownCause cause) { + replay_save_instructions(); + if (replay_mode == REPLAY_MODE_RECORD) { g_assert(replay_mutex_locked()); replay_put_event(EVENT_SHUTDOWN + cause); diff --git a/roms/Makefile b/roms/Makefile index beff58d9d50c5..4c8793c5bd454 100644 --- a/roms/Makefile +++ b/roms/Makefile @@ -68,6 +68,7 @@ default help: @echo " u-boot.sam460 -- update u-boot.sam460" @echo " npcm7xx_bootrom -- update vbootrom for npcm7xx" @echo " npcm8xx_bootrom -- update vbootrom for npcm8xx" + @echo " ast27x0_bootrom -- update vbootrom for ast27x0" @echo " efi -- update UEFI (edk2) platform firmware" @echo " opensbi32-generic -- update OpenSBI for 32-bit generic machine" @echo " opensbi64-generic -- update OpenSBI for 64-bit generic machine" @@ -193,12 +194,16 @@ qboot: cp qboot/build/bios.bin ../pc-bios/qboot.rom npcm7xx_bootrom: - $(MAKE) -C vbootrom CROSS_COMPILE=$(arm_cross_prefix) - cp vbootrom/npcm7xx_bootrom.bin ../pc-bios/npcm7xx_bootrom.bin + $(MAKE) -C vbootrom/npcm7xx CROSS_COMPILE=$(arm_cross_prefix) + cp vbootrom/npcm7xx/npcm7xx_bootrom.bin ../pc-bios/npcm7xx_bootrom.bin npcm8xx_bootrom: - $(MAKE) -C 
vbootrom CROSS_COMPILE=$(aarch64_cross_prefix) - cp vbootrom/npcm8xx_bootrom.bin ../pc-bios/npcm8xx_bootrom.bin + $(MAKE) -C vbootrom/npcm8xx CROSS_COMPILE=$(aarch64_cross_prefix) + cp vbootrom/npcm8xx/npcm8xx_bootrom.bin ../pc-bios/npcm8xx_bootrom.bin + +ast27x0_bootrom: + $(MAKE) -C vbootrom/ast27x0 CROSS_COMPILE=$(aarch64_cross_prefix) + cp vbootrom/ast27x0/ast27x0_bootrom.bin ../pc-bios/ast27x0_bootrom.bin hppa-firmware: $(MAKE) -C seabios-hppa parisc diff --git a/roms/opensbi b/roms/opensbi index 43cace6c3671e..a32a91069119e 160000 --- a/roms/opensbi +++ b/roms/opensbi @@ -1 +1 @@ -Subproject commit 43cace6c3671e5172d0df0a8963e552bb04b7b20 +Subproject commit a32a91069119e7a5aa31e6bc51d5e00860be3d80 diff --git a/roms/vbootrom b/roms/vbootrom index 1287b6e42e839..183c9ff8056b7 160000 --- a/roms/vbootrom +++ b/roms/vbootrom @@ -1 +1 @@ -Subproject commit 1287b6e42e839ba2ab0f06268c5b53ae60df3537 +Subproject commit 183c9ff8056b7946db1ae49cc23e8980ac413174 diff --git a/rust/Cargo.lock b/rust/Cargo.lock index b785c718f315b..0c1df625df1de 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -14,6 +14,16 @@ version = "1.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c84fc003e338a6f69fbd4f7fe9f92b535ff13e9af8997f3b14b6ddff8b1df46d" +[[package]] +name = "attrs" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a207d40f43de65285f3de0509bb6cb16bc46098864fce957122bbacce327e5f" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name = "bilge" version = "0.2.0" @@ -41,7 +51,44 @@ dependencies = [ name = "bits" version = "0.1.0" dependencies = [ - "qemu_api_macros", + "qemu_macros", +] + +[[package]] +name = "bql" +version = "0.1.0" +dependencies = [ + "glib-sys", +] + +[[package]] +name = "cfg-expr" +version = "0.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a2c5f3bf25ec225351aa1c8e230d04d880d3bd89dea133537dafad4ae291e5c" +dependencies = [ + 
"smallvec", + "target-lexicon", +] + +[[package]] +name = "chardev" +version = "0.1.0" +dependencies = [ + "bql", + "common", + "glib-sys", + "migration", + "qom", + "util", +] + +[[package]] +name = "common" +version = "0.1.0" +dependencies = [ + "libc", + "qemu_macros", ] [[package]] @@ -50,6 +97,12 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + [[package]] name = "foreign" version = "0.3.1" @@ -59,12 +112,64 @@ dependencies = [ "libc", ] +[[package]] +name = "glib-sys" +version = "0.21.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d09d3d0fddf7239521674e57b0465dfbd844632fec54f059f7f56112e3f927e1" +dependencies = [ + "libc", + "system-deps", +] + +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hpet" version = "0.1.0" dependencies = [ - "qemu_api", - "qemu_api_macros", + "bql", + "common", + "hwcore", + "migration", + "qom", + "system", + "util", +] + +[[package]] +name = "hwcore" +version = "0.1.0" +dependencies = [ + "bql", + "chardev", + "common", + "glib-sys", + "migration", + "qemu_macros", + "qom", + "system", + "util", +] + +[[package]] +name = "indexmap" +version = "2.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" +dependencies = [ + "equivalent", + 
"hashbrown", ] [[package]] @@ -82,6 +187,29 @@ version = "0.2.162" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "18d287de67fe55fd7e1581fe933d965a5a9477b38e949cfa9f8574ef01506398" +[[package]] +name = "memchr" +version = "2.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" + +[[package]] +name = "migration" +version = "0.1.0" +dependencies = [ + "bql", + "common", + "glib-sys", + "qemu_macros", + "util", +] + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + [[package]] name = "pl011" version = "0.1.0" @@ -89,8 +217,16 @@ dependencies = [ "bilge", "bilge-impl", "bits", - "qemu_api", - "qemu_api_macros", + "bql", + "chardev", + "common", + "glib-sys", + "hwcore", + "migration", + "qom", + "system", + "trace", + "util", ] [[package]] @@ -118,30 +254,33 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.84" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6" +checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" dependencies = [ "unicode-ident", ] [[package]] -name = "qemu_api" +name = "qemu_macros" version = "0.1.0" dependencies = [ - "anyhow", - "foreign", - "libc", - "qemu_api_macros", + "attrs", + "proc-macro2", + "quote", + "syn", ] [[package]] -name = "qemu_api_macros" +name = "qom" version = "0.1.0" dependencies = [ - "proc-macro2", - "quote", - "syn", + "bql", + "common", + "glib-sys", + "migration", + "qemu_macros", + "util", ] [[package]] @@ -153,25 +292,179 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "serde" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" +dependencies = [ + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", +] + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + [[package]] name = "syn" -version = "2.0.66" +version = "2.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" +checksum = "17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "system" +version = "0.1.0" +dependencies = [ + "common", + "glib-sys", + "qom", + "util", +] + +[[package]] +name = "system-deps" +version = "7.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4be53aa0cba896d2dc615bd42bbc130acdcffa239e0a2d965ea5b3b2a86ffdb" +dependencies = [ + "cfg-expr", + "heck", + "pkg-config", + "toml", + "version-compare", +] + +[[package]] +name = "target-lexicon" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"e502f78cdbb8ba4718f566c418c52bc729126ffd16baee5baa718cf25dd5a69a" + +[[package]] +name = "tests" +version = "0.1.0" +dependencies = [ + "bql", + "chardev", + "common", + "hwcore", + "migration", + "qom", + "system", + "util", +] + +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "winnow", +] + +[[package]] +name = "trace" +version = "0.1.0" +dependencies = [ + "libc", +] + [[package]] name = "unicode-ident" version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +[[package]] +name = "util" +version = "0.1.0" +dependencies = [ + "anyhow", + "common", + "foreign", + "glib-sys", + "libc", +] + +[[package]] +name = "version-compare" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "852e951cb7832cb45cb1169900d19760cfa39b82bc0ea9c0e5a14ae88411c98b" + [[package]] name = "version_check" version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" + +[[package]] +name = "winnow" +version = "0.7.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +dependencies = [ + "memchr", +] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 0868e1b426808..783e626802c92 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -2,10 +2,18 @@ resolver = "2" members = [ "bits", - "qemu-api-macros", - "qemu-api", + "bql", + "common", + "migration", + "qemu-macros", + "qom", + "system", + "hw/core", "hw/char/pl011", "hw/timer/hpet", + "trace", + "util", + "tests", ] [workspace.package] @@ -13,12 +21,18 @@ edition = "2021" homepage = "https://www.qemu.org" license = "GPL-2.0-or-later" repository = "https://gitlab.com/qemu-project/qemu/" -rust-version = "1.77.0" +# don't forget to update docs/devel/rust.rst msrv +rust-version = "1.83.0" +authors = ["The QEMU Project Developers "] + +[workspace.dependencies] +anyhow = "~1.0" +foreign = "~0.3.1" +libc = "0.2.162" +glib-sys = { version = "0.21.2", features = ["v2_66"] } [workspace.lints.rust] -unexpected_cfgs = { level = "deny", check-cfg = [ - 'cfg(MESON)', 'cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)', -] } +unexpected_cfgs = { level = "deny", check-cfg = ['cfg(MESON)'] } # Occasionally, we may need to silence warnings and clippy lints that # were only introduced in newer Rust compiler versions. 
Do not croak @@ -53,7 +67,6 @@ as_ptr_cast_mut = "deny" as_underscore = "deny" assertions_on_result_states = "deny" bool_to_int_with_if = "deny" -borrow_as_ptr = "deny" cast_lossless = "deny" dbg_macro = "deny" debug_assert_with_mut_call = "deny" diff --git a/rust/qemu-api/src/bindings.rs b/rust/bindings/src/lib.rs similarity index 89% rename from rust/qemu-api/src/bindings.rs rename to rust/bindings/src/lib.rs index b8104dea8bea4..5bf03b13706e3 100644 --- a/rust/qemu-api/src/bindings.rs +++ b/rust/bindings/src/lib.rs @@ -6,7 +6,6 @@ non_camel_case_types, non_snake_case, non_upper_case_globals, - unnecessary_transmutes, unsafe_op_in_unsafe_fn, clippy::pedantic, clippy::restriction, @@ -14,8 +13,7 @@ clippy::missing_const_for_fn, clippy::ptr_offset_with_cast, clippy::useless_transmute, - clippy::missing_safety_doc, - clippy::too_many_arguments + clippy::missing_safety_doc )] //! `bindgen`-generated declarations. @@ -56,3 +54,11 @@ unsafe impl Sync for VMStateField {} unsafe impl Send for VMStateInfo {} unsafe impl Sync for VMStateInfo {} + +// bindgen does not derive Default here +#[allow(clippy::derivable_impls)] +impl Default for VMStateFlags { + fn default() -> Self { + Self(0) + } +} diff --git a/rust/bits/Cargo.toml b/rust/bits/Cargo.toml index 1ff38a411756c..7fce972b270b6 100644 --- a/rust/bits/Cargo.toml +++ b/rust/bits/Cargo.toml @@ -13,7 +13,7 @@ repository.workspace = true rust-version.workspace = true [dependencies] -qemu_api_macros = { path = "../qemu-api-macros" } +qemu_macros = { path = "../qemu-macros" } [lints] workspace = true diff --git a/rust/bits/meson.build b/rust/bits/meson.build index 2a41e138c54bf..359ca86f1556c 100644 --- a/rust/bits/meson.build +++ b/rust/bits/meson.build @@ -3,7 +3,7 @@ _bits_rs = static_library( 'src/lib.rs', override_options: ['rust_std=2021', 'build.rust_std=2021'], rust_abi: 'rust', - dependencies: [qemu_api_macros], + dependencies: [qemu_macros], ) bits_rs = declare_dependency(link_with: _bits_rs) diff --git 
a/rust/bits/src/lib.rs b/rust/bits/src/lib.rs index d485d6bd11050..d1141f7c882a3 100644 --- a/rust/bits/src/lib.rs +++ b/rust/bits/src/lib.rs @@ -165,19 +165,19 @@ macro_rules! bits { #[allow(dead_code)] #[inline(always)] - pub fn set(&mut self, rhs: Self) { + pub const fn set(&mut self, rhs: Self) { self.0 |= rhs.0; } #[allow(dead_code)] #[inline(always)] - pub fn clear(&mut self, rhs: Self) { + pub const fn clear(&mut self, rhs: Self) { self.0 &= !rhs.0; } #[allow(dead_code)] #[inline(always)] - pub fn toggle(&mut self, rhs: Self) { + pub const fn toggle(&mut self, rhs: Self) { self.0 ^= rhs.0; } @@ -380,14 +380,17 @@ macro_rules! bits { }; { $type:ty: $expr:expr } => { - ::qemu_api_macros::bits_const_internal! { $type @ ($expr) } + $crate::bits_const_internal! { $type @ ($expr) } }; { $type:ty as $int_type:ty: $expr:expr } => { - (::qemu_api_macros::bits_const_internal! { $type @ ($expr) }.into_bits()) as $int_type + ($crate::bits_const_internal! { $type @ ($expr) }.into_bits()) as $int_type }; } +#[doc(hidden)] +pub use qemu_macros::bits_const_internal; + #[cfg(test)] mod test { bits! 
{ diff --git a/rust/qemu-api/Cargo.toml b/rust/bql/Cargo.toml similarity index 52% rename from rust/qemu-api/Cargo.toml rename to rust/bql/Cargo.toml index db7000dee4410..8fd8131102848 100644 --- a/rust/qemu-api/Cargo.toml +++ b/rust/bql/Cargo.toml @@ -1,12 +1,11 @@ [package] -name = "qemu_api" +name = "bql" version = "0.1.0" -authors = ["Manos Pitsidianakis "] -description = "Rust bindings for QEMU" -readme = "README.md" +description = "Rust bindings for QEMU/BQL" resolver = "2" publish = false +authors.workspace = true edition.workspace = true homepage.workspace = true license.workspace = true @@ -14,14 +13,10 @@ repository.workspace = true rust-version.workspace = true [dependencies] -qemu_api_macros = { path = "../qemu-api-macros" } -anyhow = "~1.0" -libc = "0.2.162" -foreign = "~0.3.1" +glib-sys.workspace = true [features] default = ["debug_cell"] -allocator = [] debug_cell = [] [lints] diff --git a/rust/bql/build.rs b/rust/bql/build.rs new file mode 120000 index 0000000000000..71a3167885c23 --- /dev/null +++ b/rust/bql/build.rs @@ -0,0 +1 @@ +../util/build.rs \ No newline at end of file diff --git a/rust/bql/meson.build b/rust/bql/meson.build new file mode 100644 index 0000000000000..091372dd7b660 --- /dev/null +++ b/rust/bql/meson.build @@ -0,0 +1,52 @@ +_bql_cfg = run_command(rustc_args, + '--config-headers', config_host_h, '--features', files('Cargo.toml'), + capture: true, check: true).stdout().strip().splitlines() + +if get_option('debug_mutex') + _bql_cfg += ['--cfg', 'feature="debug_cell"'] +endif + +# +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. 
In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_bql_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common, + c_args: bindgen_c_args, +) + +_bql_rs = static_library( + 'bql', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/cell.rs', + ], + {'.': _bql_bindings_inc_rs} + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: _bql_cfg, + dependencies: [glib_sys_rs], +) + +bql_rs = declare_dependency(link_with: [_bql_rs], + dependencies: [qemuutil]) + +# Doctests are essentially integration tests, so they need the same dependencies. +# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". 
+rust.doctest('rust-bql-rs-doctests', + _bql_rs, + dependencies: bql_rs, + suite: ['doc', 'rust']) diff --git a/rust/bql/src/bindings.rs b/rust/bql/src/bindings.rs new file mode 100644 index 0000000000000..8c70f3a87ce18 --- /dev/null +++ b/rust/bql/src/bindings.rs @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use glib_sys::{ + guint, GArray, GHashTable, GHashTableIter, GList, GPollFD, GPtrArray, GQueue, GSList, GSource, +}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); diff --git a/rust/qemu-api/src/cell.rs b/rust/bql/src/cell.rs similarity index 70% rename from rust/qemu-api/src/cell.rs rename to rust/bql/src/cell.rs index 27063b049d55f..8ade7db629cfe 100644 --- a/rust/qemu-api/src/cell.rs +++ b/rust/bql/src/cell.rs @@ -75,10 +75,10 @@ //! //! ### Example //! -//! ``` -//! # use qemu_api::prelude::*; -//! # use qemu_api::{cell::BqlRefCell, irq::InterruptSource, irq::IRQState}; -//! # use qemu_api::{sysbus::SysBusDevice, qom::Owned, qom::ParentField}; +//! ```ignore +//! # use bql::BqlRefCell; +//! # use qom::{Owned, ParentField}; +//! # use system::{InterruptSource, IRQState, SysBusDevice}; //! # const N_GPIOS: usize = 8; //! # struct PL061Registers { /* ... */ } //! # unsafe impl ObjectType for PL061State { @@ -141,110 +141,16 @@ //! Multiple immutable borrows are allowed via [`borrow`](BqlRefCell::borrow), //! or a single mutable borrow via [`borrow_mut`](BqlRefCell::borrow_mut). The //! thread will panic if these rules are violated or if the BQL is not held. 
-//! -//! ## Opaque wrappers -//! -//! The cell types from the previous section are useful at the boundaries -//! of code that requires interior mutability. When writing glue code that -//! interacts directly with C structs, however, it is useful to operate -//! at a lower level. -//! -//! C functions often violate Rust's fundamental assumptions about memory -//! safety by modifying memory even if it is shared. Furthermore, C structs -//! often start their life uninitialized and may be populated lazily. -//! -//! For this reason, this module provides the [`Opaque`] type to opt out -//! of Rust's usual guarantees about the wrapped type. Access to the wrapped -//! value is always through raw pointers, obtained via methods like -//! [`as_mut_ptr()`](Opaque::as_mut_ptr) and [`as_ptr()`](Opaque::as_ptr). These -//! pointers can then be passed to C functions or dereferenced; both actions -//! require `unsafe` blocks, making it clear where safety guarantees must be -//! manually verified. For example -//! -//! ```ignore -//! unsafe { -//! let state = Opaque::::uninit(); -//! qemu_struct_init(state.as_mut_ptr()); -//! } -//! ``` -//! -//! [`Opaque`] will usually be wrapped one level further, so that -//! bridge methods can be added to the wrapper: -//! -//! ```ignore -//! pub struct MyStruct(Opaque); -//! -//! impl MyStruct { -//! fn new() -> Pin> { -//! let result = Box::pin(unsafe { Opaque::uninit() }); -//! unsafe { qemu_struct_init(result.as_mut_ptr()) }; -//! result -//! } -//! } -//! ``` -//! -//! This pattern of wrapping bindgen-generated types in [`Opaque`] provides -//! several advantages: -//! -//! * The choice of traits to be implemented is not limited by the -//! bindgen-generated code. For example, [`Drop`] can be added without -//! disabling [`Copy`] on the underlying bindgen type -//! -//! * [`Send`] and [`Sync`] implementations can be controlled by the wrapper -//! type rather than being automatically derived from the C struct's layout -//! -//! 
* Methods can be implemented in a separate crate from the bindgen-generated -//! bindings -//! -//! * [`Debug`](std::fmt::Debug) and [`Display`](std::fmt::Display) -//! implementations can be customized to be more readable than the raw C -//! struct representation -//! -//! The [`Opaque`] type does not include BQL validation; it is possible to -//! assert in the code that the right lock is taken, to use it together -//! with a custom lock guard type, or to let C code take the lock, as -//! appropriate. It is also possible to use it with non-thread-safe -//! types, since by default (unlike [`BqlCell`] and [`BqlRefCell`] -//! it is neither `Sync` nor `Send`. -//! -//! While [`Opaque`] is necessary for C interop, it should be used sparingly -//! and only at FFI boundaries. For QEMU-specific types that need interior -//! mutability, prefer [`BqlCell`] or [`BqlRefCell`]. - use std::{ cell::{Cell, UnsafeCell}, cmp::Ordering, fmt, - marker::{PhantomData, PhantomPinned}, - mem::{self, MaybeUninit}, + marker::PhantomData, + mem, ops::{Deref, DerefMut}, ptr::NonNull, }; -use crate::bindings; - -/// An internal function that is used by doctests. -pub fn bql_start_test() { - // SAFETY: integration tests are run with --test-threads=1, while - // unit tests and doctests are not multithreaded and do not have - // any BQL-protected data. Just set bql_locked to true. - unsafe { - bindings::rust_bql_mock_lock(); - } -} - -pub fn bql_locked() -> bool { - // SAFETY: the function does nothing but return a thread-local bool - unsafe { bindings::bql_locked() } -} - -fn bql_block_unlock(increase: bool) { - // SAFETY: this only adjusts a counter - unsafe { - bindings::bql_block_unlock(increase); - } -} - /// A mutable memory location that is protected by the Big QEMU Lock. 
/// /// # Memory layout @@ -323,8 +229,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let c = BqlCell::new(5); /// ``` @@ -340,8 +246,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let c = BqlCell::new(5); /// @@ -358,8 +264,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let cell = BqlCell::new(5); /// assert_eq!(cell.get(), 5); @@ -368,7 +274,7 @@ impl BqlCell { /// ``` #[inline] pub fn replace(&self, val: T) -> T { - assert!(bql_locked()); + assert!(crate::is_locked()); // SAFETY: This can cause data races if called from multiple threads, // but it won't happen as long as C code accesses the value // under BQL protection only. 
@@ -380,8 +286,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let c = BqlCell::new(5); /// let five = c.into_inner(); @@ -389,7 +295,7 @@ impl BqlCell { /// assert_eq!(five, 5); /// ``` pub fn into_inner(self) -> T { - assert!(bql_locked()); + assert!(crate::is_locked()); self.value.into_inner() } } @@ -400,8 +306,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let c = BqlCell::new(5); /// @@ -409,7 +315,7 @@ impl BqlCell { /// ``` #[inline] pub fn get(&self) -> T { - assert!(bql_locked()); + assert!(crate::is_locked()); // SAFETY: This can cause data races if called from multiple threads, // but it won't happen as long as C code accesses the value // under BQL protection only. @@ -423,8 +329,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let c = BqlCell::new(5); /// @@ -442,8 +348,8 @@ impl BqlCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlCell; + /// # bql::start_test(); /// /// let c = BqlCell::new(5); /// let five = c.take(); @@ -512,7 +418,7 @@ impl BqlRefCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlRefCell; + /// use bql::BqlRefCell; /// /// let c = BqlRefCell::new(5); /// ``` @@ -571,8 +477,8 @@ impl BqlRefCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlRefCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlRefCell; + /// # bql::start_test(); /// /// let c = BqlRefCell::new(5); /// @@ -583,8 +489,8 @@ impl BqlRefCell { /// An example of panic: /// /// ```should_panic - /// use qemu_api::cell::BqlRefCell; - /// # 
qemu_api::cell::bql_start_test(); + /// use bql::BqlRefCell; + /// # bql::start_test(); /// /// let c = BqlRefCell::new(5); /// @@ -601,7 +507,7 @@ impl BqlRefCell { self.borrowed_at.set(Some(std::panic::Location::caller())); } - bql_block_unlock(true); + crate::block_unlock(true); // SAFETY: `BorrowRef` ensures that there is only immutable access // to the value while borrowed. @@ -625,8 +531,8 @@ impl BqlRefCell { /// # Examples /// /// ``` - /// use qemu_api::cell::BqlRefCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlRefCell; + /// # bql::start_test(); /// /// let c = BqlRefCell::new("hello".to_owned()); /// @@ -638,8 +544,8 @@ impl BqlRefCell { /// An example of panic: /// /// ```should_panic - /// use qemu_api::cell::BqlRefCell; - /// # qemu_api::cell::bql_start_test(); + /// use bql::BqlRefCell; + /// # bql::start_test(); /// /// let c = BqlRefCell::new(5); /// let m = c.borrow(); @@ -656,7 +562,7 @@ impl BqlRefCell { } // SAFETY: this only adjusts a counter - bql_block_unlock(true); + crate::block_unlock(true); // SAFETY: `BorrowRefMut` guarantees unique access. let value = unsafe { NonNull::new_unchecked(self.value.get()) }; @@ -670,12 +576,29 @@ impl BqlRefCell { } } + /// Returns a mutable reference to the underlying data in this cell, + /// while the owner already has a mutable reference to the cell. + /// + /// # Examples + /// + /// ``` + /// use bql::BqlRefCell; + /// + /// let mut c = BqlRefCell::new(5); + /// + /// *c.get_mut() = 10; + /// ``` + #[inline] + pub const fn get_mut(&mut self) -> &mut T { + self.value.get_mut() + } + /// Returns a raw pointer to the underlying data in this cell. 
/// /// # Examples /// /// ``` - /// use qemu_api::cell::BqlRefCell; + /// use bql::BqlRefCell; /// /// let c = BqlRefCell::new(5); /// @@ -800,7 +723,7 @@ impl Drop for BorrowRef<'_> { let borrow = self.borrow.get(); debug_assert!(is_reading(borrow)); self.borrow.set(borrow - 1); - bql_block_unlock(false) + crate::block_unlock(false) } } @@ -890,7 +813,7 @@ impl Drop for BorrowRefMut<'_> { let borrow = self.borrow.get(); debug_assert!(is_writing(borrow)); self.borrow.set(borrow + 1); - bql_block_unlock(false) + crate::block_unlock(false) } } @@ -935,167 +858,3 @@ impl fmt::Display for BqlRefMut<'_, T> { (**self).fmt(f) } } - -/// Stores an opaque value that is shared with C code. -/// -/// Often, C structs can changed when calling a C function even if they are -/// behind a shared Rust reference, or they can be initialized lazily and have -/// invalid bit patterns (e.g. `3` for a [`bool`]). This goes against Rust's -/// strict aliasing rules, which normally prevent mutation through shared -/// references. -/// -/// Wrapping the struct with `Opaque` ensures that the Rust compiler does not -/// assume the usual constraints that Rust structs require, and allows using -/// shared references on the Rust side. -/// -/// `Opaque` is `#[repr(transparent)]`, so that it matches the memory layout -/// of `T`. -#[repr(transparent)] -pub struct Opaque { - value: UnsafeCell>, - // PhantomPinned also allows multiple references to the `Opaque`, i.e. - // one `&mut Opaque` can coexist with a `&mut T` or any number of `&T`; - // see https://docs.rs/pinned-aliasable/latest/pinned_aliasable/. - _pin: PhantomPinned, -} - -impl Opaque { - /// Creates a new shared reference from a C pointer - /// - /// # Safety - /// - /// The pointer must be valid, though it need not point to a valid value. 
- pub unsafe fn from_raw<'a>(ptr: *mut T) -> &'a Self { - let ptr = NonNull::new(ptr).unwrap().cast::(); - // SAFETY: Self is a transparent wrapper over T - unsafe { ptr.as_ref() } - } - - /// Creates a new opaque object with uninitialized contents. - /// - /// # Safety - /// - /// Ultimately the pointer to the returned value will be dereferenced - /// in another `unsafe` block, for example when passing it to a C function, - /// but the functions containing the dereference are usually safe. The - /// value returned from `uninit()` must be initialized and pinned before - /// calling them. - #[allow(clippy::missing_const_for_fn)] - pub unsafe fn uninit() -> Self { - Self { - value: UnsafeCell::new(MaybeUninit::uninit()), - _pin: PhantomPinned, - } - } - - /// Creates a new opaque object with zeroed contents. - /// - /// # Safety - /// - /// Ultimately the pointer to the returned value will be dereferenced - /// in another `unsafe` block, for example when passing it to a C function, - /// but the functions containing the dereference are usually safe. The - /// value returned from `uninit()` must be pinned (and possibly initialized) - /// before calling them. - #[allow(clippy::missing_const_for_fn)] - pub unsafe fn zeroed() -> Self { - Self { - value: UnsafeCell::new(MaybeUninit::zeroed()), - _pin: PhantomPinned, - } - } - - /// Returns a raw mutable pointer to the opaque data. - pub const fn as_mut_ptr(&self) -> *mut T { - UnsafeCell::get(&self.value).cast() - } - - /// Returns a raw pointer to the opaque data. - pub const fn as_ptr(&self) -> *const T { - self.as_mut_ptr().cast_const() - } - - /// Returns a raw pointer to the opaque data that can be passed to a - /// C function as `void *`. - pub const fn as_void_ptr(&self) -> *mut std::ffi::c_void { - UnsafeCell::get(&self.value).cast() - } - - /// Converts a raw pointer to the wrapped type. 
- pub const fn raw_get(slot: *mut Self) -> *mut T { - // Compare with Linux's raw_get method, which goes through an UnsafeCell - // because it takes a *const Self instead. - slot.cast() - } -} - -impl fmt::Debug for Opaque { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let mut name: String = "Opaque<".to_string(); - name += std::any::type_name::(); - name += ">"; - f.debug_tuple(&name).field(&self.as_ptr()).finish() - } -} - -impl Opaque { - /// Creates a new opaque object with default contents. - /// - /// # Safety - /// - /// Ultimately the pointer to the returned value will be dereferenced - /// in another `unsafe` block, for example when passing it to a C function, - /// but the functions containing the dereference are usually safe. The - /// value returned from `uninit()` must be pinned before calling them. - pub unsafe fn new() -> Self { - Self { - value: UnsafeCell::new(MaybeUninit::new(T::default())), - _pin: PhantomPinned, - } - } -} - -/// Annotates [`Self`] as a transparent wrapper for another type. -/// -/// Usually defined via the [`qemu_api_macros::Wrapper`] derive macro. -/// -/// # Examples -/// -/// ``` -/// # use std::mem::ManuallyDrop; -/// # use qemu_api::cell::Wrapper; -/// #[repr(transparent)] -/// pub struct Example { -/// inner: ManuallyDrop, -/// } -/// -/// unsafe impl Wrapper for Example { -/// type Wrapped = String; -/// } -/// ``` -/// -/// # Safety -/// -/// `Self` must be a `#[repr(transparent)]` wrapper for the `Wrapped` type, -/// whether directly or indirectly. 
-/// -/// # Methods -/// -/// By convention, types that implement Wrapper also implement the following -/// methods: -/// -/// ```ignore -/// pub const unsafe fn from_raw<'a>(value: *mut Self::Wrapped) -> &'a Self; -/// pub const unsafe fn as_mut_ptr(&self) -> *mut Self::Wrapped; -/// pub const unsafe fn as_ptr(&self) -> *const Self::Wrapped; -/// pub const unsafe fn raw_get(slot: *mut Self) -> *const Self::Wrapped; -/// ``` -/// -/// They are not defined here to allow them to be `const`. -pub unsafe trait Wrapper { - type Wrapped; -} - -unsafe impl Wrapper for Opaque { - type Wrapped = T; -} diff --git a/rust/bql/src/lib.rs b/rust/bql/src/lib.rs new file mode 100644 index 0000000000000..ef08221e9c1a0 --- /dev/null +++ b/rust/bql/src/lib.rs @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +mod bindings; +use bindings::{bql_block_unlock, bql_locked, rust_bql_mock_lock}; + +mod cell; +pub use cell::*; + +/// An internal function that is used by doctests. +pub fn start_test() { + // SAFETY: integration tests are run with --test-threads=1, while + // unit tests and doctests are not multithreaded and do not have + // any BQL-protected data. Just set bql_locked to true. + unsafe { + rust_bql_mock_lock(); + } +} + +pub fn is_locked() -> bool { + // SAFETY: the function does nothing but return a thread-local bool + unsafe { bql_locked() } +} + +pub fn block_unlock(increase: bool) { + // SAFETY: this only adjusts a counter + unsafe { + bql_block_unlock(increase); + } +} diff --git a/rust/bql/wrapper.h b/rust/bql/wrapper.h new file mode 100644 index 0000000000000..2ef9a96e1d3c0 --- /dev/null +++ b/rust/bql/wrapper.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. 
+ */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. + */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" + +#include "qemu/main-loop.h" diff --git a/rust/chardev/Cargo.toml b/rust/chardev/Cargo.toml new file mode 100644 index 0000000000000..f105189dccbb3 --- /dev/null +++ b/rust/chardev/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "chardev" +version = "0.1.0" +description = "Rust bindings for QEMU/chardev" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +glib-sys = { workspace = true } +common = { path = "../common" } +bql = { path = "../bql" } +migration = { path = "../migration" } +qom = { path = "../qom" } +util = { path = "../util" } + +[lints] +workspace = true diff --git a/rust/chardev/build.rs b/rust/chardev/build.rs new file mode 120000 index 0000000000000..71a3167885c23 --- /dev/null +++ b/rust/chardev/build.rs @@ -0,0 +1 @@ +../util/build.rs \ No newline at end of file diff --git a/rust/chardev/meson.build b/rust/chardev/meson.build new file mode 100644 index 0000000000000..d365d8dd0f456 --- /dev/null +++ b/rust/chardev/meson.build @@ -0,0 +1,42 @@ +c_enums = [ + 'QEMUChrEvent', +] +_chardev_bindgen_args = [] +foreach enum : c_enums + _chardev_bindgen_args += ['--rustified-enum', enum] +endforeach + +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. 
+# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_chardev_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _chardev_bindgen_args, + c_args: bindgen_c_args, +) + +_chardev_rs = static_library( + 'chardev', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/chardev.rs', + ], + {'.': _chardev_bindings_inc_rs} + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + link_with: [_bql_rs, _migration_rs, _qom_rs, _util_rs], + dependencies: [glib_sys_rs, common_rs, qemu_macros], +) + +chardev_rs = declare_dependency(link_with: [_chardev_rs], dependencies: [chardev, qemuutil]) diff --git a/rust/chardev/src/bindings.rs b/rust/chardev/src/bindings.rs new file mode 100644 index 0000000000000..c95dc89c56dcb --- /dev/null +++ b/rust/chardev/src/bindings.rs @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use common::Zeroable; +use glib_sys::{ + gboolean, guint, GArray, GHashTable, GHashTableIter, GIOCondition, GList, GMainContext, + GPollFD, GPtrArray, GQueue, GSList, GSource, GSourceFunc, +}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] 
+include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); + +// SAFETY: these are implemented in C; the bindings need to assert that the +// BQL is taken, either directly or via `BqlCell` and `BqlRefCell`. +// When bindings for character devices are introduced, this can be +// moved to the Opaque<> wrapper in src/chardev.rs. +unsafe impl Send for CharBackend {} +unsafe impl Sync for CharBackend {} + +unsafe impl Zeroable for CharBackend {} diff --git a/rust/qemu-api/src/chardev.rs b/rust/chardev/src/chardev.rs similarity index 94% rename from rust/qemu-api/src/chardev.rs rename to rust/chardev/src/chardev.rs index 6e0590d758ed5..2014479674f28 100644 --- a/rust/qemu-api/src/chardev.rs +++ b/rust/chardev/src/chardev.rs @@ -18,16 +18,15 @@ use std::{ slice, }; -use crate::{ - bindings, - callbacks::FnCall, - cell::{BqlRefMut, Opaque}, - prelude::*, -}; +use bql::{BqlRefCell, BqlRefMut}; +use common::{callbacks::FnCall, errno, Opaque}; +use qom::prelude::*; + +use crate::bindings; /// A safe wrapper around [`bindings::Chardev`]. 
#[repr(transparent)] -#[derive(qemu_api_macros::Wrapper)] +#[derive(common::Wrapper)] pub struct Chardev(Opaque); pub type ChardevClass = bindings::ChardevClass; @@ -43,13 +42,15 @@ pub struct CharBackend { _pin: PhantomPinned, } -impl Write for BqlRefMut<'_, bindings::CharBackend> { +pub struct CharBackendMut<'a>(BqlRefMut<'a, bindings::CharBackend>); + +impl Write for CharBackendMut<'_> { fn flush(&mut self) -> io::Result<()> { Ok(()) } fn write(&mut self, buf: &[u8]) -> io::Result { - let chr: &mut bindings::CharBackend = self; + let chr: &mut bindings::CharBackend = &mut self.0; let len = buf.len().try_into().unwrap(); let r = unsafe { bindings::qemu_chr_fe_write(addr_of_mut!(*chr), buf.as_ptr(), len) }; @@ -57,7 +58,7 @@ impl Write for BqlRefMut<'_, bindings::CharBackend> { } fn write_all(&mut self, buf: &[u8]) -> io::Result<()> { - let chr: &mut bindings::CharBackend = self; + let chr: &mut bindings::CharBackend = &mut self.0; let len = buf.len().try_into().unwrap(); let r = unsafe { bindings::qemu_chr_fe_write_all(addr_of_mut!(*chr), buf.as_ptr(), len) }; @@ -138,7 +139,7 @@ impl CharBackend { F::call((owner, event)) } - let _: () = CanReceiveFn::ASSERT_IS_SOME; + const { assert!(CanReceiveFn::IS_SOME) }; let receive_cb: Option = if ReceiveFn::is_some() { Some(rust_receive_cb::) @@ -197,7 +198,7 @@ impl CharBackend { /// the big QEMU lock while the character device is borrowed, as /// that might cause C code to write to the character device. 
pub fn borrow_mut(&self) -> impl Write + '_ { - self.inner.borrow_mut() + CharBackendMut(self.inner.borrow_mut()) } /// Send a continuous stream of zero bits on the line if `enabled` is diff --git a/rust/chardev/src/lib.rs b/rust/chardev/src/lib.rs new file mode 100644 index 0000000000000..2e549f99d9139 --- /dev/null +++ b/rust/chardev/src/lib.rs @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub mod bindings; + +mod chardev; +pub use chardev::*; diff --git a/rust/chardev/wrapper.h b/rust/chardev/wrapper.h new file mode 100644 index 0000000000000..65ede6ea6d7d1 --- /dev/null +++ b/rust/chardev/wrapper.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. + */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. 
+ */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" + +#include "chardev/char-fe.h" +#include "chardev/char-serial.h" diff --git a/rust/common/Cargo.toml b/rust/common/Cargo.toml new file mode 100644 index 0000000000000..0e1b4fc505027 --- /dev/null +++ b/rust/common/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "common" +version = "0.1.0" +description = "Rust common code for QEMU" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +libc.workspace = true +qemu_macros = { path = "../qemu-macros" } + +[lints] +workspace = true diff --git a/rust/common/meson.build b/rust/common/meson.build new file mode 100644 index 0000000000000..aff601d1df27c --- /dev/null +++ b/rust/common/meson.build @@ -0,0 +1,36 @@ +_common_cfg = run_command(rustc_args, + '--config-headers', config_host_h, '--features', files('Cargo.toml'), + capture: true, check: true).stdout().strip().splitlines() + +_common_rs = static_library( + 'common', + structured_sources( + [ + 'src/lib.rs', + 'src/assertions.rs', + 'src/bitops.rs', + 'src/callbacks.rs', + 'src/errno.rs', + 'src/opaque.rs', + 'src/uninit.rs', + 'src/zeroable.rs', + ], + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: _common_cfg, + dependencies: [libc_rs, qemu_macros], +) + +common_rs = declare_dependency(link_with: [_common_rs]) + +rust.test('rust-common-tests', _common_rs, + suite: ['unit', 'rust']) + +# Doctests are essentially integration tests, so they need the same dependencies. 
+# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". +rust.doctest('rust-common-doctests', + _common_rs, + dependencies: common_rs, + suite: ['doc', 'rust']) diff --git a/rust/qemu-api/src/assertions.rs b/rust/common/src/assertions.rs similarity index 86% rename from rust/qemu-api/src/assertions.rs rename to rust/common/src/assertions.rs index a2d38c877df55..91f83a5d3d43d 100644 --- a/rust/qemu-api/src/assertions.rs +++ b/rust/common/src/assertions.rs @@ -8,7 +8,7 @@ //! types match the expectations of C code. //! //! Documentation is hidden because it only exposes macros, which -//! are exported directly from `qemu_api`. +//! are exported directly from `common`. // Based on https://stackoverflow.com/questions/64251852/x/70978292#70978292 // (stackoverflow answers are released under MIT license). @@ -27,7 +27,7 @@ impl EqType for T { /// # Examples /// /// ``` -/// # use qemu_api::assert_same_type; +/// # use common::assert_same_type; /// # use std::ops::Deref; /// assert_same_type!(u32, u32); /// assert_same_type!( as Deref>::Target, u32); @@ -36,7 +36,7 @@ impl EqType for T { /// Different types will cause a compile failure /// /// ```compile_fail -/// # use qemu_api::assert_same_type; +/// # use common::assert_same_type; /// assert_same_type!(&Box, &u32); /// ``` #[macro_export] @@ -61,7 +61,7 @@ macro_rules! assert_same_type { /// # Examples /// /// ``` -/// # use qemu_api::assert_field_type; +/// # use common::assert_field_type; /// pub struct A { /// field1: u32, /// } @@ -72,7 +72,7 @@ macro_rules! assert_same_type { /// Different types will cause a compile failure /// /// ```compile_fail -/// # use qemu_api::assert_field_type; +/// # use common::assert_field_type; /// # pub struct A { field1: u32 } /// assert_field_type!(A, field1, i32); /// ``` @@ -81,8 +81,8 @@ macro_rules! 
assert_field_type { (@internal $param_name:ident, $ti:ty, $t:ty, $($field:tt)*) => { const _: () = { #[allow(unused)] - fn assert_field_type($param_name: &$t) { - fn types_must_be_equal(_: &T) + const fn assert_field_type($param_name: &$t) { + const fn types_must_be_equal(_: &T) where T: $crate::assertions::EqType, { @@ -95,10 +95,6 @@ macro_rules! assert_field_type { ($t:ty, $i:tt, $ti:ty) => { $crate::assert_field_type!(@internal v, $ti, $t, v.$i); }; - - ($t:ty, $i:tt, $ti:ty, num = $num:ident) => { - $crate::assert_field_type!(@internal v, $ti, $t, v.$i[0]); - }; } /// Assert that an expression matches a pattern. This can also be @@ -107,7 +103,7 @@ macro_rules! assert_field_type { /// # Examples /// /// ``` -/// # use qemu_api::assert_match; +/// # use common::assert_match; /// // JoinHandle does not implement `Eq`, therefore the result /// // does not either. /// let result: Result, u32> = Err(42); @@ -136,12 +132,12 @@ macro_rules! assert_match { /// # Examples /// /// ``` -/// # use qemu_api::static_assert; +/// # use common::static_assert; /// static_assert!("abc".len() == 3); /// ``` /// /// ```compile_fail -/// # use qemu_api::static_assert; +/// # use common::static_assert; /// static_assert!("abc".len() == 2); // does not compile /// ``` #[macro_export] diff --git a/rust/qemu-api/src/bitops.rs b/rust/common/src/bitops.rs similarity index 98% rename from rust/qemu-api/src/bitops.rs rename to rust/common/src/bitops.rs index b1e3a530ab542..06c78c3b8a7f4 100644 --- a/rust/qemu-api/src/bitops.rs +++ b/rust/common/src/bitops.rs @@ -3,7 +3,6 @@ // SPDX-License-Identifier: GPL-2.0-or-later //! This module provides bit operation extensions to integer types. -//! It is usually included via the `qemu_api` prelude. 
use std::ops::{ Add, AddAssign, BitAnd, BitAndAssign, BitOr, BitOrAssign, BitXor, BitXorAssign, Div, DivAssign, diff --git a/rust/qemu-api/src/callbacks.rs b/rust/common/src/callbacks.rs similarity index 85% rename from rust/qemu-api/src/callbacks.rs rename to rust/common/src/callbacks.rs index 9642a16eb89bb..b8898fe96f7c7 100644 --- a/rust/qemu-api/src/callbacks.rs +++ b/rust/common/src/callbacks.rs @@ -55,7 +55,7 @@ use std::{mem, ptr::NonNull}; /// # Examples /// /// ``` -/// # use qemu_api::callbacks::FnCall; +/// # use common::callbacks::FnCall; /// fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { /// F::call((s,)) /// } @@ -71,7 +71,7 @@ use std::{mem, ptr::NonNull}; /// Attempting to pass a non-zero-sized closure causes a compile-time failure: /// /// ```compile_fail -/// # use qemu_api::callbacks::FnCall; +/// # use common::callbacks::FnCall; /// # fn call_it<'a, F: FnCall<(&'a str,), String>>(_f: &F, s: &'a str) -> String { /// # F::call((s,)) /// # } @@ -82,7 +82,7 @@ use std::{mem, ptr::NonNull}; /// `()` can be used to indicate "no function": /// /// ``` -/// # use qemu_api::callbacks::FnCall; +/// # use common::callbacks::FnCall; /// fn optional FnCall<(&'a str,), String>>(_f: &F, s: &str) -> Option { /// if F::IS_SOME { /// Some(F::call((s,))) @@ -97,7 +97,7 @@ use std::{mem, ptr::NonNull}; /// Invoking `F::call` will then be a run-time error. /// /// ```should_panic -/// # use qemu_api::callbacks::FnCall; +/// # use common::callbacks::FnCall; /// # fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { /// # F::call((s,)) /// # } @@ -113,31 +113,6 @@ use std::{mem, ptr::NonNull}; /// This is always true for zero-capture closures and function pointers, as long /// as the code is able to name the function in the first place. pub unsafe trait FnCall: 'static + Sync + Sized { - /// Referring to this internal constant asserts that the `Self` type is - /// zero-sized. 
Can be replaced by an inline const expression in - /// Rust 1.79.0+. - const ASSERT_ZERO_SIZED: () = { assert!(mem::size_of::() == 0) }; - - /// Referring to this constant asserts that the `Self` type is an actual - /// function type, which can be used to catch incorrect use of `()` - /// at compile time. - /// - /// # Examples - /// - /// ```compile_fail - /// # use qemu_api::callbacks::FnCall; - /// fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { - /// let _: () = F::ASSERT_IS_SOME; - /// F::call((s,)) - /// } - /// - /// let s: String = call_it((), "hello world"); // does not compile - /// ``` - /// - /// Note that this can be more simply `const { assert!(F::IS_SOME) }` in - /// Rust 1.79.0 or newer. - const ASSERT_IS_SOME: () = { assert!(Self::IS_SOME) }; - /// `true` if `Self` is an actual function type and not `()`. /// /// # Examples @@ -145,7 +120,7 @@ pub unsafe trait FnCall: 'static + Sync + Sized { /// You can use `IS_SOME` to catch this at compile time: /// /// ```compile_fail - /// # use qemu_api::callbacks::FnCall; + /// # use common::callbacks::FnCall; /// fn call_it FnCall<(&'a str,), String>>(_f: &F, s: &str) -> String { /// const { assert!(F::IS_SOME) } /// F::call((s,)) @@ -195,7 +170,7 @@ macro_rules! impl_call { #[inline(always)] fn call(a: ($($args,)*)) -> R { - let _: () = Self::ASSERT_ZERO_SIZED; + const { assert!(mem::size_of::() == 0) }; // SAFETY: the safety of this method is the condition for implementing // `FnCall`. As to the `NonNull` idiom to create a zero-sized type, diff --git a/rust/qemu-api/src/errno.rs b/rust/common/src/errno.rs similarity index 96% rename from rust/qemu-api/src/errno.rs rename to rust/common/src/errno.rs index 18d101448b936..64b2933b07877 100644 --- a/rust/qemu-api/src/errno.rs +++ b/rust/common/src/errno.rs @@ -7,7 +7,10 @@ //! convention. This module provides functions to portably convert an integer //! into an [`io::Result`] and back. 
-use std::{convert::TryFrom, io, io::ErrorKind}; +use std::{ + convert::{self, TryFrom}, + io::{self, ErrorKind}, +}; /// An `errno` value that can be converted into an [`io::Error`] pub struct Errno(pub u16); @@ -99,6 +102,12 @@ impl From for Errno { } } +impl From for Errno { + fn from(_value: convert::Infallible) -> Errno { + panic!("unreachable") + } +} + /// Internal traits; used to enable [`into_io_result`] and [`into_neg_errno`] /// for the "right" set of types. mod traits { @@ -176,7 +185,7 @@ use traits::{GetErrno, MergeErrno}; /// are interpreted as negated `errno` and turned into an `Err`. /// /// ``` -/// # use qemu_api::errno::into_io_result; +/// # use common::errno::into_io_result; /// # use std::io::ErrorKind; /// let ok = into_io_result(1i32).unwrap(); /// assert_eq!(ok, 1u32); @@ -192,7 +201,7 @@ use traits::{GetErrno, MergeErrno}; /// likely overflows and will panic: /// /// ```should_panic -/// # use qemu_api::errno::into_io_result; +/// # use common::errno::into_io_result; /// # #[allow(dead_code)] /// let err = into_io_result(-0x1234_5678i32); // panic /// ``` @@ -204,7 +213,7 @@ pub fn into_io_result(value: T) -> io::Result { /// values to report errors. 
/// /// ``` -/// # use qemu_api::errno::into_neg_errno; +/// # use common::errno::into_neg_errno; /// # use std::io::{self, ErrorKind}; /// let ok: io::Result<()> = Ok(()); /// assert_eq!(into_neg_errno(ok), 0); @@ -223,7 +232,7 @@ pub fn into_io_result(value: T) -> io::Result { /// positive: /// /// ```should_panic -/// # use qemu_api::errno::into_neg_errno; +/// # use common::errno::into_neg_errno; /// # use std::io; /// let err: io::Result = Ok(0x8899_AABB); /// into_neg_errno(err) // panic diff --git a/rust/common/src/lib.rs b/rust/common/src/lib.rs new file mode 100644 index 0000000000000..8311bf945da84 --- /dev/null +++ b/rust/common/src/lib.rs @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub use qemu_macros::{TryInto, Wrapper}; + +pub mod assertions; + +pub mod bitops; + +pub mod callbacks; +pub use callbacks::FnCall; + +pub mod errno; +pub use errno::Errno; + +pub mod opaque; +pub use opaque::{Opaque, Wrapper}; + +pub mod uninit; +pub use uninit::MaybeUninitField; + +pub mod zeroable; +pub use zeroable::Zeroable; diff --git a/rust/common/src/opaque.rs b/rust/common/src/opaque.rs new file mode 100644 index 0000000000000..c941fb45462d8 --- /dev/null +++ b/rust/common/src/opaque.rs @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: MIT + +//! ## Opaque wrappers +//! +//! The cell types from the previous section are useful at the boundaries +//! of code that requires interior mutability. When writing glue code that +//! interacts directly with C structs, however, it is useful to operate +//! at a lower level. +//! +//! C functions often violate Rust's fundamental assumptions about memory +//! safety by modifying memory even if it is shared. Furthermore, C structs +//! often start their life uninitialized and may be populated lazily. +//! +//! For this reason, this module provides the [`Opaque`] type to opt out +//! of Rust's usual guarantees about the wrapped type. Access to the wrapped +//! 
value is always through raw pointers, obtained via methods like +//! [`as_mut_ptr()`](Opaque::as_mut_ptr) and [`as_ptr()`](Opaque::as_ptr). These +//! pointers can then be passed to C functions or dereferenced; both actions +//! require `unsafe` blocks, making it clear where safety guarantees must be +//! manually verified. For example +//! +//! ```ignore +//! unsafe { +//! let state = Opaque::::uninit(); +//! qemu_struct_init(state.as_mut_ptr()); +//! } +//! ``` +//! +//! [`Opaque`] will usually be wrapped one level further, so that +//! bridge methods can be added to the wrapper: +//! +//! ```ignore +//! pub struct MyStruct(Opaque); +//! +//! impl MyStruct { +//! fn new() -> Pin> { +//! let result = Box::pin(unsafe { Opaque::uninit() }); +//! unsafe { qemu_struct_init(result.as_mut_ptr()) }; +//! result +//! } +//! } +//! ``` +//! +//! This pattern of wrapping bindgen-generated types in [`Opaque`] provides +//! several advantages: +//! +//! * The choice of traits to be implemented is not limited by the +//! bindgen-generated code. For example, [`Drop`] can be added without +//! disabling [`Copy`] on the underlying bindgen type +//! +//! * [`Send`] and [`Sync`] implementations can be controlled by the wrapper +//! type rather than being automatically derived from the C struct's layout +//! +//! * Methods can be implemented in a separate crate from the bindgen-generated +//! bindings +//! +//! * [`Debug`](std::fmt::Debug) and [`Display`](std::fmt::Display) +//! implementations can be customized to be more readable than the raw C +//! struct representation +//! +//! The [`Opaque`] type does not include BQL validation; it is possible to +//! assert in the code that the right lock is taken, to use it together +//! with a custom lock guard type, or to let C code take the lock, as +//! appropriate. It is also possible to use it with non-thread-safe +//! types, since by default (unlike [`BqlCell`] and [`BqlRefCell`] +//! it is neither `Sync` nor `Send`. +//! +//! 
While [`Opaque`] is necessary for C interop, it should be used sparingly +//! and only at FFI boundaries. For QEMU-specific types that need interior +//! mutability, prefer [`BqlCell`] or [`BqlRefCell`]. +//! +//! [`BqlCell`]: ../../bql/cell/struct.BqlCell.html +//! [`BqlRefCell`]: ../../bql/cell/struct.BqlRefCell.html +use std::{cell::UnsafeCell, fmt, marker::PhantomPinned, mem::MaybeUninit, ptr::NonNull}; + +/// Stores an opaque value that is shared with C code. +/// +/// Often, C structs can changed when calling a C function even if they are +/// behind a shared Rust reference, or they can be initialized lazily and have +/// invalid bit patterns (e.g. `3` for a [`bool`]). This goes against Rust's +/// strict aliasing rules, which normally prevent mutation through shared +/// references. +/// +/// Wrapping the struct with `Opaque` ensures that the Rust compiler does not +/// assume the usual constraints that Rust structs require, and allows using +/// shared references on the Rust side. +/// +/// `Opaque` is `#[repr(transparent)]`, so that it matches the memory layout +/// of `T`. +#[repr(transparent)] +pub struct Opaque { + value: UnsafeCell>, + // PhantomPinned also allows multiple references to the `Opaque`, i.e. + // one `&mut Opaque` can coexist with a `&mut T` or any number of `&T`; + // see https://docs.rs/pinned-aliasable/latest/pinned_aliasable/. + _pin: PhantomPinned, +} + +impl Opaque { + /// Creates a new shared reference from a C pointer + /// + /// # Safety + /// + /// The pointer must be valid, though it need not point to a valid value. + pub unsafe fn from_raw<'a>(ptr: *mut T) -> &'a Self { + let ptr = NonNull::new(ptr).unwrap().cast::(); + // SAFETY: Self is a transparent wrapper over T + unsafe { ptr.as_ref() } + } + + /// Creates a new opaque object with uninitialized contents. 
+ /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be initialized and pinned before + /// calling them. + pub const unsafe fn uninit() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::uninit()), + _pin: PhantomPinned, + } + } + + /// Creates a new opaque object with zeroed contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be pinned (and possibly initialized) + /// before calling them. + pub const unsafe fn zeroed() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::zeroed()), + _pin: PhantomPinned, + } + } + + /// Returns a raw mutable pointer to the opaque data. + pub const fn as_mut_ptr(&self) -> *mut T { + UnsafeCell::get(&self.value).cast() + } + + /// Returns a raw pointer to the opaque data. + pub const fn as_ptr(&self) -> *const T { + self.as_mut_ptr().cast_const() + } + + /// Returns a raw pointer to the opaque data that can be passed to a + /// C function as `void *`. + pub const fn as_void_ptr(&self) -> *mut std::ffi::c_void { + UnsafeCell::get(&self.value).cast() + } + + /// Converts a raw pointer to the wrapped type. + pub const fn raw_get(slot: *mut Self) -> *mut T { + // Compare with Linux's raw_get method, which goes through an UnsafeCell + // because it takes a *const Self instead. 
+ slot.cast() + } +} + +impl fmt::Debug for Opaque { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let mut name: String = "Opaque<".to_string(); + name += std::any::type_name::(); + name += ">"; + f.debug_tuple(&name).field(&self.as_ptr()).finish() + } +} + +impl Opaque { + /// Creates a new opaque object with default contents. + /// + /// # Safety + /// + /// Ultimately the pointer to the returned value will be dereferenced + /// in another `unsafe` block, for example when passing it to a C function, + /// but the functions containing the dereference are usually safe. The + /// value returned from `uninit()` must be pinned before calling them. + pub unsafe fn new() -> Self { + Self { + value: UnsafeCell::new(MaybeUninit::new(T::default())), + _pin: PhantomPinned, + } + } +} + +/// Annotates [`Self`] as a transparent wrapper for another type. +/// +/// Usually defined via the [`crate::Wrapper`] derive macro. +/// +/// # Examples +/// +/// ``` +/// # use std::mem::ManuallyDrop; +/// # use common::opaque::Wrapper; +/// #[repr(transparent)] +/// pub struct Example { +/// inner: ManuallyDrop, +/// } +/// +/// unsafe impl Wrapper for Example { +/// type Wrapped = String; +/// } +/// ``` +/// +/// # Safety +/// +/// `Self` must be a `#[repr(transparent)]` wrapper for the `Wrapped` type, +/// whether directly or indirectly. +/// +/// # Methods +/// +/// By convention, types that implement Wrapper also implement the following +/// methods: +/// +/// ```ignore +/// pub const unsafe fn from_raw<'a>(value: *mut Self::Wrapped) -> &'a Self; +/// pub const unsafe fn as_mut_ptr(&self) -> *mut Self::Wrapped; +/// pub const unsafe fn as_ptr(&self) -> *const Self::Wrapped; +/// pub const unsafe fn raw_get(slot: *mut Self) -> *const Self::Wrapped; +/// ``` +/// +/// They are not defined here to allow them to be `const`. 
+pub unsafe trait Wrapper { + type Wrapped; +} + +unsafe impl Wrapper for Opaque { + type Wrapped = T; +} diff --git a/rust/qemu-api/src/uninit.rs b/rust/common/src/uninit.rs similarity index 87% rename from rust/qemu-api/src/uninit.rs rename to rust/common/src/uninit.rs index 04123b4ae99d1..8d021b1dfc6e9 100644 --- a/rust/qemu-api/src/uninit.rs +++ b/rust/common/src/uninit.rs @@ -12,7 +12,7 @@ pub struct MaybeUninitField<'a, T, U> { impl<'a, T, U> MaybeUninitField<'a, T, U> { #[doc(hidden)] - pub fn new(parent: &'a mut MaybeUninit, child: *mut U) -> Self { + pub const fn new(parent: &'a mut MaybeUninit, child: *mut U) -> Self { MaybeUninitField { parent, child } } @@ -21,7 +21,7 @@ impl<'a, T, U> MaybeUninitField<'a, T, U> { /// Because the `MaybeUninitField` remembers the containing object, /// it is possible to use it in foreign APIs that initialize the /// child. - pub fn parent(f: &Self) -> *const T { + pub const fn parent(f: &Self) -> *const T { f.parent.as_ptr() } @@ -30,12 +30,12 @@ impl<'a, T, U> MaybeUninitField<'a, T, U> { /// Because the `MaybeUninitField` remembers the containing object, /// it is possible to use it in foreign APIs that initialize the /// child. 
- pub fn parent_mut(f: &mut Self) -> *mut T { + pub const fn parent_mut(f: &mut Self) -> *mut T { f.parent.as_mut_ptr() } } -impl<'a, T, U> Deref for MaybeUninitField<'a, T, U> { +impl Deref for MaybeUninitField<'_, T, U> { type Target = MaybeUninit; fn deref(&self) -> &MaybeUninit { @@ -46,7 +46,7 @@ impl<'a, T, U> Deref for MaybeUninitField<'a, T, U> { } } -impl<'a, T, U> DerefMut for MaybeUninitField<'a, T, U> { +impl DerefMut for MaybeUninitField<'_, T, U> { fn deref_mut(&mut self) -> &mut MaybeUninit { // SAFETY: self.child was obtained by dereferencing a valid mutable // reference; the content of the memory may be invalid or uninitialized @@ -63,7 +63,7 @@ impl<'a, T, U> DerefMut for MaybeUninitField<'a, T, U> { /// } /// /// # use std::mem::MaybeUninit; -/// # use qemu_api::{assert_match, uninit_field_mut}; +/// # use common::{assert_match, uninit_field_mut}; /// /// let mut s: MaybeUninit = MaybeUninit::zeroed(); /// uninit_field_mut!(s, x).write(5); diff --git a/rust/common/src/zeroable.rs b/rust/common/src/zeroable.rs new file mode 100644 index 0000000000000..fd056deb1f641 --- /dev/null +++ b/rust/common/src/zeroable.rs @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! Defines a trait for structs that can be safely initialized with zero bytes. + +/// Encapsulates the requirement that +/// `MaybeUninit::::zeroed().assume_init()` does not cause undefined +/// behavior. +/// +/// # Safety +/// +/// Do not add this trait to a type unless all-zeroes is a valid value for the +/// type. In particular, raw pointers can be zero, but references and +/// `NonNull` cannot. +pub unsafe trait Zeroable: Default { + /// Return a value of Self whose memory representation consists of all + /// zeroes, with the possible exclusion of padding bytes. 
+ const ZERO: Self = unsafe { ::core::mem::MaybeUninit::::zeroed().assume_init() }; +} diff --git a/rust/hw/char/pl011/Cargo.toml b/rust/hw/char/pl011/Cargo.toml index 003ef9613d4e9..5b319455ee30a 100644 --- a/rust/hw/char/pl011/Cargo.toml +++ b/rust/hw/char/pl011/Cargo.toml @@ -12,15 +12,20 @@ license.workspace = true repository.workspace = true rust-version.workspace = true -[lib] -crate-type = ["staticlib"] - [dependencies] +glib-sys.workspace = true bilge = { version = "0.2.0" } bilge-impl = { version = "0.2.0" } bits = { path = "../../../bits" } -qemu_api = { path = "../../../qemu-api" } -qemu_api_macros = { path = "../../../qemu-api-macros" } +common = { path = "../../../common" } +util = { path = "../../../util" } +bql = { path = "../../../bql" } +migration = { path = "../../../migration" } +qom = { path = "../../../qom" } +chardev = { path = "../../../chardev" } +system = { path = "../../../system" } +hwcore = { path = "../../../hw/core" } +trace = { path = "../../../trace" } [lints] workspace = true diff --git a/rust/hw/char/pl011/build.rs b/rust/hw/char/pl011/build.rs new file mode 120000 index 0000000000000..5f5060db35668 --- /dev/null +++ b/rust/hw/char/pl011/build.rs @@ -0,0 +1 @@ +../../../util/build.rs \ No newline at end of file diff --git a/rust/hw/char/pl011/meson.build b/rust/hw/char/pl011/meson.build index 2a1be329abc16..33b91f21911c5 100644 --- a/rust/hw/char/pl011/meson.build +++ b/rust/hw/char/pl011/meson.build @@ -1,21 +1,51 @@ +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. 
In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_libpl011_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common, + c_args: bindgen_c_args, +) + _libpl011_rs = static_library( 'pl011', - files('src/lib.rs'), + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/device.rs', + 'src/registers.rs', + ], + {'.' : _libpl011_bindings_inc_rs}, + ), override_options: ['rust_std=2021', 'build.rust_std=2021'], rust_abi: 'rust', dependencies: [ bilge_rs, bilge_impl_rs, bits_rs, - qemu_api, - qemu_api_macros, + common_rs, + glib_sys_rs, + util_rs, + migration_rs, + bql_rs, + qom_rs, + chardev_rs, + system_rs, + hwcore_rs, + trace_rs ], ) rust_devices_ss.add(when: 'CONFIG_X_PL011_RUST', if_true: [declare_dependency( link_whole: [_libpl011_rs], - # Putting proc macro crates in `dependencies` is necessary for Meson to find - # them when compiling the root per-target static rust lib. - dependencies: [bilge_impl_rs, qemu_api_macros], variables: {'crate': 'pl011'}, )]) diff --git a/rust/hw/char/pl011/src/bindings.rs b/rust/hw/char/pl011/src/bindings.rs new file mode 100644 index 0000000000000..52a76d0de5cba --- /dev/null +++ b/rust/hw/char/pl011/src/bindings.rs @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +//! `bindgen`-generated declarations. 
+ +use glib_sys::{ + gboolean, guint, GArray, GByteArray, GHashTable, GHashTableIter, GIOCondition, GList, + GMainContext, GPollFD, GPtrArray, GQueue, GSList, GSource, GSourceFunc, GString, +}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); diff --git a/rust/hw/char/pl011/src/device.rs b/rust/hw/char/pl011/src/device.rs index 5b53f2649f161..8889d6e54fbd0 100644 --- a/rust/hw/char/pl011/src/device.rs +++ b/rust/hw/char/pl011/src/device.rs @@ -4,26 +4,24 @@ use std::{ffi::CStr, mem::size_of}; -use qemu_api::{ - chardev::{CharBackend, Chardev, Event}, - impl_vmstate_forward, - irq::{IRQState, InterruptSource}, - log::Log, - log_mask_ln, - memory::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder}, - prelude::*, - qdev::{Clock, ClockEvent, DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl}, - qom::{ObjectImpl, Owned, ParentField, ParentInit}, - static_assert, - sysbus::{SysBusDevice, SysBusDeviceImpl}, - uninit_field_mut, - vmstate::VMStateDescription, +use bql::BqlRefCell; +use chardev::{CharBackend, Chardev, Event}; +use common::{static_assert, uninit_field_mut}; +use hwcore::{ + Clock, ClockEvent, DeviceImpl, DeviceMethods, DeviceState, IRQState, InterruptSource, + ResetType, ResettablePhasesImpl, SysBusDevice, SysBusDeviceImpl, SysBusDeviceMethods, }; - -use crate::{ - device_class, - registers::{self, Interrupt, RegisterOffset}, +use migration::{ + self, impl_vmstate_forward, impl_vmstate_struct, vmstate_fields, vmstate_of, + vmstate_subsections, vmstate_unused, VMStateDescription, VMStateDescriptionBuilder, }; +use qom::{prelude::*, ObjectImpl, Owned, ParentField, ParentInit}; +use system::{hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder}; +use util::{log::Log, log_mask_ln}; + +use crate::registers::{self, Interrupt, RegisterOffset}; + +::trace::include_trace!("hw_char"); // TODO: You must disable the UART before any of the control registers 
are // reprogrammed. When the UART is disabled in the middle of transmission or @@ -101,12 +99,13 @@ pub struct PL011Registers { } #[repr(C)] -#[derive(qemu_api_macros::Object)] +#[derive(qom::Object, hwcore::Device)] /// PL011 Device Model in QEMU pub struct PL011State { pub parent_obj: ParentField, pub iomem: MemoryRegion, #[doc(alias = "chr")] + #[property(rename = "chardev")] pub char_backend: CharBackend, pub regs: BqlRefCell, /// QEMU interrupts @@ -125,6 +124,7 @@ pub struct PL011State { #[doc(alias = "clk")] pub clock: Owned, #[doc(alias = "migrate_clk")] + #[property(rename = "migrate-clk", default = true)] pub migrate_clock: bool, } @@ -132,7 +132,7 @@ pub struct PL011State { // structs, so the size of the Rust version must not be any larger // than the size of the C one. If this assert triggers you need to // expand the padding_for_rust[] array in the C PL011State struct. -static_assert!(size_of::() <= size_of::()); +static_assert!(size_of::() <= size_of::()); qom_isa!(PL011State : SysBusDevice, DeviceState, Object); @@ -172,13 +172,8 @@ impl ObjectImpl for PL011State { } impl DeviceImpl for PL011State { - fn properties() -> &'static [Property] { - &device_class::PL011_PROPERTIES - } - fn vmsd() -> Option<&'static VMStateDescription> { - Some(&device_class::VMSTATE_PL011) - } - const REALIZE: Option qemu_api::Result<()>> = Some(Self::realize); + const VMSTATE: Option> = Some(VMSTATE_PL011); + const REALIZE: Option util::Result<()>> = Some(Self::realize); } impl ResettablePhasesImpl for PL011State { @@ -215,13 +210,7 @@ impl PL011Registers { (update, result) } - pub(self) fn write( - &mut self, - offset: RegisterOffset, - value: u32, - char_backend: &CharBackend, - ) -> bool { - // eprintln!("write offset {offset} value {value}"); + pub(self) fn write(&mut self, offset: RegisterOffset, value: u32, device: &PL011State) -> bool { use RegisterOffset::*; match offset { DR => return self.write_data_register(value), @@ -236,9 +225,11 @@ impl PL011Registers { } 
IBRD => { self.ibrd = value; + device.trace_baudrate_change(self.ibrd, self.fbrd); } FBRD => { self.fbrd = value; + device.trace_baudrate_change(self.ibrd, self.fbrd); } LCR_H => { let new_val: registers::LineControl = value.into(); @@ -249,7 +240,7 @@ impl PL011Registers { } let update = (self.line_control.send_break() != new_val.send_break()) && { let break_enable = new_val.send_break(); - let _ = char_backend.send_break(break_enable); + let _ = device.char_backend.send_break(break_enable); self.loopback_break(break_enable) }; self.line_control = new_val; @@ -286,12 +277,13 @@ impl PL011Registers { } fn read_data_register(&mut self, update: &mut bool) -> u32 { + let depth = self.fifo_depth(); self.flags.set_receive_fifo_full(false); let c = self.read_fifo[self.read_pos]; if self.read_count > 0 { self.read_count -= 1; - self.read_pos = (self.read_pos + 1) & (self.fifo_depth() - 1); + self.read_pos = (self.read_pos + 1) & (depth - 1); } if self.read_count == 0 { self.flags.set_receive_fifo_empty(true); @@ -299,6 +291,7 @@ impl PL011Registers { if self.read_count + 1 == self.read_trigger { self.int_level &= !Interrupt::RX; } + trace::trace_pl011_read_fifo(self.read_count, depth); self.receive_status_error_clear.set_from_data(c); *update = true; u32::from(c) @@ -454,7 +447,9 @@ impl PL011Registers { self.read_fifo[slot] = value; self.read_count += 1; self.flags.set_receive_fifo_empty(false); + trace::trace_pl011_fifo_rx_put(value.into(), self.read_count, depth); if self.read_count == depth { + trace::trace_pl011_fifo_rx_full(); self.flags.set_receive_fifo_full(true); } @@ -465,10 +460,10 @@ impl PL011Registers { false } - pub fn post_load(&mut self) -> Result<(), ()> { + pub fn post_load(&mut self) -> Result<(), migration::InvalidError> { /* Sanity-check input state */ if self.read_pos >= self.read_fifo.len() || self.read_count > self.read_fifo.len() { - return Err(()); + return Err(migration::InvalidError); } if !self.fifo_enabled() && self.read_count > 0 && 
self.read_pos > 0 { @@ -523,8 +518,25 @@ impl PL011State { uninit_field_mut!(*this, clock).write(clock); } - const fn clock_update(&self, _event: ClockEvent) { - /* pl011_trace_baudrate_change(s); */ + pub fn trace_baudrate_change(&self, ibrd: u32, fbrd: u32) { + let divider = 4.0 / f64::from(ibrd * (FBRD_MASK + 1) + fbrd); + let hz = self.clock.hz(); + let rate = if ibrd == 0 { + 0 + } else { + ((hz as f64) * divider) as u32 + }; + trace::trace_pl011_baudrate_change(rate, hz, ibrd, fbrd); + } + + fn clock_update(&self, _event: ClockEvent) { + let regs = self.regs.borrow(); + let (ibrd, fbrd) = (regs.ibrd, regs.fbrd); + self.trace_baudrate_change(ibrd, fbrd) + } + + pub fn clock_needed(&self) -> bool { + self.migrate_clock } fn post_init(&self) { @@ -546,6 +558,7 @@ impl PL011State { } Ok(field) => { let (update_irq, result) = self.regs.borrow_mut().read(field); + trace::trace_pl011_read(offset, result, c""); if update_irq { self.update(); self.char_backend.accept_input(); @@ -560,6 +573,7 @@ impl PL011State { if let Ok(field) = RegisterOffset::try_from(offset) { // qemu_chr_fe_write_all() calls into the can_receive // callback, so handle writes before entering PL011Registers. + trace::trace_pl011_write(offset, value as u32, c""); if field == RegisterOffset::DR { // ??? Check if transmitter is enabled. 
let ch: [u8; 1] = [value as u8]; @@ -568,10 +582,7 @@ impl PL011State { let _ = self.char_backend.write_all(&ch); } - update_irq = self - .regs - .borrow_mut() - .write(field, value as u32, &self.char_backend); + update_irq = self.regs.borrow_mut().write(field, value as u32, self); } else { log_mask_ln!( Log::GuestError, @@ -585,11 +596,19 @@ impl PL011State { fn can_receive(&self) -> u32 { let regs = self.regs.borrow(); - // trace_pl011_can_receive(s->lcr, s->read_count, r); - regs.fifo_depth() - regs.read_count + let fifo_available = regs.fifo_depth() - regs.read_count; + trace::trace_pl011_can_receive( + regs.line_control.into(), + regs.read_count, + regs.fifo_depth(), + fifo_available, + ); + fifo_available } fn receive(&self, buf: &[u8]) { + trace::trace_pl011_receive(buf.len()); + let mut regs = self.regs.borrow_mut(); if regs.loopback_enabled() { // In loopback mode, the RX input signal is internally disconnected @@ -625,7 +644,7 @@ impl PL011State { } } - fn realize(&self) -> qemu_api::Result<()> { + fn realize(&self) -> util::Result<()> { self.char_backend .enable_handlers(self, Self::can_receive, Self::receive, Self::event); Ok(()) @@ -638,12 +657,13 @@ impl PL011State { fn update(&self) { let regs = self.regs.borrow(); let flags = regs.int_level & regs.int_enabled; + trace::trace_pl011_irq_state(flags != 0); for (irq, i) in self.interrupts.iter().zip(IRQMASK) { irq.set(flags.any_set(i)); } } - pub fn post_load(&self, _version_id: u32) -> Result<(), ()> { + pub fn post_load(&self, _version_id: u8) -> Result<(), migration::InvalidError> { self.regs.borrow_mut().post_load() } } @@ -686,7 +706,7 @@ pub unsafe extern "C" fn pl011_create( } #[repr(C)] -#[derive(qemu_api_macros::Object)] +#[derive(qom::Object, hwcore::Device)] /// PL011 Luminary device model. 
pub struct PL011Luminary { parent_obj: ParentField, @@ -712,3 +732,56 @@ impl PL011Impl for PL011Luminary { impl DeviceImpl for PL011Luminary {} impl ResettablePhasesImpl for PL011Luminary {} impl SysBusDeviceImpl for PL011Luminary {} + +/// Migration subsection for [`PL011State`] clock. +static VMSTATE_PL011_CLOCK: VMStateDescription = + VMStateDescriptionBuilder::::new() + .name(c"pl011/clock") + .version_id(1) + .minimum_version_id(1) + .needed(&PL011State::clock_needed) + .fields(vmstate_fields! { + vmstate_of!(PL011State, clock), + }) + .build(); + +impl_vmstate_struct!( + PL011Registers, + VMStateDescriptionBuilder::::new() + .name(c"pl011/regs") + .version_id(2) + .minimum_version_id(2) + .fields(vmstate_fields! { + vmstate_of!(PL011Registers, flags), + vmstate_of!(PL011Registers, line_control), + vmstate_of!(PL011Registers, receive_status_error_clear), + vmstate_of!(PL011Registers, control), + vmstate_of!(PL011Registers, dmacr), + vmstate_of!(PL011Registers, int_enabled), + vmstate_of!(PL011Registers, int_level), + vmstate_of!(PL011Registers, read_fifo), + vmstate_of!(PL011Registers, ilpr), + vmstate_of!(PL011Registers, ibrd), + vmstate_of!(PL011Registers, fbrd), + vmstate_of!(PL011Registers, ifl), + vmstate_of!(PL011Registers, read_pos), + vmstate_of!(PL011Registers, read_count), + vmstate_of!(PL011Registers, read_trigger), + }) + .build() +); + +pub const VMSTATE_PL011: VMStateDescription = + VMStateDescriptionBuilder::::new() + .name(c"pl011") + .version_id(2) + .minimum_version_id(2) + .post_load(&PL011State::post_load) + .fields(vmstate_fields! { + vmstate_unused!(core::mem::size_of::()), + vmstate_of!(PL011State, regs), + }) + .subsections(vmstate_subsections! 
{ + VMSTATE_PL011_CLOCK + }) + .build(); diff --git a/rust/hw/char/pl011/src/device_class.rs b/rust/hw/char/pl011/src/device_class.rs deleted file mode 100644 index d328d846323f6..0000000000000 --- a/rust/hw/char/pl011/src/device_class.rs +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2024, Linaro Limited -// Author(s): Manos Pitsidianakis -// SPDX-License-Identifier: GPL-2.0-or-later - -use std::{ - ffi::{c_int, c_void}, - ptr::NonNull, -}; - -use qemu_api::{ - bindings::{qdev_prop_bool, qdev_prop_chr}, - prelude::*, - vmstate::VMStateDescription, - vmstate_clock, vmstate_fields, vmstate_of, vmstate_struct, vmstate_subsections, vmstate_unused, - zeroable::Zeroable, -}; - -use crate::device::{PL011Registers, PL011State}; - -extern "C" fn pl011_clock_needed(opaque: *mut c_void) -> bool { - let state = NonNull::new(opaque).unwrap().cast::(); - unsafe { state.as_ref().migrate_clock } -} - -/// Migration subsection for [`PL011State`] clock. -static VMSTATE_PL011_CLOCK: VMStateDescription = VMStateDescription { - name: c"pl011/clock".as_ptr(), - version_id: 1, - minimum_version_id: 1, - needed: Some(pl011_clock_needed), - fields: vmstate_fields! { - vmstate_clock!(PL011State, clock), - }, - ..Zeroable::ZERO -}; - -extern "C" fn pl011_post_load(opaque: *mut c_void, version_id: c_int) -> c_int { - let state = NonNull::new(opaque).unwrap().cast::(); - let result = unsafe { state.as_ref().post_load(version_id as u32) }; - if result.is_err() { - -1 - } else { - 0 - } -} - -static VMSTATE_PL011_REGS: VMStateDescription = VMStateDescription { - name: c"pl011/regs".as_ptr(), - version_id: 2, - minimum_version_id: 2, - fields: vmstate_fields! 
{ - vmstate_of!(PL011Registers, flags), - vmstate_of!(PL011Registers, line_control), - vmstate_of!(PL011Registers, receive_status_error_clear), - vmstate_of!(PL011Registers, control), - vmstate_of!(PL011Registers, dmacr), - vmstate_of!(PL011Registers, int_enabled), - vmstate_of!(PL011Registers, int_level), - vmstate_of!(PL011Registers, read_fifo), - vmstate_of!(PL011Registers, ilpr), - vmstate_of!(PL011Registers, ibrd), - vmstate_of!(PL011Registers, fbrd), - vmstate_of!(PL011Registers, ifl), - vmstate_of!(PL011Registers, read_pos), - vmstate_of!(PL011Registers, read_count), - vmstate_of!(PL011Registers, read_trigger), - }, - ..Zeroable::ZERO -}; - -pub static VMSTATE_PL011: VMStateDescription = VMStateDescription { - name: c"pl011".as_ptr(), - version_id: 2, - minimum_version_id: 2, - post_load: Some(pl011_post_load), - fields: vmstate_fields! { - vmstate_unused!(core::mem::size_of::()), - vmstate_struct!(PL011State, regs, &VMSTATE_PL011_REGS, BqlRefCell), - }, - subsections: vmstate_subsections! { - VMSTATE_PL011_CLOCK - }, - ..Zeroable::ZERO -}; - -qemu_api::declare_properties! { - PL011_PROPERTIES, - qemu_api::define_property!( - c"chardev", - PL011State, - char_backend, - unsafe { &qdev_prop_chr }, - CharBackend - ), - qemu_api::define_property!( - c"migrate-clk", - PL011State, - migrate_clock, - unsafe { &qdev_prop_bool }, - bool, - default = true - ), -} diff --git a/rust/hw/char/pl011/src/lib.rs b/rust/hw/char/pl011/src/lib.rs index 5c4fbc9d148fc..0c19b708c0ade 100644 --- a/rust/hw/char/pl011/src/lib.rs +++ b/rust/hw/char/pl011/src/lib.rs @@ -12,8 +12,8 @@ //! See [`PL011State`](crate::device::PL011State) for the device model type and //! the [`registers`] module for register types. 
+mod bindings; mod device; -mod device_class; mod registers; pub use device::pl011_create; diff --git a/rust/hw/char/pl011/src/registers.rs b/rust/hw/char/pl011/src/registers.rs index 7ececd39f8616..fa572811b29c0 100644 --- a/rust/hw/char/pl011/src/registers.rs +++ b/rust/hw/char/pl011/src/registers.rs @@ -10,13 +10,13 @@ use bilge::prelude::*; use bits::bits; -use qemu_api::{impl_vmstate_bitsized, impl_vmstate_forward}; +use migration::{impl_vmstate_bitsized, impl_vmstate_forward}; /// Offset of each register from the base memory address of the device. #[doc(alias = "offset")] #[allow(non_camel_case_types)] #[repr(u64)] -#[derive(Debug, Eq, PartialEq, qemu_api_macros::TryInto)] +#[derive(Debug, Eq, PartialEq, common::TryInto)] pub enum RegisterOffset { /// Data Register /// @@ -255,6 +255,7 @@ pub enum Mode { #[bitsize(2)] #[derive(Clone, Copy, Debug, Eq, FromBits, PartialEq)] +#[allow(clippy::enum_variant_names)] /// `WLEN` Word length, field of [Line Control register](LineControl). /// /// These bits indicate the number of data bits transmitted or received in a diff --git a/rust/hw/char/pl011/wrapper.h b/rust/hw/char/pl011/wrapper.h new file mode 100644 index 0000000000000..87a5a589c8eac --- /dev/null +++ b/rust/hw/char/pl011/wrapper.h @@ -0,0 +1,51 @@ +/* + * QEMU System Emulator + * + * Copyright (c) 2024 Linaro Ltd. + * + * Authors: Manos Pitsidianakis + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. + */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. + */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" +#include "hw/char/pl011.h" diff --git a/rust/hw/core/Cargo.toml b/rust/hw/core/Cargo.toml new file mode 100644 index 0000000000000..ecfb56471843d --- /dev/null +++ b/rust/hw/core/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "hwcore" +version = "0.1.0" +description = "Rust bindings for QEMU/hwcore" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +glib-sys.workspace = true +qemu_macros = { path = "../../qemu-macros" } +common = { path = "../../common" } +bql = { path = "../../bql" } +qom = { path = "../../qom" } +chardev = { path = "../../chardev" } +migration = { path = "../../migration" } +system = { path = "../../system" } 
+util = { path = "../../util" } + +[lints] +workspace = true diff --git a/rust/hw/core/build.rs b/rust/hw/core/build.rs new file mode 120000 index 0000000000000..2a79ee31b8c8f --- /dev/null +++ b/rust/hw/core/build.rs @@ -0,0 +1 @@ +../../util/build.rs \ No newline at end of file diff --git a/rust/hw/core/meson.build b/rust/hw/core/meson.build new file mode 100644 index 0000000000000..1560dd20c6b22 --- /dev/null +++ b/rust/hw/core/meson.build @@ -0,0 +1,81 @@ +_hwcore_bindgen_args = [] +c_enums = [ + 'DeviceCategory', + 'GpioPolarity', + 'MachineInitPhase', + 'ResetType', +] +foreach enum : c_enums + _hwcore_bindgen_args += ['--rustified-enum', enum] +endforeach + +blocked_type = [ + 'Chardev', + 'Error', + 'ObjectClass', + 'MemoryRegion', + 'VMStateDescription', +] +foreach type: blocked_type + _hwcore_bindgen_args += ['--blocklist-type', type] +endforeach + +c_bitfields = [ + 'ClockEvent', +] +foreach enum : c_bitfields + _hwcore_bindgen_args += ['--bitfield-enum', enum] +endforeach + +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. 
In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_hwcore_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _hwcore_bindgen_args, + c_args: bindgen_c_args, +) + +_hwcore_rs = static_library( + 'hwcore', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/irq.rs', + 'src/qdev.rs', + 'src/sysbus.rs', + ], + {'.': _hwcore_bindings_inc_rs} + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + link_with: [_bql_rs, _chardev_rs, _migration_rs, _qom_rs, _system_rs, _util_rs], + dependencies: [glib_sys_rs, qemu_macros, common_rs], +) + +hwcore_rs = declare_dependency(link_with: [_hwcore_rs], + dependencies: [qom_rs, hwcore]) + +test('rust-hwcore-rs-integration', + executable( + 'rust-hwcore-rs-integration', + files('tests/tests.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: ['--test'], + install: false, + dependencies: [common_rs, hwcore_rs, bql_rs, migration_rs, util_rs]), + args: [ + '--test', '--test-threads', '1', + '--format', 'pretty', + ], + protocol: 'rust', + suite: ['unit', 'rust']) diff --git a/rust/hw/core/src/bindings.rs b/rust/hw/core/src/bindings.rs new file mode 100644 index 0000000000000..65b9aae75360e --- /dev/null +++ b/rust/hw/core/src/bindings.rs @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + 
clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use chardev::bindings::Chardev; +use common::Zeroable; +use glib_sys::{ + GArray, GByteArray, GHashTable, GHashTableIter, GList, GPtrArray, GQueue, GSList, GString, +}; +use migration::bindings::VMStateDescription; +use qom::bindings::ObjectClass; +use system::bindings::MemoryRegion; +use util::bindings::Error; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); + +unsafe impl Send for Property {} +unsafe impl Sync for Property {} + +unsafe impl Send for TypeInfo {} +unsafe impl Sync for TypeInfo {} + +unsafe impl Zeroable for Property__bindgen_ty_1 {} +unsafe impl Zeroable for Property {} diff --git a/rust/qemu-api/src/irq.rs b/rust/hw/core/src/irq.rs similarity index 93% rename from rust/qemu-api/src/irq.rs rename to rust/hw/core/src/irq.rs index 1526e6f63a178..e0d7784d97bcc 100644 --- a/rust/qemu-api/src/irq.rs +++ b/rust/hw/core/src/irq.rs @@ -10,16 +10,15 @@ use std::{ ptr, }; -use crate::{ - bindings::{self, qemu_set_irq}, - cell::Opaque, - prelude::*, - qom::ObjectClass, -}; +use bql::BqlCell; +use common::Opaque; +use qom::{prelude::*, ObjectClass}; + +use crate::bindings::{self, qemu_set_irq}; /// An opaque wrapper around [`bindings::IRQState`]. #[repr(transparent)] -#[derive(Debug, qemu_api_macros::Wrapper)] +#[derive(Debug, common::Wrapper)] pub struct IRQState(Opaque); /// Interrupt sources are used by devices to pass changes to a value (typically @@ -34,7 +33,7 @@ pub struct IRQState(Opaque); /// /// Interrupts are implemented as a pointer to the interrupt "sink", which has /// type [`IRQState`]. A device exposes its source as a QOM link property using -/// a function such as [`SysBusDeviceMethods::init_irq`], and +/// a function such as [`crate::sysbus::SysBusDeviceMethods::init_irq`], and /// initially leaves the pointer to a NULL value, representing an unconnected /// interrupt. 
To connect it, whoever creates the device fills the pointer with /// the sink's `IRQState *`, for example using `sysbus_connect_irq`. Because @@ -112,4 +111,5 @@ unsafe impl ObjectType for IRQState { const TYPE_NAME: &'static CStr = unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_IRQ) }; } + qom_isa!(IRQState: Object); diff --git a/rust/hw/core/src/lib.rs b/rust/hw/core/src/lib.rs new file mode 100644 index 0000000000000..b40801eb843f7 --- /dev/null +++ b/rust/hw/core/src/lib.rs @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub use qemu_macros::Device; +pub use qom; + +pub mod bindings; + +mod irq; +pub use irq::*; + +mod qdev; +pub use qdev::*; + +mod sysbus; +pub use sysbus::*; diff --git a/rust/qemu-api/src/qdev.rs b/rust/hw/core/src/qdev.rs similarity index 74% rename from rust/qemu-api/src/qdev.rs rename to rust/hw/core/src/qdev.rs index 36f02fb57dbff..c3097a284d738 100644 --- a/rust/qemu-api/src/qdev.rs +++ b/rust/hw/core/src/qdev.rs @@ -6,26 +6,24 @@ use std::{ ffi::{c_int, c_void, CStr, CString}, - ptr::NonNull, + ptr::{addr_of, NonNull}, }; -pub use bindings::{ClockEvent, DeviceClass, Property, ResetType}; +use chardev::Chardev; +use common::{callbacks::FnCall, Opaque}; +use migration::{impl_vmstate_c_struct, VMStateDescription}; +use qom::{prelude::*, ObjectClass, ObjectImpl, Owned, ParentInit}; +use util::{Error, Result}; +pub use crate::bindings::{ClockEvent, DeviceClass, Property, ResetType}; use crate::{ bindings::{self, qdev_init_gpio_in, qdev_init_gpio_out, ResettableClass}, - callbacks::FnCall, - cell::{bql_locked, Opaque}, - chardev::Chardev, - error::{Error, Result}, irq::InterruptSource, - prelude::*, - qom::{ObjectClass, ObjectImpl, Owned, ParentInit}, - vmstate::VMStateDescription, }; /// A safe wrapper around [`bindings::Clock`]. 
#[repr(transparent)] -#[derive(Debug, qemu_api_macros::Wrapper)] +#[derive(Debug, common::Wrapper)] pub struct Clock(Opaque); unsafe impl Send for Clock {} @@ -33,7 +31,7 @@ unsafe impl Sync for Clock {} /// A safe wrapper around [`bindings::DeviceState`]. #[repr(transparent)] -#[derive(Debug, qemu_api_macros::Wrapper)] +#[derive(Debug, common::Wrapper)] pub struct DeviceState(Opaque); unsafe impl Send for DeviceState {} @@ -101,8 +99,80 @@ unsafe extern "C" fn rust_resettable_exit_fn( T::EXIT.unwrap()(unsafe { state.as_ref() }, typ); } +/// Helper trait to return pointer to a [`bindings::PropertyInfo`] for a type. +/// +/// This trait is used by [`qemu_macros::Device`] derive macro. +/// +/// Base types that already have `qdev_prop_*` globals in the QEMU API should +/// use those values as exported by the [`bindings`] module, instead of +/// redefining them. +/// +/// # Safety +/// +/// This trait is marked as `unsafe` because `BASE_INFO` and `BIT_INFO` must be +/// valid raw references to [`bindings::PropertyInfo`]. +/// +/// Note we could not use a regular reference: +/// +/// ```text +/// const VALUE: &bindings::PropertyInfo = ... +/// ``` +/// +/// because this results in the following compiler error: +/// +/// ```text +/// constructing invalid value: encountered reference to `extern` static in `const` +/// ``` +/// +/// This is because the compiler generally might dereference a normal reference +/// during const evaluation, but not in this case (if it did, it'd need to +/// dereference the raw pointer so using a `*const` would also fail to compile). +/// +/// It is the implementer's responsibility to provide a valid +/// [`bindings::PropertyInfo`] pointer for the trait implementation to be safe. +pub unsafe trait QDevProp { + const BASE_INFO: *const bindings::PropertyInfo; + const BIT_INFO: *const bindings::PropertyInfo = { + panic!("invalid type for bit property"); + }; +} + +macro_rules! impl_qdev_prop { + ($type:ty,$info:ident$(, $bit_info:ident)?) 
=> { + unsafe impl $crate::qdev::QDevProp for $type { + const BASE_INFO: *const $crate::bindings::PropertyInfo = + addr_of!($crate::bindings::$info); + $(const BIT_INFO: *const $crate::bindings::PropertyInfo = + addr_of!($crate::bindings::$bit_info);)? + } + }; +} + +impl_qdev_prop!(bool, qdev_prop_bool); +impl_qdev_prop!(u8, qdev_prop_uint8); +impl_qdev_prop!(u16, qdev_prop_uint16); +impl_qdev_prop!(u32, qdev_prop_uint32, qdev_prop_bit); +impl_qdev_prop!(u64, qdev_prop_uint64, qdev_prop_bit64); +impl_qdev_prop!(usize, qdev_prop_usize); +impl_qdev_prop!(i32, qdev_prop_int32); +impl_qdev_prop!(i64, qdev_prop_int64); +impl_qdev_prop!(chardev::CharBackend, qdev_prop_chr); + +/// Trait to define device properties. +/// +/// # Safety +/// +/// Caller is responsible for the validity of properties array. +pub unsafe trait DevicePropertiesImpl { + /// An array providing the properties that the user can set on the + /// device. + const PROPERTIES: &'static [Property] = &[]; +} + /// Trait providing the contents of [`DeviceClass`]. -pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA { +pub trait DeviceImpl: + ObjectImpl + ResettablePhasesImpl + DevicePropertiesImpl + IsA +{ /// _Realization_ is the second stage of device creation. It contains /// all operations that depend on device properties and can fail (note: /// this is not yet supported for Rust devices). @@ -111,19 +181,10 @@ pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA { /// with the function pointed to by `REALIZE`. const REALIZE: Option Result<()>> = None; - /// An array providing the properties that the user can set on the - /// device. Not a `const` because referencing statics in constants - /// is unstable until Rust 1.83.0. - fn properties() -> &'static [Property] { - &[] - } - /// A `VMStateDescription` providing the migration format for the device /// Not a `const` because referencing statics in constants is unstable /// until Rust 1.83.0. 
- fn vmsd() -> Option<&'static VMStateDescription> { - None - } + const VMSTATE: Option> = None; } /// # Safety @@ -135,7 +196,7 @@ pub trait DeviceImpl: ObjectImpl + ResettablePhasesImpl + IsA { /// readable/writeable from one thread at any time. unsafe extern "C" fn rust_realize_fn( dev: *mut bindings::DeviceState, - errp: *mut *mut bindings::Error, + errp: *mut *mut util::bindings::Error, ) { let state = NonNull::new(dev).unwrap().cast::(); let result = T::REALIZE.unwrap()(unsafe { state.as_ref() }); @@ -172,10 +233,10 @@ impl DeviceClass { if ::REALIZE.is_some() { self.realize = Some(rust_realize_fn::); } - if let Some(vmsd) = ::vmsd() { - self.vmsd = vmsd; + if let Some(ref vmsd) = ::VMSTATE { + self.vmsd = vmsd.as_ref(); } - let prop = ::properties(); + let prop = ::PROPERTIES; if !prop.is_empty() { unsafe { bindings::device_class_set_props_n(self, prop.as_ptr(), prop.len()); @@ -187,64 +248,12 @@ impl DeviceClass { } } -#[macro_export] -macro_rules! define_property { - ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, bit = $bitnr:expr, default = $defval:expr$(,)*) => { - $crate::bindings::Property { - // use associated function syntax for type checking - name: ::std::ffi::CStr::as_ptr($name), - info: $prop, - offset: ::std::mem::offset_of!($state, $field) as isize, - bitnr: $bitnr, - set_default: true, - defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 }, - ..$crate::zeroable::Zeroable::ZERO - } - }; - ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty, default = $defval:expr$(,)*) => { - $crate::bindings::Property { - // use associated function syntax for type checking - name: ::std::ffi::CStr::as_ptr($name), - info: $prop, - offset: ::std::mem::offset_of!($state, $field) as isize, - set_default: true, - defval: $crate::bindings::Property__bindgen_ty_1 { u: $defval as u64 }, - ..$crate::zeroable::Zeroable::ZERO - } - }; - ($name:expr, $state:ty, $field:ident, $prop:expr, $type:ty$(,)*) => { - $crate::bindings::Property 
{ - // use associated function syntax for type checking - name: ::std::ffi::CStr::as_ptr($name), - info: $prop, - offset: ::std::mem::offset_of!($state, $field) as isize, - set_default: false, - ..$crate::zeroable::Zeroable::ZERO - } - }; -} - -#[macro_export] -macro_rules! declare_properties { - ($ident:ident, $($prop:expr),*$(,)*) => { - pub static $ident: [$crate::bindings::Property; { - let mut len = 0; - $({ - _ = stringify!($prop); - len += 1; - })* - len - }] = [ - $($prop),*, - ]; - }; -} - unsafe impl ObjectType for DeviceState { type Class = DeviceClass; const TYPE_NAME: &'static CStr = unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_DEVICE) }; } + qom_isa!(DeviceState: Object); /// Initialization methods take a [`ParentInit`] and can be called as @@ -275,7 +284,7 @@ impl DeviceState { cb: Option, events: ClockEvent, ) -> Owned { - assert!(bql_locked()); + assert!(bql::is_locked()); // SAFETY: the clock is heap allocated, but qdev_init_clock_in() // does not gift the reference to its caller; so use Owned::from to @@ -346,7 +355,7 @@ where Self::Target: IsA, { fn prop_set_chr(&self, propname: &str, chr: &Owned) { - assert!(bql_locked()); + assert!(bql::is_locked()); let c_propname = CString::new(propname).unwrap(); let chr: &Chardev = chr; unsafe { @@ -373,7 +382,7 @@ where } } - let _: () = F::ASSERT_IS_SOME; + const { assert!(F::IS_SOME) }; unsafe extern "C" fn rust_irq_handler FnCall<(&'a T, u32, u32)>>( opaque: *mut c_void, line: c_int, @@ -402,9 +411,45 @@ where impl DeviceMethods for R where R::Target: IsA {} +impl Clock { + pub const PERIOD_1SEC: u64 = bindings::CLOCK_PERIOD_1SEC; + + pub const fn period_from_ns(ns: u64) -> u64 { + ns * Self::PERIOD_1SEC / 1_000_000_000 + } + + pub const fn period_from_hz(hz: u64) -> u64 { + if hz == 0 { + 0 + } else { + Self::PERIOD_1SEC / hz + } + } + + pub const fn period_to_hz(period: u64) -> u64 { + if period == 0 { + 0 + } else { + Self::PERIOD_1SEC / period + } + } + + pub const fn period(&self) 
-> u64 { + // SAFETY: Clock is returned by init_clock_in with zero value for period + unsafe { &*self.0.as_ptr() }.period + } + + pub const fn hz(&self) -> u64 { + Self::period_to_hz(self.period()) + } +} + unsafe impl ObjectType for Clock { type Class = ObjectClass; const TYPE_NAME: &'static CStr = unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_CLOCK) }; } + qom_isa!(Clock: Object); + +impl_vmstate_c_struct!(Clock, bindings::vmstate_clock); diff --git a/rust/qemu-api/src/sysbus.rs b/rust/hw/core/src/sysbus.rs similarity index 90% rename from rust/qemu-api/src/sysbus.rs rename to rust/hw/core/src/sysbus.rs index e92502a8fe65d..282315fce99ff 100644 --- a/rust/qemu-api/src/sysbus.rs +++ b/rust/hw/core/src/sysbus.rs @@ -7,20 +7,19 @@ use std::{ffi::CStr, ptr::addr_of_mut}; pub use bindings::SysBusDeviceClass; +use common::Opaque; +use qom::{prelude::*, Owned}; +use system::MemoryRegion; use crate::{ bindings, - cell::{bql_locked, Opaque}, irq::{IRQState, InterruptSource}, - memory::MemoryRegion, - prelude::*, qdev::{DeviceImpl, DeviceState}, - qom::Owned, }; /// A safe wrapper around [`bindings::SysBusDevice`]. #[repr(transparent)] -#[derive(Debug, qemu_api_macros::Wrapper)] +#[derive(Debug, common::Wrapper)] pub struct SysBusDevice(Opaque); unsafe impl Send for SysBusDevice {} @@ -31,6 +30,7 @@ unsafe impl ObjectType for SysBusDevice { const TYPE_NAME: &'static CStr = unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_SYS_BUS_DEVICE) }; } + qom_isa!(SysBusDevice: DeviceState, Object); // TODO: add virtual methods @@ -55,7 +55,7 @@ where /// region with a number that corresponds to the order of calls to /// `init_mmio`. 
fn init_mmio(&self, iomem: &MemoryRegion) { - assert!(bql_locked()); + assert!(bql::is_locked()); unsafe { bindings::sysbus_init_mmio(self.upcast().as_mut_ptr(), iomem.as_mut_ptr()); } @@ -66,7 +66,7 @@ where /// whoever creates the sysbus device will refer to the interrupts with /// a number that corresponds to the order of calls to `init_irq`. fn init_irq(&self, irq: &InterruptSource) { - assert!(bql_locked()); + assert!(bql::is_locked()); unsafe { bindings::sysbus_init_irq(self.upcast().as_mut_ptr(), irq.as_ptr()); } @@ -74,7 +74,7 @@ where // TODO: do we want a type like GuestAddress here? fn mmio_addr(&self, id: u32) -> Option { - assert!(bql_locked()); + assert!(bql::is_locked()); // SAFETY: the BQL ensures that no one else writes to sbd.mmio[], and // the SysBusDevice must be initialized to get an IsA. let sbd = unsafe { *self.upcast().as_ptr() }; @@ -88,7 +88,7 @@ where // TODO: do we want a type like GuestAddress here? fn mmio_map(&self, id: u32, addr: u64) { - assert!(bql_locked()); + assert!(bql::is_locked()); let id: i32 = id.try_into().unwrap(); unsafe { bindings::sysbus_mmio_map(self.upcast().as_mut_ptr(), id, addr); @@ -99,7 +99,7 @@ where // object_property_set_link) adds a reference to the IRQState, // which can prolong its life fn connect_irq(&self, id: u32, irq: &Owned) { - assert!(bql_locked()); + assert!(bql::is_locked()); let id: i32 = id.try_into().unwrap(); let irq: &IRQState = irq; unsafe { @@ -109,11 +109,11 @@ where fn sysbus_realize(&self) { // TODO: return an Error - assert!(bql_locked()); + assert!(bql::is_locked()); unsafe { bindings::sysbus_realize( self.upcast().as_mut_ptr(), - addr_of_mut!(bindings::error_fatal), + addr_of_mut!(util::bindings::error_fatal), ); } } diff --git a/rust/qemu-api/tests/tests.rs b/rust/hw/core/tests/tests.rs similarity index 78% rename from rust/qemu-api/tests/tests.rs rename to rust/hw/core/tests/tests.rs index a658a49fcfdda..247d812866dff 100644 --- a/rust/qemu-api/tests/tests.rs +++ 
b/rust/hw/core/tests/tests.rs @@ -4,31 +4,23 @@ use std::{ffi::CStr, ptr::addr_of}; -use qemu_api::{ - bindings::{module_call_init, module_init_type, qdev_prop_bool}, - cell::{self, BqlCell}, - declare_properties, define_property, - prelude::*, - qdev::{DeviceImpl, DeviceState, Property, ResettablePhasesImpl}, - qom::{ObjectImpl, ParentField}, - sysbus::SysBusDevice, - vmstate::VMStateDescription, - zeroable::Zeroable, -}; - -mod vmstate_tests; +use bql::BqlCell; +use hwcore::{DeviceImpl, DeviceState, ResettablePhasesImpl, SysBusDevice}; +use migration::{VMStateDescription, VMStateDescriptionBuilder}; +use qom::{prelude::*, ObjectImpl, ParentField}; +use util::bindings::{module_call_init, module_init_type}; // Test that macros can compile. -pub static VMSTATE: VMStateDescription = VMStateDescription { - name: c"name".as_ptr(), - unmigratable: true, - ..Zeroable::ZERO -}; +pub const VMSTATE: VMStateDescription = VMStateDescriptionBuilder::::new() + .name(c"name") + .unmigratable() + .build(); #[repr(C)] -#[derive(qemu_api_macros::Object)] +#[derive(qom::Object, hwcore::Device)] pub struct DummyState { parent: ParentField, + #[property(rename = "migrate-clk", default = true)] migrate_clock: bool, } @@ -44,17 +36,6 @@ impl DummyClass { } } -declare_properties! 
{ - DUMMY_PROPERTIES, - define_property!( - c"migrate-clk", - DummyState, - migrate_clock, - unsafe { &qdev_prop_bool }, - bool - ), -} - unsafe impl ObjectType for DummyState { type Class = DummyClass; const TYPE_NAME: &'static CStr = c"dummy"; @@ -69,16 +50,11 @@ impl ObjectImpl for DummyState { impl ResettablePhasesImpl for DummyState {} impl DeviceImpl for DummyState { - fn properties() -> &'static [Property] { - &DUMMY_PROPERTIES - } - fn vmsd() -> Option<&'static VMStateDescription> { - Some(&VMSTATE) - } + const VMSTATE: Option> = Some(VMSTATE); } #[repr(C)] -#[derive(qemu_api_macros::Object)] +#[derive(qom::Object, hwcore::Device)] pub struct DummyChildState { parent: ParentField, } @@ -112,7 +88,7 @@ impl DummyChildClass { fn init_qom() { static ONCE: BqlCell = BqlCell::new(false); - cell::bql_start_test(); + bql::start_test(); if !ONCE.get() { unsafe { module_call_init(module_init_type::MODULE_INIT_QOM); diff --git a/rust/hw/core/wrapper.h b/rust/hw/core/wrapper.h new file mode 100644 index 0000000000000..3bdbd1249e41d --- /dev/null +++ b/rust/hw/core/wrapper.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. + */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. 
+ */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" + +#include "hw/sysbus.h" +#include "hw/clock.h" +#include "hw/qdev-clock.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/irq.h" diff --git a/rust/hw/timer/hpet/Cargo.toml b/rust/hw/timer/hpet/Cargo.toml index 6f075027843aa..f781b28d8b3ca 100644 --- a/rust/hw/timer/hpet/Cargo.toml +++ b/rust/hw/timer/hpet/Cargo.toml @@ -10,12 +10,14 @@ license.workspace = true repository.workspace = true rust-version.workspace = true -[lib] -crate-type = ["staticlib"] - [dependencies] -qemu_api = { path = "../../../qemu-api" } -qemu_api_macros = { path = "../../../qemu-api-macros" } +common = { path = "../../../common" } +util = { path = "../../../util" } +migration = { path = "../../../migration" } +bql = { path = "../../../bql" } +qom = { path = "../../../qom" } +system = { path = "../../../system" } +hwcore = { path = "../../../hw/core" } [lints] workspace = true diff --git a/rust/hw/timer/hpet/meson.build b/rust/hw/timer/hpet/meson.build index c2d7c0532ca4e..bb64b96672e55 100644 --- a/rust/hw/timer/hpet/meson.build +++ b/rust/hw/timer/hpet/meson.build @@ -4,15 +4,17 @@ _libhpet_rs = static_library( override_options: ['rust_std=2021', 'build.rust_std=2021'], rust_abi: 'rust', dependencies: [ - qemu_api, - qemu_api_macros, + common_rs, + util_rs, + migration_rs, + bql_rs, + qom_rs, + system_rs, + hwcore_rs, ], ) rust_devices_ss.add(when: 'CONFIG_X_HPET_RUST', if_true: [declare_dependency( link_whole: [_libhpet_rs], - # Putting proc macro crates in `dependencies` is necessary for Meson to find - # them when compiling the root per-target static rust lib. 
- dependencies: [qemu_api_macros], variables: {'crate': 'hpet'}, )]) diff --git a/rust/hw/timer/hpet/src/device.rs b/rust/hw/timer/hpet/src/device.rs index acf7251029e91..86638c0766668 100644 --- a/rust/hw/timer/hpet/src/device.rs +++ b/rust/hw/timer/hpet/src/device.rs @@ -3,34 +3,29 @@ // SPDX-License-Identifier: GPL-2.0-or-later use std::{ - ffi::{c_int, c_void, CStr}, + ffi::CStr, mem::MaybeUninit, pin::Pin, ptr::{addr_of_mut, null_mut, NonNull}, slice::from_ref, }; -use qemu_api::{ - bindings::{ - address_space_memory, address_space_stl_le, qdev_prop_bit, qdev_prop_bool, - qdev_prop_uint32, qdev_prop_usize, - }, - cell::{BqlCell, BqlRefCell}, - irq::InterruptSource, - memory::{ - hwaddr, MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder, MEMTXATTRS_UNSPECIFIED, - }, - prelude::*, - qdev::{DeviceImpl, DeviceState, Property, ResetType, ResettablePhasesImpl}, - qom::{ObjectImpl, ObjectType, ParentField, ParentInit}, - qom_isa, - sysbus::{SysBusDevice, SysBusDeviceImpl}, - timer::{Timer, CLOCK_VIRTUAL, NANOSECONDS_PER_SECOND}, - uninit_field_mut, - vmstate::VMStateDescription, - vmstate_fields, vmstate_of, vmstate_struct, vmstate_subsections, vmstate_validate, - zeroable::Zeroable, +use bql::{BqlCell, BqlRefCell}; +use common::{bitops::IntegerExt, uninit_field_mut}; +use hwcore::{ + DeviceImpl, DeviceMethods, DeviceState, InterruptSource, ResetType, ResettablePhasesImpl, + SysBusDevice, SysBusDeviceImpl, SysBusDeviceMethods, }; +use migration::{ + self, impl_vmstate_struct, vmstate_fields, vmstate_of, vmstate_subsections, vmstate_validate, + VMStateDescription, VMStateDescriptionBuilder, +}; +use qom::{prelude::*, ObjectImpl, ParentField, ParentInit}; +use system::{ + bindings::{address_space_memory, address_space_stl_le, hwaddr}, + MemoryRegion, MemoryRegionOps, MemoryRegionOpsBuilder, MEMTXATTRS_UNSPECIFIED, +}; +use util::timer::{Timer, CLOCK_VIRTUAL, NANOSECONDS_PER_SECOND}; use crate::fw_cfg::HPETFwConfig; @@ -101,7 +96,7 @@ const 
HPET_TN_CFG_FSB_CAP_SHIFT: usize = 15; /// Timer N Interrupt Routing Capability (bits 32:63) const HPET_TN_CFG_INT_ROUTE_CAP_SHIFT: usize = 32; -#[derive(qemu_api_macros::TryInto)] +#[derive(common::TryInto)] #[repr(u64)] #[allow(non_camel_case_types)] /// Timer registers, masked by 0x18 @@ -114,7 +109,7 @@ enum TimerRegister { ROUTE = 16, } -#[derive(qemu_api_macros::TryInto)] +#[derive(common::TryInto)] #[repr(u64)] #[allow(non_camel_case_types)] /// Global registers @@ -213,6 +208,10 @@ pub struct HPETTimer { last: u64, } +// SAFETY: Sync is not automatically derived due to the `state` field, +// which is always dereferenced to a shared reference. +unsafe impl Sync for HPETTimer {} + impl HPETTimer { fn new(index: u8, state: *const HPETState) -> HPETTimer { HPETTimer { @@ -520,7 +519,7 @@ impl HPETTimer { /// HPET Event Timer Block Abstraction #[repr(C)] -#[derive(qemu_api_macros::Object)] +#[derive(qom::Object, hwcore::Device)] pub struct HPETState { parent_obj: ParentField, iomem: MemoryRegion, @@ -540,10 +539,12 @@ pub struct HPETState { // Internal state /// Capabilities that QEMU HPET supports. /// bit 0: MSI (or FSB) support. + #[property(rename = "msi", bit = HPET_FLAG_MSI_SUPPORT_SHIFT as u8, default = false)] flags: u32, /// Offset of main counter relative to qemu clock. hpet_offset: BqlCell, + #[property(rename = "hpet-offset-saved", default = true)] hpet_offset_saved: bool, irqs: [InterruptSource; HPET_NUM_IRQ_ROUTES], @@ -555,11 +556,13 @@ pub struct HPETState { /// the timers' interrupt can be routed, and is encoded in the /// bits 32:64 of timer N's config register: #[doc(alias = "intcap")] + #[property(rename = "hpet-intcap", default = 0)] int_route_cap: u32, /// HPET timer array managed by this timer block. 
#[doc(alias = "timer")] timers: [BqlRefCell; HPET_MAX_TIMERS], + #[property(rename = "timers", default = HPET_MIN_TIMERS)] num_timers: usize, num_timers_save: BqlCell, @@ -724,7 +727,7 @@ impl HPETState { } } - fn realize(&self) -> qemu_api::Result<()> { + fn realize(&self) -> util::Result<()> { if self.num_timers < HPET_MIN_TIMERS || self.num_timers > HPET_MAX_TIMERS { Err(format!( "hpet.num_timers must be between {HPET_MIN_TIMERS} and {HPET_MAX_TIMERS}" @@ -841,7 +844,7 @@ impl HPETState { } } - fn pre_save(&self) -> i32 { + fn pre_save(&self) -> Result<(), migration::Infallible> { if self.is_hpet_enabled() { self.counter.set(self.get_ticks()); } @@ -852,10 +855,10 @@ impl HPETState { * that was configured. */ self.num_timers_save.set(self.num_timers as u8); - 0 + Ok(()) } - fn post_load(&self, _version_id: u8) -> i32 { + fn post_load(&self, _version_id: u8) -> Result<(), migration::Infallible> { for timer in self.timers.iter().take(self.num_timers) { let mut t = timer.borrow_mut(); @@ -869,7 +872,7 @@ impl HPETState { .set(ticks_to_ns(self.counter.get()) - CLOCK_VIRTUAL.get_ns()); } - 0 + Ok(()) } fn is_rtc_irq_level_needed(&self) -> bool { @@ -901,146 +904,71 @@ impl ObjectImpl for HPETState { const CLASS_INIT: fn(&mut Self::Class) = Self::Class::class_init::; } -// TODO: Make these properties user-configurable! -qemu_api::declare_properties! 
{ - HPET_PROPERTIES, - qemu_api::define_property!( - c"timers", - HPETState, - num_timers, - unsafe { &qdev_prop_usize }, - u8, - default = HPET_MIN_TIMERS - ), - qemu_api::define_property!( - c"msi", - HPETState, - flags, - unsafe { &qdev_prop_bit }, - u32, - bit = HPET_FLAG_MSI_SUPPORT_SHIFT as u8, - default = false, - ), - qemu_api::define_property!( - c"hpet-intcap", - HPETState, - int_route_cap, - unsafe { &qdev_prop_uint32 }, - u32, - default = 0 - ), - qemu_api::define_property!( - c"hpet-offset-saved", - HPETState, - hpet_offset_saved, - unsafe { &qdev_prop_bool }, - bool, - default = true - ), -} - -unsafe extern "C" fn hpet_rtc_irq_level_needed(opaque: *mut c_void) -> bool { - // SAFETY: - // the pointer is convertible to a reference - let state: &HPETState = unsafe { NonNull::new(opaque.cast::()).unwrap().as_ref() }; - state.is_rtc_irq_level_needed() -} - -unsafe extern "C" fn hpet_offset_needed(opaque: *mut c_void) -> bool { - // SAFETY: - // the pointer is convertible to a reference - let state: &HPETState = unsafe { NonNull::new(opaque.cast::()).unwrap().as_ref() }; - state.is_offset_needed() -} - -unsafe extern "C" fn hpet_pre_save(opaque: *mut c_void) -> c_int { - // SAFETY: - // the pointer is convertible to a reference - let state: &mut HPETState = - unsafe { NonNull::new(opaque.cast::()).unwrap().as_mut() }; - state.pre_save() as c_int -} - -unsafe extern "C" fn hpet_post_load(opaque: *mut c_void, version_id: c_int) -> c_int { - // SAFETY: - // the pointer is convertible to a reference - let state: &mut HPETState = - unsafe { NonNull::new(opaque.cast::()).unwrap().as_mut() }; - let version: u8 = version_id.try_into().unwrap(); - state.post_load(version) as c_int -} - -static VMSTATE_HPET_RTC_IRQ_LEVEL: VMStateDescription = VMStateDescription { - name: c"hpet/rtc_irq_level".as_ptr(), - version_id: 1, - minimum_version_id: 1, - needed: Some(hpet_rtc_irq_level_needed), - fields: vmstate_fields! 
{ - vmstate_of!(HPETState, rtc_irq_level), - }, - ..Zeroable::ZERO -}; - -static VMSTATE_HPET_OFFSET: VMStateDescription = VMStateDescription { - name: c"hpet/offset".as_ptr(), - version_id: 1, - minimum_version_id: 1, - needed: Some(hpet_offset_needed), - fields: vmstate_fields! { - vmstate_of!(HPETState, hpet_offset), - }, - ..Zeroable::ZERO -}; - -static VMSTATE_HPET_TIMER: VMStateDescription = VMStateDescription { - name: c"hpet_timer".as_ptr(), - version_id: 1, - minimum_version_id: 1, - fields: vmstate_fields! { - vmstate_of!(HPETTimer, index), - vmstate_of!(HPETTimer, config), - vmstate_of!(HPETTimer, cmp), - vmstate_of!(HPETTimer, fsb), - vmstate_of!(HPETTimer, period), - vmstate_of!(HPETTimer, wrap_flag), - vmstate_of!(HPETTimer, qemu_timer), - }, - ..Zeroable::ZERO -}; +static VMSTATE_HPET_RTC_IRQ_LEVEL: VMStateDescription = + VMStateDescriptionBuilder::::new() + .name(c"hpet/rtc_irq_level") + .version_id(1) + .minimum_version_id(1) + .needed(&HPETState::is_rtc_irq_level_needed) + .fields(vmstate_fields! { + vmstate_of!(HPETState, rtc_irq_level), + }) + .build(); + +static VMSTATE_HPET_OFFSET: VMStateDescription = + VMStateDescriptionBuilder::::new() + .name(c"hpet/offset") + .version_id(1) + .minimum_version_id(1) + .needed(&HPETState::is_offset_needed) + .fields(vmstate_fields! { + vmstate_of!(HPETState, hpet_offset), + }) + .build(); + +const VMSTATE_HPET_TIMER: VMStateDescription = + VMStateDescriptionBuilder::::new() + .name(c"hpet_timer") + .version_id(1) + .minimum_version_id(1) + .fields(vmstate_fields! 
{ + vmstate_of!(HPETTimer, index), + vmstate_of!(HPETTimer, config), + vmstate_of!(HPETTimer, cmp), + vmstate_of!(HPETTimer, fsb), + vmstate_of!(HPETTimer, period), + vmstate_of!(HPETTimer, wrap_flag), + vmstate_of!(HPETTimer, qemu_timer), + }) + .build(); +impl_vmstate_struct!(HPETTimer, VMSTATE_HPET_TIMER); const VALIDATE_TIMERS_NAME: &CStr = c"num_timers must match"; -static VMSTATE_HPET: VMStateDescription = VMStateDescription { - name: c"hpet".as_ptr(), - version_id: 2, - minimum_version_id: 2, - pre_save: Some(hpet_pre_save), - post_load: Some(hpet_post_load), - fields: vmstate_fields! { - vmstate_of!(HPETState, config), - vmstate_of!(HPETState, int_status), - vmstate_of!(HPETState, counter), - vmstate_of!(HPETState, num_timers_save), - vmstate_validate!(HPETState, VALIDATE_TIMERS_NAME, HPETState::validate_num_timers), - vmstate_struct!(HPETState, timers[0 .. num_timers_save], &VMSTATE_HPET_TIMER, BqlRefCell, HPETState::validate_num_timers).with_version_id(0), - }, - subsections: vmstate_subsections! { - VMSTATE_HPET_RTC_IRQ_LEVEL, - VMSTATE_HPET_OFFSET, - }, - ..Zeroable::ZERO -}; +const VMSTATE_HPET: VMStateDescription = + VMStateDescriptionBuilder::::new() + .name(c"hpet") + .version_id(2) + .minimum_version_id(2) + .pre_save(&HPETState::pre_save) + .post_load(&HPETState::post_load) + .fields(vmstate_fields! { + vmstate_of!(HPETState, config), + vmstate_of!(HPETState, int_status), + vmstate_of!(HPETState, counter), + vmstate_of!(HPETState, num_timers_save), + vmstate_validate!(HPETState, VALIDATE_TIMERS_NAME, HPETState::validate_num_timers), + vmstate_of!(HPETState, timers[0 .. 
num_timers_save], HPETState::validate_num_timers).with_version_id(0), + }) + .subsections(vmstate_subsections!( + VMSTATE_HPET_RTC_IRQ_LEVEL, + VMSTATE_HPET_OFFSET, + )) + .build(); impl DeviceImpl for HPETState { - fn properties() -> &'static [Property] { - &HPET_PROPERTIES - } - - fn vmsd() -> Option<&'static VMStateDescription> { - Some(&VMSTATE_HPET) - } - - const REALIZE: Option qemu_api::Result<()>> = Some(Self::realize); + const VMSTATE: Option> = Some(VMSTATE_HPET); + const REALIZE: Option util::Result<()>> = Some(Self::realize); } impl ResettablePhasesImpl for HPETState { diff --git a/rust/hw/timer/hpet/src/fw_cfg.rs b/rust/hw/timer/hpet/src/fw_cfg.rs index 619d662ee1e75..bb4ea8909ad9e 100644 --- a/rust/hw/timer/hpet/src/fw_cfg.rs +++ b/rust/hw/timer/hpet/src/fw_cfg.rs @@ -4,7 +4,7 @@ use std::ptr::addr_of_mut; -use qemu_api::{cell::bql_locked, zeroable::Zeroable}; +use common::Zeroable; /// Each `HPETState` represents a Event Timer Block. The v1 spec supports /// up to 8 blocks. QEMU only uses 1 block (in PC machine). @@ -37,10 +37,10 @@ pub static mut hpet_fw_cfg: HPETFwConfig = HPETFwConfig { impl HPETFwConfig { pub(crate) fn assign_hpet_id() -> Result { - assert!(bql_locked()); + assert!(bql::is_locked()); // SAFETY: all accesses go through these methods, which guarantee // that the accesses are protected by the BQL. - let mut fw_cfg = unsafe { *addr_of_mut!(hpet_fw_cfg) }; + let fw_cfg = unsafe { &mut *addr_of_mut!(hpet_fw_cfg) }; if fw_cfg.count == u8::MAX { // first instance @@ -57,10 +57,10 @@ impl HPETFwConfig { } pub(crate) fn update_hpet_cfg(hpet_id: usize, timer_block_id: u32, address: u64) { - assert!(bql_locked()); + assert!(bql::is_locked()); // SAFETY: all accesses go through these methods, which guarantee // that the accesses are protected by the BQL. 
- let mut fw_cfg = unsafe { *addr_of_mut!(hpet_fw_cfg) }; + let fw_cfg = unsafe { &mut *addr_of_mut!(hpet_fw_cfg) }; fw_cfg.hpet[hpet_id].event_timer_block_id = timer_block_id; fw_cfg.hpet[hpet_id].address = address; diff --git a/rust/meson.build b/rust/meson.build index 331f11b7e72a4..76e10699b371a 100644 --- a/rust/meson.build +++ b/rust/meson.build @@ -2,30 +2,41 @@ subproject('anyhow-1-rs', required: true) subproject('bilge-0.2-rs', required: true) subproject('bilge-impl-0.2-rs', required: true) subproject('foreign-0.3-rs', required: true) +subproject('glib-sys-0.21-rs', required: true) subproject('libc-0.2-rs', required: true) anyhow_rs = dependency('anyhow-1-rs') bilge_rs = dependency('bilge-0.2-rs') bilge_impl_rs = dependency('bilge-impl-0.2-rs') foreign_rs = dependency('foreign-0.3-rs') +glib_sys_rs = dependency('glib-sys-0.21-rs') libc_rs = dependency('libc-0.2-rs') subproject('proc-macro2-1-rs', required: true) subproject('quote-1-rs', required: true) subproject('syn-2-rs', required: true) +subproject('attrs-0.2-rs', required: true) quote_rs_native = dependency('quote-1-rs', native: true) syn_rs_native = dependency('syn-2-rs', native: true) proc_macro2_rs_native = dependency('proc-macro2-1-rs', native: true) - -qemuutil_rs = qemuutil.partial_dependency(link_args: true, links: true) +attrs_rs_native = dependency('attrs-0.2-rs', native: true) genrs = [] -subdir('qemu-api-macros') -subdir('bits') -subdir('qemu-api') +subdir('qemu-macros') +subdir('common') +subdir('bits') +subdir('util') +subdir('bql') +subdir('migration') +subdir('qom') +subdir('system') +subdir('chardev') +subdir('hw/core') +subdir('tests') +subdir('trace') subdir('hw') cargo = find_program('cargo', required: false) diff --git a/rust/migration/Cargo.toml b/rust/migration/Cargo.toml new file mode 100644 index 0000000000000..415457496d647 --- /dev/null +++ b/rust/migration/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "migration" +version = "0.1.0" +description = "Rust bindings for 
QEMU/migration" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +bql = { path = "../bql" } +common = { path = "../common" } +qemu_macros = { path = "../qemu-macros" } +util = { path = "../util" } +glib-sys.workspace = true + +[lints] +workspace = true diff --git a/rust/migration/build.rs b/rust/migration/build.rs new file mode 120000 index 0000000000000..71a3167885c23 --- /dev/null +++ b/rust/migration/build.rs @@ -0,0 +1 @@ +../util/build.rs \ No newline at end of file diff --git a/rust/migration/meson.build b/rust/migration/meson.build new file mode 100644 index 0000000000000..444494700ad02 --- /dev/null +++ b/rust/migration/meson.build @@ -0,0 +1,54 @@ +_migration_bindgen_args = [] +c_bitfields = [ + 'MigrationPolicy', + 'MigrationPriority', + 'VMStateFlags', +] +foreach enum : c_bitfields + _migration_bindgen_args += ['--bitfield-enum', enum] +endforeach +# +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_migration_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _migration_bindgen_args, + c_args: bindgen_c_args, +) + +_migration_rs = static_library( + 'migration', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/migratable.rs', + 'src/vmstate.rs', + ], + {'.' 
: _migration_bindings_inc_rs}, + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + link_with: [_util_rs, _bql_rs], + dependencies: [common_rs, glib_sys_rs, qemu_macros], +) + +migration_rs = declare_dependency(link_with: [_migration_rs], + dependencies: [bql_rs, migration, qemuutil]) + +# Doctests are essentially integration tests, so they need the same dependencies. +# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". +rust.doctest('rust-migration-rs-doctests', + _migration_rs, + dependencies: migration_rs, + suite: ['doc', 'rust']) diff --git a/rust/migration/src/bindings.rs b/rust/migration/src/bindings.rs new file mode 100644 index 0000000000000..24503eb69bd68 --- /dev/null +++ b/rust/migration/src/bindings.rs @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use common::Zeroable; +use glib_sys::{GHashTable, GHashTableIter, GList, GPtrArray, GQueue, GSList}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); + +unsafe impl Send for VMStateDescription {} +unsafe impl Sync for VMStateDescription {} + +unsafe impl Send for VMStateField {} +unsafe impl Sync for VMStateField {} + +unsafe impl Send for VMStateInfo {} +unsafe impl Sync for VMStateInfo {} + +// bindgen does not derive Default here +#[allow(clippy::derivable_impls)] +impl Default for VMStateFlags { + fn default() -> Self { + Self(0) + } +} + +unsafe impl Zeroable for 
VMStateFlags {} +unsafe impl Zeroable for VMStateField {} +unsafe impl Zeroable for VMStateDescription {} diff --git a/rust/migration/src/lib.rs b/rust/migration/src/lib.rs new file mode 100644 index 0000000000000..c9bdf0d413358 --- /dev/null +++ b/rust/migration/src/lib.rs @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub mod bindings; + +pub use qemu_macros::ToMigrationState; + +pub mod migratable; +pub use migratable::*; + +pub mod vmstate; +pub use vmstate::*; diff --git a/rust/migration/src/migratable.rs b/rust/migration/src/migratable.rs new file mode 100644 index 0000000000000..ded6fe8f4a6c5 --- /dev/null +++ b/rust/migration/src/migratable.rs @@ -0,0 +1,442 @@ +// Copyright 2025 Red Hat, Inc. +// Author(s): Paolo Bonzini +// SPDX-License-Identifier: GPL-2.0-or-later + +use std::{ + fmt, + mem::size_of, + ptr::{self, addr_of, NonNull}, + sync::{Arc, Mutex}, +}; + +use bql::{BqlCell, BqlRefCell}; +use common::Zeroable; + +use crate::{ + bindings, vmstate_fields_ref, vmstate_of, InvalidError, VMState, VMStateDescriptionBuilder, +}; + +/// Enables QEMU migration support even when a type is wrapped with +/// synchronization primitives (like `Mutex`) that the C migration +/// code cannot directly handle. The trait provides methods to +/// extract essential state for migration and restore it after +/// migration completes. +/// +/// On top of extracting data from synchronization wrappers during save +/// and restoring it during load, it's also possible to use `ToMigrationState` +/// to convert runtime representations to migration-safe formats. 
+/// +/// # Examples +/// +/// ``` +/// use bql::BqlCell; +/// use migration::{InvalidError, ToMigrationState, VMState}; +/// # use migration::VMStateField; +/// +/// # #[derive(Debug, PartialEq, Eq)] +/// struct DeviceState { +/// counter: BqlCell, +/// enabled: bool, +/// } +/// +/// # #[derive(Debug)] +/// #[derive(Default)] +/// struct DeviceMigrationState { +/// counter: u32, +/// enabled: bool, +/// } +/// +/// # unsafe impl VMState for DeviceMigrationState { +/// # const BASE: VMStateField = ::common::Zeroable::ZERO; +/// # } +/// impl ToMigrationState for DeviceState { +/// type Migrated = DeviceMigrationState; +/// +/// fn snapshot_migration_state( +/// &self, +/// target: &mut Self::Migrated, +/// ) -> Result<(), InvalidError> { +/// target.counter = self.counter.get(); +/// target.enabled = self.enabled; +/// Ok(()) +/// } +/// +/// fn restore_migrated_state_mut( +/// &mut self, +/// source: Self::Migrated, +/// _version_id: u8, +/// ) -> Result<(), InvalidError> { +/// self.counter.set(source.counter); +/// self.enabled = source.enabled; +/// Ok(()) +/// } +/// } +/// # bql::start_test(); +/// # let dev = DeviceState { counter: 10.into(), enabled: true }; +/// # let mig = dev.to_migration_state().unwrap(); +/// # assert!(matches!(*mig, DeviceMigrationState { counter: 10, enabled: true })); +/// # let mut dev2 = DeviceState { counter: 42.into(), enabled: false }; +/// # dev2.restore_migrated_state_mut(*mig, 1).unwrap(); +/// # assert_eq!(dev2, dev); +/// ``` +/// +/// More commonly, the trait is derived through the +/// [`derive(ToMigrationState)`](qemu_macros::ToMigrationState) procedural +/// macro. +pub trait ToMigrationState { + /// The type used to represent the migrated state. + type Migrated: Default + VMState; + + /// Capture the current state into a migration-safe format, failing + /// if the state cannot be migrated. 
+ fn snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), InvalidError>; + + /// Restores state from a migrated representation, failing if the + /// state cannot be restored. + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError>; + + /// Convenience method to combine allocation and state capture + /// into a single operation. + fn to_migration_state(&self) -> Result, InvalidError> { + let mut migrated = Box::::default(); + self.snapshot_migration_state(&mut migrated)?; + Ok(migrated) + } +} + +// Implementations for primitive types. Do not use a blanket implementation +// for all Copy types, because [T; N] is Copy if T is Copy; that would conflict +// with the below implementation for arrays. +macro_rules! impl_for_primitive { + ($($t:ty),*) => { + $( + impl ToMigrationState for $t { + type Migrated = Self; + + fn snapshot_migration_state( + &self, + target: &mut Self::Migrated, + ) -> Result<(), InvalidError> { + *target = *self; + Ok(()) + } + + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + _version_id: u8, + ) -> Result<(), InvalidError> { + *self = source; + Ok(()) + } + } + )* + }; +} + +impl_for_primitive!(u8, u16, u32, u64, i8, i16, i32, i64, bool); + +impl ToMigrationState for [T; N] +where + [T::Migrated; N]: Default, +{ + type Migrated = [T::Migrated; N]; + + fn snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), InvalidError> { + for (item, target_item) in self.iter().zip(target.iter_mut()) { + item.snapshot_migration_state(target_item)?; + } + Ok(()) + } + + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + for (item, source_item) in self.iter_mut().zip(source) { + item.restore_migrated_state_mut(source_item, version_id)?; + } + Ok(()) + } +} + +impl ToMigrationState for Mutex { + type Migrated = T::Migrated; + + fn 
snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), InvalidError> { + self.lock().unwrap().snapshot_migration_state(target) + } + + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + self.get_mut() + .unwrap() + .restore_migrated_state_mut(source, version_id) + } +} + +impl ToMigrationState for BqlRefCell { + type Migrated = T::Migrated; + + fn snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), InvalidError> { + self.borrow().snapshot_migration_state(target) + } + + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + self.get_mut() + .restore_migrated_state_mut(source, version_id) + } +} + +/// Extension trait for types that support migration state restoration +/// through interior mutability. +/// +/// This trait extends [`ToMigrationState`] for types that can restore +/// their state without requiring mutable access. While user structs +/// will generally use `ToMigrationState`, the device will have multiple +/// references and therefore the device struct has to employ an interior +/// mutability wrapper like [`Mutex`] or [`BqlRefCell`]. +/// +/// Anything that implements this trait can in turn be used within +/// [`Migratable`], which makes no assumptions on how to achieve mutable +/// access to the runtime state. +/// +/// # Examples +/// +/// ``` +/// use std::sync::Mutex; +/// +/// use migration::ToMigrationStateShared; +/// +/// let device_state = Mutex::new(42); +/// // Can restore without &mut access +/// device_state.restore_migrated_state(100, 1).unwrap(); +/// assert_eq!(*device_state.lock().unwrap(), 100); +/// ``` +pub trait ToMigrationStateShared: ToMigrationState { + /// Restores state from a migrated representation to an interior-mutable + /// object. 
Similar to `restore_migrated_state_mut`, but requires a + /// shared reference; therefore it can be used to restore a device's + /// state even though devices have multiple references to them. + fn restore_migrated_state( + &self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError>; +} + +impl ToMigrationStateShared for [T; N] +where + [T::Migrated; N]: Default, +{ + fn restore_migrated_state( + &self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + for (item, source_item) in self.iter().zip(source) { + item.restore_migrated_state(source_item, version_id)?; + } + Ok(()) + } +} + +// Arc requires the contained object to be interior-mutable +impl ToMigrationState for Arc { + type Migrated = T::Migrated; + + fn snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), InvalidError> { + (**self).snapshot_migration_state(target) + } + + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + (**self).restore_migrated_state(source, version_id) + } +} + +impl ToMigrationStateShared for Arc { + fn restore_migrated_state( + &self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + (**self).restore_migrated_state(source, version_id) + } +} + +// Interior-mutable types. Note how they only require ToMigrationState for +// the inner type! + +impl ToMigrationStateShared for Mutex { + fn restore_migrated_state( + &self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + self.lock() + .unwrap() + .restore_migrated_state_mut(source, version_id) + } +} + +impl ToMigrationStateShared for BqlRefCell { + fn restore_migrated_state( + &self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), InvalidError> { + self.borrow_mut() + .restore_migrated_state_mut(source, version_id) + } +} + +/// A wrapper that enables QEMU migration for types with shared state. 
+/// +/// `Migratable` provides a bridge between Rust types that use interior +/// mutability (like `Mutex`) and QEMU's C-based migration infrastructure. +/// It manages the lifecycle of migration state and provides automatic +/// conversion between runtime and migration representations. +/// +/// ``` +/// # use std::sync::Mutex; +/// # use migration::{Migratable, ToMigrationState, VMState, VMStateField}; +/// +/// #[derive(ToMigrationState)] +/// pub struct DeviceRegs { +/// status: u32, +/// } +/// # unsafe impl VMState for DeviceRegsMigration { +/// # const BASE: VMStateField = ::common::Zeroable::ZERO; +/// # } +/// +/// pub struct SomeDevice { +/// // ... +/// registers: Migratable>, +/// } +/// ``` +#[repr(C)] +pub struct Migratable { + /// Pointer to migration state, valid only during migration operations. + /// C vmstate does not support NULL pointers, so no `Option>`. + migration_state: BqlCell<*mut T::Migrated>, + + /// The runtime state that can be accessed during normal operation + runtime_state: T, +} + +impl std::ops::Deref for Migratable { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.runtime_state + } +} + +impl std::ops::DerefMut for Migratable { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.runtime_state + } +} + +impl Migratable { + /// Creates a new `Migratable` wrapper around the given runtime state. 
+ /// + /// # Returns + /// A new `Migratable` instance ready for use and migration + pub fn new(runtime_state: T) -> Self { + Self { + migration_state: BqlCell::new(ptr::null_mut()), + runtime_state, + } + } + + fn pre_save(&self) -> Result<(), InvalidError> { + let state = self.runtime_state.to_migration_state()?; + self.migration_state.set(Box::into_raw(state)); + Ok(()) + } + + fn post_save(&self) -> Result<(), InvalidError> { + let state = unsafe { Box::from_raw(self.migration_state.replace(ptr::null_mut())) }; + drop(state); + Ok(()) + } + + fn pre_load(&self) -> Result<(), InvalidError> { + self.migration_state + .set(Box::into_raw(Box::::default())); + Ok(()) + } + + fn post_load(&self, version_id: u8) -> Result<(), InvalidError> { + let state = unsafe { Box::from_raw(self.migration_state.replace(ptr::null_mut())) }; + self.runtime_state + .restore_migrated_state(*state, version_id) + } +} + +impl fmt::Debug for Migratable +where + T::Migrated: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut struct_f = f.debug_struct("Migratable"); + struct_f.field("runtime_state", &self.runtime_state); + + let state = NonNull::new(self.migration_state.get()).map(|x| unsafe { x.as_ref() }); + struct_f.field("migration_state", &state); + struct_f.finish() + } +} + +impl Default for Migratable { + fn default() -> Self { + Self::new(T::default()) + } +} + +impl Migratable { + const FIELD: bindings::VMStateField = vmstate_of!(Self, migration_state); + + const FIELDS: &[bindings::VMStateField] = vmstate_fields_ref! 
{ + Migratable::::FIELD + }; + + const VMSD: &'static bindings::VMStateDescription = VMStateDescriptionBuilder::::new() + .version_id(1) + .minimum_version_id(1) + .pre_save(&Self::pre_save) + .pre_load(&Self::pre_load) + .post_save(&Self::post_save) + .post_load(&Self::post_load) + .fields(Self::FIELDS) + .build() + .as_ref(); +} + +unsafe impl VMState for Migratable { + const BASE: bindings::VMStateField = { + bindings::VMStateField { + vmsd: addr_of!(*Self::VMSD), + size: size_of::(), + flags: bindings::VMStateFlags::VMS_STRUCT, + ..Zeroable::ZERO + } + }; +} diff --git a/rust/qemu-api/src/vmstate.rs b/rust/migration/src/vmstate.rs similarity index 55% rename from rust/qemu-api/src/vmstate.rs rename to rust/migration/src/vmstate.rs index 812f390d78028..42e5df8d818ff 100644 --- a/rust/qemu-api/src/vmstate.rs +++ b/rust/migration/src/vmstate.rs @@ -11,10 +11,11 @@ //! migration format for a struct. This is based on the [`VMState`] trait, //! which is defined by all migratable types. //! -//! * [`impl_vmstate_forward`](crate::impl_vmstate_forward) and -//! [`impl_vmstate_bitsized`](crate::impl_vmstate_bitsized), which help with -//! the definition of the [`VMState`] trait (respectively for transparent -//! structs and for `bilge`-defined types) +//! * [`impl_vmstate_forward`](crate::impl_vmstate_forward), +//! [`impl_vmstate_bitsized`](crate::impl_vmstate_bitsized), and +//! [`impl_vmstate_struct`](crate::impl_vmstate_struct), which help with the +//! definition of the [`VMState`] trait (respectively for transparent structs, +//! nested structs and `bilge`-defined types) //! //! * helper macros to declare a device model state struct, in particular //! [`vmstate_subsections`](crate::vmstate_subsections) and @@ -24,14 +25,25 @@ //! `include/migration/vmstate.h`. These are not type-safe and only provide //! functionality that is missing from `vmstate_of!`. 
-use core::{marker::PhantomData, mem, ptr::NonNull}; -use std::ffi::{c_int, c_void}; +pub use std::convert::Infallible; +use std::{ + error::Error, + ffi::{c_int, c_void, CStr}, + fmt, io, + marker::PhantomData, + mem, + ptr::{addr_of, NonNull}, +}; -pub use crate::bindings::{VMStateDescription, VMStateField}; -use crate::{ - bindings::VMStateFlags, callbacks::FnCall, prelude::*, qom::Owned, zeroable::Zeroable, +use common::{ + callbacks::FnCall, + errno::{into_neg_errno, Errno}, + Zeroable, }; +use crate::bindings::{self, VMStateFlags}; +pub use crate::bindings::{MigrationPriority, VMStateField}; + /// This macro is used to call a function with a generic argument bound /// to the type of a field. The function must take a /// [`PhantomData`]`` argument; `T` is the type of @@ -40,7 +52,7 @@ use crate::{ /// # Examples /// /// ``` -/// # use qemu_api::call_func_with_field; +/// # use migration::call_func_with_field; /// # use core::marker::PhantomData; /// const fn size_of_field(_: PhantomData) -> usize { /// std::mem::size_of::() @@ -60,6 +72,7 @@ macro_rules! call_func_with_field { ($func:expr, $typ:ty, $($field:tt).+) => { $func(loop { #![allow(unreachable_code)] + #![allow(unused_variables)] const fn phantom__(_: &T) -> ::core::marker::PhantomData { ::core::marker::PhantomData } // Unreachable code is exempt from checks on uninitialized values. // Use that trick to infer the type of this PhantomData. @@ -69,70 +82,6 @@ macro_rules! call_func_with_field { }; } -/// Workaround for lack of `const_refs_static`: references to global variables -/// can be included in a `static`, but not in a `const`; unfortunately, this -/// is exactly what would go in the `VMStateField`'s `info` member. -/// -/// This enum contains the contents of the `VMStateField`'s `info` member, -/// but as an `enum` instead of a pointer. 
-#[allow(non_camel_case_types)] -pub enum VMStateFieldType { - null, - vmstate_info_bool, - vmstate_info_int8, - vmstate_info_int16, - vmstate_info_int32, - vmstate_info_int64, - vmstate_info_uint8, - vmstate_info_uint16, - vmstate_info_uint32, - vmstate_info_uint64, - vmstate_info_timer, -} - -/// Workaround for lack of `const_refs_static`. Converts a `VMStateFieldType` -/// to a `*const VMStateInfo`, for inclusion in a `VMStateField`. -#[macro_export] -macro_rules! info_enum_to_ref { - ($e:expr) => { - unsafe { - match $e { - $crate::vmstate::VMStateFieldType::null => ::core::ptr::null(), - $crate::vmstate::VMStateFieldType::vmstate_info_bool => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_bool) - } - $crate::vmstate::VMStateFieldType::vmstate_info_int8 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_int8) - } - $crate::vmstate::VMStateFieldType::vmstate_info_int16 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_int16) - } - $crate::vmstate::VMStateFieldType::vmstate_info_int32 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_int32) - } - $crate::vmstate::VMStateFieldType::vmstate_info_int64 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_int64) - } - $crate::vmstate::VMStateFieldType::vmstate_info_uint8 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint8) - } - $crate::vmstate::VMStateFieldType::vmstate_info_uint16 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint16) - } - $crate::vmstate::VMStateFieldType::vmstate_info_uint32 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint32) - } - $crate::vmstate::VMStateFieldType::vmstate_info_uint64 => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_uint64) - } - $crate::vmstate::VMStateFieldType::vmstate_info_timer => { - ::core::ptr::addr_of!($crate::bindings::vmstate_info_timer) - } - } - } - }; -} - /// A trait for types that can be included in a device's migration stream. 
It /// provides the base contents of a `VMStateField` (minus the name and offset). /// @@ -143,12 +92,6 @@ macro_rules! info_enum_to_ref { /// to implement it except via macros that do it for you, such as /// `impl_vmstate_bitsized!`. pub unsafe trait VMState { - /// The `info` member of a `VMStateField` is a pointer and as such cannot - /// yet be included in the [`BASE`](VMState::BASE) associated constant; - /// this is only allowed by Rust 1.83.0 and newer. For now, include the - /// member as an enum which is stored in a separate constant. - const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::null; - /// The base contents of a `VMStateField` (minus the name and offset) for /// the type that is implementing the trait. const BASE: VMStateField; @@ -163,12 +106,6 @@ pub unsafe trait VMState { }; } -/// Internal utility function to retrieve a type's `VMStateFieldType`; -/// used by [`vmstate_of!`](crate::vmstate_of). -pub const fn vmstate_scalar_type(_: PhantomData) -> VMStateFieldType { - T::SCALAR_TYPE -} - /// Internal utility function to retrieve a type's `VMStateField`; /// used by [`vmstate_of!`](crate::vmstate_of). pub const fn vmstate_base(_: PhantomData) -> VMStateField { @@ -189,32 +126,31 @@ pub const fn vmstate_varray_flag(_: PhantomData) -> VMStateFlags /// * scalar types (integer and `bool`) /// * the C struct `QEMUTimer` /// * a transparent wrapper for any of the above (`Cell`, `UnsafeCell`, -/// [`BqlCell`], [`BqlRefCell`] +/// [`BqlCell`], [`BqlRefCell`]) /// * a raw pointer to any of the above /// * a `NonNull` pointer, a `Box` or an [`Owned`] for any of the above /// * an array of any of the above /// /// In order to support other types, the trait `VMState` must be implemented -/// for them. The macros -/// [`impl_vmstate_bitsized!`](crate::impl_vmstate_bitsized) -/// and [`impl_vmstate_forward!`](crate::impl_vmstate_forward) help with this. +/// for them. 
The macros [`impl_vmstate_forward`](crate::impl_vmstate_forward), +/// [`impl_vmstate_bitsized`](crate::impl_vmstate_bitsized), and +/// [`impl_vmstate_struct`](crate::impl_vmstate_struct) help with this. +/// +/// [`BqlCell`]: ../../bql/cell/struct.BqlCell.html +/// [`BqlRefCell`]: ../../bql/cell/struct.BqlRefCell.html +/// [`Owned`]: ../../qom/qom/struct.Owned.html #[macro_export] macro_rules! vmstate_of { ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])? $(, $test_fn:expr)? $(,)?) => { $crate::bindings::VMStateField { name: ::core::concat!(::core::stringify!($field_name), "\0") .as_bytes() - .as_ptr() as *const ::std::os::raw::c_char, + .as_ptr().cast::<::std::os::raw::c_char>(), offset: ::std::mem::offset_of!($struct_name, $field_name), $(num_offset: ::std::mem::offset_of!($struct_name, $num),)? $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)? // The calls to `call_func_with_field!` are the magic that // computes most of the VMStateField from the type of the field. - info: $crate::info_enum_to_ref!($crate::call_func_with_field!( - $crate::vmstate::vmstate_scalar_type, - $struct_name, - $field_name - )), ..$crate::call_func_with_field!( $crate::vmstate::vmstate_base, $struct_name, @@ -228,7 +164,11 @@ macro_rules! vmstate_of { }; } -impl VMStateFlags { +pub trait VMStateFlagsExt { + const VMS_VARRAY_FLAGS: VMStateFlags; +} + +impl VMStateFlagsExt for VMStateFlags { const VMS_VARRAY_FLAGS: VMStateFlags = VMStateFlags( VMStateFlags::VMS_VARRAY_INT32.0 | VMStateFlags::VMS_VARRAY_UINT8.0 @@ -274,7 +214,7 @@ impl VMStateField { } #[must_use] - pub const fn with_varray_flag_unchecked(mut self, flag: VMStateFlags) -> VMStateField { + pub const fn with_varray_flag_unchecked(mut self, flag: VMStateFlags) -> Self { self.flags = VMStateFlags(self.flags.0 & !VMStateFlags::VMS_ARRAY.0); self.flags = VMStateFlags(self.flags.0 | flag.0); self.num = 0; // varray uses num_offset instead of num. 
@@ -283,13 +223,13 @@ impl VMStateField { #[must_use] #[allow(unused_mut)] - pub const fn with_varray_flag(mut self, flag: VMStateFlags) -> VMStateField { + pub const fn with_varray_flag(mut self, flag: VMStateFlags) -> Self { assert!((self.flags.0 & VMStateFlags::VMS_ARRAY.0) != 0); self.with_varray_flag_unchecked(flag) } #[must_use] - pub const fn with_varray_multiply(mut self, num: u32) -> VMStateField { + pub const fn with_varray_multiply(mut self, num: u32) -> Self { assert!(num <= 0x7FFF_FFFFu32); self.flags = VMStateFlags(self.flags.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0); self.num = num as i32; @@ -304,7 +244,7 @@ impl VMStateField { /// # Examples /// /// ``` -/// # use qemu_api::impl_vmstate_forward; +/// # use migration::impl_vmstate_forward; /// pub struct Fifo([u8; 16]); /// impl_vmstate_forward!(Fifo); /// ``` @@ -315,8 +255,6 @@ macro_rules! impl_vmstate_forward { // the first field of the tuple ($tuple:ty) => { unsafe impl $crate::vmstate::VMState for $tuple { - const SCALAR_TYPE: $crate::vmstate::VMStateFieldType = - $crate::call_func_with_field!($crate::vmstate::vmstate_scalar_type, $tuple, 0); const BASE: $crate::bindings::VMStateField = $crate::call_func_with_field!($crate::vmstate::vmstate_base, $tuple, 0); } @@ -325,34 +263,30 @@ macro_rules! impl_vmstate_forward { // Transparent wrappers: just use the internal type +#[macro_export] macro_rules! 
impl_vmstate_transparent { ($type:ty where $base:tt: VMState $($where:tt)*) => { - unsafe impl<$base> VMState for $type where $base: VMState $($where)* { - const SCALAR_TYPE: VMStateFieldType = <$base as VMState>::SCALAR_TYPE; - const BASE: VMStateField = VMStateField { + unsafe impl<$base> $crate::vmstate::VMState for $type where $base: $crate::vmstate::VMState $($where)* { + const BASE: $crate::vmstate::VMStateField = $crate::vmstate::VMStateField { size: mem::size_of::<$type>(), - ..<$base as VMState>::BASE + ..<$base as $crate::vmstate::VMState>::BASE }; - const VARRAY_FLAG: VMStateFlags = <$base as VMState>::VARRAY_FLAG; + const VARRAY_FLAG: $crate::bindings::VMStateFlags = <$base as $crate::vmstate::VMState>::VARRAY_FLAG; } }; } +impl_vmstate_transparent!(bql::BqlCell where T: VMState); +impl_vmstate_transparent!(bql::BqlRefCell where T: VMState); impl_vmstate_transparent!(std::cell::Cell where T: VMState); impl_vmstate_transparent!(std::cell::UnsafeCell where T: VMState); impl_vmstate_transparent!(std::pin::Pin where T: VMState); -impl_vmstate_transparent!(crate::cell::BqlCell where T: VMState); -impl_vmstate_transparent!(crate::cell::BqlRefCell where T: VMState); -impl_vmstate_transparent!(crate::cell::Opaque where T: VMState); +impl_vmstate_transparent!(common::Opaque where T: VMState); #[macro_export] macro_rules! impl_vmstate_bitsized { ($type:ty) => { unsafe impl $crate::vmstate::VMState for $type { - const SCALAR_TYPE: $crate::vmstate::VMStateFieldType = - <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt - as ::bilge::prelude::Number>::UnderlyingType - as $crate::vmstate::VMState>::SCALAR_TYPE; const BASE: $crate::bindings::VMStateField = <<<$type as ::bilge::prelude::Bitsized>::ArbitraryInt as ::bilge::prelude::Number>::UnderlyingType @@ -362,6 +296,25 @@ macro_rules! 
impl_vmstate_bitsized { as ::bilge::prelude::Number>::UnderlyingType as $crate::vmstate::VMState>::VARRAY_FLAG; } + + impl $crate::migratable::ToMigrationState for $type { + type Migrated = <<$type as ::bilge::prelude::Bitsized>::ArbitraryInt + as ::bilge::prelude::Number>::UnderlyingType; + + fn snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), $crate::InvalidError> { + *target = Self::Migrated::from(*self); + Ok(()) + } + + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + version_id: u8, + ) -> Result<(), $crate::InvalidError> { + *self = Self::from(source); + Ok(()) + } + } }; } @@ -369,12 +322,12 @@ macro_rules! impl_vmstate_bitsized { macro_rules! impl_vmstate_scalar { ($info:ident, $type:ty$(, $varray_flag:ident)?) => { - unsafe impl VMState for $type { - const SCALAR_TYPE: VMStateFieldType = VMStateFieldType::$info; - const BASE: VMStateField = VMStateField { + unsafe impl $crate::vmstate::VMState for $type { + const BASE: $crate::vmstate::VMStateField = $crate::vmstate::VMStateField { + info: addr_of!(bindings::$info), size: mem::size_of::<$type>(), - flags: VMStateFlags::VMS_SINGLE, - ..Zeroable::ZERO + flags: $crate::vmstate::VMStateFlags::VMS_SINGLE, + ..::common::zeroable::Zeroable::ZERO }; $(const VARRAY_FLAG: VMStateFlags = VMStateFlags::$varray_flag;)? } @@ -390,17 +343,31 @@ impl_vmstate_scalar!(vmstate_info_uint8, u8, VMS_VARRAY_UINT8); impl_vmstate_scalar!(vmstate_info_uint16, u16, VMS_VARRAY_UINT16); impl_vmstate_scalar!(vmstate_info_uint32, u32, VMS_VARRAY_UINT32); impl_vmstate_scalar!(vmstate_info_uint64, u64); -impl_vmstate_scalar!(vmstate_info_timer, crate::timer::Timer); +impl_vmstate_scalar!(vmstate_info_timer, util::timer::Timer); + +#[macro_export] +macro_rules! 
impl_vmstate_c_struct { + ($type:ty, $vmsd:expr) => { + unsafe impl $crate::vmstate::VMState for $type { + const BASE: $crate::bindings::VMStateField = $crate::bindings::VMStateField { + vmsd: ::std::ptr::addr_of!($vmsd), + size: ::std::mem::size_of::<$type>(), + flags: $crate::bindings::VMStateFlags::VMS_STRUCT, + ..::common::zeroable::Zeroable::ZERO + }; + } + }; +} // Pointer types using the underlying type's VMState plus VMS_POINTER // Note that references are not supported, though references to cells // could be allowed. +#[macro_export] macro_rules! impl_vmstate_pointer { ($type:ty where $base:tt: VMState $($where:tt)*) => { - unsafe impl<$base> VMState for $type where $base: VMState $($where)* { - const SCALAR_TYPE: VMStateFieldType = ::SCALAR_TYPE; - const BASE: VMStateField = <$base as VMState>::BASE.with_pointer_flag(); + unsafe impl<$base> $crate::vmstate::VMState for $type where $base: $crate::vmstate::VMState $($where)* { + const BASE: $crate::vmstate::VMStateField = <$base as $crate::vmstate::VMState>::BASE.with_pointer_flag(); } }; } @@ -412,13 +379,11 @@ impl_vmstate_pointer!(NonNull where T: VMState); // Unlike C pointers, Box is always non-null therefore there is no need // to specify VMS_ALLOC. impl_vmstate_pointer!(Box where T: VMState); -impl_vmstate_pointer!(Owned where T: VMState + ObjectType); // Arrays using the underlying type's VMState plus // VMS_ARRAY/VMS_ARRAY_OF_POINTER unsafe impl VMState for [T; N] { - const SCALAR_TYPE: VMStateFieldType = ::SCALAR_TYPE; const BASE: VMStateField = ::BASE.with_array_flag(N); } @@ -431,7 +396,7 @@ macro_rules! 
vmstate_unused { size: $size, info: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_info_unused_buffer) }, flags: $crate::bindings::VMStateFlags::VMS_BUFFER, - ..$crate::zeroable::Zeroable::ZERO + ..::common::Zeroable::ZERO } }}; } @@ -440,7 +405,7 @@ pub extern "C" fn rust_vms_test_field_exists FnCall<(&'a T, u8), b opaque: *mut c_void, version_id: c_int, ) -> bool { - // SAFETY: the opaque was passed as a reference to `T`. + // SAFETY: the function is used in T's implementation of VMState let owner: &T = unsafe { &*(opaque.cast::()) }; let version: u8 = version_id.try_into().unwrap(); F::call((owner, version)) @@ -454,10 +419,10 @@ pub type VMSFieldExistCb = unsafe extern "C" fn( #[macro_export] macro_rules! vmstate_exist_fn { ($struct_name:ty, $test_fn:expr) => {{ - const fn test_cb_builder__ $crate::callbacks::FnCall<(&'a T, u8), bool>>( + const fn test_cb_builder__ ::common::FnCall<(&'a T, u8), bool>>( _phantom: ::core::marker::PhantomData, ) -> $crate::vmstate::VMSFieldExistCb { - let _: () = F::ASSERT_IS_SOME; + const { assert!(F::IS_SOME) }; $crate::vmstate::rust_vms_test_field_exists:: } @@ -468,74 +433,19 @@ macro_rules! vmstate_exist_fn { }}; } -// FIXME: including the `vmsd` field in a `const` is not possible without -// the const_refs_static feature (stabilized in Rust 1.83.0). Without it, -// it is not possible to use VMS_STRUCT in a transparent manner using -// `vmstate_of!`. While VMSTATE_CLOCK can at least try to be type-safe, -// VMSTATE_STRUCT includes $type only for documentation purposes; it -// is checked against $field_name and $struct_name, but not against $vmsd -// which is what really would matter. -#[doc(alias = "VMSTATE_STRUCT")] +/// Add a terminator to the fields in the arguments, and return +/// a reference to the resulting array of values. #[macro_export] -macro_rules! vmstate_struct { - ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?, $vmsd:expr, $type:ty $(, $test_fn:expr)? $(,)?) 
=> { - $crate::bindings::VMStateField { - name: ::core::concat!(::core::stringify!($field_name), "\0") - .as_bytes() - .as_ptr() as *const ::std::os::raw::c_char, - $(num_offset: ::std::mem::offset_of!($struct_name, $num),)? - offset: { - $crate::assert_field_type!($struct_name, $field_name, $type $(, num = $num)?); - ::std::mem::offset_of!($struct_name, $field_name) - }, - size: ::core::mem::size_of::<$type>(), - flags: $crate::bindings::VMStateFlags::VMS_STRUCT, - vmsd: $vmsd, - $(field_exists: $crate::vmstate_exist_fn!($struct_name, $test_fn),)? - ..$crate::zeroable::Zeroable::ZERO - } $(.with_varray_flag_unchecked( - $crate::call_func_with_field!( - $crate::vmstate::vmstate_varray_flag, - $struct_name, - $num - ) - ) - $(.with_varray_multiply($factor))?)? - }; -} - -#[doc(alias = "VMSTATE_CLOCK")] -#[macro_export] -macro_rules! vmstate_clock { - ($struct_name:ty, $field_name:ident $([0 .. $num:ident $(* $factor:expr)?])?) => {{ - $crate::bindings::VMStateField { - name: ::core::concat!(::core::stringify!($field_name), "\0") - .as_bytes() - .as_ptr() as *const ::std::os::raw::c_char, - offset: { - $crate::assert_field_type!( - $struct_name, - $field_name, - $crate::qom::Owned<$crate::qdev::Clock> $(, num = $num)? - ); - ::std::mem::offset_of!($struct_name, $field_name) - }, - size: ::core::mem::size_of::<*const $crate::qdev::Clock>(), - flags: $crate::bindings::VMStateFlags( - $crate::bindings::VMStateFlags::VMS_STRUCT.0 - | $crate::bindings::VMStateFlags::VMS_POINTER.0, - ), - vmsd: unsafe { ::core::ptr::addr_of!($crate::bindings::vmstate_clock) }, - ..$crate::zeroable::Zeroable::ZERO - } $(.with_varray_flag_unchecked( - $crate::call_func_with_field!( - $crate::vmstate::vmstate_varray_flag, - $struct_name, - $num - ) - ) - $(.with_varray_multiply($factor))?)? - }}; +macro_rules! 
vmstate_fields_ref { + ($($field:expr),*$(,)*) => { + &[ + $($field),*, + $crate::bindings::VMStateField { + flags: $crate::bindings::VMStateFlags::VMS_END, + ..::common::zeroable::Zeroable::ZERO + } + ] + } } /// Helper macro to declare a list of @@ -544,14 +454,10 @@ macro_rules! vmstate_clock { #[macro_export] macro_rules! vmstate_fields { ($($field:expr),*$(,)*) => {{ - static _FIELDS: &[$crate::bindings::VMStateField] = &[ + static _FIELDS: &[$crate::bindings::VMStateField] = $crate::vmstate_fields_ref!( $($field),*, - $crate::bindings::VMStateField { - flags: $crate::bindings::VMStateFlags::VMS_END, - ..$crate::zeroable::Zeroable::ZERO - } - ]; - _FIELDS.as_ptr() + ); + _FIELDS }} } @@ -567,38 +473,271 @@ macro_rules! vmstate_validate { | $crate::bindings::VMStateFlags::VMS_ARRAY.0, ), num: 0, // 0 elements: no data, only run test_fn callback - ..$crate::zeroable::Zeroable::ZERO + ..::common::zeroable::Zeroable::ZERO } }; } -/// A transparent wrapper type for the `subsections` field of -/// [`VMStateDescription`]. +/// Helper macro to allow using a struct in [`vmstate_of!`] /// -/// This is necessary to be able to declare subsection descriptions as statics, -/// because the only way to implement `Sync` for a foreign type (and `*const` -/// pointers are foreign types in Rust) is to create a wrapper struct and -/// `unsafe impl Sync` for it. +/// # Safety /// -/// This struct is used in the -/// [`vm_state_subsections`](crate::vmstate_subsections) macro implementation. -#[repr(transparent)] -pub struct VMStateSubsectionsWrapper(pub &'static [*const crate::bindings::VMStateDescription]); +/// The [`VMStateDescription`] constant `$vmsd` must be an accurate +/// description of the struct. +#[macro_export] +macro_rules! 
impl_vmstate_struct { + ($type:ty, $vmsd:expr) => { + unsafe impl $crate::vmstate::VMState for $type { + const BASE: $crate::bindings::VMStateField = { + static VMSD: &$crate::bindings::VMStateDescription = $vmsd.as_ref(); + + $crate::bindings::VMStateField { + vmsd: ::core::ptr::addr_of!(*VMSD), + size: ::core::mem::size_of::<$type>(), + flags: $crate::bindings::VMStateFlags::VMS_STRUCT, + ..common::Zeroable::ZERO + } + }; + } + }; +} -unsafe impl Sync for VMStateSubsectionsWrapper {} +/// The type returned by [`vmstate_subsections!`](crate::vmstate_subsections). +pub type VMStateSubsections = &'static [Option<&'static crate::bindings::VMStateDescription>]; /// Helper macro to declare a list of subsections ([`VMStateDescription`]) /// into a static and return a pointer to the array of pointers it created. #[macro_export] macro_rules! vmstate_subsections { ($($subsection:expr),*$(,)*) => {{ - static _SUBSECTIONS: $crate::vmstate::VMStateSubsectionsWrapper = $crate::vmstate::VMStateSubsectionsWrapper(&[ + static _SUBSECTIONS: $crate::vmstate::VMStateSubsections = &[ $({ - static _SUBSECTION: $crate::bindings::VMStateDescription = $subsection; - ::core::ptr::addr_of!(_SUBSECTION) + static _SUBSECTION: $crate::bindings::VMStateDescription = $subsection.get(); + Some(&_SUBSECTION) }),*, - ::core::ptr::null() - ]); - _SUBSECTIONS.0.as_ptr() + None, + ]; + &_SUBSECTIONS }} } + +pub struct VMStateDescription(bindings::VMStateDescription, PhantomData); + +// SAFETY: When a *const T is passed to the callbacks, the call itself +// is done in a thread-safe manner. The invocation is okay as long as +// T itself is `Sync`. 
+unsafe impl Sync for VMStateDescription {} + +#[derive(Clone)] +pub struct VMStateDescriptionBuilder(bindings::VMStateDescription, PhantomData); + +#[derive(Debug)] +pub struct InvalidError; + +impl Error for InvalidError {} + +impl std::fmt::Display for InvalidError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "invalid migration data") + } +} + +impl From for Errno { + fn from(_value: InvalidError) -> Errno { + io::ErrorKind::InvalidInput.into() + } +} + +unsafe extern "C" fn vmstate_no_version_cb< + T, + F: for<'a> FnCall<(&'a T,), Result<(), impl Into>>, +>( + opaque: *mut c_void, +) -> c_int { + // SAFETY: the function is used in T's implementation of VMState + let result = F::call((unsafe { &*(opaque.cast::()) },)); + into_neg_errno(result) +} + +unsafe extern "C" fn vmstate_post_load_cb< + T, + F: for<'a> FnCall<(&'a T, u8), Result<(), impl Into>>, +>( + opaque: *mut c_void, + version_id: c_int, +) -> c_int { + // SAFETY: the function is used in T's implementation of VMState + let owner: &T = unsafe { &*(opaque.cast::()) }; + let version: u8 = version_id.try_into().unwrap(); + let result = F::call((owner, version)); + into_neg_errno(result) +} + +unsafe extern "C" fn vmstate_needed_cb FnCall<(&'a T,), bool>>( + opaque: *mut c_void, +) -> bool { + // SAFETY: the function is used in T's implementation of VMState + F::call((unsafe { &*(opaque.cast::()) },)) +} + +unsafe extern "C" fn vmstate_dev_unplug_pending_cb FnCall<(&'a T,), bool>>( + opaque: *mut c_void, +) -> bool { + // SAFETY: the function is used in T's implementation of VMState + F::call((unsafe { &*(opaque.cast::()) },)) +} + +impl VMStateDescriptionBuilder { + #[must_use] + pub const fn name(mut self, name_str: &CStr) -> Self { + self.0.name = ::std::ffi::CStr::as_ptr(name_str); + self + } + + #[must_use] + pub const fn unmigratable(mut self) -> Self { + self.0.unmigratable = true; + self + } + + #[must_use] + pub const fn early_setup(mut self) -> Self { + 
self.0.early_setup = true; + self + } + + #[must_use] + pub const fn version_id(mut self, version: u8) -> Self { + self.0.version_id = version as c_int; + self + } + + #[must_use] + pub const fn minimum_version_id(mut self, min_version: u8) -> Self { + self.0.minimum_version_id = min_version as c_int; + self + } + + #[must_use] + pub const fn priority(mut self, priority: MigrationPriority) -> Self { + self.0.priority = priority; + self + } + + #[must_use] + pub const fn pre_load FnCall<(&'a T,), Result<(), impl Into>>>( + mut self, + _f: &F, + ) -> Self { + self.0.pre_load = if F::IS_SOME { + Some(vmstate_no_version_cb::) + } else { + None + }; + self + } + + #[must_use] + pub const fn post_load FnCall<(&'a T, u8), Result<(), impl Into>>>( + mut self, + _f: &F, + ) -> Self { + self.0.post_load = if F::IS_SOME { + Some(vmstate_post_load_cb::) + } else { + None + }; + self + } + + #[must_use] + pub const fn pre_save FnCall<(&'a T,), Result<(), impl Into>>>( + mut self, + _f: &F, + ) -> Self { + self.0.pre_save = if F::IS_SOME { + Some(vmstate_no_version_cb::) + } else { + None + }; + self + } + + #[must_use] + pub const fn post_save FnCall<(&'a T,), Result<(), impl Into>>>( + mut self, + _f: &F, + ) -> Self { + self.0.post_save = if F::IS_SOME { + Some(vmstate_no_version_cb::) + } else { + None + }; + self + } + + #[must_use] + pub const fn needed FnCall<(&'a T,), bool>>(mut self, _f: &F) -> Self { + self.0.needed = if F::IS_SOME { + Some(vmstate_needed_cb::) + } else { + None + }; + self + } + + #[must_use] + pub const fn unplug_pending FnCall<(&'a T,), bool>>(mut self, _f: &F) -> Self { + self.0.dev_unplug_pending = if F::IS_SOME { + Some(vmstate_dev_unplug_pending_cb::) + } else { + None + }; + self + } + + #[must_use] + pub const fn fields(mut self, fields: &'static [VMStateField]) -> Self { + if fields[fields.len() - 1].flags.0 != VMStateFlags::VMS_END.0 { + panic!("fields are not terminated, use vmstate_fields!"); + } + self.0.fields = fields.as_ptr(); + self + 
} + + #[must_use] + pub const fn subsections(mut self, subs: &'static VMStateSubsections) -> Self { + if subs[subs.len() - 1].is_some() { + panic!("subsections are not terminated, use vmstate_subsections!"); + } + let subs: *const Option<&bindings::VMStateDescription> = subs.as_ptr(); + self.0.subsections = subs.cast::<*const bindings::VMStateDescription>(); + self + } + + #[must_use] + pub const fn build(self) -> VMStateDescription { + VMStateDescription::(self.0, PhantomData) + } + + #[must_use] + pub const fn new() -> Self { + Self(bindings::VMStateDescription::ZERO, PhantomData) + } +} + +impl Default for VMStateDescriptionBuilder { + fn default() -> Self { + Self::new() + } +} + +impl VMStateDescription { + pub const fn get(&self) -> bindings::VMStateDescription { + self.0 + } + + pub const fn as_ref(&self) -> &bindings::VMStateDescription { + &self.0 + } +} diff --git a/rust/qemu-api/wrapper.h b/rust/migration/wrapper.h similarity index 77% rename from rust/qemu-api/wrapper.h rename to rust/migration/wrapper.h index 15a1b19847f2f..daf316aed411b 100644 --- a/rust/qemu-api/wrapper.h +++ b/rust/migration/wrapper.h @@ -48,24 +48,4 @@ typedef enum memory_order { #endif /* __CLANG_STDATOMIC_H */ #include "qemu/osdep.h" -#include "qemu/log.h" -#include "qemu/log-for-trace.h" -#include "qemu/module.h" -#include "qemu-io.h" -#include "system/system.h" -#include "hw/sysbus.h" -#include "system/memory.h" -#include "chardev/char-fe.h" -#include "hw/clock.h" -#include "hw/qdev-clock.h" -#include "hw/qdev-properties.h" -#include "hw/qdev-properties-system.h" -#include "hw/irq.h" -#include "qapi/error.h" -#include "qapi/error-internal.h" #include "migration/vmstate.h" -#include "chardev/char-serial.h" -#include "exec/memattrs.h" -#include "qemu/timer.h" -#include "system/address-spaces.h" -#include "hw/char/pl011.h" diff --git a/rust/qemu-api-macros/src/lib.rs b/rust/qemu-api-macros/src/lib.rs deleted file mode 100644 index b525d89c09e49..0000000000000 --- 
a/rust/qemu-api-macros/src/lib.rs +++ /dev/null @@ -1,265 +0,0 @@ -// Copyright 2024, Linaro Limited -// Author(s): Manos Pitsidianakis -// SPDX-License-Identifier: GPL-2.0-or-later - -use proc_macro::TokenStream; -use quote::quote; -use syn::{ - parse_macro_input, parse_quote, punctuated::Punctuated, spanned::Spanned, token::Comma, Data, - DeriveInput, Error, Field, Fields, FieldsUnnamed, Ident, Meta, Path, Token, Variant, -}; -mod bits; -use bits::BitsConstInternal; - -#[cfg(test)] -mod tests; - -fn get_fields<'a>( - input: &'a DeriveInput, - msg: &str, -) -> Result<&'a Punctuated, Error> { - let Data::Struct(ref s) = &input.data else { - return Err(Error::new( - input.ident.span(), - format!("Struct required for {msg}"), - )); - }; - let Fields::Named(ref fs) = &s.fields else { - return Err(Error::new( - input.ident.span(), - format!("Named fields required for {msg}"), - )); - }; - Ok(&fs.named) -} - -fn get_unnamed_field<'a>(input: &'a DeriveInput, msg: &str) -> Result<&'a Field, Error> { - let Data::Struct(ref s) = &input.data else { - return Err(Error::new( - input.ident.span(), - format!("Struct required for {msg}"), - )); - }; - let Fields::Unnamed(FieldsUnnamed { ref unnamed, .. }) = &s.fields else { - return Err(Error::new( - s.fields.span(), - format!("Tuple struct required for {msg}"), - )); - }; - if unnamed.len() != 1 { - return Err(Error::new( - s.fields.span(), - format!("A single field is required for {msg}"), - )); - } - Ok(&unnamed[0]) -} - -fn is_c_repr(input: &DeriveInput, msg: &str) -> Result<(), Error> { - let expected = parse_quote! { #[repr(C)] }; - - if input.attrs.iter().any(|attr| attr == &expected) { - Ok(()) - } else { - Err(Error::new( - input.ident.span(), - format!("#[repr(C)] required for {msg}"), - )) - } -} - -fn is_transparent_repr(input: &DeriveInput, msg: &str) -> Result<(), Error> { - let expected = parse_quote! 
{ #[repr(transparent)] }; - - if input.attrs.iter().any(|attr| attr == &expected) { - Ok(()) - } else { - Err(Error::new( - input.ident.span(), - format!("#[repr(transparent)] required for {msg}"), - )) - } -} - -fn derive_object_or_error(input: DeriveInput) -> Result { - is_c_repr(&input, "#[derive(Object)]")?; - - let name = &input.ident; - let parent = &get_fields(&input, "#[derive(Object)]")?[0].ident; - - Ok(quote! { - ::qemu_api::assert_field_type!(#name, #parent, - ::qemu_api::qom::ParentField<<#name as ::qemu_api::qom::ObjectImpl>::ParentType>); - - ::qemu_api::module_init! { - MODULE_INIT_QOM => unsafe { - ::qemu_api::bindings::type_register_static(&<#name as ::qemu_api::qom::ObjectImpl>::TYPE_INFO); - } - } - }) -} - -#[proc_macro_derive(Object)] -pub fn derive_object(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - - derive_object_or_error(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - -fn derive_opaque_or_error(input: DeriveInput) -> Result { - is_transparent_repr(&input, "#[derive(Wrapper)]")?; - - let name = &input.ident; - let field = &get_unnamed_field(&input, "#[derive(Wrapper)]")?; - let typ = &field.ty; - - // TODO: how to add "::qemu_api"? For now, this is only used in the - // qemu_api crate so it's not a problem. - Ok(quote! 
{ - unsafe impl crate::cell::Wrapper for #name { - type Wrapped = <#typ as crate::cell::Wrapper>::Wrapped; - } - impl #name { - pub unsafe fn from_raw<'a>(ptr: *mut ::Wrapped) -> &'a Self { - let ptr = ::std::ptr::NonNull::new(ptr).unwrap().cast::(); - unsafe { ptr.as_ref() } - } - - pub const fn as_mut_ptr(&self) -> *mut ::Wrapped { - self.0.as_mut_ptr() - } - - pub const fn as_ptr(&self) -> *const ::Wrapped { - self.0.as_ptr() - } - - pub const fn as_void_ptr(&self) -> *mut ::core::ffi::c_void { - self.0.as_void_ptr() - } - - pub const fn raw_get(slot: *mut Self) -> *mut ::Wrapped { - slot.cast() - } - } - }) -} - -#[proc_macro_derive(Wrapper)] -pub fn derive_opaque(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - - derive_opaque_or_error(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - -#[allow(non_snake_case)] -fn get_repr_uN(input: &DeriveInput, msg: &str) -> Result { - let repr = input.attrs.iter().find(|attr| attr.path().is_ident("repr")); - if let Some(repr) = repr { - let nested = repr.parse_args_with(Punctuated::::parse_terminated)?; - for meta in nested { - match meta { - Meta::Path(path) if path.is_ident("u8") => return Ok(path), - Meta::Path(path) if path.is_ident("u16") => return Ok(path), - Meta::Path(path) if path.is_ident("u32") => return Ok(path), - Meta::Path(path) if path.is_ident("u64") => return Ok(path), - _ => {} - } - } - } - - Err(Error::new( - input.ident.span(), - format!("#[repr(u8/u16/u32/u64) required for {msg}"), - )) -} - -fn get_variants(input: &DeriveInput) -> Result<&Punctuated, Error> { - let Data::Enum(ref e) = &input.data else { - return Err(Error::new( - input.ident.span(), - "Cannot derive TryInto for union or struct.", - )); - }; - if let Some(v) = e.variants.iter().find(|v| v.fields != Fields::Unit) { - return Err(Error::new( - v.fields.span(), - "Cannot derive TryInto for enum with non-unit variants.", - )); - } - Ok(&e.variants) -} - 
-#[rustfmt::skip::macros(quote)] -fn derive_tryinto_body( - name: &Ident, - variants: &Punctuated, - repr: &Path, -) -> Result { - let discriminants: Vec<&Ident> = variants.iter().map(|f| &f.ident).collect(); - - Ok(quote! { - #(const #discriminants: #repr = #name::#discriminants as #repr;)* - match value { - #(#discriminants => core::result::Result::Ok(#name::#discriminants),)* - _ => core::result::Result::Err(value), - } - }) -} - -#[rustfmt::skip::macros(quote)] -fn derive_tryinto_or_error(input: DeriveInput) -> Result { - let repr = get_repr_uN(&input, "#[derive(TryInto)]")?; - let name = &input.ident; - let body = derive_tryinto_body(name, get_variants(&input)?, &repr)?; - let errmsg = format!("invalid value for {name}"); - - Ok(quote! { - impl #name { - #[allow(dead_code)] - pub const fn into_bits(self) -> #repr { - self as #repr - } - - #[allow(dead_code)] - pub const fn from_bits(value: #repr) -> Self { - match ({ - #body - }) { - Ok(x) => x, - Err(_) => panic!(#errmsg), - } - } - } - impl core::convert::TryFrom<#repr> for #name { - type Error = #repr; - - #[allow(ambiguous_associated_items)] - fn try_from(value: #repr) -> Result { - #body - } - } - }) -} - -#[proc_macro_derive(TryInto)] -pub fn derive_tryinto(input: TokenStream) -> TokenStream { - let input = parse_macro_input!(input as DeriveInput); - - derive_tryinto_or_error(input) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} - -#[proc_macro] -pub fn bits_const_internal(ts: TokenStream) -> TokenStream { - let ts = proc_macro2::TokenStream::from(ts); - let mut it = ts.into_iter(); - - BitsConstInternal::parse(&mut it) - .unwrap_or_else(syn::Error::into_compile_error) - .into() -} diff --git a/rust/qemu-api-macros/src/tests.rs b/rust/qemu-api-macros/src/tests.rs deleted file mode 100644 index d6dcd62fcf680..0000000000000 --- a/rust/qemu-api-macros/src/tests.rs +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2025, Linaro Limited -// Author(s): Manos Pitsidianakis -// 
SPDX-License-Identifier: GPL-2.0-or-later - -use quote::quote; - -use super::*; - -macro_rules! derive_compile_fail { - ($derive_fn:ident, $input:expr, $error_msg:expr) => {{ - let input: proc_macro2::TokenStream = $input; - let error_msg: &str = $error_msg; - let derive_fn: fn(input: syn::DeriveInput) -> Result = - $derive_fn; - - let input: syn::DeriveInput = syn::parse2(input).unwrap(); - let result = derive_fn(input); - let err = result.unwrap_err().into_compile_error(); - assert_eq!( - err.to_string(), - quote! { ::core::compile_error! { #error_msg } }.to_string() - ); - }}; -} - -macro_rules! derive_compile { - ($derive_fn:ident, $input:expr, $($expected:tt)*) => {{ - let input: proc_macro2::TokenStream = $input; - let expected: proc_macro2::TokenStream = $($expected)*; - let derive_fn: fn(input: syn::DeriveInput) -> Result = - $derive_fn; - - let input: syn::DeriveInput = syn::parse2(input).unwrap(); - let result = derive_fn(input).unwrap(); - assert_eq!(result.to_string(), expected.to_string()); - }}; -} - -#[test] -fn test_derive_object() { - derive_compile_fail!( - derive_object_or_error, - quote! { - #[derive(Object)] - struct Foo { - _unused: [u8; 0], - } - }, - "#[repr(C)] required for #[derive(Object)]" - ); - derive_compile!( - derive_object_or_error, - quote! { - #[derive(Object)] - #[repr(C)] - struct Foo { - _unused: [u8; 0], - } - }, - quote! { - ::qemu_api::assert_field_type!( - Foo, - _unused, - ::qemu_api::qom::ParentField<::ParentType> - ); - ::qemu_api::module_init! { - MODULE_INIT_QOM => unsafe { - ::qemu_api::bindings::type_register_static(&::TYPE_INFO); - } - } - } - ); -} - -#[test] -fn test_derive_tryinto() { - derive_compile_fail!( - derive_tryinto_or_error, - quote! { - #[derive(TryInto)] - struct Foo { - _unused: [u8; 0], - } - }, - "#[repr(u8/u16/u32/u64) required for #[derive(TryInto)]" - ); - derive_compile!( - derive_tryinto_or_error, - quote! 
{ - #[derive(TryInto)] - #[repr(u8)] - enum Foo { - First = 0, - Second, - } - }, - quote! { - impl Foo { - #[allow(dead_code)] - pub const fn into_bits(self) -> u8 { - self as u8 - } - - #[allow(dead_code)] - pub const fn from_bits(value: u8) -> Self { - match ({ - const First: u8 = Foo::First as u8; - const Second: u8 = Foo::Second as u8; - match value { - First => core::result::Result::Ok(Foo::First), - Second => core::result::Result::Ok(Foo::Second), - _ => core::result::Result::Err(value), - } - }) { - Ok(x) => x, - Err(_) => panic!("invalid value for Foo"), - } - } - } - - impl core::convert::TryFrom for Foo { - type Error = u8; - - #[allow(ambiguous_associated_items)] - fn try_from(value: u8) -> Result { - const First: u8 = Foo::First as u8; - const Second: u8 = Foo::Second as u8; - match value { - First => core::result::Result::Ok(Foo::First), - Second => core::result::Result::Ok(Foo::Second), - _ => core::result::Result::Err(value), - } - } - } - } - ); -} diff --git a/rust/qemu-api/.gitignore b/rust/qemu-api/.gitignore deleted file mode 100644 index df6c2163e030a..0000000000000 --- a/rust/qemu-api/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Ignore generated bindings file overrides. -/src/bindings.inc.rs diff --git a/rust/qemu-api/README.md b/rust/qemu-api/README.md deleted file mode 100644 index ed1b7ab263d72..0000000000000 --- a/rust/qemu-api/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# QEMU bindings and API wrappers - -This library exports helper Rust types, Rust macros and C FFI bindings for internal QEMU APIs. 
- -The C bindings can be generated with `bindgen`, using this build target: - -```console -$ make bindings.inc.rs -``` - -## Generate Rust documentation - -Common Cargo tasks can be performed from the QEMU build directory - -```console -$ make clippy -$ make rustfmt -$ make rustdoc -``` diff --git a/rust/qemu-api/meson.build b/rust/qemu-api/meson.build deleted file mode 100644 index a090297c458b1..0000000000000 --- a/rust/qemu-api/meson.build +++ /dev/null @@ -1,114 +0,0 @@ -_qemu_api_cfg = run_command(rustc_args, - '--config-headers', config_host_h, '--features', files('Cargo.toml'), - capture: true, check: true).stdout().strip().splitlines() - -# _qemu_api_cfg += ['--cfg', 'feature="allocator"'] -if get_option('debug_mutex') - _qemu_api_cfg += ['--cfg', 'feature="debug_cell"'] -endif - -c_enums = [ - 'DeviceCategory', - 'GpioPolarity', - 'MachineInitPhase', - 'MemoryDeviceInfoKind', - 'MigrationPolicy', - 'MigrationPriority', - 'QEMUChrEvent', - 'QEMUClockType', - 'ResetType', - 'device_endian', - 'module_init_type', -] -_qemu_api_bindgen_args = [] -foreach enum : c_enums - _qemu_api_bindgen_args += ['--rustified-enum', enum] -endforeach -c_bitfields = [ - 'ClockEvent', - 'VMStateFlags', -] -foreach enum : c_bitfields - _qemu_api_bindgen_args += ['--bitfield-enum', enum] -endforeach - -# TODO: Remove this comment when the clang/libclang mismatch issue is solved. -# -# Rust bindings generation with `bindgen` might fail in some cases where the -# detected `libclang` does not match the expected `clang` version/target. 
In -# this case you must pass the path to `clang` and `libclang` to your build -# command invocation using the environment variables CLANG_PATH and -# LIBCLANG_PATH -_qemu_api_bindings_inc_rs = rust.bindgen( - input: 'wrapper.h', - dependencies: common_ss.all_dependencies(), - output: 'bindings.inc.rs', - include_directories: bindings_incdir, - bindgen_version: ['>=0.60.0'], - args: bindgen_args_common + _qemu_api_bindgen_args, - ) - -_qemu_api_rs = static_library( - 'qemu_api', - structured_sources( - [ - 'src/lib.rs', - 'src/assertions.rs', - 'src/bindings.rs', - 'src/bitops.rs', - 'src/callbacks.rs', - 'src/cell.rs', - 'src/chardev.rs', - 'src/errno.rs', - 'src/error.rs', - 'src/irq.rs', - 'src/log.rs', - 'src/memory.rs', - 'src/module.rs', - 'src/prelude.rs', - 'src/qdev.rs', - 'src/qom.rs', - 'src/sysbus.rs', - 'src/timer.rs', - 'src/uninit.rs', - 'src/vmstate.rs', - 'src/zeroable.rs', - ], - {'.' : _qemu_api_bindings_inc_rs}, - ), - override_options: ['rust_std=2021', 'build.rust_std=2021'], - rust_abi: 'rust', - rust_args: _qemu_api_cfg, - dependencies: [anyhow_rs, foreign_rs, libc_rs, qemu_api_macros, qemuutil_rs, - qom, hwcore, chardev, migration], -) - -rust.test('rust-qemu-api-tests', _qemu_api_rs, - suite: ['unit', 'rust']) - -qemu_api = declare_dependency(link_with: [_qemu_api_rs], - dependencies: [qemu_api_macros, qom, hwcore, chardev, migration]) - -# Doctests are essentially integration tests, so they need the same dependencies. -# Note that running them requires the object files for C code, so place them -# in a separate suite that is run by the "build" CI jobs rather than "check". 
-rust.doctest('rust-qemu-api-doctests', - _qemu_api_rs, - protocol: 'rust', - dependencies: qemu_api, - suite: ['doc', 'rust']) - -test('rust-qemu-api-integration', - executable( - 'rust-qemu-api-integration', - files('tests/tests.rs', 'tests/vmstate_tests.rs'), - override_options: ['rust_std=2021', 'build.rust_std=2021'], - rust_args: ['--test'], - install: false, - dependencies: [qemu_api]), - args: [ - '--test', '--test-threads', '1', - '--format', 'pretty', - ], - protocol: 'rust', - suite: ['unit', 'rust']) diff --git a/rust/qemu-api/src/lib.rs b/rust/qemu-api/src/lib.rs deleted file mode 100644 index 86dcd8ef17a9e..0000000000000 --- a/rust/qemu-api/src/lib.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2024, Linaro Limited -// Author(s): Manos Pitsidianakis -// SPDX-License-Identifier: GPL-2.0-or-later - -#![cfg_attr(not(MESON), doc = include_str!("../README.md"))] -#![deny(clippy::missing_const_for_fn)] - -#[rustfmt::skip] -pub mod bindings; - -// preserve one-item-per-"use" syntax, it is clearer -// for prelude-like modules -#[rustfmt::skip] -pub mod prelude; - -pub mod assertions; -pub mod bitops; -pub mod callbacks; -pub mod cell; -pub mod chardev; -pub mod errno; -pub mod error; -pub mod irq; -pub mod log; -pub mod memory; -pub mod module; -pub mod qdev; -pub mod qom; -pub mod sysbus; -pub mod timer; -pub mod uninit; -pub mod vmstate; -pub mod zeroable; - -use std::{ - alloc::{GlobalAlloc, Layout}, - ffi::c_void, -}; - -pub use error::{Error, Result}; - -#[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] -extern "C" { - fn g_aligned_alloc0( - n_blocks: bindings::gsize, - n_block_bytes: bindings::gsize, - alignment: bindings::gsize, - ) -> bindings::gpointer; - fn g_aligned_free(mem: bindings::gpointer); -} - -#[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] -extern "C" { - fn qemu_memalign(alignment: usize, size: usize) -> *mut c_void; - fn qemu_vfree(ptr: *mut c_void); -} - -extern "C" { - fn g_malloc0(n_bytes: bindings::gsize) -> bindings::gpointer; - fn g_free(mem: 
bindings::gpointer); -} - -/// An allocator that uses the same allocator as QEMU in C. -/// -/// It is enabled by default with the `allocator` feature. -/// -/// To set it up manually as a global allocator in your crate: -/// -/// ```ignore -/// use qemu_api::QemuAllocator; -/// -/// #[global_allocator] -/// static GLOBAL: QemuAllocator = QemuAllocator::new(); -/// ``` -#[derive(Clone, Copy, Debug)] -#[repr(C)] -pub struct QemuAllocator { - _unused: [u8; 0], -} - -#[cfg_attr(all(feature = "allocator", not(test)), global_allocator)] -pub static GLOBAL: QemuAllocator = QemuAllocator::new(); - -impl QemuAllocator { - // From the glibc documentation, on GNU systems, malloc guarantees 16-byte - // alignment on 64-bit systems and 8-byte alignment on 32-bit systems. See - // https://www.gnu.org/software/libc/manual/html_node/Malloc-Examples.html. - // This alignment guarantee also applies to Windows and Android. On Darwin - // and OpenBSD, the alignment is 16 bytes on both 64-bit and 32-bit systems. - #[cfg(all( - target_pointer_width = "32", - not(any(target_os = "macos", target_os = "openbsd")) - ))] - pub const DEFAULT_ALIGNMENT_BYTES: Option = Some(8); - #[cfg(all( - target_pointer_width = "64", - not(any(target_os = "macos", target_os = "openbsd")) - ))] - pub const DEFAULT_ALIGNMENT_BYTES: Option = Some(16); - #[cfg(all( - any(target_pointer_width = "32", target_pointer_width = "64"), - any(target_os = "macos", target_os = "openbsd") - ))] - pub const DEFAULT_ALIGNMENT_BYTES: Option = Some(16); - #[cfg(not(any(target_pointer_width = "32", target_pointer_width = "64")))] - pub const DEFAULT_ALIGNMENT_BYTES: Option = None; - - pub const fn new() -> Self { - Self { _unused: [] } - } -} - -impl Default for QemuAllocator { - fn default() -> Self { - Self::new() - } -} - -// Sanity check. 
-const _: [(); 8] = [(); ::core::mem::size_of::<*mut c_void>()]; - -unsafe impl GlobalAlloc for QemuAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0)) - { - // SAFETY: g_malloc0() is safe to call. - unsafe { g_malloc0(layout.size().try_into().unwrap()).cast::() } - } else { - #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] - { - // SAFETY: g_aligned_alloc0() is safe to call. - unsafe { - g_aligned_alloc0( - layout.size().try_into().unwrap(), - 1, - layout.align().try_into().unwrap(), - ) - .cast::() - } - } - #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] - { - // SAFETY: qemu_memalign() is safe to call. - unsafe { qemu_memalign(layout.align(), layout.size()).cast::() } - } - } - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - if matches!(Self::DEFAULT_ALIGNMENT_BYTES, Some(default) if default.checked_rem(layout.align()) == Some(0)) - { - // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid - // glib-allocated pointer, so `g_free`ing is safe. - unsafe { g_free(ptr.cast::<_>()) } - } else { - #[cfg(HAVE_GLIB_WITH_ALIGNED_ALLOC)] - { - // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned - // glib-allocated pointer, so `g_aligned_free`ing is safe. - unsafe { g_aligned_free(ptr.cast::<_>()) } - } - #[cfg(not(HAVE_GLIB_WITH_ALIGNED_ALLOC))] - { - // SAFETY: `ptr` must have been allocated by Self::alloc thus a valid aligned - // glib-allocated pointer, so `qemu_vfree`ing is safe. - unsafe { qemu_vfree(ptr.cast::<_>()) } - } - } - } -} diff --git a/rust/qemu-api/src/prelude.rs b/rust/qemu-api/src/prelude.rs deleted file mode 100644 index 8f9e23ee2c5f1..0000000000000 --- a/rust/qemu-api/src/prelude.rs +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2024 Red Hat, Inc. -// Author(s): Paolo Bonzini -// SPDX-License-Identifier: GPL-2.0-or-later - -//! Commonly used traits and types for QEMU. 
- -pub use crate::bitops::IntegerExt; - -pub use crate::cell::BqlCell; -pub use crate::cell::BqlRefCell; - -pub use crate::errno; - -pub use crate::log_mask_ln; - -pub use crate::qdev::DeviceMethods; - -pub use crate::qom::InterfaceType; -pub use crate::qom::IsA; -pub use crate::qom::Object; -pub use crate::qom::ObjectCast; -pub use crate::qom::ObjectDeref; -pub use crate::qom::ObjectClassMethods; -pub use crate::qom::ObjectMethods; -pub use crate::qom::ObjectType; - -pub use crate::qom_isa; - -pub use crate::sysbus::SysBusDeviceMethods; - -pub use crate::vmstate::VMState; diff --git a/rust/qemu-api/src/zeroable.rs b/rust/qemu-api/src/zeroable.rs deleted file mode 100644 index d8239d08563d1..0000000000000 --- a/rust/qemu-api/src/zeroable.rs +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -//! Defines a trait for structs that can be safely initialized with zero bytes. - -/// Encapsulates the requirement that -/// `MaybeUninit::::zeroed().assume_init()` does not cause undefined -/// behavior. -/// -/// # Safety -/// -/// Do not add this trait to a type unless all-zeroes is a valid value for the -/// type. In particular, raw pointers can be zero, but references and -/// `NonNull` cannot. -pub unsafe trait Zeroable: Default { - /// Return a value of Self whose memory representation consists of all - /// zeroes, with the possible exclusion of padding bytes. 
- const ZERO: Self = unsafe { ::core::mem::MaybeUninit::::zeroed().assume_init() }; -} - -// bindgen does not derive Default here -#[allow(clippy::derivable_impls)] -impl Default for crate::bindings::VMStateFlags { - fn default() -> Self { - Self(0) - } -} - -unsafe impl Zeroable for crate::bindings::Property__bindgen_ty_1 {} -unsafe impl Zeroable for crate::bindings::Property {} -unsafe impl Zeroable for crate::bindings::VMStateFlags {} -unsafe impl Zeroable for crate::bindings::VMStateField {} -unsafe impl Zeroable for crate::bindings::VMStateDescription {} -unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_1 {} -unsafe impl Zeroable for crate::bindings::MemoryRegionOps__bindgen_ty_2 {} -unsafe impl Zeroable for crate::bindings::MemoryRegionOps {} -unsafe impl Zeroable for crate::bindings::MemTxAttrs {} -unsafe impl Zeroable for crate::bindings::CharBackend {} diff --git a/rust/qemu-api-macros/Cargo.toml b/rust/qemu-macros/Cargo.toml similarity index 92% rename from rust/qemu-api-macros/Cargo.toml rename to rust/qemu-macros/Cargo.toml index 0cd40c8e168a3..c25b6c0b0da97 100644 --- a/rust/qemu-api-macros/Cargo.toml +++ b/rust/qemu-macros/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "qemu_api_macros" +name = "qemu_macros" version = "0.1.0" authors = ["Manos Pitsidianakis "] description = "Rust bindings for QEMU - Utility macros" @@ -16,6 +16,7 @@ rust-version.workspace = true proc-macro = true [dependencies] +attrs = "0.2.9" proc-macro2 = "1" quote = "1" syn = { version = "2", features = ["extra-traits"] } diff --git a/rust/qemu-api-macros/meson.build b/rust/qemu-macros/meson.build similarity index 63% rename from rust/qemu-api-macros/meson.build rename to rust/qemu-macros/meson.build index 2152bcb99b30e..0f27e0df925e9 100644 --- a/rust/qemu-api-macros/meson.build +++ b/rust/qemu-macros/meson.build @@ -1,5 +1,5 @@ -_qemu_api_macros_rs = rust.proc_macro( - 'qemu_api_macros', +_qemu_macros_rs = rust.proc_macro( + 'qemu_macros', files('src/lib.rs'), 
override_options: ['rust_std=2021', 'build.rust_std=2021'], rust_args: [ @@ -8,15 +8,16 @@ _qemu_api_macros_rs = rust.proc_macro( '--cfg', 'feature="proc-macro"', ], dependencies: [ + attrs_rs_native, proc_macro2_rs_native, quote_rs_native, syn_rs_native, ], ) -qemu_api_macros = declare_dependency( - link_with: _qemu_api_macros_rs, +qemu_macros = declare_dependency( + link_with: _qemu_macros_rs, ) -rust.test('rust-qemu-api-macros-tests', _qemu_api_macros_rs, +rust.test('rust-qemu-macros-tests', _qemu_macros_rs, suite: ['unit', 'rust']) diff --git a/rust/qemu-api-macros/src/bits.rs b/rust/qemu-macros/src/bits.rs similarity index 100% rename from rust/qemu-api-macros/src/bits.rs rename to rust/qemu-macros/src/bits.rs diff --git a/rust/qemu-macros/src/lib.rs b/rust/qemu-macros/src/lib.rs new file mode 100644 index 0000000000000..50239f228be2d --- /dev/null +++ b/rust/qemu-macros/src/lib.rs @@ -0,0 +1,502 @@ +// Copyright 2024, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +use proc_macro::TokenStream; +use quote::{quote, quote_spanned}; +use syn::{ + parse::{Parse, ParseStream}, + parse_macro_input, parse_quote, + punctuated::Punctuated, + spanned::Spanned, + token::Comma, + Attribute, Data, DeriveInput, Error, Field, Fields, FieldsUnnamed, Ident, Meta, Path, Token, + Variant, +}; + +mod bits; +use bits::BitsConstInternal; + +mod migration_state; +use migration_state::MigrationStateDerive; + +#[cfg(test)] +mod tests; + +fn get_fields<'a>( + input: &'a DeriveInput, + msg: &str, +) -> Result<&'a Punctuated, Error> { + let Data::Struct(ref s) = &input.data else { + return Err(Error::new( + input.ident.span(), + format!("Struct required for {msg}"), + )); + }; + let Fields::Named(ref fs) = &s.fields else { + return Err(Error::new( + input.ident.span(), + format!("Named fields required for {msg}"), + )); + }; + Ok(&fs.named) +} + +fn get_unnamed_field<'a>(input: &'a DeriveInput, msg: &str) -> Result<&'a Field, Error> { + 
let Data::Struct(ref s) = &input.data else { + return Err(Error::new( + input.ident.span(), + format!("Struct required for {msg}"), + )); + }; + let Fields::Unnamed(FieldsUnnamed { ref unnamed, .. }) = &s.fields else { + return Err(Error::new( + s.fields.span(), + format!("Tuple struct required for {msg}"), + )); + }; + if unnamed.len() != 1 { + return Err(Error::new( + s.fields.span(), + format!("A single field is required for {msg}"), + )); + } + Ok(&unnamed[0]) +} + +fn is_c_repr(input: &DeriveInput, msg: &str) -> Result<(), Error> { + let expected = parse_quote! { #[repr(C)] }; + + if input.attrs.iter().any(|attr| attr == &expected) { + Ok(()) + } else { + Err(Error::new( + input.ident.span(), + format!("#[repr(C)] required for {msg}"), + )) + } +} + +fn is_transparent_repr(input: &DeriveInput, msg: &str) -> Result<(), Error> { + let expected = parse_quote! { #[repr(transparent)] }; + + if input.attrs.iter().any(|attr| attr == &expected) { + Ok(()) + } else { + Err(Error::new( + input.ident.span(), + format!("#[repr(transparent)] required for {msg}"), + )) + } +} + +fn derive_object_or_error(input: DeriveInput) -> Result { + is_c_repr(&input, "#[derive(Object)]")?; + + let name = &input.ident; + let parent = &get_fields(&input, "#[derive(Object)]")? + .get(0) + .ok_or_else(|| { + Error::new( + input.ident.span(), + "#[derive(Object)] requires a parent field", + ) + })? + .ident; + + Ok(quote! { + ::common::assert_field_type!(#name, #parent, + ::qom::ParentField<<#name as ::qom::ObjectImpl>::ParentType>); + + ::util::module_init! 
{ + MODULE_INIT_QOM => unsafe { + ::qom::type_register_static(&<#name as ::qom::ObjectImpl>::TYPE_INFO); + } + } + }) +} + +#[proc_macro_derive(Object)] +pub fn derive_object(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_object_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +fn derive_opaque_or_error(input: DeriveInput) -> Result { + is_transparent_repr(&input, "#[derive(Wrapper)]")?; + + let name = &input.ident; + let field = &get_unnamed_field(&input, "#[derive(Wrapper)]")?; + let typ = &field.ty; + + Ok(quote! { + unsafe impl ::common::opaque::Wrapper for #name { + type Wrapped = <#typ as ::common::opaque::Wrapper>::Wrapped; + } + impl #name { + pub unsafe fn from_raw<'a>(ptr: *mut ::Wrapped) -> &'a Self { + let ptr = ::std::ptr::NonNull::new(ptr).unwrap().cast::(); + unsafe { ptr.as_ref() } + } + + pub const fn as_mut_ptr(&self) -> *mut ::Wrapped { + self.0.as_mut_ptr() + } + + pub const fn as_ptr(&self) -> *const ::Wrapped { + self.0.as_ptr() + } + + pub const fn as_void_ptr(&self) -> *mut ::core::ffi::c_void { + self.0.as_void_ptr() + } + + pub const fn raw_get(slot: *mut Self) -> *mut ::Wrapped { + slot.cast() + } + } + }) +} + +#[derive(Debug)] +enum DevicePropertyName { + CStr(syn::LitCStr), + Str(syn::LitStr), +} + +impl Parse for DevicePropertyName { + fn parse(input: ParseStream<'_>) -> syn::Result { + let lo = input.lookahead1(); + if lo.peek(syn::LitStr) { + Ok(Self::Str(input.parse()?)) + } else if lo.peek(syn::LitCStr) { + Ok(Self::CStr(input.parse()?)) + } else { + Err(lo.error()) + } + } +} + +#[derive(Default, Debug)] +struct DeviceProperty { + rename: Option, + bitnr: Option, + defval: Option, +} + +impl DeviceProperty { + fn parse_from(&mut self, a: &Attribute) -> syn::Result<()> { + use attrs::{set, with, Attrs}; + let mut parser = Attrs::new(); + parser.once("rename", with::eq(set::parse(&mut self.rename))); + parser.once("bit", 
with::eq(set::parse(&mut self.bitnr))); + parser.once("default", with::eq(set::parse(&mut self.defval))); + a.parse_args_with(&mut parser) + } + + fn parse(a: &Attribute) -> syn::Result { + let mut retval = Self::default(); + retval.parse_from(a)?; + Ok(retval) + } +} + +#[proc_macro_derive(Device, attributes(property))] +pub fn derive_device(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_device_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +fn derive_device_or_error(input: DeriveInput) -> Result { + is_c_repr(&input, "#[derive(Device)]")?; + let properties: Vec<(syn::Field, DeviceProperty)> = get_fields(&input, "#[derive(Device)]")? + .iter() + .flat_map(|f| { + f.attrs + .iter() + .filter(|a| a.path().is_ident("property")) + .map(|a| Ok((f.clone(), DeviceProperty::parse(a)?))) + }) + .collect::, Error>>()?; + let name = &input.ident; + let mut properties_expanded = vec![]; + + for (field, prop) in properties { + let DeviceProperty { + rename, + bitnr, + defval, + } = prop; + let field_name = field.ident.unwrap(); + macro_rules! str_to_c_str { + ($value:expr, $span:expr) => {{ + let (value, span) = ($value, $span); + let cstr = std::ffi::CString::new(value.as_str()).map_err(|err| { + Error::new( + span, + format!( + "Property name `{value}` cannot be represented as a C string: {err}" + ), + ) + })?; + let cstr_lit = syn::LitCStr::new(&cstr, span); + Ok(quote! { #cstr_lit }) + }}; + } + + let prop_name = rename.map_or_else( + || str_to_c_str!(field_name.to_string(), field_name.span()), + |prop_rename| -> Result { + match prop_rename { + DevicePropertyName::CStr(cstr_lit) => Ok(quote! { #cstr_lit }), + DevicePropertyName::Str(str_lit) => { + str_to_c_str!(str_lit.value(), str_lit.span()) + } + } + }, + )?; + let field_ty = field.ty.clone(); + let qdev_prop = if bitnr.is_none() { + quote! { <#field_ty as ::hwcore::QDevProp>::BASE_INFO } + } else { + quote! 
{ <#field_ty as ::hwcore::QDevProp>::BIT_INFO } + }; + let bitnr = bitnr.unwrap_or(syn::Expr::Verbatim(quote! { 0 })); + let set_default = defval.is_some(); + let defval = defval.unwrap_or(syn::Expr::Verbatim(quote! { 0 })); + properties_expanded.push(quote! { + ::hwcore::bindings::Property { + name: ::std::ffi::CStr::as_ptr(#prop_name), + info: #qdev_prop, + offset: ::core::mem::offset_of!(#name, #field_name) as isize, + bitnr: #bitnr, + set_default: #set_default, + defval: ::hwcore::bindings::Property__bindgen_ty_1 { u: #defval as u64 }, + ..::common::Zeroable::ZERO + } + }); + } + + Ok(quote_spanned! {input.span() => + unsafe impl ::hwcore::DevicePropertiesImpl for #name { + const PROPERTIES: &'static [::hwcore::bindings::Property] = &[ + #(#properties_expanded),* + ]; + } + }) +} + +#[proc_macro_derive(Wrapper)] +pub fn derive_opaque(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_opaque_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +#[allow(non_snake_case)] +fn get_repr_uN(input: &DeriveInput, msg: &str) -> Result { + let repr = input.attrs.iter().find(|attr| attr.path().is_ident("repr")); + if let Some(repr) = repr { + let nested = repr.parse_args_with(Punctuated::::parse_terminated)?; + for meta in nested { + match meta { + Meta::Path(path) if path.is_ident("u8") => return Ok(path), + Meta::Path(path) if path.is_ident("u16") => return Ok(path), + Meta::Path(path) if path.is_ident("u32") => return Ok(path), + Meta::Path(path) if path.is_ident("u64") => return Ok(path), + _ => {} + } + } + } + + Err(Error::new( + input.ident.span(), + format!("#[repr(u8/u16/u32/u64) required for {msg}"), + )) +} + +fn get_variants(input: &DeriveInput) -> Result<&Punctuated, Error> { + let Data::Enum(ref e) = &input.data else { + return Err(Error::new( + input.ident.span(), + "Cannot derive TryInto for union or struct.", + )); + }; + if let Some(v) = e.variants.iter().find(|v| v.fields != 
Fields::Unit) { + return Err(Error::new( + v.fields.span(), + "Cannot derive TryInto for enum with non-unit variants.", + )); + } + Ok(&e.variants) +} + +#[rustfmt::skip::macros(quote)] +fn derive_tryinto_body( + name: &Ident, + variants: &Punctuated, + repr: &Path, +) -> Result { + let discriminants: Vec<&Ident> = variants.iter().map(|f| &f.ident).collect(); + + Ok(quote! { + #(const #discriminants: #repr = #name::#discriminants as #repr;)* + match value { + #(#discriminants => core::result::Result::Ok(#name::#discriminants),)* + _ => core::result::Result::Err(value), + } + }) +} + +#[rustfmt::skip::macros(quote)] +fn derive_tryinto_or_error(input: DeriveInput) -> Result { + let repr = get_repr_uN(&input, "#[derive(TryInto)]")?; + let name = &input.ident; + let body = derive_tryinto_body(name, get_variants(&input)?, &repr)?; + let errmsg = format!("invalid value for {name}"); + + Ok(quote! { + impl #name { + #[allow(dead_code)] + pub const fn into_bits(self) -> #repr { + self as #repr + } + + #[allow(dead_code)] + pub const fn from_bits(value: #repr) -> Self { + match ({ + #body + }) { + Ok(x) => x, + Err(_) => panic!(#errmsg), + } + } + } + impl core::convert::TryFrom<#repr> for #name { + type Error = #repr; + + #[allow(ambiguous_associated_items)] + fn try_from(value: #repr) -> Result { + #body + } + } + }) +} + +#[proc_macro_derive(TryInto)] +pub fn derive_tryinto(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + + derive_tryinto_or_error(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} + +#[proc_macro] +pub fn bits_const_internal(ts: TokenStream) -> TokenStream { + let ts = proc_macro2::TokenStream::from(ts); + let mut it = ts.into_iter(); + + let out = BitsConstInternal::parse(&mut it).unwrap_or_else(syn::Error::into_compile_error); + + // https://github.com/rust-lang/rust-clippy/issues/15852 + quote! 
{ + { + #[allow(clippy::double_parens)] + #out + } + } + .into() +} + +/// Derive macro for generating migration state structures and trait +/// implementations. +/// +/// This macro generates a migration state struct and implements the +/// `ToMigrationState` trait for the annotated struct, enabling state +/// serialization and restoration. Note that defining a `VMStateDescription` +/// for the migration state struct is left to the user. +/// +/// # Container attributes +/// +/// The following attributes can be applied to the struct: +/// +/// - `#[migration_state(rename = CustomName)]` - Customizes the name of the +/// generated migration struct. By default, the generated struct is named +/// `{OriginalName}Migration`. +/// +/// # Field attributes +/// +/// The following attributes can be applied to individual fields: +/// +/// - `#[migration_state(omit)]` - Excludes the field from the migration state +/// entirely. +/// +/// - `#[migration_state(into(Type))]` - Converts the field using `.into()` +/// during both serialization and restoration. +/// +/// - `#[migration_state(try_into(Type))]` - Converts the field using +/// `.try_into()` during both serialization and restoration. Returns +/// `InvalidError` on conversion failure. +/// +/// - `#[migration_state(clone)]` - Clones the field value. +/// +/// Fields without any attributes use `ToMigrationState` recursively; note that +/// this is a simple copy for types that implement `Copy`. +/// +/// # Attribute compatibility +/// +/// - `omit` cannot be used with any other attributes +/// - only one of `into(Type)`, `try_into(Type)` can be used, but they can be +/// coupled with `clone`. 
+/// +/// # Examples +/// +/// Basic usage: +/// ```ignore +/// #[derive(ToMigrationState)] +/// struct MyStruct { +/// field1: u32, +/// field2: Timer, +/// } +/// ``` +/// +/// With attributes: +/// ```ignore +/// #[derive(ToMigrationState)] +/// #[migration_state(rename = CustomMigration)] +/// struct MyStruct { +/// #[migration_state(omit)] +/// runtime_field: u32, +/// +/// #[migration_state(clone)] +/// shared_data: String, +/// +/// #[migration_state(into(Cow<'static, str>), clone)] +/// converted_field: String, +/// +/// #[migration_state(try_into(i8))] +/// fallible_field: u32, +/// +/// // Default: use ToMigrationState trait recursively +/// nested_field: NestedStruct, +/// +/// // Primitive types have a default implementation of ToMigrationState +/// simple_field: u32, +/// } +/// ``` +#[proc_macro_derive(ToMigrationState, attributes(migration_state))] +pub fn derive_to_migration_state(input: TokenStream) -> TokenStream { + let input = parse_macro_input!(input as DeriveInput); + MigrationStateDerive::expand(input) + .unwrap_or_else(syn::Error::into_compile_error) + .into() +} diff --git a/rust/qemu-macros/src/migration_state.rs b/rust/qemu-macros/src/migration_state.rs new file mode 100644 index 0000000000000..5edf0efe687f4 --- /dev/null +++ b/rust/qemu-macros/src/migration_state.rs @@ -0,0 +1,298 @@ +use std::borrow::Cow; + +use proc_macro2::TokenStream; +use quote::{format_ident, quote, ToTokens}; +use syn::{spanned::Spanned, DeriveInput, Error, Field, Ident, Result, Type}; + +use crate::get_fields; + +#[derive(Debug, Default)] +enum ConversionMode { + #[default] + None, + Omit, + Into(Type), + TryInto(Type), + ToMigrationState, +} + +impl ConversionMode { + fn target_type(&self, original_type: &Type) -> TokenStream { + match self { + ConversionMode::Into(ty) | ConversionMode::TryInto(ty) => ty.to_token_stream(), + ConversionMode::ToMigrationState => { + quote! 
{ <#original_type as ToMigrationState>::Migrated } + } + _ => original_type.to_token_stream(), + } + } +} + +#[derive(Debug, Default)] +struct ContainerAttrs { + rename: Option, +} + +impl ContainerAttrs { + fn parse_from(&mut self, attrs: &[syn::Attribute]) -> Result<()> { + use attrs::{set, with, Attrs}; + Attrs::new() + .once("rename", with::eq(set::parse(&mut self.rename))) + .parse_attrs("migration_state", attrs)?; + Ok(()) + } + + fn parse(attrs: &[syn::Attribute]) -> Result { + let mut container_attrs = Self::default(); + container_attrs.parse_from(attrs)?; + Ok(container_attrs) + } +} + +#[derive(Debug, Default)] +struct FieldAttrs { + conversion: ConversionMode, + clone: bool, +} + +impl FieldAttrs { + fn parse_from(&mut self, attrs: &[syn::Attribute]) -> Result<()> { + let mut omit_flag = false; + let mut into_type: Option = None; + let mut try_into_type: Option = None; + + use attrs::{set, with, Attrs}; + Attrs::new() + .once("omit", set::flag(&mut omit_flag)) + .once("into", with::paren(set::parse(&mut into_type))) + .once("try_into", with::paren(set::parse(&mut try_into_type))) + .once("clone", set::flag(&mut self.clone)) + .parse_attrs("migration_state", attrs)?; + + self.conversion = match (omit_flag, into_type, try_into_type, self.clone) { + // Valid combinations of attributes first... + (true, None, None, false) => ConversionMode::Omit, + (false, Some(ty), None, _) => ConversionMode::Into(ty), + (false, None, Some(ty), _) => ConversionMode::TryInto(ty), + (false, None, None, true) => ConversionMode::None, // clone without conversion + (false, None, None, false) => ConversionMode::ToMigrationState, // default behavior + + // ... 
then the error cases + (true, _, _, _) => { + return Err(Error::new( + attrs[0].span(), + "ToMigrationState: omit cannot be used with other attributes", + )); + } + (_, Some(_), Some(_), _) => { + return Err(Error::new( + attrs[0].span(), + "ToMigrationState: into and try_into attributes cannot be used together", + )); + } + }; + + Ok(()) + } + + fn parse(attrs: &[syn::Attribute]) -> Result { + let mut field_attrs = Self::default(); + field_attrs.parse_from(attrs)?; + Ok(field_attrs) + } +} + +#[derive(Debug)] +struct MigrationStateField { + name: Ident, + original_type: Type, + attrs: FieldAttrs, +} + +impl MigrationStateField { + fn maybe_clone(&self, mut value: TokenStream) -> TokenStream { + if self.attrs.clone { + value = quote! { #value.clone() }; + } + value + } + + fn generate_migration_state_field(&self) -> TokenStream { + let name = &self.name; + let field_type = self.attrs.conversion.target_type(&self.original_type); + + quote! { + pub #name: #field_type, + } + } + + fn generate_snapshot_field(&self) -> TokenStream { + let name = &self.name; + let value = self.maybe_clone(quote! { self.#name }); + + match &self.attrs.conversion { + ConversionMode::Omit => { + unreachable!("Omitted fields are filtered out during processing") + } + ConversionMode::None => quote! { + target.#name = #value; + }, + ConversionMode::Into(_) => quote! { + target.#name = #value.into(); + }, + ConversionMode::TryInto(_) => quote! { + target.#name = #value.try_into().map_err(|_| migration::InvalidError)?; + }, + ConversionMode::ToMigrationState => quote! { + self.#name.snapshot_migration_state(&mut target.#name)?; + }, + } + } + + fn generate_restore_field(&self) -> TokenStream { + let name = &self.name; + + match &self.attrs.conversion { + ConversionMode::Omit => { + unreachable!("Omitted fields are filtered out during processing") + } + ConversionMode::None => quote! { + self.#name = #name; + }, + ConversionMode::Into(_) => quote! 
{ + self.#name = #name.into(); + }, + ConversionMode::TryInto(_) => quote! { + self.#name = #name.try_into().map_err(|_| migration::InvalidError)?; + }, + ConversionMode::ToMigrationState => quote! { + self.#name.restore_migrated_state_mut(#name, _version_id)?; + }, + } + } +} + +#[derive(Debug)] +pub struct MigrationStateDerive { + input: DeriveInput, + fields: Vec, + container_attrs: ContainerAttrs, +} + +impl MigrationStateDerive { + fn parse(input: DeriveInput) -> Result { + let container_attrs = ContainerAttrs::parse(&input.attrs)?; + let fields = get_fields(&input, "ToMigrationState")?; + let fields = Self::process_fields(fields)?; + + Ok(Self { + input, + fields, + container_attrs, + }) + } + + fn process_fields( + fields: &syn::punctuated::Punctuated, + ) -> Result> { + let processed = fields + .iter() + .map(|field| { + let attrs = FieldAttrs::parse(&field.attrs)?; + Ok((field, attrs)) + }) + .collect::>>()? + .into_iter() + .filter(|(_, attrs)| !matches!(attrs.conversion, ConversionMode::Omit)) + .map(|(field, attrs)| MigrationStateField { + name: field.ident.as_ref().unwrap().clone(), + original_type: field.ty.clone(), + attrs, + }) + .collect(); + + Ok(processed) + } + + fn migration_state_name(&self) -> Cow<'_, Ident> { + match &self.container_attrs.rename { + Some(rename) => Cow::Borrowed(rename), + None => Cow::Owned(format_ident!("{}Migration", &self.input.ident)), + } + } + + fn generate_migration_state_struct(&self) -> TokenStream { + let name = self.migration_state_name(); + let fields = self + .fields + .iter() + .map(MigrationStateField::generate_migration_state_field); + + quote! { + #[derive(Default)] + pub struct #name { + #(#fields)* + } + } + } + + fn generate_snapshot_migration_state(&self) -> TokenStream { + let fields = self + .fields + .iter() + .map(MigrationStateField::generate_snapshot_field); + + quote! 
{ + fn snapshot_migration_state(&self, target: &mut Self::Migrated) -> Result<(), migration::InvalidError> { + #(#fields)* + Ok(()) + } + } + } + + fn generate_restore_migrated_state(&self) -> TokenStream { + let names: Vec<_> = self.fields.iter().map(|f| &f.name).collect(); + let fields = self + .fields + .iter() + .map(MigrationStateField::generate_restore_field); + + // version_id could be used or not depending on conversion attributes + quote! { + #[allow(clippy::used_underscore_binding)] + fn restore_migrated_state_mut(&mut self, source: Self::Migrated, _version_id: u8) -> Result<(), migration::InvalidError> { + let Self::Migrated { #(#names),* } = source; + #(#fields)* + Ok(()) + } + } + } + + fn generate(&self) -> TokenStream { + let struct_name = &self.input.ident; + let generics = &self.input.generics; + + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + let name = self.migration_state_name(); + let migration_state_struct = self.generate_migration_state_struct(); + let snapshot_impl = self.generate_snapshot_migration_state(); + let restore_impl = self.generate_restore_migrated_state(); + + quote! { + #migration_state_struct + + impl #impl_generics ToMigrationState for #struct_name #ty_generics #where_clause { + type Migrated = #name; + + #snapshot_impl + + #restore_impl + } + } + } + + pub fn expand(input: DeriveInput) -> Result { + let tokens = Self::parse(input)?.generate(); + Ok(tokens) + } +} diff --git a/rust/qemu-macros/src/tests.rs b/rust/qemu-macros/src/tests.rs new file mode 100644 index 0000000000000..65691412ff57e --- /dev/null +++ b/rust/qemu-macros/src/tests.rs @@ -0,0 +1,456 @@ +// Copyright 2025, Linaro Limited +// Author(s): Manos Pitsidianakis +// SPDX-License-Identifier: GPL-2.0-or-later + +use quote::quote; + +use super::*; + +macro_rules! derive_compile_fail { + ($derive_fn:path, $input:expr, $($error_msg:expr),+ $(,)?) => {{ + let input: proc_macro2::TokenStream = $input; + let error_msg = &[$( quote! 
{ ::core::compile_error! { $error_msg } } ),*]; + let derive_fn: fn(input: syn::DeriveInput) -> Result = + $derive_fn; + + let input: syn::DeriveInput = syn::parse2(input).unwrap(); + let result = derive_fn(input); + let err = result.unwrap_err().into_compile_error(); + assert_eq!( + err.to_string(), + quote! { #(#error_msg)* }.to_string() + ); + }}; +} + +macro_rules! derive_compile { + ($derive_fn:path, $input:expr, $($expected:tt)*) => {{ + let input: proc_macro2::TokenStream = $input; + let expected: proc_macro2::TokenStream = $($expected)*; + let derive_fn: fn(input: syn::DeriveInput) -> Result = + $derive_fn; + + let input: syn::DeriveInput = syn::parse2(input).unwrap(); + let result = derive_fn(input).unwrap(); + assert_eq!(result.to_string(), expected.to_string()); + }}; +} + +#[test] +fn test_derive_device() { + // Check that repr(C) is used + derive_compile_fail!( + derive_device_or_error, + quote! { + #[derive(Device)] + struct Foo { + _unused: [u8; 0], + } + }, + "#[repr(C)] required for #[derive(Device)]" + ); + // Check that invalid/misspelled attributes raise an error + derive_compile_fail!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + struct DummyState { + #[property(defalt = true)] + migrate_clock: bool, + } + }, + "Expected one of `bit`, `default` or `rename`" + ); + // Check that repeated attributes are not allowed: + derive_compile_fail!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + struct DummyState { + #[property(rename = "migrate-clk", rename = "migrate-clk", default = true)] + migrate_clock: bool, + } + }, + "Duplicate argument", + "Already used here", + ); + derive_compile_fail!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + struct DummyState { + #[property(default = true, default = true)] + migrate_clock: bool, + } + }, + "Duplicate argument", + "Already used here", + ); + derive_compile_fail!( + derive_device_or_error, + quote! 
{ + #[repr(C)] + #[derive(Device)] + struct DummyState { + #[property(bit = 0, bit = 1)] + flags: u32, + } + }, + "Duplicate argument", + "Already used here", + ); + // Check that the field name is preserved when `rename` isn't used: + derive_compile!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + pub struct DummyState { + parent: ParentField, + #[property(default = true)] + migrate_clock: bool, + } + }, + quote! { + unsafe impl ::hwcore::DevicePropertiesImpl for DummyState { + const PROPERTIES: &'static [::hwcore::bindings::Property] = &[ + ::hwcore::bindings::Property { + name: ::std::ffi::CStr::as_ptr(c"migrate_clock"), + info: ::BASE_INFO, + offset: ::core::mem::offset_of!(DummyState, migrate_clock) as isize, + bitnr: 0, + set_default: true, + defval: ::hwcore::bindings::Property__bindgen_ty_1 { u: true as u64 }, + ..::common::Zeroable::ZERO + } + ]; + } + } + ); + // Check that `rename` value is used for the property name when used: + derive_compile!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + pub struct DummyState { + parent: ParentField, + #[property(rename = "migrate-clk", default = true)] + migrate_clock: bool, + } + }, + quote! { + unsafe impl ::hwcore::DevicePropertiesImpl for DummyState { + const PROPERTIES: &'static [::hwcore::bindings::Property] = &[ + ::hwcore::bindings::Property { + name: ::std::ffi::CStr::as_ptr(c"migrate-clk"), + info: ::BASE_INFO, + offset: ::core::mem::offset_of!(DummyState, migrate_clock) as isize, + bitnr: 0, + set_default: true, + defval: ::hwcore::bindings::Property__bindgen_ty_1 { u: true as u64 }, + ..::common::Zeroable::ZERO + } + ]; + } + } + ); + // Check that `bit` value is used for the bit property without default + // value (note: though C macro (e.g., DEFINE_PROP_BIT) always requires + // default value, Rust side allows to default this field to "0"): + derive_compile!( + derive_device_or_error, + quote! 
{ + #[repr(C)] + #[derive(Device)] + pub struct DummyState { + parent: ParentField, + #[property(bit = 3)] + flags: u32, + } + }, + quote! { + unsafe impl ::hwcore::DevicePropertiesImpl for DummyState { + const PROPERTIES: &'static [::hwcore::bindings::Property] = &[ + ::hwcore::bindings::Property { + name: ::std::ffi::CStr::as_ptr(c"flags"), + info: ::BIT_INFO, + offset: ::core::mem::offset_of!(DummyState, flags) as isize, + bitnr: 3, + set_default: false, + defval: ::hwcore::bindings::Property__bindgen_ty_1 { u: 0 as u64 }, + ..::common::Zeroable::ZERO + } + ]; + } + } + ); + // Check that `bit` value is used for the bit property when used: + derive_compile!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + pub struct DummyState { + parent: ParentField, + #[property(bit = 3, default = true)] + flags: u32, + } + }, + quote! { + unsafe impl ::hwcore::DevicePropertiesImpl for DummyState { + const PROPERTIES: &'static [::hwcore::bindings::Property] = &[ + ::hwcore::bindings::Property { + name: ::std::ffi::CStr::as_ptr(c"flags"), + info: ::BIT_INFO, + offset: ::core::mem::offset_of!(DummyState, flags) as isize, + bitnr: 3, + set_default: true, + defval: ::hwcore::bindings::Property__bindgen_ty_1 { u: true as u64 }, + ..::common::Zeroable::ZERO + } + ]; + } + } + ); + // Check that `bit` value is used for the bit property with rename when used: + derive_compile!( + derive_device_or_error, + quote! { + #[repr(C)] + #[derive(Device)] + pub struct DummyState { + parent: ParentField, + #[property(rename = "msi", bit = 3, default = false)] + flags: u64, + } + }, + quote! 
{ + unsafe impl ::hwcore::DevicePropertiesImpl for DummyState { + const PROPERTIES: &'static [::hwcore::bindings::Property] = &[ + ::hwcore::bindings::Property { + name: ::std::ffi::CStr::as_ptr(c"msi"), + info: ::BIT_INFO, + offset: ::core::mem::offset_of!(DummyState, flags) as isize, + bitnr: 3, + set_default: true, + defval: ::hwcore::bindings::Property__bindgen_ty_1 { u: false as u64 }, + ..::common::Zeroable::ZERO + } + ]; + } + } + ); +} + +#[test] +fn test_derive_object() { + derive_compile_fail!( + derive_object_or_error, + quote! { + #[derive(Object)] + struct Foo { + _unused: [u8; 0], + } + }, + "#[repr(C)] required for #[derive(Object)]" + ); + derive_compile!( + derive_object_or_error, + quote! { + #[derive(Object)] + #[repr(C)] + struct Foo { + _unused: [u8; 0], + } + }, + quote! { + ::common::assert_field_type!( + Foo, + _unused, + ::qom::ParentField<::ParentType> + ); + ::util::module_init! { + MODULE_INIT_QOM => unsafe { + ::qom::type_register_static(&::TYPE_INFO); + } + } + } + ); +} + +#[test] +fn test_derive_tryinto() { + derive_compile_fail!( + derive_tryinto_or_error, + quote! { + #[derive(TryInto)] + struct Foo { + _unused: [u8; 0], + } + }, + "#[repr(u8/u16/u32/u64) required for #[derive(TryInto)]" + ); + derive_compile!( + derive_tryinto_or_error, + quote! { + #[derive(TryInto)] + #[repr(u8)] + enum Foo { + First = 0, + Second, + } + }, + quote! 
{ + impl Foo { + #[allow(dead_code)] + pub const fn into_bits(self) -> u8 { + self as u8 + } + + #[allow(dead_code)] + pub const fn from_bits(value: u8) -> Self { + match ({ + const First: u8 = Foo::First as u8; + const Second: u8 = Foo::Second as u8; + match value { + First => core::result::Result::Ok(Foo::First), + Second => core::result::Result::Ok(Foo::Second), + _ => core::result::Result::Err(value), + } + }) { + Ok(x) => x, + Err(_) => panic!("invalid value for Foo"), + } + } + } + + impl core::convert::TryFrom for Foo { + type Error = u8; + + #[allow(ambiguous_associated_items)] + fn try_from(value: u8) -> Result { + const First: u8 = Foo::First as u8; + const Second: u8 = Foo::Second as u8; + match value { + First => core::result::Result::Ok(Foo::First), + Second => core::result::Result::Ok(Foo::Second), + _ => core::result::Result::Err(value), + } + } + } + } + ); +} + +#[test] +fn test_derive_to_migration_state() { + derive_compile_fail!( + MigrationStateDerive::expand, + quote! { + struct MyStruct { + #[migration_state(omit, clone)] + bad: u32, + } + }, + "ToMigrationState: omit cannot be used with other attributes" + ); + derive_compile_fail!( + MigrationStateDerive::expand, + quote! { + struct MyStruct { + #[migration_state(into)] + bad: u32, + } + }, + "unexpected end of input, expected parentheses" + ); + derive_compile_fail!( + MigrationStateDerive::expand, + quote! { + struct MyStruct { + #[migration_state(into(String), try_into(String))] + bad: &'static str, + } + }, + "ToMigrationState: into and try_into attributes cannot be used together" + ); + derive_compile!( + MigrationStateDerive::expand, + quote! 
{ + #[migration_state(rename = CustomMigration)] + struct MyStruct { + #[migration_state(omit)] + runtime_field: u32, + + #[migration_state(clone)] + shared_data: String, + + #[migration_state(into(Cow<'static, str>), clone)] + converted_field: String, + + #[migration_state(try_into(i8))] + fallible_field: u32, + + nested_field: NestedStruct, + simple_field: u32, + } + }, + quote! { + #[derive(Default)] + pub struct CustomMigration { + pub shared_data: String, + pub converted_field: Cow<'static, str>, + pub fallible_field: i8, + pub nested_field: ::Migrated, + pub simple_field: ::Migrated, + } + impl ToMigrationState for MyStruct { + type Migrated = CustomMigration; + fn snapshot_migration_state( + &self, + target: &mut Self::Migrated + ) -> Result<(), migration::InvalidError> { + target.shared_data = self.shared_data.clone(); + target.converted_field = self.converted_field.clone().into(); + target.fallible_field = self + .fallible_field + .try_into() + .map_err(|_| migration::InvalidError)?; + self.nested_field + .snapshot_migration_state(&mut target.nested_field)?; + self.simple_field + .snapshot_migration_state(&mut target.simple_field)?; + Ok(()) + } + #[allow(clippy::used_underscore_binding)] + fn restore_migrated_state_mut( + &mut self, + source: Self::Migrated, + _version_id: u8 + ) -> Result<(), migration::InvalidError> { + let Self::Migrated { + shared_data, + converted_field, + fallible_field, + nested_field, + simple_field + } = source; + self.shared_data = shared_data; + self.converted_field = converted_field.into(); + self.fallible_field = fallible_field + .try_into() + .map_err(|_| migration::InvalidError)?; + self.nested_field + .restore_migrated_state_mut(nested_field, _version_id)?; + self.simple_field + .restore_migrated_state_mut(simple_field, _version_id)?; + Ok(()) + } + } + } + ); +} diff --git a/rust/qom/Cargo.toml b/rust/qom/Cargo.toml new file mode 100644 index 0000000000000..4be3c2541b612 --- /dev/null +++ b/rust/qom/Cargo.toml @@ -0,0 
+1,24 @@ +[package] +name = "qom" +version = "0.1.0" +description = "Rust bindings for QEMU/QOM" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +common = { path = "../common" } +bql = { path = "../bql" } +migration = { path = "../migration" } +qemu_macros = { path = "../qemu-macros" } +util = { path = "../util" } +glib-sys.workspace = true + +[lints] +workspace = true diff --git a/rust/qom/build.rs b/rust/qom/build.rs new file mode 120000 index 0000000000000..71a3167885c23 --- /dev/null +++ b/rust/qom/build.rs @@ -0,0 +1 @@ +../util/build.rs \ No newline at end of file diff --git a/rust/qom/meson.build b/rust/qom/meson.build new file mode 100644 index 0000000000000..e50f41858d618 --- /dev/null +++ b/rust/qom/meson.build @@ -0,0 +1,43 @@ +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. 
In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_qom_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common, + c_args: bindgen_c_args, +) + +_qom_rs = static_library( + 'qom', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/prelude.rs', + 'src/qom.rs', + ], + {'.': _qom_bindings_inc_rs} + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + link_with: [_bql_rs, _migration_rs], + dependencies: [common_rs, glib_sys_rs, qemu_macros], +) + +qom_rs = declare_dependency(link_with: [_qom_rs], dependencies: [qemu_macros, qom]) + +# Doctests are essentially integration tests, so they need the same dependencies. +# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". 
+rust.doctest('rust-qom-rs-doctests', + _qom_rs, + dependencies: qom_rs, + suite: ['doc', 'rust']) diff --git a/rust/qom/src/bindings.rs b/rust/qom/src/bindings.rs new file mode 100644 index 0000000000000..91de42f242672 --- /dev/null +++ b/rust/qom/src/bindings.rs @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use glib_sys::{GHashTable, GHashTableIter, GList, GPtrArray, GQueue, GSList}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); diff --git a/rust/qom/src/lib.rs b/rust/qom/src/lib.rs new file mode 100644 index 0000000000000..24c44fc2afb50 --- /dev/null +++ b/rust/qom/src/lib.rs @@ -0,0 +1,13 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub use qemu_macros::Object; + +pub mod bindings; + +// preserve one-item-per-"use" syntax, it is clearer +// for prelude-like modules +#[rustfmt::skip] +pub mod prelude; + +mod qom; +pub use qom::*; diff --git a/rust/qom/src/prelude.rs b/rust/qom/src/prelude.rs new file mode 100644 index 0000000000000..00a6095977110 --- /dev/null +++ b/rust/qom/src/prelude.rs @@ -0,0 +1,12 @@ +//! Traits and essential types intended for blanket imports. 
+ +pub use crate::qom::InterfaceType; +pub use crate::qom::IsA; +pub use crate::qom::Object; +pub use crate::qom::ObjectCast; +pub use crate::qom::ObjectClassMethods; +pub use crate::qom::ObjectDeref; +pub use crate::qom::ObjectMethods; +pub use crate::qom::ObjectType; + +pub use crate::qom_isa; diff --git a/rust/qemu-api/src/qom.rs b/rust/qom/src/qom.rs similarity index 97% rename from rust/qemu-api/src/qom.rs rename to rust/qom/src/qom.rs index e20ee014cb18c..5808051cd77ec 100644 --- a/rust/qemu-api/src/qom.rs +++ b/rust/qom/src/qom.rs @@ -101,19 +101,18 @@ use std::{ ptr::NonNull, }; -pub use bindings::ObjectClass; - -use crate::{ - bindings::{ - self, object_class_dynamic_cast, object_dynamic_cast, object_get_class, - object_get_typename, object_new, object_ref, object_unref, TypeInfo, - }, - cell::{bql_locked, Opaque}, +use common::Opaque; +use migration::impl_vmstate_pointer; + +use crate::bindings::{ + self, object_class_dynamic_cast, object_dynamic_cast, object_get_class, object_get_typename, + object_new, object_ref, object_unref, TypeInfo, }; +pub use crate::bindings::{type_register_static, ObjectClass}; /// A safe wrapper around [`bindings::Object`]. #[repr(transparent)] -#[derive(Debug, qemu_api_macros::Wrapper)] +#[derive(Debug, common::Wrapper)] pub struct Object(Opaque); unsafe impl Send for Object {} @@ -147,7 +146,7 @@ macro_rules! qom_isa { $( // SAFETY: it is the caller responsibility to have $parent as the // first field - unsafe impl $crate::qom::IsA<$parent> for $struct {} + unsafe impl $crate::IsA<$parent> for $struct {} impl AsRef<$parent> for $struct { fn as_ref(&self) -> &$parent { @@ -174,7 +173,7 @@ macro_rules! qom_isa { /// /// ```ignore /// #[repr(C)] -/// #[derive(qemu_api_macros::Object)] +/// #[derive(qom::Object)] /// pub struct MyDevice { /// parent: ParentField, /// ... 
@@ -307,7 +306,7 @@ impl ParentInit<'_, T> { /// Fields beyond `Object` could be uninitialized and it's your /// responsibility to avoid that they're used when the pointer is /// dereferenced, either directly or through a cast. - pub fn as_object_mut_ptr(&self) -> *mut bindings::Object { + pub const fn as_object_mut_ptr(&self) -> *mut bindings::Object { self.as_object_ptr().cast_mut() } @@ -318,7 +317,7 @@ impl ParentInit<'_, T> { /// Fields beyond `Object` could be uninitialized and it's your /// responsibility to avoid that they're used when the pointer is /// dereferenced, either directly or through a cast. - pub fn as_object_ptr(&self) -> *const bindings::Object { + pub const fn as_object_ptr(&self) -> *const bindings::Object { self.0.as_ptr().cast() } } @@ -336,7 +335,7 @@ impl<'a, T: ObjectImpl> ParentInit<'a, T> { /// However, while the fields of the resulting reference are initialized, /// calls might use uninitialized fields of the subclass. It is your /// responsibility to avoid this. - pub unsafe fn upcast(&self) -> &'a U + pub const unsafe fn upcast(&self) -> &'a U where T::ParentType: IsA, { @@ -871,7 +870,7 @@ impl ObjectDeref for Owned {} impl Drop for Owned { fn drop(&mut self) { - assert!(bql_locked()); + assert!(bql::is_locked()); // SAFETY: creation method is unsafe, and whoever calls it has // responsibility that the pointer is valid, and remains valid // throughout the lifetime of the `Owned` and its clones. 
@@ -895,7 +894,7 @@ impl> fmt::Debug for Owned { pub trait ObjectClassMethods: IsA { /// Return a new reference counted instance of this class fn new() -> Owned { - assert!(bql_locked()); + assert!(bql::is_locked()); // SAFETY: the object created by object_new is allocated on // the heap and has a reference count of 1 unsafe { @@ -948,3 +947,5 @@ where impl ObjectClassMethods for T where T: IsA {} impl ObjectMethods for R where R::Target: IsA {} + +impl_vmstate_pointer!(Owned where T: VMState + ObjectType); diff --git a/rust/qom/wrapper.h b/rust/qom/wrapper.h new file mode 100644 index 0000000000000..3b71bcd3f5bfa --- /dev/null +++ b/rust/qom/wrapper.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. + */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. 
+ */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" + +#include "qom/object.h" diff --git a/rust/system/Cargo.toml b/rust/system/Cargo.toml new file mode 100644 index 0000000000000..186ea00bfff9c --- /dev/null +++ b/rust/system/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "system" +version = "0.1.0" +description = "Rust bindings for QEMU/system" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +common = { path = "../common" } +qom = { path = "../qom" } +util = { path = "../util" } +glib-sys.workspace = true + +[lints] +workspace = true diff --git a/rust/system/build.rs b/rust/system/build.rs new file mode 120000 index 0000000000000..71a3167885c23 --- /dev/null +++ b/rust/system/build.rs @@ -0,0 +1 @@ +../util/build.rs \ No newline at end of file diff --git a/rust/system/meson.build b/rust/system/meson.build new file mode 100644 index 0000000000000..73d61991146ac --- /dev/null +++ b/rust/system/meson.build @@ -0,0 +1,43 @@ +c_enums = [ + 'device_endian', +] +_system_bindgen_args = [] +foreach enum : c_enums + _system_bindgen_args += ['--rustified-enum', enum] +endforeach + +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. 
In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_system_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _system_bindgen_args, + c_args: bindgen_c_args, +) + +_system_rs = static_library( + 'system', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/memory.rs', + ], + {'.': _system_bindings_inc_rs} + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + link_with: [_bql_rs, _migration_rs, _qom_rs, _util_rs], + dependencies: [glib_sys_rs, common_rs, qemu_macros], +) + +system_rs = declare_dependency(link_with: [_system_rs], + dependencies: [hwcore]) diff --git a/rust/system/src/bindings.rs b/rust/system/src/bindings.rs new file mode 100644 index 0000000000000..6cbb588de3d9c --- /dev/null +++ b/rust/system/src/bindings.rs @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use common::Zeroable; +use glib_sys::{ + guint, GArray, GByteArray, GHashTable, GHashTableIter, GList, GPollFD, GPtrArray, GQueue, + GSList, GString, +}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); + +// SAFETY: these are constants and vtables; the Send and Sync requirements +// are deferred to the unsafe callbacks that they contain +unsafe impl Send for 
MemoryRegionOps {} +unsafe impl Sync for MemoryRegionOps {} + +// SAFETY: this is a pure data struct +unsafe impl Send for CoalescedMemoryRange {} +unsafe impl Sync for CoalescedMemoryRange {} + +unsafe impl Zeroable for MemoryRegionOps__bindgen_ty_1 {} +unsafe impl Zeroable for MemoryRegionOps__bindgen_ty_2 {} +unsafe impl Zeroable for MemoryRegionOps {} +unsafe impl Zeroable for MemTxAttrs {} diff --git a/rust/system/src/lib.rs b/rust/system/src/lib.rs new file mode 100644 index 0000000000000..aafe9a866c925 --- /dev/null +++ b/rust/system/src/lib.rs @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub mod bindings; + +mod memory; +pub use memory::*; diff --git a/rust/qemu-api/src/memory.rs b/rust/system/src/memory.rs similarity index 90% rename from rust/qemu-api/src/memory.rs rename to rust/system/src/memory.rs index e40fad6cf19ea..4b3316bf767f1 100644 --- a/rust/qemu-api/src/memory.rs +++ b/rust/system/src/memory.rs @@ -9,16 +9,11 @@ use std::{ marker::PhantomData, }; -pub use bindings::{hwaddr, MemTxAttrs}; - -use crate::{ - bindings::{self, device_endian, memory_region_init_io}, - callbacks::FnCall, - cell::Opaque, - prelude::*, - uninit::MaybeUninitField, - zeroable::Zeroable, -}; +use common::{callbacks::FnCall, uninit::MaybeUninitField, zeroable::Zeroable, Opaque}; +use qom::prelude::*; + +use crate::bindings::{self, device_endian, memory_region_init_io}; +pub use crate::bindings::{hwaddr, MemTxAttrs}; pub struct MemoryRegionOps( bindings::MemoryRegionOps, @@ -134,18 +129,13 @@ impl Default for MemoryRegionOpsBuilder { /// A safe wrapper around [`bindings::MemoryRegion`]. #[repr(transparent)] -#[derive(qemu_api_macros::Wrapper)] +#[derive(common::Wrapper)] pub struct MemoryRegion(Opaque); unsafe impl Send for MemoryRegion {} unsafe impl Sync for MemoryRegion {} impl MemoryRegion { - // inline to ensure that it is not included in tests, which only - // link to hwcore and qom. 
FIXME: inlining is actually the opposite - // of what we want, since this is the type-erased version of the - // init_io function below. Look into splitting the qemu_api crate. - #[inline(always)] unsafe fn do_init_io( slot: *mut bindings::MemoryRegion, owner: *mut bindings::Object, @@ -189,6 +179,7 @@ unsafe impl ObjectType for MemoryRegion { const TYPE_NAME: &'static CStr = unsafe { CStr::from_bytes_with_nul_unchecked(bindings::TYPE_MEMORY_REGION) }; } + qom_isa!(MemoryRegion: Object); /// A special `MemTxAttrs` constant, used to indicate that no memory diff --git a/rust/system/wrapper.h b/rust/system/wrapper.h new file mode 100644 index 0000000000000..48abde8505268 --- /dev/null +++ b/rust/system/wrapper.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. + */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. 
+ */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" + +#include "system/system.h" +#include "system/memory.h" +#include "system/address-spaces.h" diff --git a/rust/tests/Cargo.toml b/rust/tests/Cargo.toml new file mode 100644 index 0000000000000..d47dc3314d81b --- /dev/null +++ b/rust/tests/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "tests" +version = "0.1.0" +description = "Rust integration tests for QEMU" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +common = { path = "../common" } +chardev = { path = "../chardev" } +hwcore = { path = "../hw/core" } +migration = { path = "../migration" } +util = { path = "../util" } +bql = { path = "../bql" } +qom = { path = "../qom" } +system = { path = "../system" } + +[lints] +workspace = true diff --git a/rust/tests/meson.build b/rust/tests/meson.build new file mode 100644 index 0000000000000..00688c66fb134 --- /dev/null +++ b/rust/tests/meson.build @@ -0,0 +1,14 @@ +test('rust-integration', + executable( + 'rust-integration', + files('tests/vmstate_tests.rs'), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_args: ['--test'], + install: false, + dependencies: [bql_rs, common_rs, util_rs, migration_rs, qom_rs]), + args: [ + '--test', '--test-threads', '1', + '--format', 'pretty', + ], + protocol: 'rust', + suite: ['unit', 'rust']) diff --git a/rust/qemu-api/tests/vmstate_tests.rs b/rust/tests/tests/vmstate_tests.rs similarity index 78% rename from rust/qemu-api/tests/vmstate_tests.rs rename to rust/tests/tests/vmstate_tests.rs index bded836eb608d..fa9bbd6a12227 100644 --- a/rust/qemu-api/tests/vmstate_tests.rs +++ 
b/rust/tests/tests/vmstate_tests.rs @@ -9,16 +9,16 @@ use std::{ slice, }; -use qemu_api::{ +use bql::BqlCell; +use common::Opaque; +use migration::{ bindings::{ vmstate_info_bool, vmstate_info_int32, vmstate_info_int64, vmstate_info_int8, vmstate_info_uint64, vmstate_info_uint8, vmstate_info_unused_buffer, VMStateFlags, }, - cell::{BqlCell, Opaque}, - impl_vmstate_forward, - vmstate::{VMStateDescription, VMStateField}, - vmstate_fields, vmstate_of, vmstate_struct, vmstate_unused, vmstate_validate, - zeroable::Zeroable, + impl_vmstate_forward, impl_vmstate_struct, + vmstate::{VMStateDescription, VMStateDescriptionBuilder, VMStateField}, + vmstate_fields, vmstate_of, vmstate_unused, vmstate_validate, }; const FOO_ARRAY_MAX: usize = 3; @@ -41,22 +41,24 @@ struct FooA { elem: i8, } -static VMSTATE_FOOA: VMStateDescription = VMStateDescription { - name: c"foo_a".as_ptr(), - version_id: 1, - minimum_version_id: 1, - fields: vmstate_fields! { +static VMSTATE_FOOA: VMStateDescription = VMStateDescriptionBuilder::::new() + .name(c"foo_a") + .version_id(1) + .minimum_version_id(1) + .fields(vmstate_fields! { vmstate_of!(FooA, elem), vmstate_unused!(size_of::()), vmstate_of!(FooA, arr[0 .. num]).with_version_id(0), vmstate_of!(FooA, arr_mul[0 .. 
num_mul * 16]), - }, - ..Zeroable::ZERO -}; + }) + .build(); + +impl_vmstate_struct!(FooA, VMSTATE_FOOA); #[test] fn test_vmstate_uint16() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOA.as_ref().fields, 5) }; // 1st VMStateField ("elem") in VMSTATE_FOOA (corresponding to VMSTATE_UINT16) assert_eq!( @@ -76,7 +78,8 @@ fn test_vmstate_uint16() { #[test] fn test_vmstate_unused() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOA.as_ref().fields, 5) }; // 2nd VMStateField ("unused") in VMSTATE_FOOA (corresponding to VMSTATE_UNUSED) assert_eq!( @@ -96,7 +99,8 @@ fn test_vmstate_unused() { #[test] fn test_vmstate_varray_uint16_unsafe() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOA.as_ref().fields, 5) }; // 3rd VMStateField ("arr") in VMSTATE_FOOA (corresponding to // VMSTATE_VARRAY_UINT16_UNSAFE) @@ -117,7 +121,8 @@ fn test_vmstate_varray_uint16_unsafe() { #[test] fn test_vmstate_varray_multiply() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOA.fields, 5) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOA.as_ref().fields, 5) }; // 4th VMStateField ("arr_mul") in VMSTATE_FOOA (corresponding to // VMSTATE_VARRAY_MULTIPLY) @@ -171,24 +176,24 @@ fn validate_foob(_state: &FooB, _version_id: u8) -> bool { true } -static VMSTATE_FOOB: VMStateDescription = VMStateDescription { - name: c"foo_b".as_ptr(), - version_id: 2, - minimum_version_id: 1, - fields: vmstate_fields! 
{ +static VMSTATE_FOOB: VMStateDescription = VMStateDescriptionBuilder::::new() + .name(c"foo_b") + .version_id(2) + .minimum_version_id(1) + .fields(vmstate_fields! { vmstate_of!(FooB, val).with_version_id(2), vmstate_of!(FooB, wrap), - vmstate_struct!(FooB, arr_a[0 .. num_a], &VMSTATE_FOOA, FooA).with_version_id(1), - vmstate_struct!(FooB, arr_a_mul[0 .. num_a_mul * 32], &VMSTATE_FOOA, FooA).with_version_id(2), + vmstate_of!(FooB, arr_a[0 .. num_a]).with_version_id(1), + vmstate_of!(FooB, arr_a_mul[0 .. num_a_mul * 32]).with_version_id(2), vmstate_of!(FooB, arr_i64), - vmstate_struct!(FooB, arr_a_wrap[0 .. num_a_wrap], &VMSTATE_FOOA, FooA, validate_foob), - }, - ..Zeroable::ZERO -}; + vmstate_of!(FooB, arr_a_wrap[0 .. num_a_wrap], validate_foob), + }) + .build(); #[test] fn test_vmstate_bool_v() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOB.as_ref().fields, 7) }; // 1st VMStateField ("val") in VMSTATE_FOOB (corresponding to VMSTATE_BOOL_V) assert_eq!( @@ -208,7 +213,8 @@ fn test_vmstate_bool_v() { #[test] fn test_vmstate_uint64() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOB.as_ref().fields, 7) }; // 2nd VMStateField ("wrap") in VMSTATE_FOOB (corresponding to VMSTATE_U64) assert_eq!( @@ -228,7 +234,8 @@ fn test_vmstate_uint64() { #[test] fn test_vmstate_struct_varray_uint8() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOB.as_ref().fields, 7) }; // 3rd VMStateField ("arr_a") in VMSTATE_FOOB (corresponding to // VMSTATE_STRUCT_VARRAY_UINT8) @@ -246,13 +253,14 @@ fn test_vmstate_struct_varray_uint8() { foo_fields[2].flags.0, VMStateFlags::VMS_STRUCT.0 | 
VMStateFlags::VMS_VARRAY_UINT8.0 ); - assert_eq!(foo_fields[2].vmsd, &VMSTATE_FOOA); + assert_eq!(foo_fields[2].vmsd, VMSTATE_FOOA.as_ref()); assert!(foo_fields[2].field_exists.is_none()); } #[test] fn test_vmstate_struct_varray_uint32_multiply() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOB.as_ref().fields, 7) }; // 4th VMStateField ("arr_a_mul") in VMSTATE_FOOB (corresponding to // (no C version) MULTIPLY variant of VMSTATE_STRUCT_VARRAY_UINT32) @@ -272,13 +280,14 @@ fn test_vmstate_struct_varray_uint32_multiply() { | VMStateFlags::VMS_VARRAY_UINT32.0 | VMStateFlags::VMS_MULTIPLY_ELEMENTS.0 ); - assert_eq!(foo_fields[3].vmsd, &VMSTATE_FOOA); + assert_eq!(foo_fields[3].vmsd, VMSTATE_FOOA.as_ref()); assert!(foo_fields[3].field_exists.is_none()); } #[test] fn test_vmstate_macro_array() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOB.as_ref().fields, 7) }; // 5th VMStateField ("arr_i64") in VMSTATE_FOOB (corresponding to // VMSTATE_ARRAY) @@ -299,7 +308,8 @@ fn test_vmstate_macro_array() { #[test] fn test_vmstate_struct_varray_uint8_wrapper() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOB.fields, 7) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOB.as_ref().fields, 7) }; let mut foo_b: FooB = Default::default(); let foo_b_p = std::ptr::addr_of_mut!(foo_b).cast::(); @@ -335,26 +345,26 @@ struct FooC { arr_ptr_wrap: FooCWrapper, } -static VMSTATE_FOOC: VMStateDescription = VMStateDescription { - name: c"foo_c".as_ptr(), - version_id: 3, - minimum_version_id: 1, - fields: vmstate_fields! 
{ +unsafe impl Sync for FooC {} + +static VMSTATE_FOOC: VMStateDescription = VMStateDescriptionBuilder::::new() + .name(c"foo_c") + .version_id(3) + .minimum_version_id(1) + .fields(vmstate_fields! { vmstate_of!(FooC, ptr).with_version_id(2), - // FIXME: Currently vmstate_struct doesn't support the pointer to structure. - // VMSTATE_STRUCT_POINTER: vmstate_struct!(FooC, ptr_a, VMSTATE_FOOA, NonNull) - vmstate_unused!(size_of::>()), + vmstate_of!(FooC, ptr_a), vmstate_of!(FooC, arr_ptr), vmstate_of!(FooC, arr_ptr_wrap), - }, - ..Zeroable::ZERO -}; + }) + .build(); const PTR_SIZE: usize = size_of::<*mut ()>(); #[test] fn test_vmstate_pointer() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOC.as_ref().fields, 6) }; // 1st VMStateField ("ptr") in VMSTATE_FOOC (corresponding to VMSTATE_POINTER) assert_eq!( @@ -375,9 +385,35 @@ fn test_vmstate_pointer() { assert!(foo_fields[0].field_exists.is_none()); } +#[test] +fn test_vmstate_struct_pointer() { + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOC.as_ref().fields, 6) }; + + // 2nd VMStateField ("ptr_a") in VMSTATE_FOOC (corresponding to + // VMSTATE_STRUCT_POINTER) + assert_eq!( + unsafe { CStr::from_ptr(foo_fields[1].name) }.to_bytes_with_nul(), + b"ptr_a\0" + ); + assert_eq!(foo_fields[1].offset, PTR_SIZE); + assert_eq!(foo_fields[1].num_offset, 0); + assert_eq!(foo_fields[1].vmsd, VMSTATE_FOOA.as_ref()); + assert_eq!(foo_fields[1].version_id, 0); + assert_eq!(foo_fields[1].size, size_of::()); + assert_eq!(foo_fields[1].num, 0); + assert_eq!( + foo_fields[1].flags.0, + VMStateFlags::VMS_STRUCT.0 | VMStateFlags::VMS_POINTER.0 + ); + assert!(foo_fields[1].info.is_null()); + assert!(foo_fields[1].field_exists.is_none()); +} + #[test] fn test_vmstate_macro_array_of_pointer() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) 
}; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOC.as_ref().fields, 6) }; // 3rd VMStateField ("arr_ptr") in VMSTATE_FOOC (corresponding to // VMSTATE_ARRAY_OF_POINTER) @@ -401,7 +437,8 @@ fn test_vmstate_macro_array_of_pointer() { #[test] fn test_vmstate_macro_array_of_pointer_wrapped() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOC.fields, 6) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOC.as_ref().fields, 6) }; // 4th VMStateField ("arr_ptr_wrap") in VMSTATE_FOOC (corresponding to // VMSTATE_ARRAY_OF_POINTER) @@ -432,8 +469,7 @@ fn test_vmstate_macro_array_of_pointer_wrapped() { // * VMSTATE_FOOD: // - VMSTATE_VALIDATE -// Add more member fields when vmstate_of/vmstate_struct support "test" -// parameter. +// Add more member fields when vmstate_of supports "test" parameter. struct FooD; impl FooD { @@ -450,21 +486,21 @@ fn validate_food_2(_state: &FooD, _version_id: u8) -> bool { true } -static VMSTATE_FOOD: VMStateDescription = VMStateDescription { - name: c"foo_d".as_ptr(), - version_id: 3, - minimum_version_id: 1, - fields: vmstate_fields! { +static VMSTATE_FOOD: VMStateDescription = VMStateDescriptionBuilder::::new() + .name(c"foo_d") + .version_id(3) + .minimum_version_id(1) + .fields(vmstate_fields! 
{ vmstate_validate!(FooD, c"foo_d_0", FooD::validate_food_0), vmstate_validate!(FooD, c"foo_d_1", FooD::validate_food_1), vmstate_validate!(FooD, c"foo_d_2", validate_food_2), - }, - ..Zeroable::ZERO -}; + }) + .build(); #[test] fn test_vmstate_validate() { - let foo_fields: &[VMStateField] = unsafe { slice::from_raw_parts(VMSTATE_FOOD.fields, 4) }; + let foo_fields: &[VMStateField] = + unsafe { slice::from_raw_parts(VMSTATE_FOOD.as_ref().fields, 4) }; let mut foo_d = FooD; let foo_d_p = std::ptr::addr_of_mut!(foo_d).cast::(); diff --git a/rust/trace/Cargo.toml b/rust/trace/Cargo.toml new file mode 100644 index 0000000000000..fc81bce5803d3 --- /dev/null +++ b/rust/trace/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "trace" +version = "0.1.0" +authors = ["Tanish Desai "] +description = "QEMU tracing infrastructure support" +resolver = "2" +publish = false + +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +libc = { workspace = true } + +[lints] +workspace = true diff --git a/rust/trace/meson.build b/rust/trace/meson.build new file mode 100644 index 0000000000000..adca57e550753 --- /dev/null +++ b/rust/trace/meson.build @@ -0,0 +1,19 @@ +rust = import('rust') + +lib_rs = configure_file( + input: 'src/lib.rs', + output: 'lib.rs', + configuration: { + 'MESON_BUILD_ROOT': meson.project_build_root(), + }) + +_trace_rs = static_library( + 'trace', # Library name, + lib_rs, + trace_rs_targets, # List of generated `.rs` custom targets + override_options: ['rust_std=2021', 'build.rust_std=2021'], + dependencies: [libc_rs], + rust_abi: 'rust', +) + +trace_rs = declare_dependency(link_with: _trace_rs) diff --git a/rust/trace/src/lib.rs b/rust/trace/src/lib.rs new file mode 100644 index 0000000000000..e03bce43c4717 --- /dev/null +++ b/rust/trace/src/lib.rs @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +//! 
This crate provides macros that aid in using QEMU's tracepoint +//! functionality. + +#[doc(hidden)] +/// Re-exported item to avoid adding libc as a dependency everywhere. +pub use libc::{syslog, LOG_INFO}; + +#[macro_export] +/// Define the trace-points from the named directory (which should have slashes +/// replaced by underscore characters) as functions in a module called `trace`. +/// +/// ```ignore +/// ::trace::include_trace!("hw_char"); +/// // ... +/// trace::trace_pl011_read_fifo_rx_full(); +/// ``` +macro_rules! include_trace { + ($name:literal) => { + #[allow( + clippy::ptr_as_ptr, + clippy::cast_lossless, + clippy::used_underscore_binding + )] + mod trace { + #[cfg(not(MESON))] + include!(concat!( + env!("MESON_BUILD_ROOT"), + "/trace/trace-", + $name, + ".rs" + )); + + #[cfg(MESON)] + include!(concat!("@MESON_BUILD_ROOT@/trace/trace-", $name, ".rs")); + } + }; +} diff --git a/rust/util/Cargo.toml b/rust/util/Cargo.toml new file mode 100644 index 0000000000000..85f914365450d --- /dev/null +++ b/rust/util/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "util" +version = "0.1.0" +description = "Rust bindings for QEMU/util" +resolver = "2" +publish = false + +authors.workspace = true +edition.workspace = true +homepage.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true + +[dependencies] +anyhow = { workspace = true } +foreign = { workspace = true } +glib-sys = { workspace = true } +libc = { workspace = true } +common = { path = "../common" } + +[lints] +workspace = true diff --git a/rust/qemu-api/build.rs b/rust/util/build.rs similarity index 81% rename from rust/qemu-api/build.rs rename to rust/util/build.rs index 29d09456257cb..5654d1d5624d9 100644 --- a/rust/qemu-api/build.rs +++ b/rust/util/build.rs @@ -9,12 +9,14 @@ use std::os::windows::fs::symlink_file; use std::{env, fs::remove_file, io::Result, path::Path}; fn main() -> Result<()> { + let manifest_dir = env!("CARGO_MANIFEST_DIR"); let file = if 
let Ok(root) = env::var("MESON_BUILD_ROOT") { - format!("{root}/rust/qemu-api/bindings.inc.rs") + let sub = get_rust_subdir(manifest_dir).unwrap(); + format!("{root}/{sub}/bindings.inc.rs") } else { // Placing bindings.inc.rs in the source directory is supported // but not documented or encouraged. - format!("{}/src/bindings.inc.rs", env!("CARGO_MANIFEST_DIR")) + format!("{manifest_dir}/src/bindings.inc.rs") }; let file = Path::new(&file); @@ -41,3 +43,7 @@ fn main() -> Result<()> { println!("cargo:rerun-if-changed=build.rs"); Ok(()) } + +fn get_rust_subdir(path: &str) -> Option<&str> { + path.find("/rust").map(|index| &path[index + 1..]) +} diff --git a/rust/util/meson.build b/rust/util/meson.build new file mode 100644 index 0000000000000..b0b75e93ff655 --- /dev/null +++ b/rust/util/meson.build @@ -0,0 +1,59 @@ +_util_bindgen_args = [] +c_enums = [ + 'module_init_type', + 'QEMUClockType', +] +foreach enum : c_enums + _util_bindgen_args += ['--rustified-enum', enum] +endforeach + +# +# TODO: Remove this comment when the clang/libclang mismatch issue is solved. +# +# Rust bindings generation with `bindgen` might fail in some cases where the +# detected `libclang` does not match the expected `clang` version/target. 
In +# this case you must pass the path to `clang` and `libclang` to your build +# command invocation using the environment variables CLANG_PATH and +# LIBCLANG_PATH +_util_bindings_inc_rs = rust.bindgen( + input: 'wrapper.h', + dependencies: common_ss.all_dependencies(), + output: 'bindings.inc.rs', + include_directories: bindings_incdir, + bindgen_version: ['>=0.60.0'], + args: bindgen_args_common + _util_bindgen_args, + c_args: bindgen_c_args, +) + +_util_rs = static_library( + 'util', + structured_sources( + [ + 'src/lib.rs', + 'src/bindings.rs', + 'src/error.rs', + 'src/log.rs', + 'src/module.rs', + 'src/timer.rs', + ], + {'.': _util_bindings_inc_rs} + ), + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + dependencies: [anyhow_rs, libc_rs, foreign_rs, glib_sys_rs, common_rs, qom, qemuutil], +) + +util_rs = declare_dependency(link_with: [_util_rs], dependencies: [qemuutil, qom]) + +rust.test('rust-util-tests', _util_rs, + dependencies: [qemuutil, qom], + suite: ['unit', 'rust']) + +# Doctests are essentially integration tests, so they need the same dependencies. +# Note that running them requires the object files for C code, so place them +# in a separate suite that is run by the "build" CI jobs rather than "check". 
+rust.doctest('rust-util-rs-doctests', + _util_rs, + dependencies: util_rs, + suite: ['doc', 'rust'] +) diff --git a/rust/util/src/bindings.rs b/rust/util/src/bindings.rs new file mode 100644 index 0000000000000..c277a295add3d --- /dev/null +++ b/rust/util/src/bindings.rs @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +#![allow( + dead_code, + improper_ctypes_definitions, + improper_ctypes, + non_camel_case_types, + non_snake_case, + non_upper_case_globals, + unnecessary_transmutes, + unsafe_op_in_unsafe_fn, + clippy::pedantic, + clippy::restriction, + clippy::style, + clippy::missing_const_for_fn, + clippy::ptr_offset_with_cast, + clippy::useless_transmute, + clippy::missing_safety_doc, + clippy::too_many_arguments +)] + +use glib_sys::{guint, GList, GPollFD, GQueue, GSList, GString}; + +#[cfg(MESON)] +include!("bindings.inc.rs"); + +#[cfg(not(MESON))] +include!(concat!(env!("OUT_DIR"), "/bindings.inc.rs")); diff --git a/rust/qemu-api/src/error.rs b/rust/util/src/error.rs similarity index 98% rename from rust/qemu-api/src/error.rs rename to rust/util/src/error.rs index e114fc4178bf8..bfa5a8685bc4e 100644 --- a/rust/qemu-api/src/error.rs +++ b/rust/util/src/error.rs @@ -19,7 +19,7 @@ //! //! This module is most commonly used at the boundary between C and Rust code; //! other code will usually access it through the -//! [`qemu_api::Result`](crate::Result) type alias, and will use the +//! [`utils::Result`](crate::Result) type alias, and will use the //! [`std::error::Error`] interface to let C errors participate in Rust's error //! handling functionality. //! @@ -30,7 +30,7 @@ //! type up to C code, or from a combination of the two. //! //! The third case, corresponding to [`Error::with_error`], is the only one that -//! requires mentioning [`qemu_api::Error`](crate::Error) explicitly. Similar +//! requires mentioning [`utils::Error`](crate::Error) explicitly. Similar //! to how QEMU's C code handles errno values, the string and the //! 
`anyhow::Error` object will be concatenated with `:` as the separator. @@ -316,10 +316,10 @@ mod tests { use std::ffi::CStr; use anyhow::anyhow; + use common::assert_match; use foreign::OwnedPointer; use super::*; - use crate::{assert_match, bindings}; #[track_caller] fn error_for_test(msg: &CStr) -> OwnedPointer { diff --git a/rust/util/src/lib.rs b/rust/util/src/lib.rs new file mode 100644 index 0000000000000..16c89b95174d0 --- /dev/null +++ b/rust/util/src/lib.rs @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +pub mod bindings; +pub mod error; +pub mod log; +pub mod module; +pub mod timer; + +pub use error::{Error, Result}; diff --git a/rust/qemu-api/src/log.rs b/rust/util/src/log.rs similarity index 93% rename from rust/qemu-api/src/log.rs rename to rust/util/src/log.rs index a441b8c1f2e61..0a4bc4249a174 100644 --- a/rust/qemu-api/src/log.rs +++ b/rust/util/src/log.rs @@ -8,7 +8,9 @@ use std::{ ptr::NonNull, }; -use crate::{bindings, errno}; +use common::errno; + +use crate::bindings; #[repr(u32)] /// Represents specific error categories within QEMU's logging system. @@ -47,7 +49,7 @@ impl LogGuard { /// # Examples /// /// ``` - /// # use qemu_api::log::LogGuard; + /// # use util::log::LogGuard; /// # use std::io::Write; /// if let Some(mut log) = LogGuard::new() { /// writeln!(log, "test"); @@ -114,7 +116,7 @@ impl Drop for LogGuard { /// # Example /// /// ``` -/// use qemu_api::{log::Log, log_mask_ln}; +/// use util::{log::Log, log_mask_ln}; /// /// let error_address = 0xbad; /// log_mask_ln!(Log::GuestError, "Address 0x{error_address:x} out of range"); @@ -124,7 +126,7 @@ impl Drop for LogGuard { /// trailing `,`: /// /// ``` -/// use qemu_api::{log::Log, log_mask_ln}; +/// use util::{log::Log, log_mask_ln}; /// /// let error_address = 0xbad; /// log_mask_ln!( @@ -137,12 +139,12 @@ impl Drop for LogGuard { macro_rules! 
log_mask_ln { ($mask:expr, $fmt:tt $($args:tt)*) => {{ // Type assertion to enforce type `Log` for $mask - let _: Log = $mask; + let _: $crate::log::Log = $mask; if unsafe { - (::qemu_api::bindings::qemu_loglevel & ($mask as std::os::raw::c_int)) != 0 + ($crate::bindings::qemu_loglevel & ($mask as std::os::raw::c_uint)) != 0 } { - _ = ::qemu_api::log::LogGuard::log_fmt( + _ = $crate::log::LogGuard::log_fmt( format_args!("{}\n", format_args!($fmt $($args)*))); } }}; diff --git a/rust/qemu-api/src/module.rs b/rust/util/src/module.rs similarity index 97% rename from rust/qemu-api/src/module.rs rename to rust/util/src/module.rs index fa5cea3598f96..06c45fc142b4b 100644 --- a/rust/qemu-api/src/module.rs +++ b/rust/util/src/module.rs @@ -36,7 +36,7 @@ macro_rules! module_init { // shortcut because it's quite common that $body needs unsafe {} ($type:ident => unsafe $body:block) => { - $crate::module_init! { + ::util::module_init! { $type => { unsafe { $body } } } }; diff --git a/rust/qemu-api/src/timer.rs b/rust/util/src/timer.rs similarity index 91% rename from rust/qemu-api/src/timer.rs rename to rust/util/src/timer.rs index 0a2d111d49098..c6b3e4088ecb1 100644 --- a/rust/qemu-api/src/timer.rs +++ b/rust/util/src/timer.rs @@ -7,22 +7,22 @@ use std::{ pin::Pin, }; -use crate::{ - bindings::{self, qemu_clock_get_ns, timer_del, timer_init_full, timer_mod, QEMUClockType}, - callbacks::FnCall, - cell::Opaque, +use common::{callbacks::FnCall, Opaque}; + +use crate::bindings::{ + self, qemu_clock_get_ns, timer_del, timer_init_full, timer_mod, QEMUClockType, }; /// A safe wrapper around [`bindings::QEMUTimer`]. 
#[repr(transparent)] -#[derive(Debug, qemu_api_macros::Wrapper)] +#[derive(Debug, common::Wrapper)] pub struct Timer(Opaque); unsafe impl Send for Timer {} unsafe impl Sync for Timer {} #[repr(transparent)] -#[derive(qemu_api_macros::Wrapper)] +#[derive(common::Wrapper)] pub struct TimerListGroup(Opaque); unsafe impl Send for TimerListGroup {} @@ -39,7 +39,7 @@ impl Timer { /// /// The timer must be initialized before it is armed with /// [`modify`](Self::modify). - pub unsafe fn new() -> Self { + pub const unsafe fn new() -> Self { // SAFETY: requirements relayed to callers of Timer::new Self(unsafe { Opaque::zeroed() }) } @@ -56,7 +56,7 @@ impl Timer { ) where F: for<'a> FnCall<(&'a T,)>, { - let _: () = F::ASSERT_IS_SOME; + const { assert!(F::IS_SOME) }; /// timer expiration callback unsafe extern "C" fn rust_timer_handler FnCall<(&'a T,)>>( diff --git a/rust/util/wrapper.h b/rust/util/wrapper.h new file mode 100644 index 0000000000000..b9ed68a01d824 --- /dev/null +++ b/rust/util/wrapper.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * This header file is meant to be used as input to the `bindgen` application + * in order to generate C FFI compatible Rust bindings. + */ + +#ifndef __CLANG_STDATOMIC_H +#define __CLANG_STDATOMIC_H +/* + * Fix potential missing stdatomic.h error in case bindgen does not insert the + * correct libclang header paths on its own. We do not use stdatomic.h symbols + * in QEMU code, so it's fine to declare dummy types instead. 
+ */ +typedef enum memory_order { + memory_order_relaxed, + memory_order_consume, + memory_order_acquire, + memory_order_release, + memory_order_acq_rel, + memory_order_seq_cst, +} memory_order; +#endif /* __CLANG_STDATOMIC_H */ + +#include "qemu/osdep.h" + +#include "qapi/error.h" +#include "qapi/error-internal.h" +#include "qemu/log-for-trace.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/timer.h" diff --git a/scripts/analyse-locks-simpletrace.py b/scripts/analyse-locks-simpletrace.py index d650dd7140867..bd04cd43c9429 100755 --- a/scripts/analyse-locks-simpletrace.py +++ b/scripts/analyse-locks-simpletrace.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # # Analyse lock events and compute statistics # diff --git a/scripts/archive-source.sh b/scripts/archive-source.sh index 035828c532e78..8f97b19a088fe 100755 --- a/scripts/archive-source.sh +++ b/scripts/archive-source.sh @@ -26,12 +26,28 @@ sub_file="${sub_tdir}/submodule.tar" # independent of what the developer currently has initialized # in their checkout, because the build environment is completely # different to the host OS. -subprojects="keycodemapdb libvfio-user berkeley-softfloat-3 - berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs - bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs - libc-0.2-rs proc-macro2-1-rs - proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs - syn-2-rs unicode-ident-1-rs" +subprojects=( + anyhow-1-rs + arbitrary-int-1-rs + attrs-0.2-rs + berkeley-softfloat-3 + berkeley-testfloat-3 + bilge-0.2-rs + bilge-impl-0.2-rs + either-1-rs + foreign-0.3-rs + glib-sys-0.21-rs + itertools-0.11-rs + keycodemapdb + libc-0.2-rs + libvfio-user + proc-macro-error-1-rs + proc-macro-error-attr-1-rs + proc-macro2-1-rs + quote-1-rs + syn-2-rs + unicode-ident-1-rs +) sub_deinit="" function cleanup() { @@ -77,9 +93,10 @@ function subproject_dir() { git archive --format tar "$(tree_ish)" > "$tar_file" test $? 
-ne 0 && error "failed to archive qemu" -for sp in $subprojects; do - meson subprojects download $sp - test $? -ne 0 && error "failed to download subproject $sp" +meson subprojects download ${subprojects[@]} >/dev/null +test $? -ne 0 && error "failed to download subprojects $subprojects" + +for sp in "${subprojects[@]}"; do tar --append --file "$tar_file" --exclude=.git subprojects/"$(subproject_dir $sp)" test $? -ne 0 && error "failed to append subproject $sp to $tar_file" done diff --git a/scripts/arm_processor_error.py b/scripts/arm_processor_error.py new file mode 100644 index 0000000000000..73d069f070d44 --- /dev/null +++ b/scripts/arm_processor_error.py @@ -0,0 +1,476 @@ +#!/usr/bin/env python3 +# +# pylint: disable=C0301,C0114,R0903,R0912,R0913,R0914,R0915,W0511 +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024-2025 Mauro Carvalho Chehab + +# TODO: current implementation has dummy defaults. +# +# For a better implementation, a QMP addition/call is needed to +# retrieve some data for ARM Processor Error injection: +# +# - ARM registers: power_state, mpidr. + +""" +Generate an ARM processor error CPER, compatible with +UEFI 2.9A Errata. + +Injecting such errors can be done using: + + $ ./scripts/ghes_inject.py arm + Error injected. 
+ +Produces a simple CPER register, as detected on a Linux guest: + +[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1 +[Hardware Error]: event severity: recoverable +[Hardware Error]: Error 0, type: recoverable +[Hardware Error]: section_type: ARM processor error +[Hardware Error]: MIDR: 0x0000000000000000 +[Hardware Error]: running state: 0x0 +[Hardware Error]: Power State Coordination Interface state: 0 +[Hardware Error]: Error info structure 0: +[Hardware Error]: num errors: 2 +[Hardware Error]: error_type: 0x02: cache error +[Hardware Error]: error_info: 0x000000000091000f +[Hardware Error]: transaction type: Data Access +[Hardware Error]: cache error, operation type: Data write +[Hardware Error]: cache level: 2 +[Hardware Error]: processor context not corrupted +[Firmware Warn]: GHES: Unhandled processor error type 0x02: cache error + +The ARM Processor Error message can be customized via command line +parameters. For instance: + + $ ./scripts/ghes_inject.py arm --mpidr 0x444 --running --affinity 1 \ + --error-info 12345678 --vendor 0x13,123,4,5,1 --ctx-array 0,1,2,3,4,5 \ + -t cache tlb bus micro-arch tlb,micro-arch + Error injected. 
+ +Injects this error, as detected on a Linux guest: + +[Hardware Error]: Hardware error from APEI Generic Hardware Error Source: 1 +[Hardware Error]: event severity: recoverable +[Hardware Error]: Error 0, type: recoverable +[Hardware Error]: section_type: ARM processor error +[Hardware Error]: MIDR: 0x0000000000000000 +[Hardware Error]: Multiprocessor Affinity Register (MPIDR): 0x0000000000000000 +[Hardware Error]: error affinity level: 0 +[Hardware Error]: running state: 0x1 +[Hardware Error]: Power State Coordination Interface state: 0 +[Hardware Error]: Error info structure 0: +[Hardware Error]: num errors: 2 +[Hardware Error]: error_type: 0x02: cache error +[Hardware Error]: error_info: 0x0000000000bc614e +[Hardware Error]: cache level: 2 +[Hardware Error]: processor context not corrupted +[Hardware Error]: Error info structure 1: +[Hardware Error]: num errors: 2 +[Hardware Error]: error_type: 0x04: TLB error +[Hardware Error]: error_info: 0x000000000054007f +[Hardware Error]: transaction type: Instruction +[Hardware Error]: TLB error, operation type: Instruction fetch +[Hardware Error]: TLB level: 1 +[Hardware Error]: processor context not corrupted +[Hardware Error]: the error has not been corrected +[Hardware Error]: PC is imprecise +[Hardware Error]: Error info structure 2: +[Hardware Error]: num errors: 2 +[Hardware Error]: error_type: 0x08: bus error +[Hardware Error]: error_info: 0x00000080d6460fff +[Hardware Error]: transaction type: Generic +[Hardware Error]: bus error, operation type: Generic read (type of instruction or data request cannot be determined) +[Hardware Error]: affinity level at which the bus error occurred: 1 +[Hardware Error]: processor context corrupted +[Hardware Error]: the error has been corrected +[Hardware Error]: PC is imprecise +[Hardware Error]: Program execution can be restarted reliably at the PC associated with the error. 
+[Hardware Error]: participation type: Local processor observed +[Hardware Error]: request timed out +[Hardware Error]: address space: External Memory Access +[Hardware Error]: memory access attributes:0x20 +[Hardware Error]: access mode: secure +[Hardware Error]: Error info structure 3: +[Hardware Error]: num errors: 2 +[Hardware Error]: error_type: 0x10: micro-architectural error +[Hardware Error]: error_info: 0x0000000078da03ff +[Hardware Error]: Error info structure 4: +[Hardware Error]: num errors: 2 +[Hardware Error]: error_type: 0x14: TLB error|micro-architectural error +[Hardware Error]: Context info structure 0: +[Hardware Error]: register context type: AArch64 EL1 context registers +[Hardware Error]: 00000000: 00000000 00000000 +[Hardware Error]: Vendor specific error info has 5 bytes: +[Hardware Error]: 00000000: 13 7b 04 05 01 .{... +[Firmware Warn]: GHES: Unhandled processor error type 0x02: cache error +[Firmware Warn]: GHES: Unhandled processor error type 0x04: TLB error +[Firmware Warn]: GHES: Unhandled processor error type 0x08: bus error +[Firmware Warn]: GHES: Unhandled processor error type 0x10: micro-architectural error +[Firmware Warn]: GHES: Unhandled processor error type 0x14: TLB error|micro-architectural error +""" + +import argparse +import re + +from qmp_helper import qmp, util, cper_guid + + +class ArmProcessorEinj: + """ + Implements ARM Processor Error injection via GHES + """ + + DESC = """ + Generates an ARM processor error CPER, compatible with + UEFI 2.9A Errata. 
+ """ + + ACPI_GHES_ARM_CPER_LENGTH = 40 + ACPI_GHES_ARM_CPER_PEI_LENGTH = 32 + + # Context types + CONTEXT_AARCH32_EL1 = 1 + CONTEXT_AARCH64_EL1 = 5 + CONTEXT_MISC_REG = 8 + + def __init__(self, subparsers): + """Initialize the error injection class and add subparser""" + + # Valid choice values + self.arm_valid_bits = { + "mpidr": util.bit(0), + "affinity": util.bit(1), + "running": util.bit(2), + "vendor": util.bit(3), + } + + self.pei_flags = { + "first": util.bit(0), + "last": util.bit(1), + "propagated": util.bit(2), + "overflow": util.bit(3), + } + + self.pei_error_types = { + "cache": util.bit(1), + "tlb": util.bit(2), + "bus": util.bit(3), + "micro-arch": util.bit(4), + } + + self.pei_valid_bits = { + "multiple-error": util.bit(0), + "flags": util.bit(1), + "error-info": util.bit(2), + "virt-addr": util.bit(3), + "phy-addr": util.bit(4), + } + + self.data = bytearray() + + parser = subparsers.add_parser("arm", description=self.DESC) + + arm_valid_bits = ",".join(self.arm_valid_bits.keys()) + flags = ",".join(self.pei_flags.keys()) + error_types = ",".join(self.pei_error_types.keys()) + pei_valid_bits = ",".join(self.pei_valid_bits.keys()) + + # UEFI N.16 ARM Validation bits + g_arm = parser.add_argument_group("ARM processor") + g_arm.add_argument("--arm", "--arm-valid", + help=f"ARM valid bits: {arm_valid_bits}") + g_arm.add_argument("-a", "--affinity", "--level", "--affinity-level", + type=lambda x: int(x, 0), + help="Affinity level (when multiple levels apply)") + g_arm.add_argument("-l", "--mpidr", type=lambda x: int(x, 0), + help="Multiprocessor Affinity Register") + g_arm.add_argument("-i", "--midr", type=lambda x: int(x, 0), + help="Main ID Register") + g_arm.add_argument("-r", "--running", + action=argparse.BooleanOptionalAction, + default=None, + help="Indicates if the processor is running or not") + g_arm.add_argument("--psci", "--psci-state", + type=lambda x: int(x, 0), + help="Power State Coordination Interface - PSCI state") + + # TODO: Add 
vendor-specific support + + # UEFI N.17 bitmaps (type and flags) + g_pei = parser.add_argument_group("ARM Processor Error Info (PEI)") + g_pei.add_argument("-t", "--type", nargs="+", + help=f"one or more error types: {error_types}") + g_pei.add_argument("-f", "--flags", nargs="*", + help=f"zero or more error flags: {flags}") + g_pei.add_argument("-V", "--pei-valid", "--error-valid", nargs="*", + help=f"zero or more PEI valid bits: {pei_valid_bits}") + + # UEFI N.17 Integer values + g_pei.add_argument("-m", "--multiple-error", nargs="+", + help="Number of errors: 0: Single error, 1: Multiple errors, 2-65535: Error count if known") + g_pei.add_argument("-e", "--error-info", nargs="+", + help="Error information (UEFI 2.10 tables N.18 to N.20)") + g_pei.add_argument("-p", "--physical-address", nargs="+", + help="Physical address") + g_pei.add_argument("-v", "--virtual-address", nargs="+", + help="Virtual address") + + # UEFI N.21 Context + g_ctx = parser.add_argument_group("Processor Context") + g_ctx.add_argument("--ctx-type", "--context-type", nargs="*", + help="Type of the context (0=ARM32 GPR, 5=ARM64 EL1, other values supported)") + g_ctx.add_argument("--ctx-size", "--context-size", nargs="*", + help="Minimal size of the context") + g_ctx.add_argument("--ctx-array", "--context-array", nargs="*", + help="Comma-separated arrays for each context") + + # Vendor-specific data + g_vendor = parser.add_argument_group("Vendor-specific data") + g_vendor.add_argument("--vendor", "--vendor-specific", nargs="+", + help="Vendor-specific byte arrays of data") + + # Add arguments for Generic Error Data + qmp.argparse(parser) + + parser.set_defaults(func=self.send_cper) + + def send_cper(self, args): + """Parse subcommand arguments and send a CPER via QMP""" + + qmp_cmd = qmp(args.host, args.port, args.debug) + + # Handle Generic Error Data arguments if any + qmp_cmd.set_args(args) + + is_cpu_type = re.compile(r"^([\w+]+\-)?arm\-cpu$") + cpus = 
qmp_cmd.search_qom("/machine/unattached/device", + "type", is_cpu_type) + + cper = {} + pei = {} + ctx = {} + vendor = {} + + arg = vars(args) + + # Handle global parameters + if args.arm: + arm_valid_init = False + cper["valid"] = util.get_choice(name="valid", + value=args.arm, + choices=self.arm_valid_bits, + suffixes=["-error", "-err"]) + else: + cper["valid"] = 0 + arm_valid_init = True + + if "running" in arg: + if args.running: + cper["running-state"] = util.bit(0) + else: + cper["running-state"] = 0 + else: + cper["running-state"] = 0 + + if arm_valid_init: + if args.affinity: + cper["valid"] |= self.arm_valid_bits["affinity"] + + if args.mpidr: + cper["valid"] |= self.arm_valid_bits["mpidr"] + + if "running-state" in cper: + cper["valid"] |= self.arm_valid_bits["running"] + + if args.psci: + cper["valid"] |= self.arm_valid_bits["running"] + + # Handle PEI + if not args.type: + args.type = ["cache-error"] + + util.get_mult_choices( + pei, + name="valid", + values=args.pei_valid, + choices=self.pei_valid_bits, + suffixes=["-valid", "--addr"], + ) + util.get_mult_choices( + pei, + name="type", + values=args.type, + choices=self.pei_error_types, + suffixes=["-error", "-err"], + ) + util.get_mult_choices( + pei, + name="flags", + values=args.flags, + choices=self.pei_flags, + suffixes=["-error", "-cap"], + ) + util.get_mult_int(pei, "error-info", args.error_info) + util.get_mult_int(pei, "multiple-error", args.multiple_error) + util.get_mult_int(pei, "phy-addr", args.physical_address) + util.get_mult_int(pei, "virt-addr", args.virtual_address) + + # Handle context + util.get_mult_int(ctx, "type", args.ctx_type, allow_zero=True) + util.get_mult_int(ctx, "minimal-size", args.ctx_size, allow_zero=True) + util.get_mult_array(ctx, "register", args.ctx_array, allow_zero=True) + + util.get_mult_array(vendor, "bytes", args.vendor, max_val=255) + + # Store PEI + pei_data = bytearray() + default_flags = self.pei_flags["first"] + default_flags |= self.pei_flags["last"] + + 
error_info_num = 0 + + for i, p in pei.items(): # pylint: disable=W0612 + error_info_num += 1 + + # UEFI 2.10 doesn't define how to encode error information + # when multiple types are raised. So, provide a default only + # if a single type is there + if "error-info" not in p: + if p["type"] == util.bit(1): + p["error-info"] = 0x0091000F + if p["type"] == util.bit(2): + p["error-info"] = 0x0054007F + if p["type"] == util.bit(3): + p["error-info"] = 0x80D6460FFF + if p["type"] == util.bit(4): + p["error-info"] = 0x78DA03FF + + if "valid" not in p: + p["valid"] = 0 + if "multiple-error" in p: + p["valid"] |= self.pei_valid_bits["multiple-error"] + + if "flags" in p: + p["valid"] |= self.pei_valid_bits["flags"] + + if "error-info" in p: + p["valid"] |= self.pei_valid_bits["error-info"] + + if "phy-addr" in p: + p["valid"] |= self.pei_valid_bits["phy-addr"] + + if "virt-addr" in p: + p["valid"] |= self.pei_valid_bits["virt-addr"] + + # Version + util.data_add(pei_data, 0, 1) + + util.data_add(pei_data, + self.ACPI_GHES_ARM_CPER_PEI_LENGTH, 1) + + util.data_add(pei_data, p["valid"], 2) + util.data_add(pei_data, p["type"], 1) + util.data_add(pei_data, p.get("multiple-error", 1), 2) + util.data_add(pei_data, p.get("flags", default_flags), 1) + util.data_add(pei_data, p.get("error-info", 0), 8) + util.data_add(pei_data, p.get("virt-addr", 0xDEADBEEF), 8) + util.data_add(pei_data, p.get("phy-addr", 0xABBA0BAD), 8) + + # Store Context + ctx_data = bytearray() + context_info_num = 0 + + if ctx: + ret = qmp_cmd.send_cmd("query-target", may_open=True) + + default_ctx = self.CONTEXT_MISC_REG + + if "arch" in ret: + if ret["arch"] == "aarch64": + default_ctx = self.CONTEXT_AARCH64_EL1 + elif ret["arch"] == "arm": + default_ctx = self.CONTEXT_AARCH32_EL1 + + for k in sorted(ctx.keys()): + context_info_num += 1 + + if "type" not in ctx[k]: + ctx[k]["type"] = default_ctx + + if "register" not in ctx[k]: + ctx[k]["register"] = [] + + reg_size = len(ctx[k]["register"]) + size = 0 + + 
if "minimal-size" in ctx: + size = ctx[k]["minimal-size"] + + size = max(size, reg_size) + + size = (size + 1) % 0xFFFE + + # Version + util.data_add(ctx_data, 0, 2) + + util.data_add(ctx_data, ctx[k]["type"], 2) + + util.data_add(ctx_data, 8 * size, 4) + + for r in ctx[k]["register"]: + util.data_add(ctx_data, r, 8) + + for i in range(reg_size, size): # pylint: disable=W0612 + util.data_add(ctx_data, 0, 8) + + # Vendor-specific bytes are not grouped + vendor_data = bytearray() + if vendor: + for k in sorted(vendor.keys()): + for b in vendor[k]["bytes"]: + util.data_add(vendor_data, b, 1) + + # Encode ARM Processor Error + data = bytearray() + + util.data_add(data, cper["valid"], 4) + + util.data_add(data, error_info_num, 2) + util.data_add(data, context_info_num, 2) + + # Calculate the length of the CPER data + cper_length = self.ACPI_GHES_ARM_CPER_LENGTH + cper_length += len(pei_data) + cper_length += len(vendor_data) + cper_length += len(ctx_data) + util.data_add(data, cper_length, 4) + + util.data_add(data, arg.get("affinity-level", 0), 1) + + # Reserved + util.data_add(data, 0, 3) + + if "midr-el1" not in arg: + if cpus: + cmd_arg = { + 'path': cpus[0], + 'property': "midr" + } + ret = qmp_cmd.send_cmd("qom-get", cmd_arg, may_open=True) + if isinstance(ret, int): + arg["midr-el1"] = ret + + util.data_add(data, arg.get("mpidr-el1", 0), 8) + util.data_add(data, arg.get("midr-el1", 0), 8) + util.data_add(data, cper["running-state"], 4) + util.data_add(data, arg.get("psci-state", 0), 4) + + # Add PEI + data.extend(pei_data) + data.extend(ctx_data) + data.extend(vendor_data) + + self.data = data + + qmp_cmd.send_cper(cper_guid.CPER_PROC_ARM, self.data) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 833f20f555531..d3d75f3f13982 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -1368,6 +1368,9 @@ sub checkspdx { $expr =~ s/^\s*//g; $expr =~ s/\s*$//g; + # Cull C comment end + $expr =~ s/\*\/.*//; + my @bits = split / +/, $expr; my 
$prefer = "GPL-2.0-or-later"; @@ -1813,7 +1816,8 @@ sub process { } # Check SPDX-License-Identifier references a permitted license - if ($rawline =~ m,SPDX-License-Identifier: (.*?)(\*/)?\s*$,) { + if (($rawline =~ m,SPDX-License-Identifier: (.*?)(\*/)?\s*$,) && + $rawline !~ /^-/) { $fileinfo->{facts}->{sawspdx} = 1; &checkspdx($realfile, $1); } @@ -3194,9 +3198,9 @@ sub process { if ($line =~ /\bsignal\s*\(/ && !($line =~ /SIG_(?:IGN|DFL)/)) { ERROR("use sigaction to establish signal handlers; signal is not portable\n" . $herecurr); } -# recommend qemu_bh_new_guarded instead of qemu_bh_new - if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\bqemu_bh_new\s*\(/) { - ERROR("use qemu_bh_new_guarded() instead of qemu_bh_new() to avoid reentrancy problems\n" . $herecurr); +# recommend aio_bh_new_guarded instead of legacy qemu_bh_new / qemu_bh_new_guarded + if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\bqemu_bh_new(_guarded)?\s*\(/) { + ERROR("use aio_bh_new_guarded() instead of qemu_bh_new*() to avoid reentrancy problems\n" . $herecurr); } # recommend aio_bh_new_guarded instead of aio_bh_new if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\baio_bh_new\s*\(/) { diff --git a/scripts/ci/gitlab-failure-analysis b/scripts/ci/gitlab-failure-analysis new file mode 100755 index 0000000000000..906725be97312 --- /dev/null +++ b/scripts/ci/gitlab-failure-analysis @@ -0,0 +1,117 @@ +#!/usr/bin/env python3 +# +# A script to analyse failures in the gitlab pipelines. 
It requires an +# API key from gitlab with the following permissions: +# - api +# - read_repository +# - read_user +# + +import argparse +import gitlab +import os + +# +# Arguments +# +class NoneForEmptyStringAction(argparse.Action): + def __call__(self, parser, namespace, value, option_string=None): + if value == '': + setattr(namespace, self.dest, None) + else: + setattr(namespace, self.dest, value) + + +parser = argparse.ArgumentParser(description="Analyse failed GitLab CI runs.") + +parser.add_argument("--gitlab", + default="https://gitlab.com", + help="GitLab instance URL (default: https://gitlab.com).") +parser.add_argument("--id", default=11167699, + type=int, + help="GitLab project id (default: 11167699 for qemu-project/qemu)") +parser.add_argument("--token", + default=os.getenv("GITLAB_TOKEN"), + help="Your personal access token with 'api' scope.") +parser.add_argument("--branch", + type=str, + default="staging", + action=NoneForEmptyStringAction, + help="The name of the branch (default: 'staging')") +parser.add_argument("--status", + type=str, + action=NoneForEmptyStringAction, + default="failed", + help="Filter by branch status (default: 'failed')") +parser.add_argument("--count", type=int, + default=3, + help="The number of failed runs to fetch.") +parser.add_argument("--skip-jobs", + default=False, + action='store_true', + help="Skip dumping the job info") +parser.add_argument("--pipeline", type=int, + nargs="+", + default=None, + help="Explicit pipeline ID(s) to fetch.") + + +if __name__ == "__main__": + args = parser.parse_args() + + gl = gitlab.Gitlab(url=args.gitlab, private_token=args.token) + project = gl.projects.get(args.id) + + + pipelines_to_process = [] + + # Use explicit pipeline IDs if provided, otherwise fetch a list + if args.pipeline: + args.count = len(args.pipeline) + for p_id in args.pipeline: + pipelines_to_process.append(project.pipelines.get(p_id)) + else: + # Use an iterator to fetch the pipelines + pipe_iter = 
project.pipelines.list(iterator=True, + status=args.status, + ref=args.branch) + # Check each failed pipeline + pipelines_to_process = [next(pipe_iter) for _ in range(args.count)] + + # Check each pipeline + for p in pipelines_to_process: + + jobs = p.jobs.list(get_all=True) + failed_jobs = [j for j in jobs if j.status == "failed"] + skipped_jobs = [j for j in jobs if j.status == "skipped"] + manual_jobs = [j for j in jobs if j.status == "manual"] + + trs = p.test_report_summary.get() + total = trs.total["count"] + skipped = trs.total["skipped"] + failed = trs.total["failed"] + + print(f"{p.status} pipeline {p.id}, total jobs {len(jobs)}, " + f"skipped {len(skipped_jobs)}, " + f"failed {len(failed_jobs)}, ", + f"{total} tests, " + f"{skipped} skipped tests, " + f"{failed} failed tests") + + if not args.skip_jobs: + for j in failed_jobs: + print(f" Failed job {j.id}, {j.name}, {j.web_url}") + + # It seems we can only extract failing tests from the full + # test report, maybe there is some way to filter it. 
+ + if failed > 0: + ftr = p.test_report.get() + failed_suites = [s for s in ftr.test_suites if + s["failed_count"] > 0] + for fs in failed_suites: + name = fs["name"] + tests = fs["test_cases"] + failed_tests = [t for t in tests if t["status"] == 'failed'] + for t in failed_tests: + print(f" Failed test {t["classname"]}, {name}, {t["name"]}") diff --git a/scripts/ci/setup/gitlab-runner.yml b/scripts/ci/setup/gitlab-runner.yml index 57e7faebf10e0..7025935487ae7 100644 --- a/scripts/ci/setup/gitlab-runner.yml +++ b/scripts/ci/setup/gitlab-runner.yml @@ -16,7 +16,7 @@ tasks: - debug: msg: 'Checking for a valid GitLab registration token' - failed_when: "gitlab_runner_registration_token == 'PLEASE_PROVIDE_A_VALID_TOKEN'" + failed_when: "gitlab_runner_authentication_token == 'PLEASE_PROVIDE_A_VALID_TOKEN'" - name: Create a group for the gitlab-runner service group: @@ -95,15 +95,7 @@ # Register Runners - name: Register the gitlab-runner - command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list {{ ansible_facts[\"architecture\"] }},{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ ansible_facts[\"os_family\"] }})'" - - # The secondary runner will still run under the single gitlab-runner service - - name: Register secondary gitlab-runner - command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --registration-token {{ gitlab_runner_registration_token }} --executor shell --tag-list aarch32,{{ ansible_facts[\"distribution\"]|lower }}_{{ ansible_facts[\"distribution_version\"] }} --description '{{ ansible_facts[\"distribution\"] }} {{ ansible_facts[\"distribution_version\"] }} {{ ansible_facts[\"architecture\"] }} ({{ 
ansible_facts[\"os_family\"] }})'" - when: - - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['architecture'] == 'aarch64' - - ansible_facts['distribution_version'] == '22.04' + command: "/usr/bin/gitlab-runner register --non-interactive --url {{ gitlab_runner_server_url }} --token {{ gitlab_runner_authentication_token }} --executor shell" - name: Install the gitlab-runner service using its own functionality command: "/usr/bin/gitlab-runner install --user gitlab-runner --working-directory /home/gitlab-runner" diff --git a/scripts/ci/setup/ubuntu/build-environment.yml b/scripts/ci/setup/ubuntu/build-environment.yml index 56b51609e3800..0f8ec5fab04fa 100644 --- a/scripts/ci/setup/ubuntu/build-environment.yml +++ b/scripts/ci/setup/ubuntu/build-environment.yml @@ -35,33 +35,16 @@ # the package lists are updated by "make lcitool-refresh" - name: Include package lists based on OS and architecture include_vars: - file: "ubuntu-2204-{{ ansible_facts['architecture'] }}.yaml" + file: "ubuntu-2404-{{ ansible_facts['architecture'] }}.yaml" when: - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_version'] == '22.04' + - ansible_facts['distribution_version'] == '24.04' - - name: Install packages for QEMU on Ubuntu 22.04 + - name: Install packages for QEMU on Ubuntu 24.04 package: name: "{{ packages }}" when: - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_version'] == '22.04' + - ansible_facts['distribution_version'] == '24.04' - - name: Install armhf cross-compile packages to build QEMU on AArch64 Ubuntu 22.04 - package: - name: - - binutils-arm-linux-gnueabihf - - gcc-arm-linux-gnueabihf - - libblkid-dev:armhf - - libc6-dev:armhf - - libffi-dev:armhf - - libglib2.0-dev:armhf - - libmount-dev:armhf - - libpcre2-dev:armhf - - libpixman-1-dev:armhf - - zlib1g-dev:armhf - when: - - ansible_facts['distribution'] == 'Ubuntu' - - ansible_facts['distribution_version'] == '22.04' - - ansible_facts['architecture'] == 
'aarch64' diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-armhf-cross.yml b/scripts/ci/setup/ubuntu/ubuntu-2204-armhf-cross.yml deleted file mode 100644 index 0cc34cd10b9a1..0000000000000 --- a/scripts/ci/setup/ubuntu/ubuntu-2204-armhf-cross.yml +++ /dev/null @@ -1,127 +0,0 @@ -# THIS FILE WAS AUTO-GENERATED -# -# $ lcitool variables --cross-arch armv7l ubuntu-2204 qemu -# -# https://gitlab.com/libvirt/libvirt-ci - -packages: - - bash - - bc - - bison - - bsdextrautils - - bzip2 - - ca-certificates - - ccache - - dbus - - debianutils - - diffutils - - exuberant-ctags - - findutils - - flex - - gcc - - gcovr - - gettext - - git - - hostname - - libglib2.0-dev - - libpcre2-dev - - libsndio-dev - - libspice-protocol-dev - - llvm - - locales - - make - - meson - - mtools - - ncat - - ninja-build - - openssh-client - - pkgconf - - python3 - - python3-numpy - - python3-opencv - - python3-pillow - - python3-pip - - python3-sphinx - - python3-sphinx-rtd-theme - - python3-tomli - - python3-venv - - python3-yaml - - rpm2cpio - - sed - - socat - - sparse - - swtpm - - tar - - tesseract-ocr - - tesseract-ocr-eng - - xorriso - - zstd - - gcc-arm-linux-gnueabihf - - libaio-dev:armhf - - libasan6:armhf - - libasound2-dev:armhf - - libattr1-dev:armhf - - libbpf-dev:armhf - - libbrlapi-dev:armhf - - libbz2-dev:armhf - - libc6-dev:armhf - - libcacard-dev:armhf - - libcap-ng-dev:armhf - - libcapstone-dev:armhf - - libcmocka-dev:armhf - - libcurl4-gnutls-dev:armhf - - libdaxctl-dev:armhf - - libdrm-dev:armhf - - libepoxy-dev:armhf - - libfdt-dev:armhf - - libffi-dev:armhf - - libfuse3-dev:armhf - - libgbm-dev:armhf - - libgcrypt20-dev:armhf - - libglib2.0-dev:armhf - - libglusterfs-dev:armhf - - libgnutls28-dev:armhf - - libgtk-3-dev:armhf - - libibumad-dev:armhf - - libibverbs-dev:armhf - - libiscsi-dev:armhf - - libjemalloc-dev:armhf - - libjpeg-turbo8-dev:armhf - - libjson-c-dev:armhf - - liblttng-ust-dev:armhf - - liblzo2-dev:armhf - - libncursesw5-dev:armhf - - libnfs-dev:armhf 
- - libnuma-dev:armhf - - libpam0g-dev:armhf - - libpipewire-0.3-dev:armhf - - libpixman-1-dev:armhf - - libpng-dev:armhf - - libpulse-dev:armhf - - librbd-dev:armhf - - librdmacm-dev:armhf - - libsasl2-dev:armhf - - libsdl2-dev:armhf - - libsdl2-image-dev:armhf - - libseccomp-dev:armhf - - libselinux1-dev:armhf - - libslirp-dev:armhf - - libsnappy-dev:armhf - - libspice-server-dev:armhf - - libssh-dev:armhf - - libsystemd-dev:armhf - - libtasn1-6-dev:armhf - - libubsan1:armhf - - libudev-dev:armhf - - liburing-dev:armhf - - libusb-1.0-0-dev:armhf - - libusbredirhost-dev:armhf - - libvdeplug-dev:armhf - - libvirglrenderer-dev:armhf - - libvte-2.91-dev:armhf - - libxen-dev:armhf - - libzstd-dev:armhf - - nettle-dev:armhf - - systemtap-sdt-dev:armhf - - zlib1g-dev:armhf - diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml b/scripts/ci/setup/ubuntu/ubuntu-2404-aarch64.yaml similarity index 92% rename from scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml rename to scripts/ci/setup/ubuntu/ubuntu-2404-aarch64.yaml index f11e9808267e0..ce632d97108b9 100644 --- a/scripts/ci/setup/ubuntu/ubuntu-2204-aarch64.yaml +++ b/scripts/ci/setup/ubuntu/ubuntu-2404-aarch64.yaml @@ -1,12 +1,13 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool variables --host-arch aarch64 ubuntu-2204 qemu +# $ lcitool variables --host-arch aarch64 ubuntu-2404 qemu # # https://gitlab.com/libvirt/libvirt-ci packages: - bash - bc + - bindgen - bison - bsdextrautils - bzip2 @@ -36,6 +37,7 @@ packages: - libcap-ng-dev - libcapstone-dev - libcbor-dev + - libclang-dev - libcmocka-dev - libcurl4-gnutls-dev - libdaxctl-dev @@ -80,6 +82,7 @@ packages: - libspice-protocol-dev - libspice-server-dev - libssh-dev + - libstd-rust-dev - libsystemd-dev - libtasn1-6-dev - libubsan1 @@ -90,12 +93,12 @@ packages: - libvdeplug-dev - libvirglrenderer-dev - libvte-2.91-dev + - libxdp-dev - libxen-dev - libzstd-dev - llvm - locales - make - - meson - mtools - multipath-tools - ncat @@ -108,10 +111,12 @@ packages: - 
python3-opencv - python3-pillow - python3-pip + - python3-setuptools - python3-sphinx - python3-sphinx-rtd-theme - python3-tomli - python3-venv + - python3-wheel - python3-yaml - rpm2cpio - rustc-1.77 diff --git a/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml b/scripts/ci/setup/ubuntu/ubuntu-2404-s390x.yaml similarity index 92% rename from scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml rename to scripts/ci/setup/ubuntu/ubuntu-2404-s390x.yaml index 6559cb29343f9..f45f75c960209 100644 --- a/scripts/ci/setup/ubuntu/ubuntu-2204-s390x.yaml +++ b/scripts/ci/setup/ubuntu/ubuntu-2404-s390x.yaml @@ -1,12 +1,13 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool variables --host-arch s390x ubuntu-2204 qemu +# $ lcitool variables --host-arch s390x ubuntu-2404 qemu # # https://gitlab.com/libvirt/libvirt-ci packages: - bash - bc + - bindgen - bison - bsdextrautils - bzip2 @@ -36,6 +37,7 @@ packages: - libcap-ng-dev - libcapstone-dev - libcbor-dev + - libclang-dev - libcmocka-dev - libcurl4-gnutls-dev - libdaxctl-dev @@ -79,6 +81,7 @@ packages: - libsndio-dev - libspice-protocol-dev - libssh-dev + - libstd-rust-dev - libsystemd-dev - libtasn1-6-dev - libubsan1 @@ -89,11 +92,11 @@ packages: - libvdeplug-dev - libvirglrenderer-dev - libvte-2.91-dev + - libxdp-dev - libzstd-dev - llvm - locales - make - - meson - mtools - multipath-tools - ncat @@ -106,10 +109,12 @@ packages: - python3-opencv - python3-pillow - python3-pip + - python3-setuptools - python3-sphinx - python3-sphinx-rtd-theme - python3-tomli - python3-venv + - python3-wheel - python3-yaml - rpm2cpio - rustc-1.77 diff --git a/scripts/ci/setup/vars.yml.template b/scripts/ci/setup/vars.yml.template index 4b355fb80fd10..e9ddc05f3b085 100644 --- a/scripts/ci/setup/vars.yml.template +++ b/scripts/ci/setup/vars.yml.template @@ -6,5 +6,6 @@ ansible_to_gitlab_arch: x86_64: amd64 aarch64: arm64 s390x: s390x -# A unique token made available by GitLab to your project for registering runners -gitlab_runner_registration_token: 
PLEASE_PROVIDE_A_VALID_TOKEN +# A unique token made obtained from GitLab for each runner +# see: https://gitlab.com/PROJECT/REPO/-/runners/new +gitlab_runner_authentication_token: PLEASE_PROVIDE_A_VALID_TOKEN diff --git a/scripts/clean_functional_cache.py b/scripts/clean_functional_cache.py new file mode 100755 index 0000000000000..c3370ffbb8704 --- /dev/null +++ b/scripts/clean_functional_cache.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +"""Delete stale assets from the download cache of the functional tests""" + +import os +import stat +import sys +import time +from pathlib import Path + + +cache_dir_env = os.getenv('QEMU_TEST_CACHE_DIR') +if cache_dir_env: + cache_dir = Path(cache_dir_env, "download") +else: + cache_dir = Path(Path("~").expanduser(), ".cache", "qemu", "download") + +if not cache_dir.exists(): + print(f"Cache dir {cache_dir} does not exist!", file=sys.stderr) + sys.exit(1) + +os.chdir(cache_dir) + +for file in cache_dir.iterdir(): + # Only consider the files that use a sha256 as filename: + if len(file.name) != 64: + continue + + try: + timestamp = int(file.with_suffix(".stamp").read_text()) + except FileNotFoundError: + # Assume it's an old file that was already in the cache before we + # added the code for evicting stale assets. Use the release date + # of QEMU v10.1 as a default timestamp. 
+ timestamp = time.mktime((2025, 8, 26, 0, 0, 0, 0, 0, 0)) + + age = time.time() - timestamp + + # Delete files older than half of a year (183 days * 24h * 60m * 60s) + if age > 15811200: + print(f"Removing {cache_dir}/{file.name}.") + file.chmod(stat.S_IWRITE) + file.unlink() diff --git a/scripts/coccinelle/exec_rw_const.cocci b/scripts/coccinelle/exec_rw_const.cocci index 1a202969519b1..4c02c94e04e5d 100644 --- a/scripts/coccinelle/exec_rw_const.cocci +++ b/scripts/coccinelle/exec_rw_const.cocci @@ -21,13 +21,6 @@ expression E1, E2, E3, E4, E5; + address_space_rw(E1, E2, E3, E4, E5, true) | -- cpu_physical_memory_rw(E1, E2, E3, 0) -+ cpu_physical_memory_rw(E1, E2, E3, false) -| -- cpu_physical_memory_rw(E1, E2, E3, 1) -+ cpu_physical_memory_rw(E1, E2, E3, true) -| - - cpu_physical_memory_map(E1, E2, 0) + cpu_physical_memory_map(E1, E2, false) | @@ -62,18 +55,6 @@ symbol true, false; + address_space_write(E1, E2, E3, E4, E5) ) -// Avoid uses of cpu_physical_memory_rw() with a constant is_write argument. 
-@@ -expression E1, E2, E3; -@@ -( -- cpu_physical_memory_rw(E1, E2, E3, false) -+ cpu_physical_memory_read(E1, E2, E3) -| -- cpu_physical_memory_rw(E1, E2, E3, true) -+ cpu_physical_memory_write(E1, E2, E3) -) - // Remove useless cast @@ expression E1, E2, E3, E4, E5, E6; @@ -93,9 +74,6 @@ type T; + address_space_write_rom(E1, E2, E3, E4, E5) | -- cpu_physical_memory_rw(E1, (T *)(E2), E3, E4) -+ cpu_physical_memory_rw(E1, E2, E3, E4) -| - cpu_physical_memory_read(E1, (T *)(E2), E3) + cpu_physical_memory_read(E1, E2, E3) | diff --git a/scripts/coverity-scan/COMPONENTS.md b/scripts/coverity-scan/COMPONENTS.md index 72995903ff9ea..95805b536bcf4 100644 --- a/scripts/coverity-scan/COMPONENTS.md +++ b/scripts/coverity-scan/COMPONENTS.md @@ -147,6 +147,9 @@ tcg system ~ .*/qemu(/system/.*|/accel/.*) +plugins + ~ .*/qemu(/contrib|/tests/tcg)?/plugins/.* + (headers) ~ .*/qemu(/include/.*) diff --git a/scripts/decodetree.py b/scripts/decodetree.py index e8b72da3a97a2..f992472b73e9d 100644 --- a/scripts/decodetree.py +++ b/scripts/decodetree.py @@ -1016,9 +1016,12 @@ def infer_format(arg, fieldmask, flds, width): else: var_flds[n] = c + if not arg: + arg = infer_argument_set(flds) + # Look for an existing format with the same argument set and fields for fmt in formats.values(): - if arg and fmt.base != arg: + if fmt.base != arg: continue if fieldmask != fmt.fieldmask: continue @@ -1029,8 +1032,6 @@ def infer_format(arg, fieldmask, flds, width): return (fmt, const_flds) name = decode_function + '_Fmt_' + str(len(formats)) - if not arg: - arg = infer_argument_set(flds) fmt = Format(name, 0, arg, 0, 0, 0, fieldmask, var_flds, width) formats[name] = fmt diff --git a/scripts/get-wraps-from-cargo-registry.py b/scripts/get-wraps-from-cargo-registry.py new file mode 100755 index 0000000000000..31eed5c2dd451 --- /dev/null +++ b/scripts/get-wraps-from-cargo-registry.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python3 + +# SPDX-License-Identifier: GPL-2.0-or-later + +""" 
+get-wraps-from-cargo-registry.py - Update Meson subprojects from a global registry +""" + +# Copyright (C) 2025 Red Hat, Inc. +# +# Author: Paolo Bonzini + +import argparse +import configparser +import filecmp +import glob +import os +import subprocess +import sys + + +def get_name_and_semver(namever: str) -> tuple[str, str]: + """Split a subproject name into its name and semantic version parts""" + parts = namever.rsplit("-", 1) + if len(parts) != 2: + return namever, "" + + return parts[0], parts[1] + + +class UpdateSubprojects: + cargo_registry: str + top_srcdir: str + dry_run: bool + changes: int = 0 + + def find_installed_crate(self, namever: str) -> str | None: + """Find installed crate matching name and semver prefix""" + name, semver = get_name_and_semver(namever) + + # exact version match + path = os.path.join(self.cargo_registry, f"{name}-{semver}") + if os.path.exists(path): + return f"{name}-{semver}" + + # semver match + matches = sorted(glob.glob(f"{path}.*")) + return os.path.basename(matches[0]) if matches else None + + def compare_build_rs(self, orig_dir: str, registry_namever: str) -> None: + """Warn if the build.rs in the original directory differs from the registry version.""" + orig_build_rs = os.path.join(orig_dir, "build.rs") + new_build_rs = os.path.join(self.cargo_registry, registry_namever, "build.rs") + + msg = None + if os.path.isfile(orig_build_rs) != os.path.isfile(new_build_rs): + if os.path.isfile(orig_build_rs): + msg = f"build.rs removed in {registry_namever}" + if os.path.isfile(new_build_rs): + msg = f"build.rs added in {registry_namever}" + + elif os.path.isfile(orig_build_rs) and not filecmp.cmp(orig_build_rs, new_build_rs): + msg = f"build.rs changed from {orig_dir} to {registry_namever}" + + if msg: + print(f"⚠️ Warning: {msg}") + print(" This may affect the build process - please review the differences.") + + def update_subproject(self, wrap_file: str, registry_namever: str) -> None: + """Modify [wrap-file] section to point 
to self.cargo_registry.""" + assert wrap_file.endswith("-rs.wrap") + wrap_name = wrap_file[:-5] + + env = os.environ.copy() + env["MESON_PACKAGE_CACHE_DIR"] = self.cargo_registry + + config = configparser.ConfigParser() + config.read(wrap_file) + if "wrap-file" not in config: + return + + # do not download the wrap, always use the local copy + orig_dir = config["wrap-file"]["directory"] + if os.path.exists(orig_dir) and orig_dir != registry_namever: + self.compare_build_rs(orig_dir, registry_namever) + + if self.dry_run: + if orig_dir == registry_namever: + print(f"Will install {orig_dir} from registry.") + else: + print(f"Will replace {orig_dir} with {registry_namever}.") + self.changes += 1 + return + + config["wrap-file"]["directory"] = registry_namever + for key in list(config["wrap-file"].keys()): + if key.startswith("source"): + del config["wrap-file"][key] + + # replace existing directory with installed version + if os.path.exists(orig_dir): + subprocess.run( + ["meson", "subprojects", "purge", "--confirm", wrap_name], + cwd=self.top_srcdir, + env=env, + check=True, + ) + + with open(wrap_file, "w") as f: + config.write(f) + + if orig_dir == registry_namever: + print(f"Installing {orig_dir} from registry.") + else: + print(f"Replacing {orig_dir} with {registry_namever}.") + patch_dir = config["wrap-file"]["patch_directory"] + patch_dir = os.path.join("packagefiles", patch_dir) + _, ver = registry_namever.rsplit("-", 1) + subprocess.run( + ["meson", "rewrite", "kwargs", "set", "project", "/", "version", ver], + cwd=patch_dir, + env=env, + check=True, + ) + + subprocess.run( + ["meson", "subprojects", "download", wrap_name], + cwd=self.top_srcdir, + env=env, + check=True, + ) + self.changes += 1 + + @staticmethod + def parse_cmdline() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Replace Meson subprojects with packages in a Cargo registry" + ) + parser.add_argument( + "--cargo-registry", + default=os.environ.get("CARGO_REGISTRY"), + 
help="Path to Cargo registry (default: CARGO_REGISTRY env var)", + ) + parser.add_argument( + "--dry-run", + action="store_true", + default=False, + help="Do not actually replace anything", + ) + + args = parser.parse_args() + if not args.cargo_registry: + print("error: CARGO_REGISTRY environment variable not set and --cargo-registry not provided") + sys.exit(1) + + return args + + def __init__(self, args: argparse.Namespace): + self.cargo_registry = args.cargo_registry + self.dry_run = args.dry_run + self.top_srcdir = os.getcwd() + + def main(self) -> None: + if not os.path.exists("subprojects"): + print("'subprojects' directory not found, nothing to do.") + return + + os.chdir("subprojects") + for wrap_file in sorted(glob.glob("*-rs.wrap")): + namever = wrap_file[:-8] # Remove '-rs.wrap' + + registry_namever = self.find_installed_crate(namever) + if not registry_namever: + print(f"No installed crate found for {wrap_file}") + continue + + self.update_subproject(wrap_file, registry_namever) + + if self.changes: + if self.dry_run: + print("Rerun without --dry-run to apply changes.") + else: + print(f"✨ {self.changes} subproject(s) updated!") + else: + print("No changes.") + + +if __name__ == "__main__": + args = UpdateSubprojects.parse_cmdline() + UpdateSubprojects(args).main() diff --git a/scripts/ghes_inject.py b/scripts/ghes_inject.py new file mode 100755 index 0000000000000..9a235201418b4 --- /dev/null +++ b/scripts/ghes_inject.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024-2025 Mauro Carvalho Chehab + +""" +Handle ACPI GHESv2 error injection logic QEMU QMP interface. +""" + +import argparse +import sys + +from arm_processor_error import ArmProcessorEinj + +EINJ_DESC = """ +Handle ACPI GHESv2 error injection logic QEMU QMP interface. + +It allows using UEFI BIOS EINJ features to generate GHES records. 
+ +It helps testing CPER and GHES drivers at the guest OS and how +userspace applications at the guest handle them. +""" + +def main(): + """Main program""" + + # Main parser - handle generic args like QEMU QMP TCP socket options + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter, + usage="%(prog)s [options]", + description=EINJ_DESC) + + g_options = parser.add_argument_group("QEMU QMP socket options") + g_options.add_argument("-H", "--host", default="localhost", type=str, + help="host name") + g_options.add_argument("-P", "--port", default=4445, type=int, + help="TCP port number") + g_options.add_argument('-d', '--debug', action='store_true') + + subparsers = parser.add_subparsers() + + ArmProcessorEinj(subparsers) + + args = parser.parse_args() + if "func" in args: + args.func(args) + else: + sys.exit(f"Please specify a valid command for {sys.argv[0]}") + +if __name__ == "__main__": + main() diff --git a/scripts/kernel-doc b/scripts/kernel-doc deleted file mode 100755 index fec83f53eda32..0000000000000 --- a/scripts/kernel-doc +++ /dev/null @@ -1,2442 +0,0 @@ -#!/usr/bin/env perl -# SPDX-License-Identifier: GPL-2.0-only - -use warnings; -use strict; - -## Copyright (c) 1998 Michael Zucchi, All Rights Reserved ## -## Copyright (C) 2000, 1 Tim Waugh ## -## Copyright (C) 2001 Simon Huggins ## -## Copyright (C) 2005-2012 Randy Dunlap ## -## Copyright (C) 2012 Dan Luedtke ## -## ## -## #define enhancements by Armin Kuster ## -## Copyright (c) 2000 MontaVista Software, Inc. ## -## ## -## This software falls under the GNU General Public License. ## -## Please read the COPYING file for more information ## - -# 18/01/2001 - Cleanups -# Functions prototyped as foo(void) same as foo() -# Stop eval'ing where we don't need to. -# -- huggie@earth.li - -# 27/06/2001 - Allowed whitespace after initial "/**" and -# allowed comments before function declarations. 
-# -- Christian Kreibich - -# Still to do: -# - add perldoc documentation -# - Look more closely at some of the scarier bits :) - -# 26/05/2001 - Support for separate source and object trees. -# Return error code. -# Keith Owens - -# 23/09/2001 - Added support for typedefs, structs, enums and unions -# Support for Context section; can be terminated using empty line -# Small fixes (like spaces vs. \s in regex) -# -- Tim Jansen - -# 25/07/2012 - Added support for HTML5 -# -- Dan Luedtke - -sub usage { - my $message = <<"EOF"; -Usage: $0 [OPTION ...] FILE ... - -Read C language source or header FILEs, extract embedded documentation comments, -and print formatted documentation to standard output. - -The documentation comments are identified by "/**" opening comment mark. See -Documentation/doc-guide/kernel-doc.rst for the documentation comment syntax. - -Output format selection (mutually exclusive): - -man Output troff manual page format. This is the default. - -rst Output reStructuredText format. - -none Do not output documentation, only warnings. - -Output format selection modifier (affects only ReST output): - - -sphinx-version Use the ReST C domain dialect compatible with an - specific Sphinx Version. - If not specified, kernel-doc will auto-detect using - the sphinx-build version found on PATH. - -Output selection (mutually exclusive): - -export Only output documentation for symbols that have been - exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL() - in any input FILE or -export-file FILE. - -internal Only output documentation for symbols that have NOT been - exported using EXPORT_SYMBOL() or EXPORT_SYMBOL_GPL() - in any input FILE or -export-file FILE. - -function NAME Only output documentation for the given function(s) - or DOC: section title(s). All other functions and DOC: - sections are ignored. May be specified multiple times. - -nosymbol NAME Exclude the specified symbols from the output - documentation. May be specified multiple times. 
- -Output selection modifiers: - -no-doc-sections Do not output DOC: sections. - -enable-lineno Enable output of #define LINENO lines. Only works with - reStructuredText format. - -export-file FILE Specify an additional FILE in which to look for - EXPORT_SYMBOL() and EXPORT_SYMBOL_GPL(). To be used with - -export or -internal. May be specified multiple times. - -Other parameters: - -v Verbose output, more warnings and other information. - -h Print this help. - -Werror Treat warnings as errors. - -EOF - print $message; - exit 1; -} - -# -# format of comments. -# In the following table, (...)? signifies optional structure. -# (...)* signifies 0 or more structure elements -# /** -# * function_name(:)? (- short description)? -# (* @parameterx: (description of parameter x)?)* -# (* a blank line)? -# * (Description:)? (Description of function)? -# * (section header: (section description)? )* -# (*)?*/ -# -# So .. the trivial example would be: -# -# /** -# * my_function -# */ -# -# If the Description: header tag is omitted, then there must be a blank line -# after the last parameter specification. -# e.g. -# /** -# * my_function - does my stuff -# * @my_arg: its mine damnit -# * -# * Does my stuff explained. -# */ -# -# or, could also use: -# /** -# * my_function - does my stuff -# * @my_arg: its mine damnit -# * Description: Does my stuff explained. -# */ -# etc. -# -# Besides functions you can also write documentation for structs, unions, -# enums and typedefs. Instead of the function name you must write the name -# of the declaration; the struct/union/enum/typedef must always precede -# the name. Nesting of declarations is not supported. -# Use the argument mechanism to document members or constants. -# e.g. 
-# /** -# * struct my_struct - short description -# * @a: first member -# * @b: second member -# * -# * Longer description -# */ -# struct my_struct { -# int a; -# int b; -# /* private: */ -# int c; -# }; -# -# All descriptions can be multiline, except the short function description. -# -# For really longs structs, you can also describe arguments inside the -# body of the struct. -# eg. -# /** -# * struct my_struct - short description -# * @a: first member -# * @b: second member -# * -# * Longer description -# */ -# struct my_struct { -# int a; -# int b; -# /** -# * @c: This is longer description of C -# * -# * You can use paragraphs to describe arguments -# * using this method. -# */ -# int c; -# }; -# -# This should be use only for struct/enum members. -# -# You can also add additional sections. When documenting kernel functions you -# should document the "Context:" of the function, e.g. whether the functions -# can be called form interrupts. Unlike other sections you can end it with an -# empty line. -# A non-void function should have a "Return:" section describing the return -# value(s). -# Example-sections should contain the string EXAMPLE so that they are marked -# appropriately in DocBook. -# -# Example: -# /** -# * user_function - function that can only be called in user context -# * @a: some argument -# * Context: !in_interrupt() -# * -# * Some description -# * Example: -# * user_function(22); -# */ -# ... -# -# -# All descriptive text is further processed, scanning for the following special -# patterns, which are highlighted appropriately. -# -# 'funcname()' - function -# '$ENVVAR' - environmental variable -# '&struct_name' - name of a structure (up to two words including 'struct') -# '&struct_name.member' - name of a structure member -# '@parameter' - name of a parameter -# '%CONST' - name of a constant. -# '``LITERAL``' - literal string without any spaces on it. 
- -## init lots of data - -my $errors = 0; -my $warnings = 0; -my $anon_struct_union = 0; - -# match expressions used to find embedded type information -my $type_constant = '\b``([^\`]+)``\b'; -my $type_constant2 = '\%([-_\w]+)'; -my $type_func = '(\w+)\(\)'; -my $type_param = '\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)'; -my $type_param_ref = '([\!]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)'; -my $type_fp_param = '\@(\w+)\(\)'; # Special RST handling for func ptr params -my $type_fp_param2 = '\@(\w+->\S+)\(\)'; # Special RST handling for structs with func ptr params -my $type_env = '(\$\w+)'; -my $type_enum = '#(enum\s*([_\w]+))'; -my $type_struct = '#(struct\s*([_\w]+))'; -my $type_typedef = '#(([A-Z][_\w]*))'; -my $type_union = '#(union\s*([_\w]+))'; -my $type_member = '#([_\w]+)(\.|->)([_\w]+)'; -my $type_fallback = '(?!)'; # this never matches -my $type_member_func = $type_member . '\(\)'; - -# Output conversion substitutions. -# One for each output format - -# these are pretty rough -my @highlights_man = ( - [$type_constant, "\$1"], - [$type_constant2, "\$1"], - [$type_func, "\\\\fB\$1\\\\fP"], - [$type_enum, "\\\\fI\$1\\\\fP"], - [$type_struct, "\\\\fI\$1\\\\fP"], - [$type_typedef, "\\\\fI\$1\\\\fP"], - [$type_union, "\\\\fI\$1\\\\fP"], - [$type_param, "\\\\fI\$1\\\\fP"], - [$type_param_ref, "\\\\fI\$1\$2\\\\fP"], - [$type_member, "\\\\fI\$1\$2\$3\\\\fP"], - [$type_fallback, "\\\\fI\$1\\\\fP"] - ); -my $blankline_man = ""; - -# rst-mode -my @highlights_rst = ( - [$type_constant, "``\$1``"], - [$type_constant2, "``\$1``"], - # Note: need to escape () to avoid func matching later - [$type_member_func, "\\:c\\:type\\:`\$1\$2\$3\\\\(\\\\) <\$1>`"], - [$type_member, "\\:c\\:type\\:`\$1\$2\$3 <\$1>`"], - [$type_fp_param, "**\$1\\\\(\\\\)**"], - [$type_fp_param2, "**\$1\\\\(\\\\)**"], - [$type_func, "\$1()"], - [$type_enum, "\\:c\\:type\\:`\$1 <\$2>`"], - [$type_struct, "\\:c\\:type\\:`\$1 <\$2>`"], - [$type_typedef, "\\:c\\:type\\:`\$1 <\$2>`"], - [$type_union, 
"\\:c\\:type\\:`\$1 <\$2>`"], - # in rst this can refer to any type - [$type_fallback, "\\:c\\:type\\:`\$1`"], - [$type_param_ref, "**\$1\$2**"] - ); -my $blankline_rst = "\n"; - -# read arguments -if ($#ARGV == -1) { - usage(); -} - -my $kernelversion; -my ($sphinx_major, $sphinx_minor, $sphinx_patch); - -my $dohighlight = ""; - -my $verbose = 0; -my $Werror = 0; -my $output_mode = "rst"; -my $output_preformatted = 0; -my $no_doc_sections = 0; -my $enable_lineno = 0; -my @highlights = @highlights_rst; -my $blankline = $blankline_rst; -my $modulename = "Kernel API"; - -use constant { - OUTPUT_ALL => 0, # output all symbols and doc sections - OUTPUT_INCLUDE => 1, # output only specified symbols - OUTPUT_EXPORTED => 2, # output exported symbols - OUTPUT_INTERNAL => 3, # output non-exported symbols -}; -my $output_selection = OUTPUT_ALL; -my $show_not_found = 0; # No longer used - -my @export_file_list; - -my @build_time; -if (defined($ENV{'KBUILD_BUILD_TIMESTAMP'}) && - (my $seconds = `date -d"${ENV{'KBUILD_BUILD_TIMESTAMP'}}" +%s`) ne '') { - @build_time = gmtime($seconds); -} else { - @build_time = localtime; -} - -my $man_date = ('January', 'February', 'March', 'April', 'May', 'June', - 'July', 'August', 'September', 'October', - 'November', 'December')[$build_time[4]] . - " " . ($build_time[5]+1900); - -# Essentially these are globals. -# They probably want to be tidied up, made more localised or something. -# CAVEAT EMPTOR! Some of the others I localised may not want to be, which -# could cause "use of undefined value" or other bugs. 
-my ($function, %function_table, %parametertypes, $declaration_purpose); -my %nosymbol_table = (); -my $declaration_start_line; -my ($type, $declaration_name, $return_type); -my ($newsection, $newcontents, $prototype, $brcount, %source_map); - -if (defined($ENV{'KBUILD_VERBOSE'})) { - $verbose = "$ENV{'KBUILD_VERBOSE'}"; -} - -if (defined($ENV{'KDOC_WERROR'})) { - $Werror = "$ENV{'KDOC_WERROR'}"; -} - -if (defined($ENV{'KCFLAGS'})) { - my $kcflags = "$ENV{'KCFLAGS'}"; - - if ($kcflags =~ /Werror/) { - $Werror = 1; - } -} - -# Generated docbook code is inserted in a template at a point where -# docbook v3.1 requires a non-zero sequence of RefEntry's; see: -# https://www.oasis-open.org/docbook/documentation/reference/html/refentry.html -# We keep track of number of generated entries and generate a dummy -# if needs be to ensure the expanded template can be postprocessed -# into html. -my $section_counter = 0; - -my $lineprefix=""; - -# Parser states -use constant { - STATE_NORMAL => 0, # normal code - STATE_NAME => 1, # looking for function name - STATE_BODY_MAYBE => 2, # body - or maybe more description - STATE_BODY => 3, # the body of the comment - STATE_BODY_WITH_BLANK_LINE => 4, # the body, which has a blank line - STATE_PROTO => 5, # scanning prototype - STATE_DOCBLOCK => 6, # documentation block - STATE_INLINE => 7, # gathering doc outside main block -}; -my $state; -my $in_doc_sect; -my $leading_space; - -# Inline documentation state -use constant { - STATE_INLINE_NA => 0, # not applicable ($state != STATE_INLINE) - STATE_INLINE_NAME => 1, # looking for member name (@foo:) - STATE_INLINE_TEXT => 2, # looking for member documentation - STATE_INLINE_END => 3, # done - STATE_INLINE_ERROR => 4, # error - Comment without header was found. - # Spit a warning as it's not - # proper kernel-doc and ignore the rest. 
-}; -my $inline_doc_state; - -#declaration types: can be -# 'function', 'struct', 'union', 'enum', 'typedef' -my $decl_type; - -my $doc_start = '^/\*\*\s*$'; # Allow whitespace at end of comment start. -my $doc_end = '\*/'; -my $doc_com = '\s*\*\s*'; -my $doc_com_body = '\s*\* ?'; -my $doc_decl = $doc_com . '(\w+)'; -# @params and a strictly limited set of supported section names -my $doc_sect = $doc_com . - '\s*(\@[.\w]+|\@\.\.\.|description|context|returns?|notes?|examples?)\s*:(.*)'; -my $doc_content = $doc_com_body . '(.*)'; -my $doc_block = $doc_com . 'DOC:\s*(.*)?'; -my $doc_inline_start = '^\s*/\*\*\s*$'; -my $doc_inline_sect = '\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)'; -my $doc_inline_end = '^\s*\*/\s*$'; -my $doc_inline_oneline = '^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$'; -my $export_symbol = '^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*;'; - -my %parameterdescs; -my %parameterdesc_start_lines; -my @parameterlist; -my %sections; -my @sectionlist; -my %section_start_lines; -my $sectcheck; -my $struct_actual; - -my $contents = ""; -my $new_start_line = 0; - -# the canonical section names. see also $doc_sect above. 
-my $section_default = "Description"; # default section -my $section_intro = "Introduction"; -my $section = $section_default; -my $section_context = "Context"; -my $section_return = "Return"; - -my $undescribed = "-- undescribed --"; - -reset_state(); - -while ($ARGV[0] =~ m/^--?(.*)/) { - my $cmd = $1; - shift @ARGV; - if ($cmd eq "man") { - $output_mode = "man"; - @highlights = @highlights_man; - $blankline = $blankline_man; - } elsif ($cmd eq "rst") { - $output_mode = "rst"; - @highlights = @highlights_rst; - $blankline = $blankline_rst; - } elsif ($cmd eq "none") { - $output_mode = "none"; - } elsif ($cmd eq "module") { # not needed for XML, inherits from calling document - $modulename = shift @ARGV; - } elsif ($cmd eq "function") { # to only output specific functions - $output_selection = OUTPUT_INCLUDE; - $function = shift @ARGV; - $function_table{$function} = 1; - } elsif ($cmd eq "nosymbol") { # Exclude specific symbols - my $symbol = shift @ARGV; - $nosymbol_table{$symbol} = 1; - } elsif ($cmd eq "export") { # only exported symbols - $output_selection = OUTPUT_EXPORTED; - %function_table = (); - } elsif ($cmd eq "internal") { # only non-exported symbols - $output_selection = OUTPUT_INTERNAL; - %function_table = (); - } elsif ($cmd eq "export-file") { - my $file = shift @ARGV; - push(@export_file_list, $file); - } elsif ($cmd eq "v") { - $verbose = 1; - } elsif ($cmd eq "Werror") { - $Werror = 1; - } elsif (($cmd eq "h") || ($cmd eq "help")) { - usage(); - } elsif ($cmd eq 'no-doc-sections') { - $no_doc_sections = 1; - } elsif ($cmd eq 'enable-lineno') { - $enable_lineno = 1; - } elsif ($cmd eq 'show-not-found') { - $show_not_found = 1; # A no-op but don't fail - } elsif ($cmd eq "sphinx-version") { - my $ver_string = shift @ARGV; - if ($ver_string =~ m/^(\d+)(\.\d+)?(\.\d+)?/) { - $sphinx_major = $1; - if (defined($2)) { - $sphinx_minor = substr($2,1); - } else { - $sphinx_minor = 0; - } - if (defined($3)) { - $sphinx_patch = substr($3,1) - } else { - 
$sphinx_patch = 0; - } - } else { - die "Sphinx version should either major.minor or major.minor.patch format\n"; - } - } else { - # Unknown argument - usage(); - } -} - -# continue execution near EOF; - -# The C domain dialect changed on Sphinx 3. So, we need to check the -# version in order to produce the right tags. -sub findprog($) -{ - foreach(split(/:/, $ENV{PATH})) { - return "$_/$_[0]" if(-x "$_/$_[0]"); - } -} - -sub get_sphinx_version() -{ - my $ver; - - my $cmd = "sphinx-build"; - if (!findprog($cmd)) { - my $cmd = "sphinx-build3"; - if (!findprog($cmd)) { - $sphinx_major = 1; - $sphinx_minor = 2; - $sphinx_patch = 0; - printf STDERR "Warning: Sphinx version not found. Using default (Sphinx version %d.%d.%d)\n", - $sphinx_major, $sphinx_minor, $sphinx_patch; - return; - } - } - - open IN, "$cmd --version 2>&1 |"; - while () { - if (m/^\s*sphinx-build\s+([\d]+)\.([\d\.]+)(\+\/[\da-f]+)?$/) { - $sphinx_major = $1; - $sphinx_minor = $2; - $sphinx_patch = $3; - last; - } - # Sphinx 1.2.x uses a different format - if (m/^\s*Sphinx.*\s+([\d]+)\.([\d\.]+)$/) { - $sphinx_major = $1; - $sphinx_minor = $2; - $sphinx_patch = $3; - last; - } - } - close IN; -} - -# get kernel version from env -sub get_kernel_version() { - my $version = 'unknown kernel version'; - - if (defined($ENV{'KERNELVERSION'})) { - $version = $ENV{'KERNELVERSION'}; - } - return $version; -} - -# -sub print_lineno { - my $lineno = shift; - if ($enable_lineno && defined($lineno)) { - print "#define LINENO " . $lineno . "\n"; - } -} -## -# dumps section contents to arrays/hashes intended for that purpose. -# -sub dump_section { - my $file = shift; - my $name = shift; - my $contents = join "\n", @_; - - if ($name =~ m/$type_param/) { - $name = $1; - $parameterdescs{$name} = $contents; - $sectcheck = $sectcheck . $name . 
" "; - $parameterdesc_start_lines{$name} = $new_start_line; - $new_start_line = 0; - } elsif ($name eq "@\.\.\.") { - $name = "..."; - $parameterdescs{$name} = $contents; - $sectcheck = $sectcheck . $name . " "; - $parameterdesc_start_lines{$name} = $new_start_line; - $new_start_line = 0; - } else { - if (defined($sections{$name}) && ($sections{$name} ne "")) { - # Only warn on user specified duplicate section names. - if ($name ne $section_default) { - print STDERR "${file}:$.: warning: duplicate section name '$name'\n"; - ++$warnings; - } - $sections{$name} .= $contents; - } else { - $sections{$name} = $contents; - push @sectionlist, $name; - $section_start_lines{$name} = $new_start_line; - $new_start_line = 0; - } - } -} - -## -# dump DOC: section after checking that it should go out -# -sub dump_doc_section { - my $file = shift; - my $name = shift; - my $contents = join "\n", @_; - - if ($no_doc_sections) { - return; - } - - return if (defined($nosymbol_table{$name})); - - if (($output_selection == OUTPUT_ALL) || - (($output_selection == OUTPUT_INCLUDE) && - defined($function_table{$name}))) - { - dump_section($file, $name, $contents); - output_blockhead({'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'module' => $modulename, - 'content-only' => ($output_selection != OUTPUT_ALL), }); - } -} - -## -# output function -# -# parameterdescs, a hash. -# function => "function name" -# parameterlist => @list of parameters -# parameterdescs => %parameter descriptions -# sectionlist => @list of sections -# sections => %section descriptions -# - -sub output_highlight { - my $contents = join "\n",@_; - my $line; - -# DEBUG -# if (!defined $contents) { -# use Carp; -# confess "output_highlight got called with no args?\n"; -# } - -# print STDERR "contents b4:$contents\n"; - eval $dohighlight; - die $@ if $@; -# print STDERR "contents af:$contents\n"; - - foreach $line (split "\n", $contents) { - if (! 
$output_preformatted) { - $line =~ s/^\s*//; - } - if ($line eq ""){ - if (! $output_preformatted) { - print $lineprefix, $blankline; - } - } else { - if ($output_mode eq "man" && substr($line, 0, 1) eq ".") { - print "\\&$line"; - } else { - print $lineprefix, $line; - } - } - print "\n"; - } -} - -## -# output function in man -sub output_function_man(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - my $count; - - print ".TH \"$args{'function'}\" 9 \"$args{'function'}\" \"$man_date\" \"Kernel Hacker's Manual\" LINUX\n"; - - print ".SH NAME\n"; - print $args{'function'} . " \\- " . $args{'purpose'} . "\n"; - - print ".SH SYNOPSIS\n"; - if ($args{'functiontype'} ne "") { - print ".B \"" . $args{'functiontype'} . "\" " . $args{'function'} . "\n"; - } else { - print ".B \"" . $args{'function'} . "\n"; - } - $count = 0; - my $parenth = "("; - my $post = ","; - foreach my $parameter (@{$args{'parameterlist'}}) { - if ($count == $#{$args{'parameterlist'}}) { - $post = ");"; - } - $type = $args{'parametertypes'}{$parameter}; - if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { - # pointer-to-function - print ".BI \"" . $parenth . $1 . "\" " . " \") (" . $2 . ")" . $post . "\"\n"; - } else { - $type =~ s/([^\*])$/$1 /; - print ".BI \"" . $parenth . $type . "\" " . " \"" . $post . "\"\n"; - } - $count++; - $parenth = ""; - } - - print ".SH ARGUMENTS\n"; - foreach $parameter (@{$args{'parameterlist'}}) { - my $parameter_name = $parameter; - $parameter_name =~ s/\[.*//; - - print ".IP \"" . $parameter . 
"\" 12\n"; - output_highlight($args{'parameterdescs'}{$parameter_name}); - } - foreach $section (@{$args{'sectionlist'}}) { - print ".SH \"", uc $section, "\"\n"; - output_highlight($args{'sections'}{$section}); - } -} - -## -# output enum in man -sub output_enum_man(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - my $count; - - print ".TH \"$args{'module'}\" 9 \"enum $args{'enum'}\" \"$man_date\" \"API Manual\" LINUX\n"; - - print ".SH NAME\n"; - print "enum " . $args{'enum'} . " \\- " . $args{'purpose'} . "\n"; - - print ".SH SYNOPSIS\n"; - print "enum " . $args{'enum'} . " {\n"; - $count = 0; - foreach my $parameter (@{$args{'parameterlist'}}) { - print ".br\n.BI \" $parameter\"\n"; - if ($count == $#{$args{'parameterlist'}}) { - print "\n};\n"; - last; - } - else { - print ", \n.br\n"; - } - $count++; - } - - print ".SH Constants\n"; - foreach $parameter (@{$args{'parameterlist'}}) { - my $parameter_name = $parameter; - $parameter_name =~ s/\[.*//; - - print ".IP \"" . $parameter . "\" 12\n"; - output_highlight($args{'parameterdescs'}{$parameter_name}); - } - foreach $section (@{$args{'sectionlist'}}) { - print ".SH \"$section\"\n"; - output_highlight($args{'sections'}{$section}); - } -} - -## -# output struct in man -sub output_struct_man(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - - print ".TH \"$args{'module'}\" 9 \"" . $args{'type'} . " " . $args{'struct'} . "\" \"$man_date\" \"API Manual\" LINUX\n"; - - print ".SH NAME\n"; - print $args{'type'} . " " . $args{'struct'} . " \\- " . $args{'purpose'} . "\n"; - - my $declaration = $args{'definition'}; - $declaration =~ s/\t/ /g; - $declaration =~ s/\n/"\n.br\n.BI \"/g; - print ".SH SYNOPSIS\n"; - print $args{'type'} . " " . $args{'struct'} . 
" {\n.br\n"; - print ".BI \"$declaration\n};\n.br\n\n"; - - print ".SH Members\n"; - foreach $parameter (@{$args{'parameterlist'}}) { - ($parameter =~ /^#/) && next; - - my $parameter_name = $parameter; - $parameter_name =~ s/\[.*//; - - ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; - print ".IP \"" . $parameter . "\" 12\n"; - output_highlight($args{'parameterdescs'}{$parameter_name}); - } - foreach $section (@{$args{'sectionlist'}}) { - print ".SH \"$section\"\n"; - output_highlight($args{'sections'}{$section}); - } -} - -## -# output typedef in man -sub output_typedef_man(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - - print ".TH \"$args{'module'}\" 9 \"$args{'typedef'}\" \"$man_date\" \"API Manual\" LINUX\n"; - - print ".SH NAME\n"; - print "typedef " . $args{'typedef'} . " \\- " . $args{'purpose'} . "\n"; - - foreach $section (@{$args{'sectionlist'}}) { - print ".SH \"$section\"\n"; - output_highlight($args{'sections'}{$section}); - } -} - -sub output_blockhead_man(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - my $count; - - print ".TH \"$args{'module'}\" 9 \"$args{'module'}\" \"$man_date\" \"API Manual\" LINUX\n"; - - foreach $section (@{$args{'sectionlist'}}) { - print ".SH \"$section\"\n"; - output_highlight($args{'sections'}{$section}); - } -} - -## -# output in restructured text -# - -# -# This could use some work; it's used to output the DOC: sections, and -# starts by putting out the name of the doc section itself, but that tends -# to duplicate a header already in the template file. 
-# -sub output_blockhead_rst(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - - foreach $section (@{$args{'sectionlist'}}) { - next if (defined($nosymbol_table{$section})); - - if ($output_selection != OUTPUT_INCLUDE) { - print "**$section**\n\n"; - } - print_lineno($section_start_lines{$section}); - output_highlight_rst($args{'sections'}{$section}); - print "\n"; - } -} - -# -# Apply the RST highlights to a sub-block of text. -# -sub highlight_block($) { - # The dohighlight kludge requires the text be called $contents - my $contents = shift; - eval $dohighlight; - die $@ if $@; - return $contents; -} - -# -# Regexes used only here. -# -my $sphinx_literal = '^[^.].*::$'; -my $sphinx_cblock = '^\.\.\ +code-block::'; - -sub output_highlight_rst { - my $input = join "\n",@_; - my $output = ""; - my $line; - my $in_literal = 0; - my $litprefix; - my $block = ""; - - foreach $line (split "\n",$input) { - # - # If we're in a literal block, see if we should drop out - # of it. Otherwise pass the line straight through unmunged. - # - if ($in_literal) { - if (! ($line =~ /^\s*$/)) { - # - # If this is the first non-blank line in a literal - # block we need to figure out what the proper indent is. - # - if ($litprefix eq "") { - $line =~ /^(\s*)/; - $litprefix = '^' . $1; - $output .= $line . "\n"; - } elsif (! ($line =~ /$litprefix/)) { - $in_literal = 0; - } else { - $output .= $line . "\n"; - } - } else { - $output .= $line . "\n"; - } - } - # - # Not in a literal block (or just dropped out) - # - if (! $in_literal) { - $block .= $line . "\n"; - if (($line =~ /$sphinx_literal/) || ($line =~ /$sphinx_cblock/)) { - $in_literal = 1; - $litprefix = ""; - $output .= highlight_block($block); - $block = "" - } - } - } - - if ($block) { - $output .= highlight_block($block); - } - foreach $line (split "\n", $output) { - print $lineprefix . $line . 
"\n"; - } -} - -sub output_function_rst(%) { - my %args = %{$_[0]}; - my ($parameter, $section); - my $oldprefix = $lineprefix; - my $start = ""; - my $is_macro = 0; - - if ($sphinx_major < 3) { - if ($args{'typedef'}) { - print ".. c:type:: ". $args{'function'} . "\n\n"; - print_lineno($declaration_start_line); - print " **Typedef**: "; - $lineprefix = ""; - output_highlight_rst($args{'purpose'}); - $start = "\n\n**Syntax**\n\n ``"; - $is_macro = 1; - } else { - print ".. c:function:: "; - } - } else { - if ($args{'typedef'} || $args{'functiontype'} eq "") { - $is_macro = 1; - print ".. c:macro:: ". $args{'function'} . "\n\n"; - } else { - print ".. c:function:: "; - } - - if ($args{'typedef'}) { - print_lineno($declaration_start_line); - print " **Typedef**: "; - $lineprefix = ""; - output_highlight_rst($args{'purpose'}); - $start = "\n\n**Syntax**\n\n ``"; - } else { - print "``" if ($is_macro); - } - } - if ($args{'functiontype'} ne "") { - $start .= $args{'functiontype'} . " " . $args{'function'} . " ("; - } else { - $start .= $args{'function'} . " ("; - } - print $start; - - my $count = 0; - foreach my $parameter (@{$args{'parameterlist'}}) { - if ($count ne 0) { - print ", "; - } - $count++; - $type = $args{'parametertypes'}{$parameter}; - - if ($type =~ m/([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)/) { - # pointer-to-function - print $1 . $parameter . ") (" . $2 . 
")"; - } else { - print $type; - } - } - if ($is_macro) { - print ")``\n\n"; - } else { - print ")\n\n"; - } - if (!$args{'typedef'}) { - print_lineno($declaration_start_line); - $lineprefix = " "; - output_highlight_rst($args{'purpose'}); - print "\n"; - } - - print "**Parameters**\n\n"; - $lineprefix = " "; - foreach $parameter (@{$args{'parameterlist'}}) { - my $parameter_name = $parameter; - $parameter_name =~ s/\[.*//; - $type = $args{'parametertypes'}{$parameter}; - - if ($type ne "") { - print "``$type``\n"; - } else { - print "``$parameter``\n"; - } - - print_lineno($parameterdesc_start_lines{$parameter_name}); - - if (defined($args{'parameterdescs'}{$parameter_name}) && - $args{'parameterdescs'}{$parameter_name} ne $undescribed) { - output_highlight_rst($args{'parameterdescs'}{$parameter_name}); - } else { - print " *undescribed*\n"; - } - print "\n"; - } - - $lineprefix = $oldprefix; - output_section_rst(@_); -} - -sub output_section_rst(%) { - my %args = %{$_[0]}; - my $section; - my $oldprefix = $lineprefix; - $lineprefix = ""; - - foreach $section (@{$args{'sectionlist'}}) { - print "**$section**\n\n"; - print_lineno($section_start_lines{$section}); - output_highlight_rst($args{'sections'}{$section}); - print "\n"; - } - print "\n"; - $lineprefix = $oldprefix; -} - -sub output_enum_rst(%) { - my %args = %{$_[0]}; - my ($parameter); - my $oldprefix = $lineprefix; - my $count; - - if ($sphinx_major < 3) { - my $name = "enum " . $args{'enum'}; - print "\n\n.. c:type:: " . $name . "\n\n"; - } else { - my $name = $args{'enum'}; - print "\n\n.. c:enum:: " . $name . 
"\n\n"; - } - print_lineno($declaration_start_line); - $lineprefix = " "; - output_highlight_rst($args{'purpose'}); - print "\n"; - - print "**Constants**\n\n"; - $lineprefix = " "; - foreach $parameter (@{$args{'parameterlist'}}) { - print "``$parameter``\n"; - if ($args{'parameterdescs'}{$parameter} ne $undescribed) { - output_highlight_rst($args{'parameterdescs'}{$parameter}); - } else { - print " *undescribed*\n"; - } - print "\n"; - } - - $lineprefix = $oldprefix; - output_section_rst(@_); -} - -sub output_typedef_rst(%) { - my %args = %{$_[0]}; - my ($parameter); - my $oldprefix = $lineprefix; - my $name; - - if ($sphinx_major < 3) { - $name = "typedef " . $args{'typedef'}; - } else { - $name = $args{'typedef'}; - } - print "\n\n.. c:type:: " . $name . "\n\n"; - print_lineno($declaration_start_line); - $lineprefix = " "; - output_highlight_rst($args{'purpose'}); - print "\n"; - - $lineprefix = $oldprefix; - output_section_rst(@_); -} - -sub output_struct_rst(%) { - my %args = %{$_[0]}; - my ($parameter); - my $oldprefix = $lineprefix; - - if ($sphinx_major < 3) { - my $name = $args{'type'} . " " . $args{'struct'}; - print "\n\n.. c:type:: " . $name . "\n\n"; - } else { - my $name = $args{'struct'}; - if ($args{'type'} eq 'union') { - print "\n\n.. c:union:: " . $name . "\n\n"; - } else { - print "\n\n.. c:struct:: " . $name . "\n\n"; - } - } - print_lineno($declaration_start_line); - $lineprefix = " "; - output_highlight_rst($args{'purpose'}); - print "\n"; - - print "**Definition**\n\n"; - print "::\n\n"; - my $declaration = $args{'definition'}; - $declaration =~ s/\t/ /g; - print " " . $args{'type'} . " " . $args{'struct'} . 
" {\n$declaration };\n\n"; - - print "**Members**\n\n"; - $lineprefix = " "; - foreach $parameter (@{$args{'parameterlist'}}) { - ($parameter =~ /^#/) && next; - - my $parameter_name = $parameter; - $parameter_name =~ s/\[.*//; - - ($args{'parameterdescs'}{$parameter_name} ne $undescribed) || next; - $type = $args{'parametertypes'}{$parameter}; - print_lineno($parameterdesc_start_lines{$parameter_name}); - print "``" . $parameter . "``\n"; - output_highlight_rst($args{'parameterdescs'}{$parameter_name}); - print "\n"; - } - print "\n"; - - $lineprefix = $oldprefix; - output_section_rst(@_); -} - -## none mode output functions - -sub output_function_none(%) { -} - -sub output_enum_none(%) { -} - -sub output_typedef_none(%) { -} - -sub output_struct_none(%) { -} - -sub output_blockhead_none(%) { -} - -## -# generic output function for all types (function, struct/union, typedef, enum); -# calls the generated, variable output_ function name based on -# functype and output_mode -sub output_declaration { - no strict 'refs'; - my $name = shift; - my $functype = shift; - my $func = "output_${functype}_$output_mode"; - - return if (defined($nosymbol_table{$name})); - - if (($output_selection == OUTPUT_ALL) || - (($output_selection == OUTPUT_INCLUDE || - $output_selection == OUTPUT_EXPORTED) && - defined($function_table{$name})) || - ($output_selection == OUTPUT_INTERNAL && - !($functype eq "function" && defined($function_table{$name})))) - { - &$func(@_); - $section_counter++; - } -} - -## -# generic output function - calls the right one based on current output mode. -sub output_blockhead { - no strict 'refs'; - my $func = "output_blockhead_" . $output_mode; - &$func(@_); - $section_counter++; -} - -## -# takes a declaration (struct, union, enum, typedef) and -# invokes the right handler. NOT called for functions. -sub dump_declaration($$) { - no strict 'refs'; - my ($prototype, $file) = @_; - my $func = "dump_" . 
$decl_type; - &$func(@_); -} - -sub dump_union($$) { - dump_struct(@_); -} - -sub dump_struct($$) { - my $x = shift; - my $file = shift; - - if ($x =~ /(struct|union)\s+(\w+)\s*\{(.*)\}(\s*(__packed|__aligned|____cacheline_aligned_in_smp|____cacheline_aligned|__attribute__\s*\(\([a-z0-9,_\s\(\)]*\)\)))*/) { - my $decl_type = $1; - $declaration_name = $2; - my $members = $3; - - # ignore members marked private: - $members =~ s/\/\*\s*private:.*?\/\*\s*public:.*?\*\///gosi; - $members =~ s/\/\*\s*private:.*//gosi; - # strip comments: - $members =~ s/\/\*.*?\*\///gos; - # strip attributes - $members =~ s/\s*__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)/ /gi; - $members =~ s/\s*__aligned\s*\([^;]*\)/ /gos; - $members =~ s/\s*__packed\s*/ /gos; - $members =~ s/\s*CRYPTO_MINALIGN_ATTR/ /gos; - $members =~ s/\s*____cacheline_aligned_in_smp/ /gos; - $members =~ s/\s*____cacheline_aligned/ /gos; - - # replace DECLARE_BITMAP - $members =~ s/__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)/DECLARE_BITMAP($1, __ETHTOOL_LINK_MODE_MASK_NBITS)/gos; - $members =~ s/DECLARE_BITMAP\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[BITS_TO_LONGS($2)\]/gos; - # replace DECLARE_HASHTABLE - $members =~ s/DECLARE_HASHTABLE\s*\(([^,)]+),\s*([^,)]+)\)/unsigned long $1\[1 << (($2) - 1)\]/gos; - # replace DECLARE_KFIFO - $members =~ s/DECLARE_KFIFO\s*\(([^,)]+),\s*([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos; - # replace DECLARE_KFIFO_PTR - $members =~ s/DECLARE_KFIFO_PTR\s*\(([^,)]+),\s*([^,)]+)\)/$2 \*$1/gos; - - my $declaration = $members; - - # Split nested struct/union elements as newer ones - while ($members =~ m/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/) { - my $newmember; - my $maintype = $1; - my $ids = $4; - my $content = $3; - foreach my $id(split /,/, $ids) { - $newmember .= "$maintype $id; "; - - $id =~ s/[:\[].*//; - $id =~ s/^\s*\**(\S+)\s*/$1/; - foreach my $arg (split /;/, $content) { - next if ($arg =~ m/^\s*$/); - if ($arg =~ m/^([^\(]+\(\*?\s*)([\w\.]*)(\s*\).*)/) { - # 
pointer-to-function - my $type = $1; - my $name = $2; - my $extra = $3; - next if (!$name); - if ($id =~ m/^\s*$/) { - # anonymous struct/union - $newmember .= "$type$name$extra; "; - } else { - $newmember .= "$type$id.$name$extra; "; - } - } else { - my $type; - my $names; - $arg =~ s/^\s+//; - $arg =~ s/\s+$//; - # Handle bitmaps - $arg =~ s/:\s*\d+\s*//g; - # Handle arrays - $arg =~ s/\[.*\]//g; - # The type may have multiple words, - # and multiple IDs can be defined, like: - # const struct foo, *bar, foobar - # So, we remove spaces when parsing the - # names, in order to match just names - # and commas for the names - $arg =~ s/\s*,\s*/,/g; - if ($arg =~ m/(.*)\s+([\S+,]+)/) { - $type = $1; - $names = $2; - } else { - $newmember .= "$arg; "; - next; - } - foreach my $name (split /,/, $names) { - $name =~ s/^\s*\**(\S+)\s*/$1/; - next if (($name =~ m/^\s*$/)); - if ($id =~ m/^\s*$/) { - # anonymous struct/union - $newmember .= "$type $name; "; - } else { - $newmember .= "$type $id.$name; "; - } - } - } - } - } - $members =~ s/(struct|union)([^\{\};]+)\{([^\{\}]*)\}([^\{\}\;]*)\;/$newmember/; - } - - # Ignore other nested elements, like enums - $members =~ s/(\{[^\{\}]*\})//g; - - create_parameterlist($members, ';', $file, $declaration_name); - check_sections($file, $declaration_name, $decl_type, $sectcheck, $struct_actual); - - # Adjust declaration for better display - $declaration =~ s/([\{;])/$1\n/g; - $declaration =~ s/\}\s+;/};/g; - # Better handle inlined enums - do {} while ($declaration =~ s/(enum\s+\{[^\}]+),([^\n])/$1,\n$2/); - - my @def_args = split /\n/, $declaration; - my $level = 1; - $declaration = ""; - foreach my $clause (@def_args) { - $clause =~ s/^\s+//; - $clause =~ s/\s+$//; - $clause =~ s/\s+/ /; - next if (!$clause); - $level-- if ($clause =~ m/(\})/ && $level > 1); - if (!($clause =~ m/^\s*#/)) { - $declaration .= "\t" x $level; - } - $declaration .= "\t" . $clause . 
"\n"; - $level++ if ($clause =~ m/(\{)/ && !($clause =~m/\}/)); - } - output_declaration($declaration_name, - 'struct', - {'struct' => $declaration_name, - 'module' => $modulename, - 'definition' => $declaration, - 'parameterlist' => \@parameterlist, - 'parameterdescs' => \%parameterdescs, - 'parametertypes' => \%parametertypes, - 'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'purpose' => $declaration_purpose, - 'type' => $decl_type - }); - } - else { - print STDERR "${file}:$.: error: Cannot parse struct or union!\n"; - ++$errors; - } -} - - -sub show_warnings($$) { - my $functype = shift; - my $name = shift; - - return 0 if (defined($nosymbol_table{$name})); - - return 1 if ($output_selection == OUTPUT_ALL); - - if ($output_selection == OUTPUT_EXPORTED) { - if (defined($function_table{$name})) { - return 1; - } else { - return 0; - } - } - if ($output_selection == OUTPUT_INTERNAL) { - if (!($functype eq "function" && defined($function_table{$name}))) { - return 1; - } else { - return 0; - } - } - if ($output_selection == OUTPUT_INCLUDE) { - if (defined($function_table{$name})) { - return 1; - } else { - return 0; - } - } - die("Please add the new output type at show_warnings()"); -} - -sub dump_enum($$) { - my $x = shift; - my $file = shift; - my $members; - - - $x =~ s@/\*.*?\*/@@gos; # strip comments. 
- # strip #define macros inside enums - $x =~ s@#\s*((define|ifdef)\s+|endif)[^;]*;@@gos; - - if ($x =~ /typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;/) { - $declaration_name = $2; - $members = $1; - } elsif ($x =~ /enum\s+(\w*)\s*\{(.*)\}/) { - $declaration_name = $1; - $members = $2; - } - - if ($declaration_name) { - my %_members; - - $members =~ s/\s+$//; - - foreach my $arg (split ',', $members) { - $arg =~ s/^\s*(\w+).*/$1/; - push @parameterlist, $arg; - if (!$parameterdescs{$arg}) { - $parameterdescs{$arg} = $undescribed; - if (show_warnings("enum", $declaration_name)) { - print STDERR "${file}:$.: warning: Enum value '$arg' not described in enum '$declaration_name'\n"; - } - } - $_members{$arg} = 1; - } - - while (my ($k, $v) = each %parameterdescs) { - if (!exists($_members{$k})) { - if (show_warnings("enum", $declaration_name)) { - print STDERR "${file}:$.: warning: Excess enum value '$k' description in '$declaration_name'\n"; - } - } - } - - output_declaration($declaration_name, - 'enum', - {'enum' => $declaration_name, - 'module' => $modulename, - 'parameterlist' => \@parameterlist, - 'parameterdescs' => \%parameterdescs, - 'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'purpose' => $declaration_purpose - }); - } else { - print STDERR "${file}:$.: error: Cannot parse enum!\n"; - ++$errors; - } -} - -my $typedef_type = qr { ((?:\s+[\w\*]+){1,8})\s* }x; -my $typedef_ident = qr { \*?\s*(\w\S+)\s* }x; -my $typedef_args = qr { \s*\((.*)\); }x; - -my $typedef1 = qr { typedef$typedef_type\($typedef_ident\)$typedef_args }x; -my $typedef2 = qr { typedef$typedef_type$typedef_ident$typedef_args }x; - -sub dump_typedef($$) { - my $x = shift; - my $file = shift; - - $x =~ s@/\*.*?\*/@@gos; # strip comments. 
- - # Parse function typedef prototypes - if ($x =~ $typedef1 || $x =~ $typedef2) { - $return_type = $1; - $declaration_name = $2; - my $args = $3; - $return_type =~ s/^\s+//; - - create_parameterlist($args, ',', $file, $declaration_name); - - output_declaration($declaration_name, - 'function', - {'function' => $declaration_name, - 'typedef' => 1, - 'module' => $modulename, - 'functiontype' => $return_type, - 'parameterlist' => \@parameterlist, - 'parameterdescs' => \%parameterdescs, - 'parametertypes' => \%parametertypes, - 'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'purpose' => $declaration_purpose - }); - return; - } - - while (($x =~ /\(*.\)\s*;$/) || ($x =~ /\[*.\]\s*;$/)) { - $x =~ s/\(*.\)\s*;$/;/; - $x =~ s/\[*.\]\s*;$/;/; - } - - if ($x =~ /typedef.*\s+(\w+)\s*;/) { - $declaration_name = $1; - - output_declaration($declaration_name, - 'typedef', - {'typedef' => $declaration_name, - 'module' => $modulename, - 'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'purpose' => $declaration_purpose - }); - } - else { - print STDERR "${file}:$.: error: Cannot parse typedef!\n"; - ++$errors; - } -} - -sub save_struct_actual($) { - my $actual = shift; - - # strip all spaces from the actual param so that it looks like one string item - $actual =~ s/\s*//g; - $struct_actual = $struct_actual . $actual . " "; -} - -sub create_parameterlist($$$$) { - my $args = shift; - my $splitter = shift; - my $file = shift; - my $declaration_name = shift; - my $type; - my $param; - - # temporarily replace commas inside function pointer definition - while ($args =~ /(\([^\),]+),/) { - $args =~ s/(\([^\),]+),/$1#/g; - } - - foreach my $arg (split($splitter, $args)) { - # strip comments - $arg =~ s/\/\*.*\*\///; - # strip leading/trailing spaces - $arg =~ s/^\s*//; - $arg =~ s/\s*$//; - $arg =~ s/\s+/ /; - - if ($arg =~ /^#/) { - # Treat preprocessor directive as a typeless variable just to fill - # corresponding data structures "correctly". 
Catch it later in - # output_* subs. - push_parameter($arg, "", "", $file); - } elsif ($arg =~ m/\(.+\)\s*\(/) { - # pointer-to-function - $arg =~ tr/#/,/; - $arg =~ m/[^\(]+\(\*?\s*([\w\.]*)\s*\)/; - $param = $1; - $type = $arg; - $type =~ s/([^\(]+\(\*?)\s*$param/$1/; - save_struct_actual($param); - push_parameter($param, $type, $arg, $file, $declaration_name); - } elsif ($arg) { - $arg =~ s/\s*:\s*/:/g; - $arg =~ s/\s*\[/\[/g; - - my @args = split('\s*,\s*', $arg); - if ($args[0] =~ m/\*/) { - $args[0] =~ s/(\*+)\s*/ $1/; - } - - my @first_arg; - if ($args[0] =~ /^(.*\s+)(.*?\[.*\].*)$/) { - shift @args; - push(@first_arg, split('\s+', $1)); - push(@first_arg, $2); - } else { - @first_arg = split('\s+', shift @args); - } - - unshift(@args, pop @first_arg); - $type = join " ", @first_arg; - - foreach $param (@args) { - if ($param =~ m/^(\*+)\s*(.*)/) { - save_struct_actual($2); - - push_parameter($2, "$type $1", $arg, $file, $declaration_name); - } - elsif ($param =~ m/(.*?):(\d+)/) { - if ($type ne "") { # skip unnamed bit-fields - save_struct_actual($1); - push_parameter($1, "$type:$2", $arg, $file, $declaration_name) - } - } - else { - save_struct_actual($param); - push_parameter($param, $type, $arg, $file, $declaration_name); - } - } - } - } -} - -sub push_parameter($$$$$) { - my $param = shift; - my $type = shift; - my $org_arg = shift; - my $file = shift; - my $declaration_name = shift; - - if (($anon_struct_union == 1) && ($type eq "") && - ($param eq "}")) { - return; # ignore the ending }; from anon. 
struct/union - } - - $anon_struct_union = 0; - $param =~ s/[\[\)].*//; - - if ($type eq "" && $param =~ /\.\.\.$/) - { - if (!$param =~ /\w\.\.\.$/) { - # handles unnamed variable parameters - $param = "..."; - } - elsif ($param =~ /\w\.\.\.$/) { - # for named variable parameters of the form `x...`, remove the dots - $param =~ s/\.\.\.$//; - } - if (!defined $parameterdescs{$param} || $parameterdescs{$param} eq "") { - $parameterdescs{$param} = "variable arguments"; - } - } - elsif ($type eq "" && ($param eq "" or $param eq "void")) - { - $param="void"; - $parameterdescs{void} = "no arguments"; - } - elsif ($type eq "" && ($param eq "struct" or $param eq "union")) - # handle unnamed (anonymous) union or struct: - { - $type = $param; - $param = "{unnamed_" . $param . "}"; - $parameterdescs{$param} = "anonymous\n"; - $anon_struct_union = 1; - } - - # warn if parameter has no description - # (but ignore ones starting with # as these are not parameters - # but inline preprocessor statements); - # Note: It will also ignore void params and unnamed structs/unions - if (!defined $parameterdescs{$param} && $param !~ /^#/) { - $parameterdescs{$param} = $undescribed; - - if (show_warnings($type, $declaration_name) && $param !~ /\./) { - print STDERR - "${file}:$.: warning: Function parameter or member '$param' not described in '$declaration_name'\n"; - ++$warnings; - } - } - - # strip spaces from $param so that it is one continuous string - # on @parameterlist; - # this fixes a problem where check_sections() cannot find - # a parameter like "addr[6 + 2]" because it actually appears - # as "addr[6", "+", "2]" on the parameter list; - # but it's better to maintain the param string unchanged for output, - # so just weaken the string compare in check_sections() to ignore - # "[blah" in a parameter string; - ###$param =~ s/\s*//g; - push @parameterlist, $param; - $org_arg =~ s/\s\s+/ /g; - $parametertypes{$param} = $org_arg; -} - -sub check_sections($$$$$) { - my ($file, 
$decl_name, $decl_type, $sectcheck, $prmscheck) = @_; - my @sects = split ' ', $sectcheck; - my @prms = split ' ', $prmscheck; - my $err; - my ($px, $sx); - my $prm_clean; # strip trailing "[array size]" and/or beginning "*" - - foreach $sx (0 .. $#sects) { - $err = 1; - foreach $px (0 .. $#prms) { - $prm_clean = $prms[$px]; - $prm_clean =~ s/\[.*\]//; - $prm_clean =~ s/__attribute__\s*\(\([a-z,_\*\s\(\)]*\)\)//i; - # ignore array size in a parameter string; - # however, the original param string may contain - # spaces, e.g.: addr[6 + 2] - # and this appears in @prms as "addr[6" since the - # parameter list is split at spaces; - # hence just ignore "[..." for the sections check; - $prm_clean =~ s/\[.*//; - - ##$prm_clean =~ s/^\**//; - if ($prm_clean eq $sects[$sx]) { - $err = 0; - last; - } - } - if ($err) { - if ($decl_type eq "function") { - print STDERR "${file}:$.: warning: " . - "Excess function parameter " . - "'$sects[$sx]' " . - "description in '$decl_name'\n"; - ++$warnings; - } - } - } -} - -## -# Checks the section describing the return value of a function. -sub check_return_section { - my $file = shift; - my $declaration_name = shift; - my $return_type = shift; - - # Ignore an empty return type (It's a macro) - # Ignore functions with a "void" return type. (But don't ignore "void *") - if (($return_type eq "") || ($return_type =~ /void\s*\w*\s*$/)) { - return; - } - - if (!defined($sections{$section_return}) || - $sections{$section_return} eq "") { - print STDERR "${file}:$.: warning: " . - "No description found for return value of " . - "'$declaration_name'\n"; - ++$warnings; - } -} - -## -# takes a function prototype and the name of the current file being -# processed and spits out all the details stored in the global -# arrays/hashes. 
-sub dump_function($$) { - my $prototype = shift; - my $file = shift; - my $noret = 0; - - print_lineno($new_start_line); - - $prototype =~ s/^static +//; - $prototype =~ s/^extern +//; - $prototype =~ s/^asmlinkage +//; - $prototype =~ s/^inline +//; - $prototype =~ s/^__inline__ +//; - $prototype =~ s/^__inline +//; - $prototype =~ s/^__always_inline +//; - $prototype =~ s/^noinline +//; - $prototype =~ s/__init +//; - $prototype =~ s/__init_or_module +//; - $prototype =~ s/__meminit +//; - $prototype =~ s/__must_check +//; - $prototype =~ s/__weak +//; - $prototype =~ s/__sched +//; - $prototype =~ s/__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +//; - my $define = $prototype =~ s/^#\s*define\s+//; #ak added - $prototype =~ s/__attribute__\s*\(\( - (?: - [\w\s]++ # attribute name - (?:\([^)]*+\))? # attribute arguments - \s*+,? # optional comma at the end - )+ - \)\)\s+//x; - - # Strip QEMU specific compiler annotations - $prototype =~ s/QEMU_[A-Z_]+ +//; - - # Yes, this truly is vile. We are looking for: - # 1. Return type (may be nothing if we're looking at a macro) - # 2. Function name - # 3. Function parameters. - # - # All the while we have to watch out for function pointer parameters - # (which IIRC is what the two sections are for), C types (these - # regexps don't even start to express all the possibilities), and - # so on. - # - # If you mess with these regexps, it's a good idea to check that - # the following functions' documentation still comes out right: - # - parport_register_device (function pointer parameters) - # - atomic_set (macro) - # - pci_match_device, __copy_to_user (long return type) - - if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) { - # This is an object-like macro, it has no return type and no parameter - # list. - # Function-like macros are not allowed to have spaces between - # declaration_name and opening parenthesis (notice the \s+). 
- $return_type = $1; - $declaration_name = $2; - $noret = 1; - } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || - $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s+\w+\s+\w+\s*\*+)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/ || - $prototype =~ m/^(\w+\s+\w+\s*\*+\s*\w+\s*\*+\s*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\{]*)\)/) { - $return_type = $1; - $declaration_name = $2; - my $args = $3; - - create_parameterlist($args, ',', $file, $declaration_name); - } else { - print STDERR "${file}:$.: warning: cannot understand function prototype: '$prototype'\n"; - return; - } - - my $prms = join " ", @parameterlist; - check_sections($file, $declaration_name, "function", $sectcheck, $prms); - - # This check emits a lot of warnings at the moment, because many - # functions don't have a 'Return' doc section. So until the number - # of warnings goes sufficiently down, the check is only performed in - # verbose mode. - # TODO: always perform the check. 
- if ($verbose && !$noret) { - check_return_section($file, $declaration_name, $return_type); - } - - # The function parser can be called with a typedef parameter. - # Handle it. - if ($return_type =~ /typedef/) { - output_declaration($declaration_name, - 'function', - {'function' => $declaration_name, - 'typedef' => 1, - 'module' => $modulename, - 'functiontype' => $return_type, - 'parameterlist' => \@parameterlist, - 'parameterdescs' => \%parameterdescs, - 'parametertypes' => \%parametertypes, - 'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'purpose' => $declaration_purpose - }); - } else { - output_declaration($declaration_name, - 'function', - {'function' => $declaration_name, - 'module' => $modulename, - 'functiontype' => $return_type, - 'parameterlist' => \@parameterlist, - 'parameterdescs' => \%parameterdescs, - 'parametertypes' => \%parametertypes, - 'sectionlist' => \@sectionlist, - 'sections' => \%sections, - 'purpose' => $declaration_purpose - }); - } -} - -sub reset_state { - $function = ""; - %parameterdescs = (); - %parametertypes = (); - @parameterlist = (); - %sections = (); - @sectionlist = (); - $sectcheck = ""; - $struct_actual = ""; - $prototype = ""; - - $state = STATE_NORMAL; - $inline_doc_state = STATE_INLINE_NA; -} - -sub tracepoint_munge($) { - my $file = shift; - my $tracepointname = 0; - my $tracepointargs = 0; - - if ($prototype =~ m/TRACE_EVENT\((.*?),/) { - $tracepointname = $1; - } - if ($prototype =~ m/DEFINE_SINGLE_EVENT\((.*?),/) { - $tracepointname = $1; - } - if ($prototype =~ m/DEFINE_EVENT\((.*?),(.*?),/) { - $tracepointname = $2; - } - $tracepointname =~ s/^\s+//; #strip leading whitespace - if ($prototype =~ m/TP_PROTO\((.*?)\)/) { - $tracepointargs = $1; - } - if (($tracepointname eq 0) || ($tracepointargs eq 0)) { - print STDERR "${file}:$.: warning: Unrecognized tracepoint format: \n". 
- "$prototype\n"; - } else { - $prototype = "static inline void trace_$tracepointname($tracepointargs)"; - } -} - -sub syscall_munge() { - my $void = 0; - - $prototype =~ s@[\r\n]+@ @gos; # strip newlines/CR's -## if ($prototype =~ m/SYSCALL_DEFINE0\s*\(\s*(a-zA-Z0-9_)*\s*\)/) { - if ($prototype =~ m/SYSCALL_DEFINE0/) { - $void = 1; -## $prototype = "long sys_$1(void)"; - } - - $prototype =~ s/SYSCALL_DEFINE.*\(/long sys_/; # fix return type & func name - if ($prototype =~ m/long (sys_.*?),/) { - $prototype =~ s/,/\(/; - } elsif ($void) { - $prototype =~ s/\)/\(void\)/; - } - - # now delete all of the odd-number commas in $prototype - # so that arg types & arg names don't have a comma between them - my $count = 0; - my $len = length($prototype); - if ($void) { - $len = 0; # skip the for-loop - } - for (my $ix = 0; $ix < $len; $ix++) { - if (substr($prototype, $ix, 1) eq ',') { - $count++; - if ($count % 2 == 1) { - substr($prototype, $ix, 1) = ' '; - } - } - } -} - -sub process_proto_function($$) { - my $x = shift; - my $file = shift; - - $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line - - if ($x =~ m#\s*/\*\s+MACDOC\s*#io || ($x =~ /^#/ && $x !~ /^#\s*define/)) { - # do nothing - } - elsif ($x =~ /([^\{]*)/) { - $prototype .= $1; - } - - if (($x =~ /\{/) || ($x =~ /\#\s*define/) || ($x =~ /;/)) { - $prototype =~ s@/\*.*?\*/@@gos; # strip comments. - $prototype =~ s@[\r\n]+@ @gos; # strip newlines/cr's. 
- $prototype =~ s@^\s+@@gos; # strip leading spaces - - # Handle prototypes for function pointers like: - # int (*pcs_config)(struct foo) - $prototype =~ s@^(\S+\s+)\(\s*\*(\S+)\)@$1$2@gos; - - if ($prototype =~ /SYSCALL_DEFINE/) { - syscall_munge(); - } - if ($prototype =~ /TRACE_EVENT/ || $prototype =~ /DEFINE_EVENT/ || - $prototype =~ /DEFINE_SINGLE_EVENT/) - { - tracepoint_munge($file); - } - dump_function($prototype, $file); - reset_state(); - } -} - -sub process_proto_type($$) { - my $x = shift; - my $file = shift; - - $x =~ s@[\r\n]+@ @gos; # strip newlines/cr's. - $x =~ s@^\s+@@gos; # strip leading spaces - $x =~ s@\s+$@@gos; # strip trailing spaces - $x =~ s@\/\/.*$@@gos; # strip C99-style comments to end of line - - if ($x =~ /^#/) { - # To distinguish preprocessor directive from regular declaration later. - $x .= ";"; - } - - while (1) { - if ( $x =~ /([^\{\};]*)([\{\};])(.*)/ ) { - if( length $prototype ) { - $prototype .= " " - } - $prototype .= $1 . $2; - ($2 eq '{') && $brcount++; - ($2 eq '}') && $brcount--; - if (($2 eq ';') && ($brcount == 0)) { - dump_declaration($prototype, $file); - reset_state(); - last; - } - $x = $3; - } else { - $prototype .= $x; - last; - } - } -} - - -sub map_filename($) { - my $file; - my ($orig_file) = @_; - - if (defined($ENV{'SRCTREE'})) { - $file = "$ENV{'SRCTREE'}" . "/" . $orig_file; - } else { - $file = $orig_file; - } - - if (defined($source_map{$file})) { - $file = $source_map{$file}; - } - - return $file; -} - -sub process_export_file($) { - my ($orig_file) = @_; - my $file = map_filename($orig_file); - - if (!open(IN,"<$file")) { - print STDERR "Error: Cannot open file $file\n"; - ++$errors; - return; - } - - while () { - if (/$export_symbol/) { - next if (defined($nosymbol_table{$2})); - $function_table{$2} = 1; - } - } - - close(IN); -} - -# -# Parsers for the various processing states. -# -# STATE_NORMAL: looking for the /** to begin everything. 
-# -sub process_normal() { - if (/$doc_start/o) { - $state = STATE_NAME; # next line is always the function name - $in_doc_sect = 0; - $declaration_start_line = $. + 1; - } -} - -# -# STATE_NAME: Looking for the "name - description" line -# -sub process_name($$) { - my $file = shift; - my $identifier; - my $descr; - - if (/$doc_block/o) { - $state = STATE_DOCBLOCK; - $contents = ""; - $new_start_line = $.; - - if ( $1 eq "" ) { - $section = $section_intro; - } else { - $section = $1; - } - } - elsif (/$doc_decl/o) { - $identifier = $1; - if (/\s*([\w\s]+?)(\s*-|:)/) { - $identifier = $1; - } - - $state = STATE_BODY; - # if there's no @param blocks need to set up default section - # here - $contents = ""; - $section = $section_default; - $new_start_line = $. + 1; - if (/[-:](.*)/) { - # strip leading/trailing/multiple spaces - $descr= $1; - $descr =~ s/^\s*//; - $descr =~ s/\s*$//; - $descr =~ s/\s+/ /g; - $declaration_purpose = $descr; - $state = STATE_BODY_MAYBE; - } else { - $declaration_purpose = ""; - } - - if (($declaration_purpose eq "") && $verbose) { - print STDERR "${file}:$.: warning: missing initial short description on line:\n"; - print STDERR $_; - ++$warnings; - } - - if ($identifier =~ m/^struct\b/) { - $decl_type = 'struct'; - } elsif ($identifier =~ m/^union\b/) { - $decl_type = 'union'; - } elsif ($identifier =~ m/^enum\b/) { - $decl_type = 'enum'; - } elsif ($identifier =~ m/^typedef\b/) { - $decl_type = 'typedef'; - } else { - $decl_type = 'function'; - } - - if ($verbose) { - print STDERR "${file}:$.: info: Scanning doc for $identifier\n"; - } - } else { - print STDERR "${file}:$.: warning: Cannot understand $_ on line $.", - " - I thought it was a doc line\n"; - ++$warnings; - $state = STATE_NORMAL; - } -} - - -# -# STATE_BODY and STATE_BODY_MAYBE: the bulk of a kerneldoc comment. 
-# -sub process_body($$) { - my $file = shift; - - # Until all named variable macro parameters are - # documented using the bare name (`x`) rather than with - # dots (`x...`), strip the dots: - if ($section =~ /\w\.\.\.$/) { - $section =~ s/\.\.\.$//; - - if ($verbose) { - print STDERR "${file}:$.: warning: Variable macro arguments should be documented without dots\n"; - ++$warnings; - } - } - - if ($state == STATE_BODY_WITH_BLANK_LINE && /^\s*\*\s?\S/) { - dump_section($file, $section, $contents); - $section = $section_default; - $new_start_line = $.; - $contents = ""; - } - - if (/$doc_sect/i) { # case insensitive for supported section names - $newsection = $1; - $newcontents = $2; - - # map the supported section names to the canonical names - if ($newsection =~ m/^description$/i) { - $newsection = $section_default; - } elsif ($newsection =~ m/^context$/i) { - $newsection = $section_context; - } elsif ($newsection =~ m/^returns?$/i) { - $newsection = $section_return; - } elsif ($newsection =~ m/^\@return$/) { - # special: @return is a section, not a param description - $newsection = $section_return; - } - - if (($contents ne "") && ($contents ne "\n")) { - if (!$in_doc_sect && $verbose) { - print STDERR "${file}:$.: warning: contents before sections\n"; - ++$warnings; - } - dump_section($file, $section, $contents); - $section = $section_default; - } - - $in_doc_sect = 1; - $state = STATE_BODY; - $contents = $newcontents; - $new_start_line = $.; - while (substr($contents, 0, 1) eq " ") { - $contents = substr($contents, 1); - } - if ($contents ne "") { - $contents .= "\n"; - } - $section = $newsection; - $leading_space = undef; - } elsif (/$doc_end/) { - if (($contents ne "") && ($contents ne "\n")) { - dump_section($file, $section, $contents); - $section = $section_default; - $contents = ""; - } - # look for doc_com + + doc_end: - if ($_ =~ m'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') { - print STDERR "${file}:$.: warning: suspicious ending line: $_"; - ++$warnings; - } - - 
$prototype = ""; - $state = STATE_PROTO; - $brcount = 0; - $new_start_line = $. + 1; - } elsif (/$doc_content/) { - if ($1 eq "") { - if ($section eq $section_context) { - dump_section($file, $section, $contents); - $section = $section_default; - $contents = ""; - $new_start_line = $.; - $state = STATE_BODY; - } else { - if ($section ne $section_default) { - $state = STATE_BODY_WITH_BLANK_LINE; - } else { - $state = STATE_BODY; - } - $contents .= "\n"; - } - } elsif ($state == STATE_BODY_MAYBE) { - # Continued declaration purpose - chomp($declaration_purpose); - $declaration_purpose .= " " . $1; - $declaration_purpose =~ s/\s+/ /g; - } else { - my $cont = $1; - if ($section =~ m/^@/ || $section eq $section_context) { - if (!defined $leading_space) { - if ($cont =~ m/^(\s+)/) { - $leading_space = $1; - } else { - $leading_space = ""; - } - } - $cont =~ s/^$leading_space//; - } - $contents .= $cont . "\n"; - } - } else { - # i dont know - bad line? ignore. - print STDERR "${file}:$.: warning: bad line: $_"; - ++$warnings; - } -} - - -# -# STATE_PROTO: reading a function/whatever prototype. -# -sub process_proto($$) { - my $file = shift; - - if (/$doc_inline_oneline/) { - $section = $1; - $contents = $2; - if ($contents ne "") { - $contents .= "\n"; - dump_section($file, $section, $contents); - $section = $section_default; - $contents = ""; - } - } elsif (/$doc_inline_start/) { - $state = STATE_INLINE; - $inline_doc_state = STATE_INLINE_NAME; - } elsif ($decl_type eq 'function') { - process_proto_function($_, $file); - } else { - process_proto_type($_, $file); - } -} - -# -# STATE_DOCBLOCK: within a DOC: block. 
-# -sub process_docblock($$) { - my $file = shift; - - if (/$doc_end/) { - dump_doc_section($file, $section, $contents); - $section = $section_default; - $contents = ""; - $function = ""; - %parameterdescs = (); - %parametertypes = (); - @parameterlist = (); - %sections = (); - @sectionlist = (); - $prototype = ""; - $state = STATE_NORMAL; - } elsif (/$doc_content/) { - if ( $1 eq "" ) { - $contents .= $blankline; - } else { - $contents .= $1 . "\n"; - } - } -} - -# -# STATE_INLINE: docbook comments within a prototype. -# -sub process_inline($$) { - my $file = shift; - - # First line (state 1) needs to be a @parameter - if ($inline_doc_state == STATE_INLINE_NAME && /$doc_inline_sect/o) { - $section = $1; - $contents = $2; - $new_start_line = $.; - if ($contents ne "") { - while (substr($contents, 0, 1) eq " ") { - $contents = substr($contents, 1); - } - $contents .= "\n"; - } - $inline_doc_state = STATE_INLINE_TEXT; - # Documentation block end */ - } elsif (/$doc_inline_end/) { - if (($contents ne "") && ($contents ne "\n")) { - dump_section($file, $section, $contents); - $section = $section_default; - $contents = ""; - } - $state = STATE_PROTO; - $inline_doc_state = STATE_INLINE_NA; - # Regular text - } elsif (/$doc_content/) { - if ($inline_doc_state == STATE_INLINE_TEXT) { - $contents .= $1 . "\n"; - # nuke leading blank lines - if ($contents =~ /^\s*$/) { - $contents = ""; - } - } elsif ($inline_doc_state == STATE_INLINE_NAME) { - $inline_doc_state = STATE_INLINE_ERROR; - print STDERR "${file}:$.: warning: "; - print STDERR "Incorrect use of kernel-doc format: $_"; - ++$warnings; - } - } -} - - -sub process_file($) { - my $file; - my $initial_section_counter = $section_counter; - my ($orig_file) = @_; - - $file = map_filename($orig_file); - - if (!open(IN_FILE,"<$file")) { - print STDERR "Error: Cannot open file $file\n"; - ++$errors; - return; - } - - $. 
= 1; - - $section_counter = 0; - while () { - while (s/\\\s*$//) { - $_ .= ; - } - # Replace tabs by spaces - while ($_ =~ s/\t+/' ' x (length($&) * 8 - length($`) % 8)/e) {}; - # Hand this line to the appropriate state handler - if ($state == STATE_NORMAL) { - process_normal(); - } elsif ($state == STATE_NAME) { - process_name($file, $_); - } elsif ($state == STATE_BODY || $state == STATE_BODY_MAYBE || - $state == STATE_BODY_WITH_BLANK_LINE) { - process_body($file, $_); - } elsif ($state == STATE_INLINE) { # scanning for inline parameters - process_inline($file, $_); - } elsif ($state == STATE_PROTO) { - process_proto($file, $_); - } elsif ($state == STATE_DOCBLOCK) { - process_docblock($file, $_); - } - } - - # Make sure we got something interesting. - if ($initial_section_counter == $section_counter && $ - output_mode ne "none") { - if ($output_selection == OUTPUT_INCLUDE) { - print STDERR "${file}:1: warning: '$_' not found\n" - for keys %function_table; - } - else { - print STDERR "${file}:1: warning: no structured comments found\n"; - } - } - close IN_FILE; -} - - -if ($output_mode eq "rst") { - get_sphinx_version() if (!$sphinx_major); -} - -$kernelversion = get_kernel_version(); - -# generate a sequence of code that will splice in highlighting information -# using the s// operator. -for (my $k = 0; $k < @highlights; $k++) { - my $pattern = $highlights[$k][0]; - my $result = $highlights[$k][1]; -# print STDERR "scanning pattern:$pattern, highlight:($result)\n"; - $dohighlight .= "\$contents =~ s:$pattern:$result:gs;\n"; -} - -# Read the file that maps relative names to absolute names for -# separate source and object directories and for shadow trees. 
-if (open(SOURCE_MAP, "<.tmp_filelist.txt")) { - my ($relname, $absname); - while() { - chop(); - ($relname, $absname) = (split())[0..1]; - $relname =~ s:^/+::; - $source_map{$relname} = $absname; - } - close(SOURCE_MAP); -} - -if ($output_selection == OUTPUT_EXPORTED || - $output_selection == OUTPUT_INTERNAL) { - - push(@export_file_list, @ARGV); - - foreach (@export_file_list) { - chomp; - process_export_file($_); - } -} - -foreach (@ARGV) { - chomp; - process_file($_); -} -if ($verbose && $errors) { - print STDERR "$errors errors\n"; -} -if ($verbose && $warnings) { - print STDERR "$warnings warnings\n"; -} - -if ($Werror && $warnings) { - print STDERR "$warnings warnings as Errors\n"; - exit($warnings); -} else { - exit($output_mode eq "none" ? 0 : $errors) -} diff --git a/scripts/kernel-doc.py b/scripts/kernel-doc.py new file mode 100755 index 0000000000000..fc3d46ef519f8 --- /dev/null +++ b/scripts/kernel-doc.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2025: Mauro Carvalho Chehab . +# +# pylint: disable=C0103,R0915 +# +# Converted from the kernel-doc script originally written in Perl +# under GPLv2, copyrighted since 1998 by the following authors: +# +# Aditya Srivastava +# Akira Yokosawa +# Alexander A. 
Klimov +# Alexander Lobakin +# André Almeida +# Andy Shevchenko +# Anna-Maria Behnsen +# Armin Kuster +# Bart Van Assche +# Ben Hutchings +# Borislav Petkov +# Chen-Yu Tsai +# Coco Li +# Conchúr Navid +# Daniel Santos +# Danilo Cesar Lemes de Paula +# Dan Luedtke +# Donald Hunter +# Gabriel Krisman Bertazi +# Greg Kroah-Hartman +# Harvey Harrison +# Horia Geanta +# Ilya Dryomov +# Jakub Kicinski +# Jani Nikula +# Jason Baron +# Jason Gunthorpe +# Jérémy Bobbio +# Johannes Berg +# Johannes Weiner +# Jonathan Cameron +# Jonathan Corbet +# Jonathan Neuschäfer +# Kamil Rytarowski +# Kees Cook +# Laurent Pinchart +# Levin, Alexander (Sasha Levin) +# Linus Torvalds +# Lucas De Marchi +# Mark Rutland +# Markus Heiser +# Martin Waitz +# Masahiro Yamada +# Matthew Wilcox +# Mauro Carvalho Chehab +# Michal Wajdeczko +# Michael Zucchi +# Mike Rapoport +# Niklas Söderlund +# Nishanth Menon +# Paolo Bonzini +# Pavan Kumar Linga +# Pavel Pisa +# Peter Maydell +# Pierre-Louis Bossart +# Randy Dunlap +# Richard Kennedy +# Rich Walker +# Rolf Eike Beer +# Sakari Ailus +# Silvio Fricke +# Simon Huggins +# Tim Waugh +# Tomasz Warniełło +# Utkarsh Tripathi +# valdis.kletnieks@vt.edu +# Vegard Nossum +# Will Deacon +# Yacine Belkadi +# Yujie Liu + +""" +kernel_doc +========== + +Print formatted kernel documentation to stdout + +Read C language source or header FILEs, extract embedded +documentation comments, and print formatted documentation +to standard output. + +The documentation comments are identified by the "/**" +opening comment mark. + +See Documentation/doc-guide/kernel-doc.rst for the +documentation comment syntax. 
+""" + +import argparse +import logging +import os +import sys + +# Import Python modules + +LIB_DIR = "lib/kdoc" +SRC_DIR = os.path.dirname(os.path.realpath(__file__)) + +sys.path.insert(0, os.path.join(SRC_DIR, LIB_DIR)) + +from kdoc_files import KernelFiles # pylint: disable=C0413 +from kdoc_output import RestFormat, ManFormat # pylint: disable=C0413 + +DESC = """ +Read C language source or header FILEs, extract embedded documentation comments, +and print formatted documentation to standard output. + +The documentation comments are identified by the "/**" opening comment mark. + +See Documentation/doc-guide/kernel-doc.rst for the documentation comment syntax. +""" + +EXPORT_FILE_DESC = """ +Specify an additional FILE in which to look for EXPORT_SYMBOL information. + +May be used multiple times. +""" + +EXPORT_DESC = """ +Only output documentation for the symbols that have been +exported using EXPORT_SYMBOL() and related macros in any input +FILE or -export-file FILE. +""" + +INTERNAL_DESC = """ +Only output documentation for the symbols that have NOT been +exported using EXPORT_SYMBOL() and related macros in any input +FILE or -export-file FILE. +""" + +FUNCTION_DESC = """ +Only output documentation for the given function or DOC: section +title. All other functions and DOC: sections are ignored. + +May be used multiple times. +""" + +NOSYMBOL_DESC = """ +Exclude the specified symbol from the output documentation. + +May be used multiple times. +""" + +FILES_DESC = """ +Header and C source files to be parsed. +""" + +WARN_CONTENTS_BEFORE_SECTIONS_DESC = """ +Warns if there are contents before sections (deprecated). + +This option is kept just for backward-compatibility, but it does nothing, +neither here nor at the original Perl script. 
+""" + + +class MsgFormatter(logging.Formatter): + """Helper class to format warnings on a similar way to kernel-doc.pl""" + + def format(self, record): + record.levelname = record.levelname.capitalize() + return logging.Formatter.format(self, record) + +def main(): + """Main program""" + + parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter, + description=DESC) + + # Normal arguments + + parser.add_argument("-v", "-verbose", "--verbose", action="store_true", + help="Verbose output, more warnings and other information.") + + parser.add_argument("-d", "-debug", "--debug", action="store_true", + help="Enable debug messages") + + parser.add_argument("-M", "-modulename", "--modulename", + default="Kernel API", + help="Allow setting a module name at the output.") + + parser.add_argument("-l", "-enable-lineno", "--enable_lineno", + action="store_true", + help="Enable line number output (only in ReST mode)") + + # Arguments to control the warning behavior + + parser.add_argument("-Wreturn", "--wreturn", action="store_true", + help="Warns about the lack of a return markup on functions.") + + parser.add_argument("-Wshort-desc", "-Wshort-description", "--wshort-desc", + action="store_true", + help="Warns if initial short description is missing") + + parser.add_argument("-Wcontents-before-sections", + "--wcontents-before-sections", action="store_true", + help=WARN_CONTENTS_BEFORE_SECTIONS_DESC) + + parser.add_argument("-Wall", "--wall", action="store_true", + help="Enable all types of warnings") + + parser.add_argument("-Werror", "--werror", action="store_true", + help="Treat warnings as errors.") + + parser.add_argument("-export-file", "--export-file", action='append', + help=EXPORT_FILE_DESC) + + # Output format mutually-exclusive group + + out_group = parser.add_argument_group("Output format selection (mutually exclusive)") + + out_fmt = out_group.add_mutually_exclusive_group() + + out_fmt.add_argument("-m", "-man", "--man", action="store_true", 
+ help="Output troff manual page format.") + out_fmt.add_argument("-r", "-rst", "--rst", action="store_true", + help="Output reStructuredText format (default).") + out_fmt.add_argument("-N", "-none", "--none", action="store_true", + help="Do not output documentation, only warnings.") + + # Output selection mutually-exclusive group + + sel_group = parser.add_argument_group("Output selection (mutually exclusive)") + sel_mut = sel_group.add_mutually_exclusive_group() + + sel_mut.add_argument("-e", "-export", "--export", action='store_true', + help=EXPORT_DESC) + + sel_mut.add_argument("-i", "-internal", "--internal", action='store_true', + help=INTERNAL_DESC) + + sel_mut.add_argument("-s", "-function", "--symbol", action='append', + help=FUNCTION_DESC) + + # Those are valid for all 3 types of filter + parser.add_argument("-n", "-nosymbol", "--nosymbol", action='append', + help=NOSYMBOL_DESC) + + parser.add_argument("-D", "-no-doc-sections", "--no-doc-sections", + action='store_true', help="Don't outputt DOC sections") + + parser.add_argument("files", metavar="FILE", + nargs="+", help=FILES_DESC) + + args = parser.parse_args() + + if args.wall: + args.wreturn = True + args.wshort_desc = True + args.wcontents_before_sections = True + + logger = logging.getLogger() + + if not args.debug: + logger.setLevel(logging.INFO) + else: + logger.setLevel(logging.DEBUG) + + formatter = MsgFormatter('%(levelname)s: %(message)s') + + handler = logging.StreamHandler() + handler.setFormatter(formatter) + + logger.addHandler(handler) + + python_ver = sys.version_info[:2] + if python_ver < (3,6): + logger.warning("Python 3.6 or later is required by kernel-doc") + + # Return 0 here to avoid breaking compilation + sys.exit(0) + + if python_ver < (3,7): + logger.warning("Python 3.7 or later is required for correct results") + + if args.man: + out_style = ManFormat(modulename=args.modulename) + elif args.none: + out_style = None + else: + out_style = RestFormat() + + kfiles = 
KernelFiles(verbose=args.verbose, + out_style=out_style, werror=args.werror, + wreturn=args.wreturn, wshort_desc=args.wshort_desc, + wcontents_before_sections=args.wcontents_before_sections) + + kfiles.parse(args.files, export_file=args.export_file) + + for t in kfiles.msg(enable_lineno=args.enable_lineno, export=args.export, + internal=args.internal, symbol=args.symbol, + nosymbol=args.nosymbol, export_file=args.export_file, + no_doc_sections=args.no_doc_sections): + msg = t[1] + if msg: + print(msg) + + error_count = kfiles.errors + if not error_count: + sys.exit(0) + + if args.werror: + print(f"{error_count} warnings as errors") + sys.exit(error_count) + + if args.verbose: + print(f"{error_count} errors") + + if args.none: + sys.exit(0) + + sys.exit(error_count) + + +# Call main method +if __name__ == "__main__": + main() diff --git a/scripts/lib/kdoc/kdoc_files.py b/scripts/lib/kdoc/kdoc_files.py new file mode 100644 index 0000000000000..9e09b45b02faa --- /dev/null +++ b/scripts/lib/kdoc/kdoc_files.py @@ -0,0 +1,291 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2025: Mauro Carvalho Chehab . +# +# pylint: disable=R0903,R0913,R0914,R0917 + +""" +Parse lernel-doc tags on multiple kernel source files. +""" + +import argparse +import logging +import os +import re + +from kdoc_parser import KernelDoc +from kdoc_output import OutputFormat + + +class GlobSourceFiles: + """ + Parse C source code file names and directories via an Interactor. + """ + + def __init__(self, srctree=None, valid_extensions=None): + """ + Initialize valid extensions with a tuple. + + If not defined, assume default C extensions (.c and .h) + + It would be possible to use python's glob function, but it is + very slow, and it is not interactive. So, it would wait to read all + directories before actually do something. + + So, let's use our own implementation. 
+ """ + + if not valid_extensions: + self.extensions = (".c", ".h") + else: + self.extensions = valid_extensions + + self.srctree = srctree + + def _parse_dir(self, dirname): + """Internal function to parse files recursively""" + + with os.scandir(dirname) as obj: + for entry in obj: + name = os.path.join(dirname, entry.name) + + if entry.is_dir(): + yield from self._parse_dir(name) + + if not entry.is_file(): + continue + + basename = os.path.basename(name) + + if not basename.endswith(self.extensions): + continue + + yield name + + def parse_files(self, file_list, file_not_found_cb): + """ + Define an interator to parse all source files from file_list, + handling directories if any + """ + + if not file_list: + return + + for fname in file_list: + if self.srctree: + f = os.path.join(self.srctree, fname) + else: + f = fname + + if os.path.isdir(f): + yield from self._parse_dir(f) + elif os.path.isfile(f): + yield f + elif file_not_found_cb: + file_not_found_cb(fname) + + +class KernelFiles(): + """ + Parse kernel-doc tags on multiple kernel source files. + + There are two type of parsers defined here: + - self.parse_file(): parses both kernel-doc markups and + EXPORT_SYMBOL* macros; + - self.process_export_file(): parses only EXPORT_SYMBOL* macros. + """ + + def warning(self, msg): + """Ancillary routine to output a warning and increment error count""" + + self.config.log.warning(msg) + self.errors += 1 + + def error(self, msg): + """Ancillary routine to output an error and increment error count""" + + self.config.log.error(msg) + self.errors += 1 + + def parse_file(self, fname): + """ + Parse a single Kernel source. 
+ """ + + # Prevent parsing the same file twice if results are cached + if fname in self.files: + return + + doc = KernelDoc(self.config, fname) + export_table, entries = doc.parse_kdoc() + + self.export_table[fname] = export_table + + self.files.add(fname) + self.export_files.add(fname) # parse_kdoc() already check exports + + self.results[fname] = entries + + def process_export_file(self, fname): + """ + Parses EXPORT_SYMBOL* macros from a single Kernel source file. + """ + + # Prevent parsing the same file twice if results are cached + if fname in self.export_files: + return + + doc = KernelDoc(self.config, fname) + export_table = doc.parse_export() + + if not export_table: + self.error(f"Error: Cannot check EXPORT_SYMBOL* on {fname}") + export_table = set() + + self.export_table[fname] = export_table + self.export_files.add(fname) + + def file_not_found_cb(self, fname): + """ + Callback to warn if a file was not found. + """ + + self.error(f"Cannot find file {fname}") + + def __init__(self, verbose=False, out_style=None, + werror=False, wreturn=False, wshort_desc=False, + wcontents_before_sections=False, + logger=None): + """ + Initialize startup variables and parse all files + """ + + if not verbose: + verbose = bool(os.environ.get("KBUILD_VERBOSE", 0)) + + if out_style is None: + out_style = OutputFormat() + + if not werror: + kcflags = os.environ.get("KCFLAGS", None) + if kcflags: + match = re.search(r"(\s|^)-Werror(\s|$)/", kcflags) + if match: + werror = True + + # reading this variable is for backwards compat just in case + # someone was calling it with the variable from outside the + # kernel's build system + kdoc_werror = os.environ.get("KDOC_WERROR", None) + if kdoc_werror: + werror = kdoc_werror + + # Some variables are global to the parser logic as a whole as they are + # used to send control configuration to KernelDoc class. As such, + # those variables are read-only inside the KernelDoc. 
+ self.config = argparse.Namespace + + self.config.verbose = verbose + self.config.werror = werror + self.config.wreturn = wreturn + self.config.wshort_desc = wshort_desc + self.config.wcontents_before_sections = wcontents_before_sections + + if not logger: + self.config.log = logging.getLogger("kernel-doc") + else: + self.config.log = logger + + self.config.warning = self.warning + + self.config.src_tree = os.environ.get("SRCTREE", None) + + # Initialize variables that are internal to KernelFiles + + self.out_style = out_style + + self.errors = 0 + self.results = {} + + self.files = set() + self.export_files = set() + self.export_table = {} + + def parse(self, file_list, export_file=None): + """ + Parse all files + """ + + glob = GlobSourceFiles(srctree=self.config.src_tree) + + for fname in glob.parse_files(file_list, self.file_not_found_cb): + self.parse_file(fname) + + for fname in glob.parse_files(export_file, self.file_not_found_cb): + self.process_export_file(fname) + + def out_msg(self, fname, name, arg): + """ + Return output messages from a file name using the output style + filtering. + + If output type was not handled by the syler, return None. + """ + + # NOTE: we can add rules here to filter out unwanted parts, + # although OutputFormat.msg already does that. 
+ + return self.out_style.msg(fname, name, arg) + + def msg(self, enable_lineno=False, export=False, internal=False, + symbol=None, nosymbol=None, no_doc_sections=False, + filenames=None, export_file=None): + """ + Interacts over the kernel-doc results and output messages, + returning kernel-doc markups on each interaction + """ + + self.out_style.set_config(self.config) + + if not filenames: + filenames = sorted(self.results.keys()) + + glob = GlobSourceFiles(srctree=self.config.src_tree) + + for fname in filenames: + function_table = set() + + if internal or export: + if not export_file: + export_file = [fname] + + for f in glob.parse_files(export_file, self.file_not_found_cb): + function_table |= self.export_table[f] + + if symbol: + for s in symbol: + function_table.add(s) + + self.out_style.set_filter(export, internal, symbol, nosymbol, + function_table, enable_lineno, + no_doc_sections) + + msg = "" + if fname not in self.results: + self.config.log.warning("No kernel-doc for file %s", fname) + continue + + for arg in self.results[fname]: + m = self.out_msg(fname, arg.name, arg) + + if m is None: + ln = arg.get("ln", 0) + dtype = arg.get('type', "") + + self.config.log.warning("%s:%d Can't handle %s", + fname, ln, dtype) + else: + msg += m + + if msg: + yield fname, msg diff --git a/scripts/lib/kdoc/kdoc_item.py b/scripts/lib/kdoc/kdoc_item.py new file mode 100644 index 0000000000000..b3b225764550d --- /dev/null +++ b/scripts/lib/kdoc/kdoc_item.py @@ -0,0 +1,42 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# A class that will, eventually, encapsulate all of the parsed data that we +# then pass into the output modules. 
+#
+
+class KdocItem:
+    def __init__(self, name, type, start_line, **other_stuff):
+        self.name = name
+        self.type = type
+        self.declaration_start_line = start_line
+        self.sections = {}
+        # Keyed by section name; written by set_sections() and read by the
+        # output classes as args.section_start_lines.  (Was misspelled
+        # "sections_start_lines" here, leaving a dead attribute that never
+        # matched the one set_sections() assigns.)
+        self.section_start_lines = {}
+        self.parameterlist = []
+        self.parameterdesc_start_lines = []
+        self.parameterdescs = {}
+        self.parametertypes = {}
+        #
+        # Just save everything else into our own dict so that the output
+        # side can grab it directly as before. As we move things into more
+        # structured data, this will, hopefully, fade away.
+        #
+        self.other_stuff = other_stuff
+
+    def get(self, key, default=None):
+        return self.other_stuff.get(key, default)
+
+    def __getitem__(self, key):
+        return self.get(key)
+
+    #
+    # Tracking of section and parameter information.
+    #
+    def set_sections(self, sections, start_lines):
+        self.sections = sections
+        self.section_start_lines = start_lines
+
+    def set_params(self, names, descs, types, starts):
+        self.parameterlist = names
+        self.parameterdescs = descs
+        self.parametertypes = types
+        self.parameterdesc_start_lines = starts
diff --git a/scripts/lib/kdoc/kdoc_output.py b/scripts/lib/kdoc/kdoc_output.py
new file mode 100644
index 0000000000000..39fa872dfca79
--- /dev/null
+++ b/scripts/lib/kdoc/kdoc_output.py
@@ -0,0 +1,749 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+# Copyright(c) 2025: Mauro Carvalho Chehab .
+#
+# pylint: disable=C0301,R0902,R0911,R0912,R0913,R0914,R0915,R0917
+
+"""
+Implement output filters to print kernel-doc documentation.
+
+The implementation uses a virtual base class (OutputFormat) which
+contains a dispatches to virtual methods, and some code to filter
+out output messages.
+
+The actual implementation is done on one separate class per each type
+of output. Currently, there are output classes for ReST and man/troff.
+""" + +import os +import re +from datetime import datetime + +from kdoc_parser import KernelDoc, type_param +from kdoc_re import KernRe + + +function_pointer = KernRe(r"([^\(]*\(\*)\s*\)\s*\(([^\)]*)\)", cache=False) + +# match expressions used to find embedded type information +type_constant = KernRe(r"\b``([^\`]+)``\b", cache=False) +type_constant2 = KernRe(r"\%([-_*\w]+)", cache=False) +type_func = KernRe(r"(\w+)\(\)", cache=False) +type_param_ref = KernRe(r"([\!~\*]?)\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False) + +# Special RST handling for func ptr params +type_fp_param = KernRe(r"\@(\w+)\(\)", cache=False) + +# Special RST handling for structs with func ptr params +type_fp_param2 = KernRe(r"\@(\w+->\S+)\(\)", cache=False) + +type_env = KernRe(r"(\$\w+)", cache=False) +type_enum = KernRe(r"#(enum\s*([_\w]+))", cache=False) +type_struct = KernRe(r"#(struct\s*([_\w]+))", cache=False) +type_typedef = KernRe(r"#(([A-Z][_\w]*))", cache=False) +type_union = KernRe(r"#(union\s*([_\w]+))", cache=False) +type_member = KernRe(r"#([_\w]+)(\.|->)([_\w]+)", cache=False) +type_fallback = KernRe(r"((?!))", cache=False) # this never matches +type_member_func = type_member + KernRe(r"\(\)", cache=False) + + +class OutputFormat: + """ + Base class for OutputFormat. If used as-is, it means that only + warnings will be displayed. + """ + + # output mode. 
+ OUTPUT_ALL = 0 # output all symbols and doc sections + OUTPUT_INCLUDE = 1 # output only specified symbols + OUTPUT_EXPORTED = 2 # output exported symbols + OUTPUT_INTERNAL = 3 # output non-exported symbols + + # Virtual member to be overriden at the inherited classes + highlights = [] + + def __init__(self): + """Declare internal vars and set mode to OUTPUT_ALL""" + + self.out_mode = self.OUTPUT_ALL + self.enable_lineno = None + self.nosymbol = {} + self.symbol = None + self.function_table = None + self.config = None + self.no_doc_sections = False + + self.data = "" + + def set_config(self, config): + """ + Setup global config variables used by both parser and output. + """ + + self.config = config + + def set_filter(self, export, internal, symbol, nosymbol, function_table, + enable_lineno, no_doc_sections): + """ + Initialize filter variables according with the requested mode. + + Only one choice is valid between export, internal and symbol. + + The nosymbol filter can be used on all modes. + """ + + self.enable_lineno = enable_lineno + self.no_doc_sections = no_doc_sections + self.function_table = function_table + + if symbol: + self.out_mode = self.OUTPUT_INCLUDE + elif export: + self.out_mode = self.OUTPUT_EXPORTED + elif internal: + self.out_mode = self.OUTPUT_INTERNAL + else: + self.out_mode = self.OUTPUT_ALL + + if nosymbol: + self.nosymbol = set(nosymbol) + + + def highlight_block(self, block): + """ + Apply the RST highlights to a sub-block of text. + """ + + for r, sub in self.highlights: + block = r.sub(sub, block) + + return block + + def out_warnings(self, args): + """ + Output warnings for identifiers that will be displayed. 
+ """ + + for log_msg in args.warnings: + self.config.warning(log_msg) + + def check_doc(self, name, args): + """Check if DOC should be output""" + + if self.no_doc_sections: + return False + + if name in self.nosymbol: + return False + + if self.out_mode == self.OUTPUT_ALL: + self.out_warnings(args) + return True + + if self.out_mode == self.OUTPUT_INCLUDE: + if name in self.function_table: + self.out_warnings(args) + return True + + return False + + def check_declaration(self, dtype, name, args): + """ + Checks if a declaration should be output or not based on the + filtering criteria. + """ + + if name in self.nosymbol: + return False + + if self.out_mode == self.OUTPUT_ALL: + self.out_warnings(args) + return True + + if self.out_mode in [self.OUTPUT_INCLUDE, self.OUTPUT_EXPORTED]: + if name in self.function_table: + return True + + if self.out_mode == self.OUTPUT_INTERNAL: + if dtype != "function": + self.out_warnings(args) + return True + + if name not in self.function_table: + self.out_warnings(args) + return True + + return False + + def msg(self, fname, name, args): + """ + Handles a single entry from kernel-doc parser + """ + + self.data = "" + + dtype = args.type + + if dtype == "doc": + self.out_doc(fname, name, args) + return self.data + + if not self.check_declaration(dtype, name, args): + return self.data + + if dtype == "function": + self.out_function(fname, name, args) + return self.data + + if dtype == "enum": + self.out_enum(fname, name, args) + return self.data + + if dtype == "typedef": + self.out_typedef(fname, name, args) + return self.data + + if dtype in ["struct", "union"]: + self.out_struct(fname, name, args) + return self.data + + # Warn if some type requires an output logic + self.config.log.warning("doesn't now how to output '%s' block", + dtype) + + return None + + # Virtual methods to be overridden by inherited classes + # At the base class, those do nothing. 
+ def out_doc(self, fname, name, args): + """Outputs a DOC block""" + + def out_function(self, fname, name, args): + """Outputs a function""" + + def out_enum(self, fname, name, args): + """Outputs an enum""" + + def out_typedef(self, fname, name, args): + """Outputs a typedef""" + + def out_struct(self, fname, name, args): + """Outputs a struct""" + + +class RestFormat(OutputFormat): + """Consts and functions used by ReST output""" + + highlights = [ + (type_constant, r"``\1``"), + (type_constant2, r"``\1``"), + + # Note: need to escape () to avoid func matching later + (type_member_func, r":c:type:`\1\2\3\\(\\) <\1>`"), + (type_member, r":c:type:`\1\2\3 <\1>`"), + (type_fp_param, r"**\1\\(\\)**"), + (type_fp_param2, r"**\1\\(\\)**"), + (type_func, r"\1()"), + (type_enum, r":c:type:`\1 <\2>`"), + (type_struct, r":c:type:`\1 <\2>`"), + (type_typedef, r":c:type:`\1 <\2>`"), + (type_union, r":c:type:`\1 <\2>`"), + + # in rst this can refer to any type + (type_fallback, r":c:type:`\1`"), + (type_param_ref, r"**\1\2**") + ] + blankline = "\n" + + sphinx_literal = KernRe(r'^[^.].*::$', cache=False) + sphinx_cblock = KernRe(r'^\.\.\ +code-block::', cache=False) + + def __init__(self): + """ + Creates class variables. + + Not really mandatory, but it is a good coding style and makes + pylint happy. + """ + + super().__init__() + self.lineprefix = "" + + def print_lineno(self, ln): + """Outputs a line number""" + + if self.enable_lineno and ln is not None: + ln += 1 + self.data += f".. LINENO {ln}\n" + + def output_highlight(self, args): + """ + Outputs a C symbol that may require being converted to ReST using + the self.highlights variable + """ + + input_text = args + output = "" + in_literal = False + litprefix = "" + block = "" + + for line in input_text.strip("\n").split("\n"): + + # If we're in a literal block, see if we should drop out of it. + # Otherwise, pass the line straight through unmunged. 
+ if in_literal: + if line.strip(): # If the line is not blank + # If this is the first non-blank line in a literal block, + # figure out the proper indent. + if not litprefix: + r = KernRe(r'^(\s*)') + if r.match(line): + litprefix = '^' + r.group(1) + else: + litprefix = "" + + output += line + "\n" + elif not KernRe(litprefix).match(line): + in_literal = False + else: + output += line + "\n" + else: + output += line + "\n" + + # Not in a literal block (or just dropped out) + if not in_literal: + block += line + "\n" + if self.sphinx_literal.match(line) or self.sphinx_cblock.match(line): + in_literal = True + litprefix = "" + output += self.highlight_block(block) + block = "" + + # Handle any remaining block + if block: + output += self.highlight_block(block) + + # Print the output with the line prefix + for line in output.strip("\n").split("\n"): + self.data += self.lineprefix + line + "\n" + + def out_section(self, args, out_docblock=False): + """ + Outputs a block section. + + This could use some work; it's used to output the DOC: sections, and + starts by putting out the name of the doc section itself, but that + tends to duplicate a header already in the template file. + """ + for section, text in args.sections.items(): + # Skip sections that are in the nosymbol_table + if section in self.nosymbol: + continue + + if out_docblock: + if not self.out_mode == self.OUTPUT_INCLUDE: + self.data += f".. 
_{section}:\n\n" + self.data += f'{self.lineprefix}**{section}**\n\n' + else: + self.data += f'{self.lineprefix}**{section}**\n\n' + + self.print_lineno(args.section_start_lines.get(section, 0)) + self.output_highlight(text) + self.data += "\n" + self.data += "\n" + + def out_doc(self, fname, name, args): + if not self.check_doc(name, args): + return + self.out_section(args, out_docblock=True) + + def out_function(self, fname, name, args): + + oldprefix = self.lineprefix + signature = "" + + func_macro = args.get('func_macro', False) + if func_macro: + signature = name + else: + if args.get('functiontype'): + signature = args['functiontype'] + " " + signature += name + " (" + + ln = args.declaration_start_line + count = 0 + for parameter in args.parameterlist: + if count != 0: + signature += ", " + count += 1 + dtype = args.parametertypes.get(parameter, "") + + if function_pointer.search(dtype): + signature += function_pointer.group(1) + parameter + function_pointer.group(3) + else: + signature += dtype + + if not func_macro: + signature += ")" + + self.print_lineno(ln) + if args.get('typedef') or not args.get('functiontype'): + self.data += f".. c:macro:: {name}\n\n" + + if args.get('typedef'): + self.data += " **Typedef**: " + self.lineprefix = "" + self.output_highlight(args.get('purpose', "")) + self.data += "\n\n**Syntax**\n\n" + self.data += f" ``{signature}``\n\n" + else: + self.data += f"``{signature}``\n\n" + else: + self.data += f".. c:function:: {signature}\n\n" + + if not args.get('typedef'): + self.print_lineno(ln) + self.lineprefix = " " + self.output_highlight(args.get('purpose', "")) + self.data += "\n" + + # Put descriptive text into a container (HTML
) to help set + # function prototypes apart + self.lineprefix = " " + + if args.parameterlist: + self.data += ".. container:: kernelindent\n\n" + self.data += f"{self.lineprefix}**Parameters**\n\n" + + for parameter in args.parameterlist: + parameter_name = KernRe(r'\[.*').sub('', parameter) + dtype = args.parametertypes.get(parameter, "") + + if dtype: + self.data += f"{self.lineprefix}``{dtype}``\n" + else: + self.data += f"{self.lineprefix}``{parameter}``\n" + + self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0)) + + self.lineprefix = " " + if parameter_name in args.parameterdescs and \ + args.parameterdescs[parameter_name] != KernelDoc.undescribed: + + self.output_highlight(args.parameterdescs[parameter_name]) + self.data += "\n" + else: + self.data += f"{self.lineprefix}*undescribed*\n\n" + self.lineprefix = " " + + self.out_section(args) + self.lineprefix = oldprefix + + def out_enum(self, fname, name, args): + + oldprefix = self.lineprefix + ln = args.declaration_start_line + + self.data += f"\n\n.. c:enum:: {name}\n\n" + + self.print_lineno(ln) + self.lineprefix = " " + self.output_highlight(args.get('purpose', '')) + self.data += "\n" + + self.data += ".. container:: kernelindent\n\n" + outer = self.lineprefix + " " + self.lineprefix = outer + " " + self.data += f"{outer}**Constants**\n\n" + + for parameter in args.parameterlist: + self.data += f"{outer}``{parameter}``\n" + + if args.parameterdescs.get(parameter, '') != KernelDoc.undescribed: + self.output_highlight(args.parameterdescs[parameter]) + else: + self.data += f"{self.lineprefix}*undescribed*\n\n" + self.data += "\n" + + self.lineprefix = oldprefix + self.out_section(args) + + def out_typedef(self, fname, name, args): + + oldprefix = self.lineprefix + ln = args.declaration_start_line + + self.data += f"\n\n.. 
c:type:: {name}\n\n" + + self.print_lineno(ln) + self.lineprefix = " " + + self.output_highlight(args.get('purpose', '')) + + self.data += "\n" + + self.lineprefix = oldprefix + self.out_section(args) + + def out_struct(self, fname, name, args): + + purpose = args.get('purpose', "") + declaration = args.get('definition', "") + dtype = args.type + ln = args.declaration_start_line + + self.data += f"\n\n.. c:{dtype}:: {name}\n\n" + + self.print_lineno(ln) + + oldprefix = self.lineprefix + self.lineprefix += " " + + self.output_highlight(purpose) + self.data += "\n" + + self.data += ".. container:: kernelindent\n\n" + self.data += f"{self.lineprefix}**Definition**::\n\n" + + self.lineprefix = self.lineprefix + " " + + declaration = declaration.replace("\t", self.lineprefix) + + self.data += f"{self.lineprefix}{dtype} {name}" + ' {' + "\n" + self.data += f"{declaration}{self.lineprefix}" + "};\n\n" + + self.lineprefix = " " + self.data += f"{self.lineprefix}**Members**\n\n" + for parameter in args.parameterlist: + if not parameter or parameter.startswith("#"): + continue + + parameter_name = parameter.split("[", maxsplit=1)[0] + + if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed: + continue + + self.print_lineno(args.parameterdesc_start_lines.get(parameter_name, 0)) + + self.data += f"{self.lineprefix}``{parameter}``\n" + + self.lineprefix = " " + self.output_highlight(args.parameterdescs[parameter_name]) + self.lineprefix = " " + + self.data += "\n" + + self.data += "\n" + + self.lineprefix = oldprefix + self.out_section(args) + + +class ManFormat(OutputFormat): + """Consts and functions used by man pages output""" + + highlights = ( + (type_constant, r"\1"), + (type_constant2, r"\1"), + (type_func, r"\\fB\1\\fP"), + (type_enum, r"\\fI\1\\fP"), + (type_struct, r"\\fI\1\\fP"), + (type_typedef, r"\\fI\1\\fP"), + (type_union, r"\\fI\1\\fP"), + (type_param, r"\\fI\1\\fP"), + (type_param_ref, r"\\fI\1\2\\fP"), + (type_member, r"\\fI\1\2\3\\fP"), + 
(type_fallback, r"\\fI\1\\fP") + ) + blankline = "" + + date_formats = [ + "%a %b %d %H:%M:%S %Z %Y", + "%a %b %d %H:%M:%S %Y", + "%Y-%m-%d", + "%b %d %Y", + "%B %d %Y", + "%m %d %Y", + ] + + def __init__(self, modulename): + """ + Creates class variables. + + Not really mandatory, but it is a good coding style and makes + pylint happy. + """ + + super().__init__() + self.modulename = modulename + + dt = None + tstamp = os.environ.get("KBUILD_BUILD_TIMESTAMP") + if tstamp: + for fmt in self.date_formats: + try: + dt = datetime.strptime(tstamp, fmt) + break + except ValueError: + pass + + if not dt: + dt = datetime.now() + + self.man_date = dt.strftime("%B %Y") + + def output_highlight(self, block): + """ + Outputs a C symbol that may require being highlighted with + self.highlights variable using troff syntax + """ + + contents = self.highlight_block(block) + + if isinstance(contents, list): + contents = "\n".join(contents) + + for line in contents.strip("\n").split("\n"): + line = KernRe(r"^\s*").sub("", line) + if not line: + continue + + if line[0] == ".": + self.data += "\\&" + line + "\n" + else: + self.data += line + "\n" + + def out_doc(self, fname, name, args): + if not self.check_doc(name, args): + return + + self.data += f'.TH "{self.modulename}" 9 "{self.modulename}" "{self.man_date}" "API Manual" LINUX' + "\n" + + for section, text in args.sections.items(): + self.data += f'.SH "{section}"' + "\n" + self.output_highlight(text) + + def out_function(self, fname, name, args): + """output function in man""" + + self.data += f'.TH "{name}" 9 "{name}" "{self.man_date}" "Kernel Hacker\'s Manual" LINUX' + "\n" + + self.data += ".SH NAME\n" + self.data += f"{name} \\- {args['purpose']}\n" + + self.data += ".SH SYNOPSIS\n" + if args.get('functiontype', ''): + self.data += f'.B "{args["functiontype"]}" {name}' + "\n" + else: + self.data += f'.B "{name}' + "\n" + + count = 0 + parenth = "(" + post = "," + + for parameter in args.parameterlist: + if count == 
len(args.parameterlist) - 1: + post = ");" + + dtype = args.parametertypes.get(parameter, "") + if function_pointer.match(dtype): + # Pointer-to-function + self.data += f'".BI "{parenth}{function_pointer.group(1)}" " ") ({function_pointer.group(2)}){post}"' + "\n" + else: + dtype = KernRe(r'([^\*])$').sub(r'\1 ', dtype) + + self.data += f'.BI "{parenth}{dtype}" "{post}"' + "\n" + count += 1 + parenth = "" + + if args.parameterlist: + self.data += ".SH ARGUMENTS\n" + + for parameter in args.parameterlist: + parameter_name = re.sub(r'\[.*', '', parameter) + + self.data += f'.IP "{parameter}" 12' + "\n" + self.output_highlight(args.parameterdescs.get(parameter_name, "")) + + for section, text in args.sections.items(): + self.data += f'.SH "{section.upper()}"' + "\n" + self.output_highlight(text) + + def out_enum(self, fname, name, args): + self.data += f'.TH "{self.modulename}" 9 "enum {name}" "{self.man_date}" "API Manual" LINUX' + "\n" + + self.data += ".SH NAME\n" + self.data += f"enum {name} \\- {args['purpose']}\n" + + self.data += ".SH SYNOPSIS\n" + self.data += f"enum {name}" + " {\n" + + count = 0 + for parameter in args.parameterlist: + self.data += f'.br\n.BI " {parameter}"' + "\n" + if count == len(args.parameterlist) - 1: + self.data += "\n};\n" + else: + self.data += ", \n.br\n" + + count += 1 + + self.data += ".SH Constants\n" + + for parameter in args.parameterlist: + parameter_name = KernRe(r'\[.*').sub('', parameter) + self.data += f'.IP "{parameter}" 12' + "\n" + self.output_highlight(args.parameterdescs.get(parameter_name, "")) + + for section, text in args.sections.items(): + self.data += f'.SH "{section}"' + "\n" + self.output_highlight(text) + + def out_typedef(self, fname, name, args): + module = self.modulename + purpose = args.get('purpose') + + self.data += f'.TH "{module}" 9 "{name}" "{self.man_date}" "API Manual" LINUX' + "\n" + + self.data += ".SH NAME\n" + self.data += f"typedef {name} \\- {purpose}\n" + + for section, text in 
args.sections.items(): + self.data += f'.SH "{section}"' + "\n" + self.output_highlight(text) + + def out_struct(self, fname, name, args): + module = self.modulename + purpose = args.get('purpose') + definition = args.get('definition') + + self.data += f'.TH "{module}" 9 "{args.type} {name}" "{self.man_date}" "API Manual" LINUX' + "\n" + + self.data += ".SH NAME\n" + self.data += f"{args.type} {name} \\- {purpose}\n" + + # Replace tabs with two spaces and handle newlines + declaration = definition.replace("\t", " ") + declaration = KernRe(r"\n").sub('"\n.br\n.BI "', declaration) + + self.data += ".SH SYNOPSIS\n" + self.data += f"{args.type} {name} " + "{" + "\n.br\n" + self.data += f'.BI "{declaration}\n' + "};\n.br\n\n" + + self.data += ".SH Members\n" + for parameter in args.parameterlist: + if parameter.startswith("#"): + continue + + parameter_name = re.sub(r"\[.*", "", parameter) + + if args.parameterdescs.get(parameter_name) == KernelDoc.undescribed: + continue + + self.data += f'.IP "{parameter}" 12' + "\n" + self.output_highlight(args.parameterdescs.get(parameter_name)) + + for section, text in args.sections.items(): + self.data += f'.SH "{section}"' + "\n" + self.output_highlight(text) diff --git a/scripts/lib/kdoc/kdoc_parser.py b/scripts/lib/kdoc/kdoc_parser.py new file mode 100644 index 0000000000000..32b43562929b7 --- /dev/null +++ b/scripts/lib/kdoc/kdoc_parser.py @@ -0,0 +1,1670 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2025: Mauro Carvalho Chehab . +# +# pylint: disable=C0301,C0302,R0904,R0912,R0913,R0914,R0915,R0917,R1702 + +""" +kdoc_parser +=========== + +Read a C language source or header FILE and extract embedded +documentation comments +""" + +import sys +import re +from pprint import pformat + +from kdoc_re import NestedMatch, KernRe +from kdoc_item import KdocItem + +# +# Regular expressions used to parse kernel-doc markups at KernelDoc class. 
+# +# Let's declare them in lowercase outside any class to make easier to +# convert from the python script. +# +# As those are evaluated at the beginning, no need to cache them +# + +# Allow whitespace at end of comment start. +doc_start = KernRe(r'^/\*\*\s*$', cache=False) + +doc_end = KernRe(r'\*/', cache=False) +doc_com = KernRe(r'\s*\*\s*', cache=False) +doc_com_body = KernRe(r'\s*\* ?', cache=False) +doc_decl = doc_com + KernRe(r'(\w+)', cache=False) + +# @params and a strictly limited set of supported section names +# Specifically: +# Match @word: +# @...: +# @{section-name}: +# while trying to not match literal block starts like "example::" +# +known_section_names = 'description|context|returns?|notes?|examples?' +known_sections = KernRe(known_section_names, flags = re.I) +doc_sect = doc_com + \ + KernRe(r'\s*(\@[.\w]+|\@\.\.\.|' + known_section_names + r')\s*:([^:].*)?$', + flags=re.I, cache=False) + +doc_content = doc_com_body + KernRe(r'(.*)', cache=False) +doc_inline_start = KernRe(r'^\s*/\*\*\s*$', cache=False) +doc_inline_sect = KernRe(r'\s*\*\s*(@\s*[\w][\w\.]*\s*):(.*)', cache=False) +doc_inline_end = KernRe(r'^\s*\*/\s*$', cache=False) +doc_inline_oneline = KernRe(r'^\s*/\*\*\s*(@[\w\s]+):\s*(.*)\s*\*/\s*$', cache=False) +attribute = KernRe(r"__attribute__\s*\(\([a-z0-9,_\*\s\(\)]*\)\)", + flags=re.I | re.S, cache=False) + +export_symbol = KernRe(r'^\s*EXPORT_SYMBOL(_GPL)?\s*\(\s*(\w+)\s*\)\s*', cache=False) +export_symbol_ns = KernRe(r'^\s*EXPORT_SYMBOL_NS(_GPL)?\s*\(\s*(\w+)\s*,\s*"\S+"\)\s*', cache=False) + +type_param = KernRe(r"\@(\w*((\.\w+)|(->\w+))*(\.\.\.)?)", cache=False) + +# +# Tests for the beginning of a kerneldoc block in its various forms. +# +doc_block = doc_com + KernRe(r'DOC:\s*(.*)?', cache=False) +doc_begin_data = KernRe(r"^\s*\*?\s*(struct|union|enum|typedef)\b\s*(\w*)", cache = False) +doc_begin_func = KernRe(str(doc_com) + # initial " * ' + r"(?:\w+\s*\*\s*)?" + # type (not captured) + r'(?:define\s+)?' 
+ # possible "define" (not captured) + r'(\w+)\s*(?:\(\w*\))?\s*' + # name and optional "(...)" + r'(?:[-:].*)?$', # description (not captured) + cache = False) + +# +# A little helper to get rid of excess white space +# +multi_space = KernRe(r'\s\s+') +def trim_whitespace(s): + return multi_space.sub(' ', s.strip()) + +class state: + """ + State machine enums + """ + + # Parser states + NORMAL = 0 # normal code + NAME = 1 # looking for function name + DECLARATION = 2 # We have seen a declaration which might not be done + BODY = 3 # the body of the comment + SPECIAL_SECTION = 4 # doc section ending with a blank line + PROTO = 5 # scanning prototype + DOCBLOCK = 6 # documentation block + INLINE_NAME = 7 # gathering doc outside main block + INLINE_TEXT = 8 # reading the body of inline docs + + name = [ + "NORMAL", + "NAME", + "DECLARATION", + "BODY", + "SPECIAL_SECTION", + "PROTO", + "DOCBLOCK", + "INLINE_NAME", + "INLINE_TEXT", + ] + + +SECTION_DEFAULT = "Description" # default section + +class KernelEntry: + + def __init__(self, config, ln): + self.config = config + + self._contents = [] + self.prototype = "" + + self.warnings = [] + + self.parameterlist = [] + self.parameterdescs = {} + self.parametertypes = {} + self.parameterdesc_start_lines = {} + + self.section_start_lines = {} + self.sections = {} + + self.anon_struct_union = False + + self.leading_space = None + + # State flags + self.brcount = 0 + self.declaration_start_line = ln + 1 + + # + # Management of section contents + # + def add_text(self, text): + self._contents.append(text) + + def contents(self): + return '\n'.join(self._contents) + '\n' + + # TODO: rename to emit_message after removal of kernel-doc.pl + def emit_msg(self, log_msg, warning=True): + """Emit a message""" + + if not warning: + self.config.log.info(log_msg) + return + + # Delegate warning output to output logic, as this way it + # will report warnings/info only for symbols that are output + + self.warnings.append(log_msg) + return + 
+
+    #
+    # Begin a new section.
+    #
+    def begin_section(self, line_no, title=SECTION_DEFAULT, dump=False):
+        if dump:
+            self.dump_section(start_new=True)
+        self.section = title
+        self.new_start_line = line_no
+
+    def dump_section(self, start_new=True):
+        """
+        Dumps section contents to arrays/hashes intended for that purpose.
+        """
+        #
+        # If we have accumulated no contents in the default ("description")
+        # section, don't bother.
+        #
+        if self.section == SECTION_DEFAULT and not self._contents:
+            return
+        name = self.section
+        contents = self.contents()
+
+        if type_param.match(name):
+            name = type_param.group(1)
+
+            self.parameterdescs[name] = contents
+            self.parameterdesc_start_lines[name] = self.new_start_line
+
+            self.new_start_line = 0
+
+        else:
+            if name in self.sections and self.sections[name] != "":
+                # Only warn on user-specified duplicate section names
+                if name != SECTION_DEFAULT:
+                    # KernelEntry.emit_msg() takes only (log_msg, warning);
+                    # passing the line number as the first argument made the
+                    # number the recorded "message" and the real message the
+                    # warning flag, so the warning text was lost.
+                    self.emit_msg(f"duplicate section name '{name}'\n")
+                # Treat as a new paragraph - add a blank line
+                self.sections[name] += '\n' + contents
+            else:
+                self.sections[name] = contents
+            self.section_start_lines[name] = self.new_start_line
+            self.new_start_line = 0
+
+#        self.config.log.debug("Section: %s : %s", name, pformat(vars(self)))
+
+        if start_new:
+            self.section = SECTION_DEFAULT
+            self._contents = []
+
+
+class KernelDoc:
+    """
+    Read a C language source or header FILE and extract embedded
+    documentation comments.
+ """ + + # Section names + + section_context = "Context" + section_return = "Return" + + undescribed = "-- undescribed --" + + def __init__(self, config, fname): + """Initialize internal variables""" + + self.fname = fname + self.config = config + + # Initial state for the state machines + self.state = state.NORMAL + + # Store entry currently being processed + self.entry = None + + # Place all potential outputs into an array + self.entries = [] + + # + # We need Python 3.7 for its "dicts remember the insertion + # order" guarantee + # + if sys.version_info.major == 3 and sys.version_info.minor < 7: + self.emit_msg(0, + 'Python 3.7 or later is required for correct results') + + def emit_msg(self, ln, msg, warning=True): + """Emit a message""" + + log_msg = f"{self.fname}:{ln} {msg}" + + if self.entry: + self.entry.emit_msg(log_msg, warning) + return + + if warning: + self.config.log.warning(log_msg) + else: + self.config.log.info(log_msg) + + def dump_section(self, start_new=True): + """ + Dumps section contents to arrays/hashes intended for that purpose. + """ + + if self.entry: + self.entry.dump_section(start_new) + + # TODO: rename it to store_declaration after removal of kernel-doc.pl + def output_declaration(self, dtype, name, **args): + """ + Stores the entry into an entry array. 
+ + The actual output and output filters will be handled elsewhere + """ + + item = KdocItem(name, dtype, self.entry.declaration_start_line, **args) + item.warnings = self.entry.warnings + + # Drop empty sections + # TODO: improve empty sections logic to emit warnings + sections = self.entry.sections + for section in ["Description", "Return"]: + if section in sections and not sections[section].rstrip(): + del sections[section] + item.set_sections(sections, self.entry.section_start_lines) + item.set_params(self.entry.parameterlist, self.entry.parameterdescs, + self.entry.parametertypes, + self.entry.parameterdesc_start_lines) + self.entries.append(item) + + self.config.log.debug("Output: %s:%s = %s", dtype, name, pformat(args)) + + def reset_state(self, ln): + """ + Ancillary routine to create a new entry. It initializes all + variables used by the state machine. + """ + + self.entry = KernelEntry(self.config, ln) + + # State flags + self.state = state.NORMAL + + def push_parameter(self, ln, decl_type, param, dtype, + org_arg, declaration_name): + """ + Store parameters and their descriptions at self.entry. + """ + + if self.entry.anon_struct_union and dtype == "" and param == "}": + return # Ignore the ending }; from anonymous struct/union + + self.entry.anon_struct_union = False + + param = KernRe(r'[\[\)].*').sub('', param, count=1) + + if dtype == "" and param.endswith("..."): + if KernRe(r'\w\.\.\.$').search(param): + # For named variable parameters of the form `x...`, + # remove the dots + param = param[:-3] + else: + # Handles unnamed variable parameters + param = "..." 
+ + if param not in self.entry.parameterdescs or \ + not self.entry.parameterdescs[param]: + + self.entry.parameterdescs[param] = "variable arguments" + + elif dtype == "" and (not param or param == "void"): + param = "void" + self.entry.parameterdescs[param] = "no arguments" + + elif dtype == "" and param in ["struct", "union"]: + # Handle unnamed (anonymous) union or struct + dtype = param + param = "{unnamed_" + param + "}" + self.entry.parameterdescs[param] = "anonymous\n" + self.entry.anon_struct_union = True + + # Handle cache group enforcing variables: they do not need + # to be described in header files + elif "__cacheline_group" in param: + # Ignore __cacheline_group_begin and __cacheline_group_end + return + + # Warn if parameter has no description + # (but ignore ones starting with # as these are not parameters + # but inline preprocessor statements) + if param not in self.entry.parameterdescs and not param.startswith("#"): + self.entry.parameterdescs[param] = self.undescribed + + if "." not in param: + if decl_type == 'function': + dname = f"{decl_type} parameter" + else: + dname = f"{decl_type} member" + + self.emit_msg(ln, + f"{dname} '{param}' not described in '{declaration_name}'") + + # Strip spaces from param so that it is one continuous string on + # parameterlist. This fixes a problem where check_sections() + # cannot find a parameter like "addr[6 + 2]" because it actually + # appears as "addr[6", "+", "2]" on the parameter list. + # However, it's better to maintain the param string unchanged for + # output, so just weaken the string compare in check_sections() + # to ignore "[blah" in a parameter string. + + self.entry.parameterlist.append(param) + org_arg = KernRe(r'\s\s+').sub(' ', org_arg) + self.entry.parametertypes[param] = org_arg + + + def create_parameter_list(self, ln, decl_type, args, + splitter, declaration_name): + """ + Creates a list of parameters, storing them at self.entry. 
+ """ + + # temporarily replace all commas inside function pointer definition + arg_expr = KernRe(r'(\([^\),]+),') + while arg_expr.search(args): + args = arg_expr.sub(r"\1#", args) + + for arg in args.split(splitter): + # Strip comments + arg = KernRe(r'\/\*.*\*\/').sub('', arg) + + # Ignore argument attributes + arg = KernRe(r'\sPOS0?\s').sub(' ', arg) + + # Strip leading/trailing spaces + arg = arg.strip() + arg = KernRe(r'\s+').sub(' ', arg, count=1) + + if arg.startswith('#'): + # Treat preprocessor directive as a typeless variable just to fill + # corresponding data structures "correctly". Catch it later in + # output_* subs. + + # Treat preprocessor directive as a typeless variable + self.push_parameter(ln, decl_type, arg, "", + "", declaration_name) + + elif KernRe(r'\(.+\)\s*\(').search(arg): + # Pointer-to-function + + arg = arg.replace('#', ',') + + r = KernRe(r'[^\(]+\(\*?\s*([\w\[\]\.]*)\s*\)') + if r.match(arg): + param = r.group(1) + else: + self.emit_msg(ln, f"Invalid param: {arg}") + param = arg + + dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg) + self.push_parameter(ln, decl_type, param, dtype, + arg, declaration_name) + + elif KernRe(r'\(.+\)\s*\[').search(arg): + # Array-of-pointers + + arg = arg.replace('#', ',') + r = KernRe(r'[^\(]+\(\s*\*\s*([\w\[\]\.]*?)\s*(\s*\[\s*[\w]+\s*\]\s*)*\)') + if r.match(arg): + param = r.group(1) + else: + self.emit_msg(ln, f"Invalid param: {arg}") + param = arg + + dtype = KernRe(r'([^\(]+\(\*?)\s*' + re.escape(param)).sub(r'\1', arg) + + self.push_parameter(ln, decl_type, param, dtype, + arg, declaration_name) + + elif arg: + arg = KernRe(r'\s*:\s*').sub(":", arg) + arg = KernRe(r'\s*\[').sub('[', arg) + + args = KernRe(r'\s*,\s*').split(arg) + if args[0] and '*' in args[0]: + args[0] = re.sub(r'(\*+)\s*', r' \1', args[0]) + + first_arg = [] + r = KernRe(r'^(.*\s+)(.*?\[.*\].*)$') + if args[0] and r.match(args[0]): + args.pop(0) + first_arg.extend(r.group(1)) + 
first_arg.append(r.group(2)) + else: + first_arg = KernRe(r'\s+').split(args.pop(0)) + + args.insert(0, first_arg.pop()) + dtype = ' '.join(first_arg) + + for param in args: + if KernRe(r'^(\*+)\s*(.*)').match(param): + r = KernRe(r'^(\*+)\s*(.*)') + if not r.match(param): + self.emit_msg(ln, f"Invalid param: {param}") + continue + + param = r.group(1) + + self.push_parameter(ln, decl_type, r.group(2), + f"{dtype} {r.group(1)}", + arg, declaration_name) + + elif KernRe(r'(.*?):(\w+)').search(param): + r = KernRe(r'(.*?):(\w+)') + if not r.match(param): + self.emit_msg(ln, f"Invalid param: {param}") + continue + + if dtype != "": # Skip unnamed bit-fields + self.push_parameter(ln, decl_type, r.group(1), + f"{dtype}:{r.group(2)}", + arg, declaration_name) + else: + self.push_parameter(ln, decl_type, param, dtype, + arg, declaration_name) + + def check_sections(self, ln, decl_name, decl_type): + """ + Check for errors inside sections, emitting warnings if not found + parameters are described. + """ + for section in self.entry.sections: + if section not in self.entry.parameterlist and \ + not known_sections.search(section): + if decl_type == 'function': + dname = f"{decl_type} parameter" + else: + dname = f"{decl_type} member" + self.emit_msg(ln, + f"Excess {dname} '{section}' description in '{decl_name}'") + + def check_return_section(self, ln, declaration_name, return_type): + """ + If the function doesn't return void, warns about the lack of a + return description. 
+ """ + + if not self.config.wreturn: + return + + # Ignore an empty return type (It's a macro) + # Ignore functions with a "void" return type (but not "void *") + if not return_type or KernRe(r'void\s*\w*\s*$').search(return_type): + return + + if not self.entry.sections.get("Return", None): + self.emit_msg(ln, + f"No description found for return value of '{declaration_name}'") + + def dump_struct(self, ln, proto): + """ + Store an entry for an struct or union + """ + + type_pattern = r'(struct|union)' + + qualifiers = [ + "__attribute__", + "__packed", + "__aligned", + "____cacheline_aligned_in_smp", + "____cacheline_aligned", + ] + + definition_body = r'\{(.*)\}\s*' + "(?:" + '|'.join(qualifiers) + ")?" + struct_members = KernRe(type_pattern + r'([^\{\};]+)(\{)([^\{\}]*)(\})([^\{\}\;]*)(\;)') + + # Extract struct/union definition + members = None + declaration_name = None + decl_type = None + + r = KernRe(type_pattern + r'\s+(\w+)\s*' + definition_body) + if r.search(proto): + decl_type = r.group(1) + declaration_name = r.group(2) + members = r.group(3) + else: + r = KernRe(r'typedef\s+' + type_pattern + r'\s*' + definition_body + r'\s*(\w+)\s*;') + + if r.search(proto): + decl_type = r.group(1) + declaration_name = r.group(3) + members = r.group(2) + + if not members: + self.emit_msg(ln, f"{proto} error: Cannot parse struct or union!") + return + + if self.entry.identifier != declaration_name: + self.emit_msg(ln, + f"expecting prototype for {decl_type} {self.entry.identifier}. 
Prototype was for {decl_type} {declaration_name} instead\n") + return + + args_pattern = r'([^,)]+)' + + sub_prefixes = [ + (KernRe(r'\/\*\s*private:.*?\/\*\s*public:.*?\*\/', re.S | re.I), ''), + (KernRe(r'\/\*\s*private:.*', re.S | re.I), ''), + + # Strip comments + (KernRe(r'\/\*.*?\*\/', re.S), ''), + + # Strip attributes + (attribute, ' '), + (KernRe(r'\s*__aligned\s*\([^;]*\)', re.S), ' '), + (KernRe(r'\s*__counted_by\s*\([^;]*\)', re.S), ' '), + (KernRe(r'\s*__counted_by_(le|be)\s*\([^;]*\)', re.S), ' '), + (KernRe(r'\s*__packed\s*', re.S), ' '), + (KernRe(r'\s*CRYPTO_MINALIGN_ATTR', re.S), ' '), + (KernRe(r'\s*____cacheline_aligned_in_smp', re.S), ' '), + (KernRe(r'\s*____cacheline_aligned', re.S), ' '), + + # Unwrap struct_group macros based on this definition: + # __struct_group(TAG, NAME, ATTRS, MEMBERS...) + # which has variants like: struct_group(NAME, MEMBERS...) + # Only MEMBERS arguments require documentation. + # + # Parsing them happens on two steps: + # + # 1. drop struct group arguments that aren't at MEMBERS, + # storing them as STRUCT_GROUP(MEMBERS) + # + # 2. remove STRUCT_GROUP() ancillary macro. + # + # The original logic used to remove STRUCT_GROUP() using an + # advanced regex: + # + # \bSTRUCT_GROUP(\(((?:(?>[^)(]+)|(?1))*)\))[^;]*; + # + # with two patterns that are incompatible with + # Python re module, as it has: + # + # - a recursive pattern: (?1) + # - an atomic grouping: (?>...) + # + # I tried a simpler version: but it didn't work either: + # \bSTRUCT_GROUP\(([^\)]+)\)[^;]*; + # + # As it doesn't properly match the end parenthesis on some cases. + # + # So, a better solution was crafted: there's now a NestedMatch + # class that ensures that delimiters after a search are properly + # matched. So, the implementation to drop STRUCT_GROUP() will be + # handled in separate. 
+ + (KernRe(r'\bstruct_group\s*\(([^,]*,)', re.S), r'STRUCT_GROUP('), + (KernRe(r'\bstruct_group_attr\s*\(([^,]*,){2}', re.S), r'STRUCT_GROUP('), + (KernRe(r'\bstruct_group_tagged\s*\(([^,]*),([^,]*),', re.S), r'struct \1 \2; STRUCT_GROUP('), + (KernRe(r'\b__struct_group\s*\(([^,]*,){3}', re.S), r'STRUCT_GROUP('), + + # Replace macros + # + # TODO: use NestedMatch for FOO($1, $2, ...) matches + # + # it is better to also move those to the NestedMatch logic, + # to ensure that parenthesis will be properly matched. + + (KernRe(r'__ETHTOOL_DECLARE_LINK_MODE_MASK\s*\(([^\)]+)\)', re.S), r'DECLARE_BITMAP(\1, __ETHTOOL_LINK_MODE_MASK_NBITS)'), + (KernRe(r'DECLARE_PHY_INTERFACE_MASK\s*\(([^\)]+)\)', re.S), r'DECLARE_BITMAP(\1, PHY_INTERFACE_MODE_MAX)'), + (KernRe(r'DECLARE_BITMAP\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'unsigned long \1[BITS_TO_LONGS(\2)]'), + (KernRe(r'DECLARE_HASHTABLE\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'unsigned long \1[1 << ((\2) - 1)]'), + (KernRe(r'DECLARE_KFIFO\s*\(' + args_pattern + r',\s*' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\2 *\1'), + (KernRe(r'DECLARE_KFIFO_PTR\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\2 *\1'), + (KernRe(r'(?:__)?DECLARE_FLEX_ARRAY\s*\(' + args_pattern + r',\s*' + args_pattern + r'\)', re.S), r'\1 \2[]'), + (KernRe(r'DEFINE_DMA_UNMAP_ADDR\s*\(' + args_pattern + r'\)', re.S), r'dma_addr_t \1'), + (KernRe(r'DEFINE_DMA_UNMAP_LEN\s*\(' + args_pattern + r'\)', re.S), r'__u32 \1'), + (KernRe(r'VIRTIO_DECLARE_FEATURES\s*\(' + args_pattern + r'\)', re.S), r'u64 \1; u64 \1_array[VIRTIO_FEATURES_DWORDS]'), + ] + + # Regexes here are guaranteed to have the end limiter matching + # the start delimiter. Yet, right now, only one replace group + # is allowed. 
+ + sub_nested_prefixes = [ + (re.compile(r'\bSTRUCT_GROUP\('), r'\1'), + ] + + for search, sub in sub_prefixes: + members = search.sub(sub, members) + + nested = NestedMatch() + + for search, sub in sub_nested_prefixes: + members = nested.sub(search, sub, members) + + # Keeps the original declaration as-is + declaration = members + + # Split nested struct/union elements + # + # This loop was simpler at the original kernel-doc perl version, as + # while ($members =~ m/$struct_members/) { ... } + # reads 'members' string on each interaction. + # + # Python behavior is different: it parses 'members' only once, + # creating a list of tuples from the first interaction. + # + # On other words, this won't get nested structs. + # + # So, we need to have an extra loop on Python to override such + # re limitation. + + while True: + tuples = struct_members.findall(members) + if not tuples: + break + + for t in tuples: + newmember = "" + maintype = t[0] + s_ids = t[5] + content = t[3] + + oldmember = "".join(t) + + for s_id in s_ids.split(','): + s_id = s_id.strip() + + newmember += f"{maintype} {s_id}; " + s_id = KernRe(r'[:\[].*').sub('', s_id) + s_id = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', s_id) + + for arg in content.split(';'): + arg = arg.strip() + + if not arg: + continue + + r = KernRe(r'^([^\(]+\(\*?\s*)([\w\.]*)(\s*\).*)') + if r.match(arg): + # Pointer-to-function + dtype = r.group(1) + name = r.group(2) + extra = r.group(3) + + if not name: + continue + + if not s_id: + # Anonymous struct/union + newmember += f"{dtype}{name}{extra}; " + else: + newmember += f"{dtype}{s_id}.{name}{extra}; " + + else: + arg = arg.strip() + # Handle bitmaps + arg = KernRe(r':\s*\d+\s*').sub('', arg) + + # Handle arrays + arg = KernRe(r'\[.*\]').sub('', arg) + + # Handle multiple IDs + arg = KernRe(r'\s*,\s*').sub(',', arg) + + r = KernRe(r'(.*)\s+([\S+,]+)') + + if r.search(arg): + dtype = r.group(1) + names = r.group(2) + else: + newmember += f"{arg}; " + continue + + for name in 
names.split(','): + name = KernRe(r'^\s*\**(\S+)\s*').sub(r'\1', name).strip() + + if not name: + continue + + if not s_id: + # Anonymous struct/union + newmember += f"{dtype} {name}; " + else: + newmember += f"{dtype} {s_id}.{name}; " + + members = members.replace(oldmember, newmember) + + # Ignore other nested elements, like enums + members = re.sub(r'(\{[^\{\}]*\})', '', members) + + self.create_parameter_list(ln, decl_type, members, ';', + declaration_name) + self.check_sections(ln, declaration_name, decl_type) + + # Adjust declaration for better display + declaration = KernRe(r'([\{;])').sub(r'\1\n', declaration) + declaration = KernRe(r'\}\s+;').sub('};', declaration) + + # Better handle inlined enums + while True: + r = KernRe(r'(enum\s+\{[^\}]+),([^\n])') + if not r.search(declaration): + break + + declaration = r.sub(r'\1,\n\2', declaration) + + def_args = declaration.split('\n') + level = 1 + declaration = "" + for clause in def_args: + + clause = clause.strip() + clause = KernRe(r'\s+').sub(' ', clause, count=1) + + if not clause: + continue + + if '}' in clause and level > 1: + level -= 1 + + if not KernRe(r'^\s*#').match(clause): + declaration += "\t" * level + + declaration += "\t" + clause + "\n" + if "{" in clause and "}" not in clause: + level += 1 + + self.output_declaration(decl_type, declaration_name, + definition=declaration, + purpose=self.entry.declaration_purpose) + + def dump_enum(self, ln, proto): + """ + Stores an enum inside self.entries array. + """ + + # Ignore members marked private + proto = KernRe(r'\/\*\s*private:.*?\/\*\s*public:.*?\*\/', flags=re.S).sub('', proto) + proto = KernRe(r'\/\*\s*private:.*}', flags=re.S).sub('}', proto) + + # Strip comments + proto = KernRe(r'\/\*.*?\*\/', flags=re.S).sub('', proto) + + # Strip #define macros inside enums + proto = KernRe(r'#\s*((define|ifdef|if)\s+|endif)[^;]*;', flags=re.S).sub('', proto) + + # + # Parse out the name and members of the enum. Typedef form first. 
+ # + r = KernRe(r'typedef\s+enum\s*\{(.*)\}\s*(\w*)\s*;') + if r.search(proto): + declaration_name = r.group(2) + members = r.group(1).rstrip() + # + # Failing that, look for a straight enum + # + else: + r = KernRe(r'enum\s+(\w*)\s*\{(.*)\}') + if r.match(proto): + declaration_name = r.group(1) + members = r.group(2).rstrip() + # + # OK, this isn't going to work. + # + else: + self.emit_msg(ln, f"{proto}: error: Cannot parse enum!") + return + # + # Make sure we found what we were expecting. + # + if self.entry.identifier != declaration_name: + if self.entry.identifier == "": + self.emit_msg(ln, + f"{proto}: wrong kernel-doc identifier on prototype") + else: + self.emit_msg(ln, + f"expecting prototype for enum {self.entry.identifier}. " + f"Prototype was for enum {declaration_name} instead") + return + + if not declaration_name: + declaration_name = "(anonymous)" + # + # Parse out the name of each enum member, and verify that we + # have a description for it. + # + member_set = set() + members = KernRe(r'\([^;)]*\)').sub('', members) + for arg in members.split(','): + if not arg: + continue + arg = KernRe(r'^\s*(\w+).*').sub(r'\1', arg) + self.entry.parameterlist.append(arg) + if arg not in self.entry.parameterdescs: + self.entry.parameterdescs[arg] = self.undescribed + self.emit_msg(ln, + f"Enum value '{arg}' not described in enum '{declaration_name}'") + member_set.add(arg) + # + # Ensure that every described member actually exists in the enum. + # + for k in self.entry.parameterdescs: + if k not in member_set: + self.emit_msg(ln, + f"Excess enum value '%{k}' description in '{declaration_name}'") + + self.output_declaration('enum', declaration_name, + purpose=self.entry.declaration_purpose) + + def dump_declaration(self, ln, prototype): + """ + Stores a data declaration inside self.entries array. 
+ """ + + if self.entry.decl_type == "enum": + self.dump_enum(ln, prototype) + elif self.entry.decl_type == "typedef": + self.dump_typedef(ln, prototype) + elif self.entry.decl_type in ["union", "struct"]: + self.dump_struct(ln, prototype) + else: + # This would be a bug + self.emit_message(ln, f'Unknown declaration type: {self.entry.decl_type}') + + def dump_function(self, ln, prototype): + """ + Stores a function of function macro inside self.entries array. + """ + + func_macro = False + return_type = '' + decl_type = 'function' + + # Prefixes that would be removed + sub_prefixes = [ + (r"^static +", "", 0), + (r"^extern +", "", 0), + (r"^asmlinkage +", "", 0), + (r"^inline +", "", 0), + (r"^__inline__ +", "", 0), + (r"^__inline +", "", 0), + (r"^__always_inline +", "", 0), + (r"^noinline +", "", 0), + (r"^__FORTIFY_INLINE +", "", 0), + (r"QEMU_[A-Z_]+ +", "", 0), + (r"__init +", "", 0), + (r"__init_or_module +", "", 0), + (r"__deprecated +", "", 0), + (r"__flatten +", "", 0), + (r"__meminit +", "", 0), + (r"__must_check +", "", 0), + (r"__weak +", "", 0), + (r"__sched +", "", 0), + (r"_noprof", "", 0), + (r"__printf\s*\(\s*\d*\s*,\s*\d*\s*\) +", "", 0), + (r"__(?:re)?alloc_size\s*\(\s*\d+\s*(?:,\s*\d+\s*)?\) +", "", 0), + (r"__diagnose_as\s*\(\s*\S+\s*(?:,\s*\d+\s*)*\) +", "", 0), + (r"DECL_BUCKET_PARAMS\s*\(\s*(\S+)\s*,\s*(\S+)\s*\)", r"\1, \2", 0), + (r"__attribute_const__ +", "", 0), + + # It seems that Python support for re.X is broken: + # At least for me (Python 3.13), this didn't work +# (r""" +# __attribute__\s*\(\( +# (?: +# [\w\s]+ # attribute name +# (?:\([^)]*\))? # attribute arguments +# \s*,? 
# optional comma at the end +# )+ +# \)\)\s+ +# """, "", re.X), + + # So, remove whitespaces and comments from it + (r"__attribute__\s*\(\((?:[\w\s]+(?:\([^)]*\))?\s*,?)+\)\)\s+", "", 0), + ] + + for search, sub, flags in sub_prefixes: + prototype = KernRe(search, flags).sub(sub, prototype) + + # Macros are a special case, as they change the prototype format + new_proto = KernRe(r"^#\s*define\s+").sub("", prototype) + if new_proto != prototype: + is_define_proto = True + prototype = new_proto + else: + is_define_proto = False + + # Yes, this truly is vile. We are looking for: + # 1. Return type (may be nothing if we're looking at a macro) + # 2. Function name + # 3. Function parameters. + # + # All the while we have to watch out for function pointer parameters + # (which IIRC is what the two sections are for), C types (these + # regexps don't even start to express all the possibilities), and + # so on. + # + # If you mess with these regexps, it's a good idea to check that + # the following functions' documentation still comes out right: + # - parport_register_device (function pointer parameters) + # - atomic_set (macro) + # - pci_match_device, __copy_to_user (long return type) + + name = r'[a-zA-Z0-9_~:]+' + prototype_end1 = r'[^\(]*' + prototype_end2 = r'[^\{]*' + prototype_end = fr'\(({prototype_end1}|{prototype_end2})\)' + + # Besides compiling, Perl qr{[\w\s]+} works as a non-capturing group. + # So, this needs to be mapped in Python with (?:...)? or (?:...)+ + + type1 = r'(?:[\w\s]+)?' 
+ type2 = r'(?:[\w\s]+\*+)+' + + found = False + + if is_define_proto: + r = KernRe(r'^()(' + name + r')\s+') + + if r.search(prototype): + return_type = '' + declaration_name = r.group(2) + func_macro = True + + found = True + + if not found: + patterns = [ + rf'^()({name})\s*{prototype_end}', + rf'^({type1})\s+({name})\s*{prototype_end}', + rf'^({type2})\s*({name})\s*{prototype_end}', + ] + + for p in patterns: + r = KernRe(p) + + if r.match(prototype): + + return_type = r.group(1) + declaration_name = r.group(2) + args = r.group(3) + + self.create_parameter_list(ln, decl_type, args, ',', + declaration_name) + + found = True + break + if not found: + self.emit_msg(ln, + f"cannot understand function prototype: '{prototype}'") + return + + if self.entry.identifier != declaration_name: + self.emit_msg(ln, + f"expecting prototype for {self.entry.identifier}(). Prototype was for {declaration_name}() instead") + return + + self.check_sections(ln, declaration_name, "function") + + self.check_return_section(ln, declaration_name, return_type) + + if 'typedef' in return_type: + self.output_declaration(decl_type, declaration_name, + typedef=True, + functiontype=return_type, + purpose=self.entry.declaration_purpose, + func_macro=func_macro) + else: + self.output_declaration(decl_type, declaration_name, + typedef=False, + functiontype=return_type, + purpose=self.entry.declaration_purpose, + func_macro=func_macro) + + def dump_typedef(self, ln, proto): + """ + Stores a typedef inside self.entries array. 
+ """ + + typedef_type = r'((?:\s+[\w\*]+\b){0,7}\s+(?:\w+\b|\*+))\s*' + typedef_ident = r'\*?\s*(\w\S+)\s*' + typedef_args = r'\s*\((.*)\);' + + typedef1 = KernRe(r'typedef' + typedef_type + r'\(' + typedef_ident + r'\)' + typedef_args) + typedef2 = KernRe(r'typedef' + typedef_type + typedef_ident + typedef_args) + + # Strip comments + proto = KernRe(r'/\*.*?\*/', flags=re.S).sub('', proto) + + # Parse function typedef prototypes + for r in [typedef1, typedef2]: + if not r.match(proto): + continue + + return_type = r.group(1).strip() + declaration_name = r.group(2) + args = r.group(3) + + if self.entry.identifier != declaration_name: + self.emit_msg(ln, + f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n") + return + + decl_type = 'function' + self.create_parameter_list(ln, decl_type, args, ',', declaration_name) + + self.output_declaration(decl_type, declaration_name, + typedef=True, + functiontype=return_type, + purpose=self.entry.declaration_purpose) + return + + # Handle nested parentheses or brackets + r = KernRe(r'(\(*.\)\s*|\[*.\]\s*);$') + while r.search(proto): + proto = r.sub('', proto) + + # Parse simple typedefs + r = KernRe(r'typedef.*\s+(\w+)\s*;') + if r.match(proto): + declaration_name = r.group(1) + + if self.entry.identifier != declaration_name: + self.emit_msg(ln, + f"expecting prototype for typedef {self.entry.identifier}. Prototype was for typedef {declaration_name} instead\n") + return + + self.output_declaration('typedef', declaration_name, + purpose=self.entry.declaration_purpose) + return + + self.emit_msg(ln, "error: Cannot parse typedef!") + + @staticmethod + def process_export(function_set, line): + """ + process EXPORT_SYMBOL* tags + + This method doesn't use any variable from the class, so declare it + with a staticmethod decorator. + """ + + # We support documenting some exported symbols with different + # names. A horrible hack. 
+ suffixes = [ '_noprof' ] + + # Note: it accepts only one EXPORT_SYMBOL* per line, as having + # multiple export lines would violate Kernel coding style. + + if export_symbol.search(line): + symbol = export_symbol.group(2) + elif export_symbol_ns.search(line): + symbol = export_symbol_ns.group(2) + else: + return False + # + # Found an export, trim out any special suffixes + # + for suffix in suffixes: + # Be backward compatible with Python < 3.9 + if symbol.endswith(suffix): + symbol = symbol[:-len(suffix)] + function_set.add(symbol) + return True + + def process_normal(self, ln, line): + """ + STATE_NORMAL: looking for the /** to begin everything. + """ + + if not doc_start.match(line): + return + + # start a new entry + self.reset_state(ln) + + # next line is always the function name + self.state = state.NAME + + def process_name(self, ln, line): + """ + STATE_NAME: Looking for the "name - description" line + """ + # + # Check for a DOC: block and handle them specially. + # + if doc_block.search(line): + + if not doc_block.group(1): + self.entry.begin_section(ln, "Introduction") + else: + self.entry.begin_section(ln, doc_block.group(1)) + + self.entry.identifier = self.entry.section + self.state = state.DOCBLOCK + # + # Otherwise we're looking for a normal kerneldoc declaration line. + # + elif doc_decl.search(line): + self.entry.identifier = doc_decl.group(1) + + # Test for data declaration + if doc_begin_data.search(line): + self.entry.decl_type = doc_begin_data.group(1) + self.entry.identifier = doc_begin_data.group(2) + # + # Look for a function description + # + elif doc_begin_func.search(line): + self.entry.identifier = doc_begin_func.group(1) + self.entry.decl_type = "function" + # + # We struck out. + # + else: + self.emit_msg(ln, + f"This comment starts with '/**', but isn't a kernel-doc comment. Refer Documentation/doc-guide/kernel-doc.rst\n{line}") + self.state = state.NORMAL + return + # + # OK, set up for a new kerneldoc entry. 
+ # + self.state = state.BODY + self.entry.identifier = self.entry.identifier.strip(" ") + # if there's no @param blocks need to set up default section here + self.entry.begin_section(ln + 1) + # + # Find the description portion, which *should* be there but + # isn't always. + # (We should be able to capture this from the previous parsing - someday) + # + r = KernRe("[-:](.*)") + if r.search(line): + self.entry.declaration_purpose = trim_whitespace(r.group(1)) + self.state = state.DECLARATION + else: + self.entry.declaration_purpose = "" + + if not self.entry.declaration_purpose and self.config.wshort_desc: + self.emit_msg(ln, + f"missing initial short description on line:\n{line}") + + if not self.entry.identifier and self.entry.decl_type != "enum": + self.emit_msg(ln, + f"wrong kernel-doc identifier on line:\n{line}") + self.state = state.NORMAL + + if self.config.verbose: + self.emit_msg(ln, + f"Scanning doc for {self.entry.decl_type} {self.entry.identifier}", + warning=False) + # + # Failed to find an identifier. Emit a warning + # + else: + self.emit_msg(ln, f"Cannot find identifier on line:\n{line}") + + # + # Helper function to determine if a new section is being started. + # + def is_new_section(self, ln, line): + if doc_sect.search(line): + self.state = state.BODY + # + # Pick out the name of our new section, tweaking it if need be. + # + newsection = doc_sect.group(1) + if newsection.lower() == 'description': + newsection = 'Description' + elif newsection.lower() == 'context': + newsection = 'Context' + self.state = state.SPECIAL_SECTION + elif newsection.lower() in ["@return", "@returns", + "return", "returns"]: + newsection = "Return" + self.state = state.SPECIAL_SECTION + elif newsection[0] == '@': + self.state = state.SPECIAL_SECTION + # + # Initialize the contents, and get the new section going. 
+ # + newcontents = doc_sect.group(2) + if not newcontents: + newcontents = "" + self.dump_section() + self.entry.begin_section(ln, newsection) + self.entry.leading_space = None + + self.entry.add_text(newcontents.lstrip()) + return True + return False + + # + # Helper function to detect (and effect) the end of a kerneldoc comment. + # + def is_comment_end(self, ln, line): + if doc_end.search(line): + self.dump_section() + + # Look for doc_com + + doc_end: + r = KernRe(r'\s*\*\s*[a-zA-Z_0-9:\.]+\*/') + if r.match(line): + self.emit_msg(ln, f"suspicious ending line: {line}") + + self.entry.prototype = "" + self.entry.new_start_line = ln + 1 + + self.state = state.PROTO + return True + return False + + + def process_decl(self, ln, line): + """ + STATE_DECLARATION: We've seen the beginning of a declaration + """ + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return + # + # Look for anything with the " * " line beginning. + # + if doc_content.search(line): + cont = doc_content.group(1) + # + # A blank line means that we have moved out of the declaration + # part of the comment (without any "special section" parameter + # descriptions). + # + if cont == "": + self.state = state.BODY + # + # Otherwise we have more of the declaration section to soak up. + # + else: + self.entry.declaration_purpose = \ + trim_whitespace(self.entry.declaration_purpose + ' ' + cont) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") + + + def process_special(self, ln, line): + """ + STATE_SPECIAL_SECTION: a section ending with a blank line + """ + # + # If we have hit a blank line (only the " * " marker), then this + # section is done. + # + if KernRe(r"\s*\*\s*$").match(line): + self.entry.begin_section(ln, dump = True) + self.state = state.BODY + return + # + # Not a blank line, look for the other ways to end the section. 
+ # + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return + # + # OK, we should have a continuation of the text for this section. + # + if doc_content.search(line): + cont = doc_content.group(1) + # + # If the lines of text after the first in a special section have + # leading white space, we need to trim it out or Sphinx will get + # confused. For the second line (the None case), see what we + # find there and remember it. + # + if self.entry.leading_space is None: + r = KernRe(r'^(\s+)') + if r.match(cont): + self.entry.leading_space = len(r.group(1)) + else: + self.entry.leading_space = 0 + # + # Otherwise, before trimming any leading chars, be *sure* + # that they are white space. We should maybe warn if this + # isn't the case. + # + for i in range(0, self.entry.leading_space): + if cont[i] != " ": + self.entry.leading_space = i + break + # + # Add the trimmed result to the section and we're done. + # + self.entry.add_text(cont[self.entry.leading_space:]) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") + + def process_body(self, ln, line): + """ + STATE_BODY: the bulk of a kerneldoc comment. + """ + if self.is_new_section(ln, line) or self.is_comment_end(ln, line): + return + + if doc_content.search(line): + cont = doc_content.group(1) + self.entry.add_text(cont) + else: + # Unknown line, ignore + self.emit_msg(ln, f"bad line: {line}") + + def process_inline_name(self, ln, line): + """STATE_INLINE_NAME: beginning of docbook comments within a prototype.""" + + if doc_inline_sect.search(line): + self.entry.begin_section(ln, doc_inline_sect.group(1)) + self.entry.add_text(doc_inline_sect.group(2).lstrip()) + self.state = state.INLINE_TEXT + elif doc_inline_end.search(line): + self.dump_section() + self.state = state.PROTO + elif doc_content.search(line): + self.emit_msg(ln, f"Incorrect use of kernel-doc format: {line}") + self.state = state.PROTO + # else ... ?? 
+ + def process_inline_text(self, ln, line): + """STATE_INLINE_TEXT: docbook comments within a prototype.""" + + if doc_inline_end.search(line): + self.dump_section() + self.state = state.PROTO + elif doc_content.search(line): + self.entry.add_text(doc_content.group(1)) + # else ... ?? + + def syscall_munge(self, ln, proto): # pylint: disable=W0613 + """ + Handle syscall definitions + """ + + is_void = False + + # Strip newlines/CR's + proto = re.sub(r'[\r\n]+', ' ', proto) + + # Check if it's a SYSCALL_DEFINE0 + if 'SYSCALL_DEFINE0' in proto: + is_void = True + + # Replace SYSCALL_DEFINE with correct return type & function name + proto = KernRe(r'SYSCALL_DEFINE.*\(').sub('long sys_', proto) + + r = KernRe(r'long\s+(sys_.*?),') + if r.search(proto): + proto = KernRe(',').sub('(', proto, count=1) + elif is_void: + proto = KernRe(r'\)').sub('(void)', proto, count=1) + + # Now delete all of the odd-numbered commas in the proto + # so that argument types & names don't have a comma between them + count = 0 + length = len(proto) + + if is_void: + length = 0 # skip the loop if is_void + + for ix in range(length): + if proto[ix] == ',': + count += 1 + if count % 2 == 1: + proto = proto[:ix] + ' ' + proto[ix + 1:] + + return proto + + def tracepoint_munge(self, ln, proto): + """ + Handle tracepoint definitions + """ + + tracepointname = None + tracepointargs = None + + # Match tracepoint name based on different patterns + r = KernRe(r'TRACE_EVENT\((.*?),') + if r.search(proto): + tracepointname = r.group(1) + + r = KernRe(r'DEFINE_SINGLE_EVENT\((.*?),') + if r.search(proto): + tracepointname = r.group(1) + + r = KernRe(r'DEFINE_EVENT\((.*?),(.*?),') + if r.search(proto): + tracepointname = r.group(2) + + if tracepointname: + tracepointname = tracepointname.lstrip() + + r = KernRe(r'TP_PROTO\((.*?)\)') + if r.search(proto): + tracepointargs = r.group(1) + + if not tracepointname or not tracepointargs: + self.emit_msg(ln, + f"Unrecognized tracepoint format:\n{proto}\n") + 
else: + proto = f"static inline void trace_{tracepointname}({tracepointargs})" + self.entry.identifier = f"trace_{self.entry.identifier}" + + return proto + + def process_proto_function(self, ln, line): + """Ancillary routine to process a function prototype""" + + # strip C99-style comments to end of line + line = KernRe(r"\/\/.*$", re.S).sub('', line) + # + # Soak up the line's worth of prototype text, stopping at { or ; if present. + # + if KernRe(r'\s*#\s*define').match(line): + self.entry.prototype = line + elif not line.startswith('#'): # skip other preprocessor stuff + r = KernRe(r'([^\{]*)') + if r.match(line): + self.entry.prototype += r.group(1) + " " + # + # If we now have the whole prototype, clean it up and declare victory. + # + if '{' in line or ';' in line or KernRe(r'\s*#\s*define').match(line): + # strip comments and surrounding spaces + self.entry.prototype = KernRe(r'/\*.*\*/').sub('', self.entry.prototype).strip() + # + # Handle self.entry.prototypes for function pointers like: + # int (*pcs_config)(struct foo) + # by turning it into + # int pcs_config(struct foo) + # + r = KernRe(r'^(\S+\s+)\(\s*\*(\S+)\)') + self.entry.prototype = r.sub(r'\1\2', self.entry.prototype) + # + # Handle special declaration syntaxes + # + if 'SYSCALL_DEFINE' in self.entry.prototype: + self.entry.prototype = self.syscall_munge(ln, + self.entry.prototype) + else: + r = KernRe(r'TRACE_EVENT|DEFINE_EVENT|DEFINE_SINGLE_EVENT') + if r.search(self.entry.prototype): + self.entry.prototype = self.tracepoint_munge(ln, + self.entry.prototype) + # + # ... and we're done + # + self.dump_function(ln, self.entry.prototype) + self.reset_state(ln) + + def process_proto_type(self, ln, line): + """Ancillary routine to process a type""" + + # Strip C99-style comments and surrounding whitespace + line = KernRe(r"//.*$", re.S).sub('', line).strip() + if not line: + return # nothing to see here + + # To distinguish preprocessor directive from regular declaration later. 
+ if line.startswith('#'): + line += ";" + # + # Split the declaration on any of { } or ;, and accumulate pieces + # until we hit a semicolon while not inside {brackets} + # + r = KernRe(r'(.*?)([{};])') + for chunk in r.split(line): + if chunk: # Ignore empty matches + self.entry.prototype += chunk + # + # This cries out for a match statement ... someday after we can + # drop Python 3.9 ... + # + if chunk == '{': + self.entry.brcount += 1 + elif chunk == '}': + self.entry.brcount -= 1 + elif chunk == ';' and self.entry.brcount <= 0: + self.dump_declaration(ln, self.entry.prototype) + self.reset_state(ln) + return + # + # We hit the end of the line while still in the declaration; put + # in a space to represent the newline. + # + self.entry.prototype += ' ' + + def process_proto(self, ln, line): + """STATE_PROTO: reading a function/whatever prototype.""" + + if doc_inline_oneline.search(line): + self.entry.begin_section(ln, doc_inline_oneline.group(1)) + self.entry.add_text(doc_inline_oneline.group(2)) + self.dump_section() + + elif doc_inline_start.search(line): + self.state = state.INLINE_NAME + + elif self.entry.decl_type == 'function': + self.process_proto_function(ln, line) + + else: + self.process_proto_type(ln, line) + + def process_docblock(self, ln, line): + """STATE_DOCBLOCK: within a DOC: block.""" + + if doc_end.search(line): + self.dump_section() + self.output_declaration("doc", self.entry.identifier) + self.reset_state(ln) + + elif doc_content.search(line): + self.entry.add_text(doc_content.group(1)) + + def parse_export(self): + """ + Parses EXPORT_SYMBOL* macros from a single Kernel source file. + """ + + export_table = set() + + try: + with open(self.fname, "r", encoding="utf8", + errors="backslashreplace") as fp: + + for line in fp: + self.process_export(export_table, line) + + except IOError: + return None + + return export_table + + # + # The state/action table telling us which function to invoke in + # each state. 
+ # + state_actions = { + state.NORMAL: process_normal, + state.NAME: process_name, + state.BODY: process_body, + state.DECLARATION: process_decl, + state.SPECIAL_SECTION: process_special, + state.INLINE_NAME: process_inline_name, + state.INLINE_TEXT: process_inline_text, + state.PROTO: process_proto, + state.DOCBLOCK: process_docblock, + } + + def parse_kdoc(self): + """ + Open and process each line of a C source file. + The parsing is controlled via a state machine, and the line is passed + to a different process function depending on the state. The process + function may update the state as needed. + + Besides parsing kernel-doc tags, it also parses export symbols. + """ + + prev = "" + prev_ln = None + export_table = set() + + try: + with open(self.fname, "r", encoding="utf8", + errors="backslashreplace") as fp: + for ln, line in enumerate(fp): + + line = line.expandtabs().strip("\n") + + # Group continuation lines on prototypes + if self.state == state.PROTO: + if line.endswith("\\"): + prev += line.rstrip("\\") + if not prev_ln: + prev_ln = ln + continue + + if prev: + ln = prev_ln + line = prev + line + prev = "" + prev_ln = None + + self.config.log.debug("%d %s: %s", + ln, state.name[self.state], + line) + + # This is an optimization over the original script. + # There, when export_file was used for the same file, + # it was read twice. Here, we use the already-existing + # loop to parse exported symbols as well. 
+ # + if (self.state != state.NORMAL) or \ + not self.process_export(export_table, line): + # Hand this line to the appropriate state handler + self.state_actions[self.state](self, ln, line) + + except OSError: + self.config.log.error(f"Error: Cannot open file {self.fname}") + + return export_table, self.entries diff --git a/scripts/lib/kdoc/kdoc_re.py b/scripts/lib/kdoc/kdoc_re.py new file mode 100644 index 0000000000000..612223e1e7238 --- /dev/null +++ b/scripts/lib/kdoc/kdoc_re.py @@ -0,0 +1,270 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0 +# Copyright(c) 2025: Mauro Carvalho Chehab . + +""" +Regular expression ancillary classes. + +Those help caching regular expressions and do matching for kernel-doc. +""" + +import re + +# Local cache for regular expressions +re_cache = {} + + +class KernRe: + """ + Helper class to simplify regex declaration and usage, + + It calls re.compile for a given pattern. It also allows adding + regular expressions and define sub at class init time. + + Regular expressions can be cached via an argument, helping to speedup + searches. + """ + + def _add_regex(self, string, flags): + """ + Adds a new regex or re-use it from the cache. + """ + self.regex = re_cache.get(string, None) + if not self.regex: + self.regex = re.compile(string, flags=flags) + if self.cache: + re_cache[string] = self.regex + + def __init__(self, string, cache=True, flags=0): + """ + Compile a regular expression and initialize internal vars. + """ + + self.cache = cache + self.last_match = None + + self._add_regex(string, flags) + + def __str__(self): + """ + Return the regular expression pattern. + """ + return self.regex.pattern + + def __add__(self, other): + """ + Allows adding two regular expressions into one. 
+ """ + + return KernRe(str(self) + str(other), cache=self.cache or other.cache, + flags=self.regex.flags | other.regex.flags) + + def match(self, string): + """ + Handles a re.match storing its results + """ + + self.last_match = self.regex.match(string) + return self.last_match + + def search(self, string): + """ + Handles a re.search storing its results + """ + + self.last_match = self.regex.search(string) + return self.last_match + + def findall(self, string): + """ + Alias to re.findall + """ + + return self.regex.findall(string) + + def split(self, string): + """ + Alias to re.split + """ + + return self.regex.split(string) + + def sub(self, sub, string, count=0): + """ + Alias to re.sub + """ + + return self.regex.sub(sub, string, count=count) + + def group(self, num): + """ + Returns the group results of the last match + """ + + return self.last_match.group(num) + + +class NestedMatch: + """ + Finding nested delimiters is hard with regular expressions. It is + even harder on Python with its normal re module, as there are several + advanced regular expressions that are missing. + + This is the case of this pattern: + + '\\bSTRUCT_GROUP(\\(((?:(?>[^)(]+)|(?1))*)\\))[^;]*;' + + which is used to properly match open/close parenthesis of the + string search STRUCT_GROUP(), + + Add a class that counts pairs of delimiters, using it to match and + replace nested expressions. + + The original approach was suggested by: + https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex + + Although I re-implemented it to make it more generic and match 3 types + of delimiters. The logic checks if delimiters are paired. If not, it + will ignore the search string. 
+ """ + + # TODO: make NestedMatch handle multiple match groups + # + # Right now, regular expressions to match it are defined only up to + # the start delimiter, e.g.: + # + # \bSTRUCT_GROUP\( + # + # is similar to: STRUCT_GROUP\((.*)\) + # except that the content inside the match group is delimiter's aligned. + # + # The content inside parenthesis are converted into a single replace + # group (e.g. r`\1'). + # + # It would be nice to change such definition to support multiple + # match groups, allowing a regex equivalent to. + # + # FOO\((.*), (.*), (.*)\) + # + # it is probably easier to define it not as a regular expression, but + # with some lexical definition like: + # + # FOO(arg1, arg2, arg3) + + DELIMITER_PAIRS = { + '{': '}', + '(': ')', + '[': ']', + } + + RE_DELIM = re.compile(r'[\{\}\[\]\(\)]') + + def _search(self, regex, line): + """ + Finds paired blocks for a regex that ends with a delimiter. + + The suggestion of using finditer to match pairs came from: + https://stackoverflow.com/questions/5454322/python-how-to-match-nested-parentheses-with-regex + but I ended using a different implementation to align all three types + of delimiters and seek for an initial regular expression. + + The algorithm seeks for open/close paired delimiters and place them + into a stack, yielding a start/stop position of each match when the + stack is zeroed. + + The algorithm shoud work fine for properly paired lines, but will + silently ignore end delimiters that preceeds an start delimiter. + This should be OK for kernel-doc parser, as unaligned delimiters + would cause compilation errors. So, we don't need to rise exceptions + to cover such issues. 
+ """ + + stack = [] + + for match_re in regex.finditer(line): + start = match_re.start() + offset = match_re.end() + + d = line[offset - 1] + if d not in self.DELIMITER_PAIRS: + continue + + end = self.DELIMITER_PAIRS[d] + stack.append(end) + + for match in self.RE_DELIM.finditer(line[offset:]): + pos = match.start() + offset + + d = line[pos] + + if d in self.DELIMITER_PAIRS: + end = self.DELIMITER_PAIRS[d] + + stack.append(end) + continue + + # Does the end delimiter match what it is expected? + if stack and d == stack[-1]: + stack.pop() + + if not stack: + yield start, offset, pos + 1 + break + + def search(self, regex, line): + """ + This is similar to re.search: + + It matches a regex that it is followed by a delimiter, + returning occurrences only if all delimiters are paired. + """ + + for t in self._search(regex, line): + + yield line[t[0]:t[2]] + + def sub(self, regex, sub, line, count=0): + """ + This is similar to re.sub: + + It matches a regex that it is followed by a delimiter, + replacing occurrences only if all delimiters are paired. + + if r'\1' is used, it works just like re: it places there the + matched paired data with the delimiter stripped. + + If count is different than zero, it will replace at most count + items. 
+ """ + out = "" + + cur_pos = 0 + n = 0 + + for start, end, pos in self._search(regex, line): + out += line[cur_pos:start] + + # Value, ignoring start/end delimiters + value = line[end:pos - 1] + + # replaces \1 at the sub string, if \1 is used there + new_sub = sub + new_sub = new_sub.replace(r'\1', value) + + out += new_sub + + # Drop end ';' if any + if line[pos] == ';': + pos += 1 + + cur_pos = pos + n += 1 + + if count and count >= n: + break + + # Append the remaining string + l = len(line) + out += line[cur_pos:l] + + return out diff --git a/scripts/make-release b/scripts/make-release index 4509a9fabf500..bc1b43caa2548 100755 --- a/scripts/make-release +++ b/scripts/make-release @@ -40,7 +40,7 @@ fi # Only include wraps that are invoked with subproject() SUBPROJECTS="libvfio-user keycodemapdb berkeley-softfloat-3 - berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs bilge-0.2-rs + berkeley-testfloat-3 anyhow-1-rs arbitrary-int-1-rs attrs-0.2-rs bilge-0.2-rs bilge-impl-0.2-rs either-1-rs foreign-0.3-rs itertools-0.11-rs libc-0.2-rs proc-macro2-1-rs proc-macro-error-1-rs proc-macro-error-attr-1-rs quote-1-rs @@ -62,17 +62,15 @@ meson subprojects download $SUBPROJECTS (cd roms/skiboot && ./make_version.sh > .version) # Fetch edk2 submodule's submodules, since it won't have access to them via # the tarball later. -# -# A more uniform way to handle this sort of situation would be nice, but we -# don't necessarily have much control over how a submodule handles its -# submodule dependencies, so we continue to handle these on a case-by-case -# basis for now. -(cd roms/edk2 && \ - git submodule update --init --depth 1 -- \ - ArmPkg/Library/ArmSoftFloatLib/berkeley-softfloat-3 \ - BaseTools/Source/C/BrotliCompress/brotli \ - CryptoPkg/Library/OpensslLib/openssl \ - MdeModulePkg/Library/BrotliCustomDecompressLib/brotli) + +# As recommended by the EDK2 readme, we don't use --recursive here. 
+# EDK2 won't use any code or feature from a submodule of a submodule, +# so we don't need to add them to the tarball. +# Although we don't necessarily need all of the submodules that EDK2 +# has, we clone them all, to avoid running into problems where EDK2 +# adds a new submodule or changes its use of an existing one and +# the sources we ship in the tarball then fail to build. +(cd roms/edk2 && git submodule update --init --depth 1) popd exclude=(--exclude=.git) diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh index 0ebe6bc52a6b9..3d0d13234461e 100644 --- a/scripts/meson-buildoptions.sh +++ b/scripts/meson-buildoptions.sh @@ -58,6 +58,7 @@ meson_options_help() { printf "%s\n" ' --enable-ubsan enable undefined behaviour sanitizer' printf "%s\n" ' --firmwarepath=VALUES search PATH for firmware files [share/qemu-' printf "%s\n" ' firmware]' + printf "%s\n" ' --gdb=VALUE Path to GDB' printf "%s\n" ' --iasl=VALUE Path to ACPI disassembler' printf "%s\n" ' --includedir=VALUE Header file directory [include]' printf "%s\n" ' --interp-prefix=VALUE where to find shared libraries etc., use %M for' @@ -154,6 +155,7 @@ meson_options_help() { printf "%s\n" ' membarrier membarrier system call (for Linux 4.14+ or Windows' printf "%s\n" ' modules modules support (non Windows)' printf "%s\n" ' mpath Multipath persistent reservation passthrough' + printf "%s\n" ' mshv MSHV acceleration support' printf "%s\n" ' multiprocess Out of process device emulation support' printf "%s\n" ' netmap netmap network backend support' printf "%s\n" ' nettle nettle cryptography support' @@ -323,6 +325,7 @@ _meson_option_parse() { --disable-fuzzing) printf "%s" -Dfuzzing=false ;; --enable-gcrypt) printf "%s" -Dgcrypt=enabled ;; --disable-gcrypt) printf "%s" -Dgcrypt=disabled ;; + --gdb=*) quote_sh "-Dgdb=$2" ;; --enable-gettext) printf "%s" -Dgettext=enabled ;; --disable-gettext) printf "%s" -Dgettext=disabled ;; --enable-gio) printf "%s" -Dgio=enabled ;; @@ -408,6 +411,8 @@ 
_meson_option_parse() { --disable-modules) printf "%s" -Dmodules=disabled ;; --enable-mpath) printf "%s" -Dmpath=enabled ;; --disable-mpath) printf "%s" -Dmpath=disabled ;; + --enable-mshv) printf "%s" -Dmshv=enabled ;; + --disable-mshv) printf "%s" -Dmshv=disabled ;; --enable-multiprocess) printf "%s" -Dmultiprocess=enabled ;; --disable-multiprocess) printf "%s" -Dmultiprocess=disabled ;; --enable-netmap) printf "%s" -Dnetmap=enabled ;; diff --git a/scripts/minikconf.py b/scripts/minikconf.py index 6f7f43b291816..4de5aeed11a22 100644 --- a/scripts/minikconf.py +++ b/scripts/minikconf.py @@ -340,7 +340,7 @@ class KconfigParser: @classmethod def parse(self, fp, mode=None): - data = KconfigData(mode or KconfigParser.defconfig) + data = KconfigData(mode or defconfig) parser = KconfigParser(data) parser.parse_file(fp) return data @@ -363,7 +363,9 @@ def parse_file(self, fp): def do_assignment(self, var, val): if not var.startswith("CONFIG_"): - raise Error('assigned variable should start with CONFIG_') + raise KconfigParserError( + self, "assigned variable should start with CONFIG_" + ) var = self.data.do_var(var[7:]) self.data.do_assignment(var, val) diff --git a/scripts/modinfo-collect.py b/scripts/modinfo-collect.py index 48bd92bd61808..6ebaea989db64 100644 --- a/scripts/modinfo-collect.py +++ b/scripts/modinfo-collect.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- import os import sys diff --git a/scripts/modinfo-generate.py b/scripts/modinfo-generate.py index b1538fcced77e..aaf23544c4622 100644 --- a/scripts/modinfo-generate.py +++ b/scripts/modinfo-generate.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- import os import sys diff --git a/scripts/oss-fuzz/minimize_qtest_trace.py b/scripts/oss-fuzz/minimize_qtest_trace.py index d1f3990c16a7c..414a6d91dd816 100755 --- a/scripts/oss-fuzz/minimize_qtest_trace.py +++ b/scripts/oss-fuzz/minimize_qtest_trace.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ This 
takes a crashing qtest trace and tries to remove superfluous operations diff --git a/scripts/oss-fuzz/output_reproducer.py b/scripts/oss-fuzz/output_reproducer.py index e8ef76b341381..0df96cf95878e 100755 --- a/scripts/oss-fuzz/output_reproducer.py +++ b/scripts/oss-fuzz/output_reproducer.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Convert plain qtest traces to C or Bash reproducers diff --git a/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py b/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py index b154a25508f7e..8af0d5d9c4e04 100755 --- a/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py +++ b/scripts/oss-fuzz/reorder_fuzzer_qtest_trace.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- """ Use this to convert qtest log info from a generic fuzzer input into a qtest diff --git a/scripts/probe-gdb-support.py b/scripts/probe-gdb-support.py index 6bcadce15007f..43c7030287385 100644 --- a/scripts/probe-gdb-support.py +++ b/scripts/probe-gdb-support.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# coding: utf-8 # # Probe gdb for supported architectures. # diff --git a/scripts/qapi/error.py b/scripts/qapi/error.py index e35e4ddb26a01..f73bc553db6fd 100644 --- a/scripts/qapi/error.py +++ b/scripts/qapi/error.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # Copyright (c) 2017-2019 Red Hat Inc. # # Authors: diff --git a/scripts/qapi/expr.py b/scripts/qapi/expr.py index cae0a083591ab..f40b247f8b61b 100644 --- a/scripts/qapi/expr.py +++ b/scripts/qapi/expr.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # Copyright IBM, Corp. 2011 # Copyright (c) 2013-2021 Red Hat Inc. # diff --git a/scripts/qapi/gen.py b/scripts/qapi/gen.py index d3c56d45c8925..0c9b8db3b0241 100644 --- a/scripts/qapi/gen.py +++ b/scripts/qapi/gen.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # QAPI code generation # # Copyright (c) 2015-2019 Red Hat Inc. 
diff --git a/scripts/qapi/parser.py b/scripts/qapi/parser.py index 2529edf81aa8e..9fbf80a541045 100644 --- a/scripts/qapi/parser.py +++ b/scripts/qapi/parser.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # QAPI schema parser # # Copyright IBM, Corp. 2011 diff --git a/scripts/qapi/schema.py b/scripts/qapi/schema.py index 3abddea35257e..8d88b40de2e19 100644 --- a/scripts/qapi/schema.py +++ b/scripts/qapi/schema.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # QAPI schema internal representation # # Copyright (c) 2015-2019 Red Hat Inc. diff --git a/scripts/qemu-plugin-symbols.py b/scripts/qemu-plugin-symbols.py index e285ebb8f9ed3..69644979c19b2 100755 --- a/scripts/qemu-plugin-symbols.py +++ b/scripts/qemu-plugin-symbols.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # # Extract QEMU Plugin API symbols from a header file # diff --git a/scripts/qemugdb/tcg.py b/scripts/qemugdb/tcg.py index 16c03c06a9437..22529c72775cd 100644 --- a/scripts/qemugdb/tcg.py +++ b/scripts/qemugdb/tcg.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- -# # GDB debugging support, TCG status # # Copyright 2016 Linaro Ltd diff --git a/scripts/qemugdb/timers.py b/scripts/qemugdb/timers.py index 46537b27cf058..5714f92cc211e 100644 --- a/scripts/qemugdb/timers.py +++ b/scripts/qemugdb/timers.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # GDB debugging support # # Copyright 2017 Linaro Ltd diff --git a/scripts/qmp_helper.py b/scripts/qmp_helper.py new file mode 100755 index 0000000000000..c1e7e0fd80ce3 --- /dev/null +++ b/scripts/qmp_helper.py @@ -0,0 +1,703 @@ +#!/usr/bin/env python3 +# +# pylint: disable=C0103,E0213,E1135,E1136,E1137,R0902,R0903,R0912,R0913,R0917 +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Copyright (C) 2024-2025 Mauro Carvalho Chehab + +""" +Helper classes to be used by ghes_inject command classes. 
+""" + +import json +import sys + +from datetime import datetime +from os import path as os_path + +try: + qemu_dir = os_path.abspath(os_path.dirname(os_path.dirname(__file__))) + sys.path.append(os_path.join(qemu_dir, 'python')) + + from qemu.qmp.legacy import QEMUMonitorProtocol + +except ModuleNotFoundError as exc: + print(f"Module '{exc.name}' not found.") + print("Try export PYTHONPATH=top-qemu-dir/python or run from top-qemu-dir") + sys.exit(1) + +from base64 import b64encode + +class util: + """ + Ancillary functions to deal with bitmaps, parse arguments, + generate GUID and encode data on a bytearray buffer. + """ + + # + # Helper routines to handle multiple choice arguments + # + def get_choice(name, value, choices, suffixes=None, bitmask=True): + """Produce a list from multiple choice argument""" + + new_values = 0 + + if not value: + return new_values + + for val in value.split(","): + val = val.lower() + + if suffixes: + for suffix in suffixes: + val = val.removesuffix(suffix) + + if val not in choices.keys(): + if suffixes: + for suffix in suffixes: + if val + suffix in choices.keys(): + val += suffix + break + + if val not in choices.keys(): + sys.exit(f"Error on '{name}': choice '{val}' is invalid.") + + val = choices[val] + + if bitmask: + new_values |= val + else: + if new_values: + sys.exit(f"Error on '{name}': only one value is accepted.") + + new_values = val + + return new_values + + def get_array(name, values, max_val=None): + """Add numbered hashes from integer lists into an array""" + + array = [] + + for value in values: + for val in value.split(","): + try: + val = int(val, 0) + except ValueError: + sys.exit(f"Error on '{name}': {val} is not an integer") + + if val < 0: + sys.exit(f"Error on '{name}': {val} is not unsigned") + + if max_val and val > max_val: + sys.exit(f"Error on '{name}': {val} is too little") + + array.append(val) + + return array + + def get_mult_array(mult, name, values, allow_zero=False, max_val=None): + """Add 
numbered hashes from integer lists""" + + if not allow_zero: + if not values: + return + else: + if values is None: + return + + if not values: + i = 0 + if i not in mult: + mult[i] = {} + + mult[i][name] = [] + return + + i = 0 + for value in values: + for val in value.split(","): + try: + val = int(val, 0) + except ValueError: + sys.exit(f"Error on '{name}': {val} is not an integer") + + if val < 0: + sys.exit(f"Error on '{name}': {val} is not unsigned") + + if max_val and val > max_val: + sys.exit(f"Error on '{name}': {val} is too little") + + if i not in mult: + mult[i] = {} + + if name not in mult[i]: + mult[i][name] = [] + + mult[i][name].append(val) + + i += 1 + + + def get_mult_choices(mult, name, values, choices, + suffixes=None, allow_zero=False): + """Add numbered hashes from multiple choice arguments""" + + if not allow_zero: + if not values: + return + else: + if values is None: + return + + i = 0 + for val in values: + new_values = util.get_choice(name, val, choices, suffixes) + + if i not in mult: + mult[i] = {} + + mult[i][name] = new_values + i += 1 + + + def get_mult_int(mult, name, values, allow_zero=False): + """Add numbered hashes from integer arguments""" + if not allow_zero: + if not values: + return + else: + if values is None: + return + + i = 0 + for val in values: + try: + val = int(val, 0) + except ValueError: + sys.exit(f"Error on '{name}': {val} is not an integer") + + if val < 0: + sys.exit(f"Error on '{name}': {val} is not unsigned") + + if i not in mult: + mult[i] = {} + + mult[i][name] = val + i += 1 + + + # + # Data encode helper functions + # + def bit(b): + """Simple macro to define a bit on a bitmask""" + return 1 << b + + + def data_add(data, value, num_bytes): + """Adds bytes from value inside a bitarray""" + + data.extend(value.to_bytes(num_bytes, byteorder="little")) # pylint: disable=E1101 + + def dump_bytearray(name, data): + """Does an hexdump of a byte array, grouping in bytes""" + + print(f"{name} ({len(data)} 
bytes):") + + for ln_start in range(0, len(data), 16): + ln_end = min(ln_start + 16, len(data)) + print(f" {ln_start:08x} ", end="") + for i in range(ln_start, ln_end): + print(f"{data[i]:02x} ", end="") + for i in range(ln_end, ln_start + 16): + print(" ", end="") + print(" ", end="") + for i in range(ln_start, ln_end): + if data[i] >= 32 and data[i] < 127: + print(chr(data[i]), end="") + else: + print(".", end="") + + print() + print() + + def time(string): + """Handle BCD timestamps used on Generic Error Data Block""" + + time = None + + # Formats to be used when parsing time stamps + formats = [ + "%Y-%m-%d %H:%M:%S", + ] + + if string == "now": + time = datetime.now() + + if time is None: + for fmt in formats: + try: + time = datetime.strptime(string, fmt) + break + except ValueError: + pass + + if time is None: + raise ValueError("Invalid time format") + + return time + +class guid: + """ + Simple class to handle GUID fields. + """ + + def __init__(self, time_low, time_mid, time_high, nodes): + """Initialize a GUID value""" + + assert len(nodes) == 8 + + self.time_low = time_low + self.time_mid = time_mid + self.time_high = time_high + self.nodes = nodes + + @classmethod + def UUID(cls, guid_str): + """Initialize a GUID using a string on its standard format""" + + if len(guid_str) != 36: + print("Size not 36") + raise ValueError('Invalid GUID size') + + # It is easier to parse without separators. 
So, drop them + guid_str = guid_str.replace('-', '') + + if len(guid_str) != 32: + print("Size not 32", guid_str, len(guid_str)) + raise ValueError('Invalid GUID hex size') + + time_low = 0 + time_mid = 0 + time_high = 0 + nodes = [] + + for i in reversed(range(16, 32, 2)): + h = guid_str[i:i + 2] + value = int(h, 16) + nodes.insert(0, value) + + time_high = int(guid_str[12:16], 16) + time_mid = int(guid_str[8:12], 16) + time_low = int(guid_str[0:8], 16) + + return cls(time_low, time_mid, time_high, nodes) + + def __str__(self): + """Output a GUID value on its default string representation""" + + clock = self.nodes[0] << 8 | self.nodes[1] + + node = 0 + for i in range(2, len(self.nodes)): + node = node << 8 | self.nodes[i] + + s = f"{self.time_low:08x}-{self.time_mid:04x}-" + s += f"{self.time_high:04x}-{clock:04x}-{node:012x}" + return s + + def to_bytes(self): + """Output a GUID value in bytes""" + + data = bytearray() + + util.data_add(data, self.time_low, 4) + util.data_add(data, self.time_mid, 2) + util.data_add(data, self.time_high, 2) + data.extend(bytearray(self.nodes)) + + return data + +class qmp: + """ + Opens a connection and send/receive QMP commands. + """ + + def send_cmd(self, command, args=None, may_open=False, return_error=True): + """Send a command to QMP, optinally opening a connection""" + + if may_open: + self._connect() + elif not self.connected: + return False + + msg = { 'execute': command } + if args: + msg['arguments'] = args + + try: + obj = self.qmp_monitor.cmd_obj(msg) + # Can we use some other exception class here? 
+ except Exception as e: # pylint: disable=W0718 + print(f"Command: {command}") + print(f"Failed to inject error: {e}.") + return None + + if "return" in obj: + if isinstance(obj.get("return"), dict): + if obj["return"]: + return obj["return"] + return "OK" + + return obj["return"] + + if isinstance(obj.get("error"), dict): + error = obj["error"] + if return_error: + print(f"Command: {msg}") + print(f'{error["class"]}: {error["desc"]}') + else: + print(json.dumps(obj)) + + return None + + def _close(self): + """Shutdown and close the socket, if opened""" + if not self.connected: + return + + self.qmp_monitor.close() + self.connected = False + + def _connect(self): + """Connect to a QMP TCP/IP port, if not connected yet""" + + if self.connected: + return True + + try: + self.qmp_monitor.connect(negotiate=True) + except ConnectionError: + sys.exit(f"Can't connect to QMP host {self.host}:{self.port}") + + self.connected = True + + return True + + BLOCK_STATUS_BITS = { + "uncorrectable": util.bit(0), + "correctable": util.bit(1), + "multi-uncorrectable": util.bit(2), + "multi-correctable": util.bit(3), + } + + ERROR_SEVERITY = { + "recoverable": 0, + "fatal": 1, + "corrected": 2, + "none": 3, + } + + VALIDATION_BITS = { + "fru-id": util.bit(0), + "fru-text": util.bit(1), + "timestamp": util.bit(2), + } + + GEDB_FLAGS_BITS = { + "recovered": util.bit(0), + "prev-error": util.bit(1), + "simulated": util.bit(2), + } + + GENERIC_DATA_SIZE = 72 + + def argparse(parser): + """Prepare a parser group to query generic error data""" + + block_status_bits = ",".join(qmp.BLOCK_STATUS_BITS.keys()) + error_severity_enum = ",".join(qmp.ERROR_SEVERITY.keys()) + validation_bits = ",".join(qmp.VALIDATION_BITS.keys()) + gedb_flags_bits = ",".join(qmp.GEDB_FLAGS_BITS.keys()) + + g_gen = parser.add_argument_group("Generic Error Data") # pylint: disable=E1101 + g_gen.add_argument("--block-status", + help=f"block status bits: {block_status_bits}") + g_gen.add_argument("--raw-data", 
nargs="+", + help="Raw data inside the Error Status Block") + g_gen.add_argument("--error-severity", "--severity", + help=f"error severity: {error_severity_enum}") + g_gen.add_argument("--gen-err-valid-bits", + "--generic-error-validation-bits", + help=f"validation bits: {validation_bits}") + g_gen.add_argument("--fru-id", type=guid.UUID, + help="GUID representing a physical device") + g_gen.add_argument("--fru-text", + help="ASCII string identifying the FRU hardware") + g_gen.add_argument("--timestamp", type=util.time, + help="Time when the error info was collected") + g_gen.add_argument("--precise", "--precise-timestamp", + action='store_true', + help="Marks the timestamp as precise if --timestamp is used") + g_gen.add_argument("--gedb-flags", + help=f"General Error Data Block flags: {gedb_flags_bits}") + + def set_args(self, args): + """Set the arguments optionally defined via self.argparse()""" + + if args.block_status: + self.block_status = util.get_choice(name="block-status", + value=args.block_status, + choices=self.BLOCK_STATUS_BITS, + bitmask=False) + if args.raw_data: + self.raw_data = util.get_array("raw-data", args.raw_data, + max_val=255) + print(self.raw_data) + + if args.error_severity: + self.error_severity = util.get_choice(name="error-severity", + value=args.error_severity, + choices=self.ERROR_SEVERITY, + bitmask=False) + + if args.fru_id: + self.fru_id = args.fru_id.to_bytes() + if not args.gen_err_valid_bits: + self.validation_bits |= self.VALIDATION_BITS["fru-id"] + + if args.fru_text: + text = bytearray(args.fru_text.encode('ascii')) + if len(text) > 20: + sys.exit("FRU text is too big to fit") + + self.fru_text = text + if not args.gen_err_valid_bits: + self.validation_bits |= self.VALIDATION_BITS["fru-text"] + + if args.timestamp: + time = args.timestamp + century = int(time.year / 100) + + bcd = bytearray() + util.data_add(bcd, (time.second // 10) << 4 | (time.second % 10), 1) + util.data_add(bcd, (time.minute // 10) << 4 | (time.minute % 
10), 1) + util.data_add(bcd, (time.hour // 10) << 4 | (time.hour % 10), 1) + + if args.precise: + util.data_add(bcd, 1, 1) + else: + util.data_add(bcd, 0, 1) + + util.data_add(bcd, (time.day // 10) << 4 | (time.day % 10), 1) + util.data_add(bcd, (time.month // 10) << 4 | (time.month % 10), 1) + util.data_add(bcd, + ((time.year % 100) // 10) << 4 | (time.year % 10), 1) + util.data_add(bcd, ((century % 100) // 10) << 4 | (century % 10), 1) + + self.timestamp = bcd + if not args.gen_err_valid_bits: + self.validation_bits |= self.VALIDATION_BITS["timestamp"] + + if args.gen_err_valid_bits: + self.validation_bits = util.get_choice(name="validation", + value=args.gen_err_valid_bits, + choices=self.VALIDATION_BITS) + + def __init__(self, host, port, debug=False): + """Initialize variables used by the QMP send logic""" + + self.connected = False + self.host = host + self.port = port + self.debug = debug + + # ACPI 6.1: 18.3.2.7.1 Generic Error Data: Generic Error Status Block + self.block_status = self.BLOCK_STATUS_BITS["uncorrectable"] + self.raw_data = [] + self.error_severity = self.ERROR_SEVERITY["recoverable"] + + # ACPI 6.1: 18.3.2.7.1 Generic Error Data: Generic Error Data Entry + self.validation_bits = 0 + self.flags = 0 + self.fru_id = bytearray(16) + self.fru_text = bytearray(20) + self.timestamp = bytearray(8) + + self.qmp_monitor = QEMUMonitorProtocol(address=(self.host, self.port)) + + # + # Socket QMP send command + # + def send_cper_raw(self, cper_data): + """Send a raw CPER data to QEMU though QMP TCP socket""" + + data = b64encode(bytes(cper_data)).decode('ascii') + + cmd_arg = { + 'cper': data + } + + self._connect() + + if self.send_cmd("inject-ghes-v2-error", cmd_arg): + print("Error injected.") + + def send_cper(self, notif_type, payload): + """Send commands to QEMU though QMP TCP socket""" + + # Fill CPER record header + + # NOTE: bits 4 to 13 of block status contain the number of + # data entries in the data section. This is currently unsupported. 
+ + cper_length = len(payload) + data_length = cper_length + len(self.raw_data) + self.GENERIC_DATA_SIZE + + # Generic Error Data Entry + gede = bytearray() + + gede.extend(notif_type.to_bytes()) + util.data_add(gede, self.error_severity, 4) + util.data_add(gede, 0x300, 2) + util.data_add(gede, self.validation_bits, 1) + util.data_add(gede, self.flags, 1) + util.data_add(gede, cper_length, 4) + gede.extend(self.fru_id) + gede.extend(self.fru_text) + gede.extend(self.timestamp) + + # Generic Error Status Block + gebs = bytearray() + + if self.raw_data: + raw_data_offset = len(gebs) + else: + raw_data_offset = 0 + + util.data_add(gebs, self.block_status, 4) + util.data_add(gebs, raw_data_offset, 4) + util.data_add(gebs, len(self.raw_data), 4) + util.data_add(gebs, data_length, 4) + util.data_add(gebs, self.error_severity, 4) + + cper_data = bytearray() + cper_data.extend(gebs) + cper_data.extend(gede) + cper_data.extend(bytearray(self.raw_data)) + cper_data.extend(bytearray(payload)) + + if self.debug: + print(f"GUID: {notif_type}") + + util.dump_bytearray("Generic Error Status Block", gebs) + util.dump_bytearray("Generic Error Data Entry", gede) + + if self.raw_data: + util.dump_bytearray("Raw data", bytearray(self.raw_data)) + + util.dump_bytearray("Payload", payload) + + self.send_cper_raw(cper_data) + + + def search_qom(self, path, prop, regex): + """ + Return a list of devices that match path array like: + + /machine/unattached/device + /machine/peripheral-anon/device + ... 
+ """ + + found = [] + + i = 0 + while 1: + dev = f"{path}[{i}]" + args = { + 'path': dev, + 'property': prop + } + ret = self.send_cmd("qom-get", args, may_open=True, + return_error=False) + if not ret: + break + + if isinstance(ret, str): + if regex.search(ret): + found.append(dev) + + i += 1 + if i > 10000: + print("Too many objects returned by qom-get!") + break + + return found + +class cper_guid: + """ + Contains CPER GUID, as per: + https://uefi.org/specs/UEFI/2.10/Apx_N_Common_Platform_Error_Record.html + """ + + CPER_PROC_GENERIC = guid(0x9876CCAD, 0x47B4, 0x4bdb, + [0xB6, 0x5E, 0x16, 0xF1, + 0x93, 0xC4, 0xF3, 0xDB]) + + CPER_PROC_X86 = guid(0xDC3EA0B0, 0xA144, 0x4797, + [0xB9, 0x5B, 0x53, 0xFA, + 0x24, 0x2B, 0x6E, 0x1D]) + + CPER_PROC_ITANIUM = guid(0xe429faf1, 0x3cb7, 0x11d4, + [0xbc, 0xa7, 0x00, 0x80, + 0xc7, 0x3c, 0x88, 0x81]) + + CPER_PROC_ARM = guid(0xE19E3D16, 0xBC11, 0x11E4, + [0x9C, 0xAA, 0xC2, 0x05, + 0x1D, 0x5D, 0x46, 0xB0]) + + CPER_PLATFORM_MEM = guid(0xA5BC1114, 0x6F64, 0x4EDE, + [0xB8, 0x63, 0x3E, 0x83, + 0xED, 0x7C, 0x83, 0xB1]) + + CPER_PLATFORM_MEM2 = guid(0x61EC04FC, 0x48E6, 0xD813, + [0x25, 0xC9, 0x8D, 0xAA, + 0x44, 0x75, 0x0B, 0x12]) + + CPER_PCIE = guid(0xD995E954, 0xBBC1, 0x430F, + [0xAD, 0x91, 0xB4, 0x4D, + 0xCB, 0x3C, 0x6F, 0x35]) + + CPER_PCI_BUS = guid(0xC5753963, 0x3B84, 0x4095, + [0xBF, 0x78, 0xED, 0xDA, + 0xD3, 0xF9, 0xC9, 0xDD]) + + CPER_PCI_DEV = guid(0xEB5E4685, 0xCA66, 0x4769, + [0xB6, 0xA2, 0x26, 0x06, + 0x8B, 0x00, 0x13, 0x26]) + + CPER_FW_ERROR = guid(0x81212A96, 0x09ED, 0x4996, + [0x94, 0x71, 0x8D, 0x72, + 0x9C, 0x8E, 0x69, 0xED]) + + CPER_DMA_GENERIC = guid(0x5B51FEF7, 0xC79D, 0x4434, + [0x8F, 0x1B, 0xAA, 0x62, + 0xDE, 0x3E, 0x2C, 0x64]) + + CPER_DMA_VT = guid(0x71761D37, 0x32B2, 0x45cd, + [0xA7, 0xD0, 0xB0, 0xFE, + 0xDD, 0x93, 0xE8, 0xCF]) + + CPER_DMA_IOMMU = guid(0x036F84E1, 0x7F37, 0x428c, + [0xA7, 0x9E, 0x57, 0x5F, + 0xDF, 0xAA, 0x84, 0xEC]) + + CPER_CCIX_PER = guid(0x91335EF6, 0xEBFB, 0x4478, + [0xA6, 0xA6, 
0x88, 0xB7, + 0x28, 0xCF, 0x75, 0xD7]) + + CPER_CXL_PROT_ERR = guid(0x80B9EFB4, 0x52B5, 0x4DE3, + [0xA7, 0x77, 0x68, 0x78, + 0x4B, 0x77, 0x10, 0x48]) diff --git a/scripts/replay-dump.py b/scripts/replay-dump.py index 4ce7ff51cc7b3..097636570dd89 100755 --- a/scripts/replay-dump.py +++ b/scripts/replay-dump.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -# -*- coding: utf-8 -*- # # Dump the contents of a recorded execution stream # diff --git a/scripts/rust-to-clang-target-test.sh b/scripts/rust-to-clang-target-test.sh new file mode 100755 index 0000000000000..ff6f8fcdc56e9 --- /dev/null +++ b/scripts/rust-to-clang-target-test.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env sh +# +# Copyright (C) 2025 Red Hat, Inc. +# +# Based on rust_to_clang_target() tests from rust-bindgen. +# +# SPDX-License-Identifier: GPL-2.0-or-later + +scripts_dir=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd) +. "$scripts_dir/rust-to-clang-target.sh" + +test_case() { + input="$1" + expected="$2" + result=$(rust_to_clang_target "$input") + + if [ "$result" = "$expected" ]; then + echo " OK: '$input' -> '$result'" + else + echo " FAILED: '$input'" + echo " Expected: '$expected'" + echo " Got: '$result'" + exit 1 + fi +} + +echo "Running tests..." 
+ +test_case "aarch64-apple-ios" "arm64-apple-ios" +test_case "riscv64gc-unknown-linux-gnu" "riscv64-unknown-linux-gnu" +test_case "riscv64imac-unknown-none-elf" "riscv64-unknown-none-elf" +test_case "riscv32imc-unknown-none-elf" "riscv32-unknown-none-elf" +test_case "riscv32imac-unknown-none-elf" "riscv32-unknown-none-elf" +test_case "riscv32imafc-unknown-none-elf" "riscv32-unknown-none-elf" +test_case "riscv32i-unknown-none-elf" "riscv32-unknown-none-elf" +test_case "riscv32imc-esp-espidf" "riscv32-esp-elf" +test_case "xtensa-esp32-espidf" "xtensa-esp32-elf" +test_case "aarch64-apple-ios-sim" "arm64-apple-ios-simulator" +test_case "aarch64-apple-tvos-sim" "arm64-apple-tvos-simulator" +test_case "aarch64-apple-watchos-sim" "arm64-apple-watchos-simulator" + +echo "" +echo "All tests passed!" diff --git a/scripts/rust-to-clang-target.sh b/scripts/rust-to-clang-target.sh new file mode 100644 index 0000000000000..72db7e1300f7f --- /dev/null +++ b/scripts/rust-to-clang-target.sh @@ -0,0 +1,60 @@ +# Copyright (C) 2025 Red Hat, Inc. +# +# Based on rust_to_clang_target() from rust-bindgen. 
+# +# SPDX-License-Identifier: GPL-2.0-or-later + +rust_to_clang_target() { + rust_target="$1" + + # Split the string by hyphens + triple_parts="" + old_IFS="$IFS" + IFS='-' + for part in $rust_target; do + triple_parts="$triple_parts $part" + done + IFS="$old_IFS" + set -- $triple_parts + + # RISC-V + case "$1" in + riscv32*) + set -- "riscv32" "${2}" "${3}" "${4}" + ;; + riscv64*) + set -- "riscv64" "${2}" "${3}" "${4}" + ;; + esac + + # Apple + if [ "$2" = "apple" ]; then + if [ "$1" = "aarch64" ]; then + set -- "arm64" "${2}" "${3}" "${4}" + fi + if [ "$4" = "sim" ]; then + set -- "${1}" "${2}" "${3}" "simulator" + fi + fi + + # ESP-IDF + if [ "$3" = "espidf" ]; then + set -- "${1}" "${2}" "elf" "${4}" + fi + + # Reassemble the string + new_triple="" + first=1 + for part in "$@"; do + if [ -n "$part" ]; then + if [ "$first" -eq 1 ]; then + new_triple="$part" + first=0 + else + new_triple="$new_triple-$part" + fi + fi + done + + echo "$new_triple" +} diff --git a/scripts/rust/rust_root_crate.sh b/scripts/rust/rust_root_crate.sh index 975bddf7f1a4c..f05b8d021081f 100755 --- a/scripts/rust/rust_root_crate.sh +++ b/scripts/rust/rust_root_crate.sh @@ -4,7 +4,7 @@ set -eu cat < 1 and "int" in bits: + bits.remove("int") + bits.sort() + name = ' '.join(bits) + else: + if len(bits) > 1: + raise ValueError("Invalid type '%s'." % name) + name = bits[0] + + ty = C_TO_RUST_TYPE_MAP[name.strip()] + if ptr: + ty = f'*{"const" if const else "mut"} {ty}' + return ty + class Arguments: """Event arguments description.""" @@ -122,10 +239,6 @@ def __init__(self, args): else: self._args.append(arg) - def copy(self): - """Create a new copy.""" - return Arguments(list(self._args)) - @staticmethod def build(arg_str): """Build and Arguments instance from an argument string. 
@@ -170,10 +283,16 @@ def __len__(self): def __str__(self): """String suitable for declaring function arguments.""" + def onearg(t, n): + if t[-1] == '*': + return "".join([t, n]) + else: + return " ".join([t, n]) + if len(self._args) == 0: return "void" else: - return ", ".join([ " ".join([t, n]) for t,n in self._args ]) + return ", ".join([ onearg(t, n) for t,n in self._args ]) def __repr__(self): """Evaluable string representation for this object.""" @@ -191,6 +310,43 @@ def casted(self): """List of argument names casted to their type.""" return ["(%s)%s" % (type_, name) for type_, name in self._args] + def rust_decl_extern(self): + """Return a Rust argument list for an extern "C" function""" + return ", ".join((f"_{name}: {c_type_to_rust(type_)}" + for type_, name in self._args)) + + def rust_decl(self): + """Return a Rust argument list for a tracepoint function""" + def decl_type(type_): + if type_ == "const char *": + return "&std::ffi::CStr" + return c_type_to_rust(type_) + + return ", ".join((f"_{name}: {decl_type(type_)}" + for type_, name in self._args)) + + def rust_call_extern(self): + """Return a Rust argument list for a call to an extern "C" function""" + def rust_cast(name, type_): + if type_ == "const char *": + return f"_{name}.as_ptr()" + return f"_{name}" + + return ", ".join((rust_cast(name, type_) for type_, name in self._args)) + + def rust_call_varargs(self): + """Return a Rust argument list for a call to a C varargs function""" + def rust_cast(name, type_): + if type_ == "const char *": + return f"_{name}.as_ptr()" + + type_ = c_type_to_rust(type_) + if type_ in RUST_VARARGS_SMALL_TYPES: + return f"_{name} as std::ffi::c_int" + return f"_{name} /* as {type_} */" + + return ", ".join((rust_cast(name, type_) for type_, name in self._args)) + class Event(object): """Event description. @@ -216,13 +372,12 @@ class Event(object): r"(?P\w+)" r"\((?P[^)]*)\)" r"\s*" - r"(?:(?:(?P\".+),)?\s*(?P\".+))?" + r"(?P\".+)?" 
r"\s*") - _VALID_PROPS = set(["disable", "vcpu"]) + _VALID_PROPS = set(["disable"]) - def __init__(self, name, props, fmt, args, lineno, filename, orig=None, - event_trans=None, event_exec=None): + def __init__(self, name, props, fmt, args, lineno, filename): """ Parameters ---------- @@ -230,20 +385,14 @@ def __init__(self, name, props, fmt, args, lineno, filename, orig=None, Event name. props : list of str Property names. - fmt : str, list of str - Event printing format string(s). + fmt : str + Event printing format string. args : Arguments Event arguments. lineno : int The line number in the input file. filename : str The path to the input file. - orig : Event or None - Original Event before transformation/generation. - event_trans : Event or None - Generated translation-time event ("tcg" property). - event_exec : Event or None - Generated execution-time event ("tcg" property). """ self.name = name @@ -252,29 +401,16 @@ def __init__(self, name, props, fmt, args, lineno, filename, orig=None, self.args = args self.lineno = int(lineno) self.filename = str(filename) - self.event_trans = event_trans - self.event_exec = event_exec if len(args) > 10: raise ValueError("Event '%s' has more than maximum permitted " "argument count" % name) - if orig is None: - self.original = weakref.ref(self) - else: - self.original = orig - unknown_props = set(self.properties) - self._VALID_PROPS if len(unknown_props) > 0: raise ValueError("Unknown properties: %s" % ", ".join(unknown_props)) - assert isinstance(self.fmt, str) or len(self.fmt) == 2 - def copy(self): - """Create a new copy.""" - return Event(self.name, list(self.properties), self.fmt, - self.args.copy(), self.lineno, self.filename, - self, self.event_trans, self.event_exec) @staticmethod def build(line_str, lineno, filename): @@ -296,8 +432,7 @@ def build(line_str, lineno, filename): name = groups["name"] props = groups["props"].split() fmt = groups["fmt"] - fmt_trans = groups["fmt_trans"] - if fmt.find("%m") != -1 or 
fmt_trans.find("%m") != -1: + if fmt.find("%m") != -1: raise ValueError("Event format '%m' is forbidden, pass the error " "as an explicit trace argument") if fmt.endswith(r'\n"'): @@ -306,33 +441,25 @@ def build(line_str, lineno, filename): if '\\n' in fmt: raise ValueError("Event format must not use new line character") - if len(fmt_trans) > 0: - fmt = [fmt_trans, fmt] args = Arguments.build(groups["args"]) return Event(name, props, fmt, args, lineno, posix_relpath(filename)) def __repr__(self): """Evaluable string representation for this object.""" - if isinstance(self.fmt, str): - fmt = self.fmt - else: - fmt = "%s, %s" % (self.fmt[0], self.fmt[1]) return "Event('%s %s(%s) %s')" % (" ".join(self.properties), self.name, self.args, - fmt) + self.fmt) # Star matching on PRI is dangerous as one might have multiple # arguments with that format, hence the non-greedy version of it. _FMT = re.compile(r"(%[\d\.]*\w+|%.*?PRI\S+)") def formats(self): """List conversion specifiers in the argument print format string.""" - assert not isinstance(self.fmt, list) return self._FMT.findall(self.fmt) QEMU_TRACE = "trace_%(name)s" - QEMU_TRACE_NOCHECK = "_nocheck__" + QEMU_TRACE QEMU_TRACE_TCG = QEMU_TRACE + "_tcg" QEMU_DSTATE = "_TRACE_%(NAME)s_DSTATE" QEMU_BACKEND_DSTATE = "TRACE_%(NAME)s_BACKEND_DSTATE" diff --git a/scripts/tracetool/backend/__init__.py b/scripts/tracetool/backend/__init__.py index 7bfcc86cc5366..9109a783c72f6 100644 --- a/scripts/tracetool/backend/__init__.py +++ b/scripts/tracetool/backend/__init__.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Backend management. @@ -19,11 +19,15 @@ Backend attributes ------------------ -========= ==================================================================== -Attribute Description -========= ==================================================================== -PUBLIC If exists and is set to 'True', the backend is considered "public". 
-========= ==================================================================== +=========================== ==================================================== +Attribute Description +=========================== ==================================================== +PUBLIC If exists and is set to 'True', the backend is + considered "public". +CHECK_TRACE_EVENT_GET_STATE If exists and is set to 'True', the backend-specific + code inside the tracepoint is emitted within an + ``if trace_event_get_state()`` conditional. +=========================== ==================================================== Backend functions @@ -94,29 +98,40 @@ def exists(name): if name == "nop": return True name = name.replace("-", "_") - return tracetool.try_import("tracetool.backend." + name)[1] + return tracetool.try_import("tracetool.backend." + name)[0] class Wrapper: def __init__(self, backends, format): self._backends = [backend.replace("-", "_") for backend in backends] self._format = format.replace("-", "_") + self.check_trace_event_get_state = False for backend in self._backends: assert exists(backend) assert tracetool.format.exists(self._format) + for backend in self.backend_modules(): + check_trace_event_get_state = getattr(backend, "CHECK_TRACE_EVENT_GET_STATE", False) + self.check_trace_event_get_state = self.check_trace_event_get_state or check_trace_event_get_state - def _run_function(self, name, *args, **kwargs): + def backend_modules(self): for backend in self._backends: - func = tracetool.try_import("tracetool.backend." + backend, - name % self._format, None)[1] - if func is not None: - func(*args, **kwargs) + module = tracetool.try_import("tracetool.backend." 
+ backend)[1] + if module is not None: + yield module + + def _run_function(self, name, *args, check_trace_event_get_state=None, **kwargs): + for backend in self.backend_modules(): + func = getattr(backend, name % self._format, None) + if func is not None and \ + (check_trace_event_get_state is None or + check_trace_event_get_state == getattr(backend, 'CHECK_TRACE_EVENT_GET_STATE', False)): + func(*args, **kwargs) def generate_begin(self, events, group): self._run_function("generate_%s_begin", events, group) - def generate(self, event, group): - self._run_function("generate_%s", event, group) + def generate(self, event, group, check_trace_event_get_state=None): + self._run_function("generate_%s", event, group, check_trace_event_get_state=check_trace_event_get_state) def generate_backend_dstate(self, event, group): self._run_function("generate_%s_backend_dstate", event, group) diff --git a/scripts/tracetool/backend/dtrace.py b/scripts/tracetool/backend/dtrace.py index e17edc9b9d829..b4af403025c84 100644 --- a/scripts/tracetool/backend/dtrace.py +++ b/scripts/tracetool/backend/dtrace.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ DTrace/SystemTAP backend. diff --git a/scripts/tracetool/backend/ftrace.py b/scripts/tracetool/backend/ftrace.py index 5fa30ccc08e69..e03698a2edff8 100644 --- a/scripts/tracetool/backend/ftrace.py +++ b/scripts/tracetool/backend/ftrace.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Ftrace built-in backend. 
@@ -12,10 +12,11 @@ __email__ = "stefanha@redhat.com" -from tracetool import out +from tracetool import out, expand_format_string PUBLIC = True +CHECK_TRACE_EVENT_GET_STATE = True def generate_h_begin(events, group): @@ -28,22 +29,11 @@ def generate_h(event, group): if len(event.args) > 0: argnames = ", " + argnames - out(' {', - ' char ftrace_buf[MAX_TRACE_STRLEN];', - ' int unused __attribute__ ((unused));', - ' int trlen;', - ' if (trace_event_get_state(%(event_id)s)) {', - '#line %(event_lineno)d "%(event_filename)s"', - ' trlen = snprintf(ftrace_buf, MAX_TRACE_STRLEN,', - ' "%(name)s " %(fmt)s "\\n" %(argnames)s);', + out('#line %(event_lineno)d "%(event_filename)s"', + ' ftrace_write("%(name)s " %(fmt)s "\\n" %(argnames)s);', '#line %(out_next_lineno)d "%(out_filename)s"', - ' trlen = MIN(trlen, MAX_TRACE_STRLEN - 1);', - ' unused = write(trace_marker_fd, ftrace_buf, trlen);', - ' }', - ' }', name=event.name, args=event.args, - event_id="TRACE_" + event.name.upper(), event_lineno=event.lineno, event_filename=event.filename, fmt=event.fmt.rstrip("\n"), @@ -53,3 +43,9 @@ def generate_h(event, group): def generate_h_backend_dstate(event, group): out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\', event_id="TRACE_" + event.name.upper()) + +def generate_rs(event, group): + out(' let format_string = c"%(fmt)s";', + ' unsafe {bindings::ftrace_write(format_string.as_ptr() as *const c_char, %(args)s);}', + fmt=expand_format_string(event.fmt), + args=event.args.rust_call_varargs()) diff --git a/scripts/tracetool/backend/log.py b/scripts/tracetool/backend/log.py index 17ba1cd90ebb8..9e3e5046f5f66 100644 --- a/scripts/tracetool/backend/log.py +++ b/scripts/tracetool/backend/log.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Stderr built-in backend. 
@@ -12,15 +12,15 @@ __email__ = "stefanha@redhat.com" -from tracetool import out +from tracetool import out, expand_format_string PUBLIC = True +CHECK_TRACE_EVENT_GET_STATE = True def generate_h_begin(events, group): out('#include "qemu/log-for-trace.h"', - '#include "qemu/error-report.h"', '') @@ -29,29 +29,11 @@ def generate_h(event, group): if len(event.args) > 0: argnames = ", " + argnames - if "vcpu" in event.properties: - # already checked on the generic format code - cond = "true" - else: - cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper()) - - out(' if (%(cond)s && qemu_loglevel_mask(LOG_TRACE)) {', - ' if (message_with_timestamp) {', - ' struct timeval _now;', - ' gettimeofday(&_now, NULL);', - '#line %(event_lineno)d "%(event_filename)s"', - ' qemu_log("%%d@%%zu.%%06zu:%(name)s " %(fmt)s "\\n",', - ' qemu_get_thread_id(),', - ' (size_t)_now.tv_sec, (size_t)_now.tv_usec', - ' %(argnames)s);', - '#line %(out_next_lineno)d "%(out_filename)s"', - ' } else {', + out(' if (qemu_loglevel_mask(LOG_TRACE)) {', '#line %(event_lineno)d "%(event_filename)s"', ' qemu_log("%(name)s " %(fmt)s "\\n"%(argnames)s);', '#line %(out_next_lineno)d "%(out_filename)s"', ' }', - ' }', - cond=cond, event_lineno=event.lineno, event_filename=event.filename, name=event.name, @@ -62,3 +44,11 @@ def generate_h(event, group): def generate_h_backend_dstate(event, group): out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\', event_id="TRACE_" + event.name.upper()) + +def generate_rs(event, group): + out(' let format_string = c"%(fmt)s\\n";', + ' if (unsafe { bindings::qemu_loglevel } & bindings::LOG_TRACE) != 0 {', + ' unsafe { bindings::qemu_log(format_string.as_ptr() as *const c_char, %(args)s);}', + ' }', + fmt=expand_format_string(event.fmt, event.name + " "), + args=event.args.rust_call_varargs()) diff --git a/scripts/tracetool/backend/simple.py b/scripts/tracetool/backend/simple.py index 2688d4b64b33f..b131e4fc19422 100644 --- 
a/scripts/tracetool/backend/simple.py +++ b/scripts/tracetool/backend/simple.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Simple built-in backend. @@ -16,6 +16,7 @@ PUBLIC = True +CHECK_TRACE_EVENT_GET_STATE = True def is_string(arg): @@ -36,17 +37,8 @@ def generate_h_begin(events, group): def generate_h(event, group): - event_id = 'TRACE_' + event.name.upper() - if "vcpu" in event.properties: - # already checked on the generic format code - cond = "true" - else: - cond = "trace_event_get_state(%s)" % event_id - out(' if (%(cond)s) {', - ' _simple_%(api)s(%(args)s);', - ' }', + out(' _simple_%(api)s(%(args)s);', api=event.api(), - cond=cond, args=", ".join(event.args.names())) @@ -106,3 +98,10 @@ def generate_c(event, group): out(' trace_record_finish(&rec);', '}', '') + +def generate_rs(event, group): + out(' extern "C" { fn _simple_%(api)s(%(rust_args)s); }', + ' unsafe { _simple_%(api)s(%(args)s); }', + api=event.api(), + rust_args=event.args.rust_decl_extern(), + args=event.args.rust_call_extern()) diff --git a/scripts/tracetool/backend/syslog.py b/scripts/tracetool/backend/syslog.py index 5a3a00fe310ae..12b826593db4b 100644 --- a/scripts/tracetool/backend/syslog.py +++ b/scripts/tracetool/backend/syslog.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Syslog built-in backend. 
@@ -12,10 +12,11 @@ __email__ = "stefanha@redhat.com" -from tracetool import out +from tracetool import out, expand_format_string PUBLIC = True +CHECK_TRACE_EVENT_GET_STATE = True def generate_h_begin(events, group): @@ -28,24 +29,20 @@ def generate_h(event, group): if len(event.args) > 0: argnames = ", " + argnames - if "vcpu" in event.properties: - # already checked on the generic format code - cond = "true" - else: - cond = "trace_event_get_state(%s)" % ("TRACE_" + event.name.upper()) - - out(' if (%(cond)s) {', - '#line %(event_lineno)d "%(event_filename)s"', + out('#line %(event_lineno)d "%(event_filename)s"', ' syslog(LOG_INFO, "%(name)s " %(fmt)s %(argnames)s);', '#line %(out_next_lineno)d "%(out_filename)s"', - ' }', - cond=cond, event_lineno=event.lineno, event_filename=event.filename, name=event.name, fmt=event.fmt.rstrip("\n"), argnames=argnames) +def generate_rs(event, group): + out(' let format_string = c"%(fmt)s";', + ' unsafe {::trace::syslog(::trace::LOG_INFO, format_string.as_ptr() as *const c_char, %(args)s);}', + fmt=expand_format_string(event.fmt), + args=event.args.rust_call_varargs()) def generate_h_backend_dstate(event, group): out(' trace_event_get_state_dynamic_by_id(%(event_id)s) || \\', diff --git a/scripts/tracetool/backend/ust.py b/scripts/tracetool/backend/ust.py index c857516f21261..3aa9bb1da2991 100644 --- a/scripts/tracetool/backend/ust.py +++ b/scripts/tracetool/backend/ust.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ LTTng User Space Tracing backend. diff --git a/scripts/tracetool/format/__init__.py b/scripts/tracetool/format/__init__.py index 2dc46f3dd93e4..7b9d1b578265e 100644 --- a/scripts/tracetool/format/__init__.py +++ b/scripts/tracetool/format/__init__.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Format management. 
@@ -70,7 +70,7 @@ def exists(name): if len(name) == 0: return False name = name.replace("-", "_") - return tracetool.try_import("tracetool.format." + name)[1] + return tracetool.try_import("tracetool.format." + name)[0] def generate(events, format, backend, group): diff --git a/scripts/tracetool/format/c.py b/scripts/tracetool/format/c.py index 69edf0d588ee8..50e03313cbf18 100644 --- a/scripts/tracetool/format/c.py +++ b/scripts/tracetool/format/c.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ trace/generated-tracers.c @@ -22,6 +22,7 @@ def generate(events, backend, group): header = "trace-" + group + ".h" out('/* This file is autogenerated by tracetool, do not edit. */', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '', '#include "qemu/osdep.h"', '#include "qemu/module.h"', @@ -36,7 +37,7 @@ def generate(events, backend, group): ' .id = 0,', ' .name = \"%(name)s\",', ' .sstate = %(sstate)s,', - ' .dstate = &%(dstate)s ', + ' .dstate = &%(dstate)s', '};', event = e.api(e.QEMU_EVENT), name = e.name, diff --git a/scripts/tracetool/format/d.py b/scripts/tracetool/format/d.py index ebfb714200266..e9e33dfe30a3a 100644 --- a/scripts/tracetool/format/d.py +++ b/scripts/tracetool/format/d.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ trace/generated-tracers.dtrace (DTrace only). @@ -39,7 +39,8 @@ def generate(events, backend, group): if not events and platform != "darwin": return - out('/* This file is autogenerated by tracetool, do not edit. */' + out('/* This file is autogenerated by tracetool, do not edit. 
*/', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '', 'provider qemu {') diff --git a/scripts/tracetool/format/h.py b/scripts/tracetool/format/h.py index ea126b07ea57b..dd58713a15816 100644 --- a/scripts/tracetool/format/h.py +++ b/scripts/tracetool/format/h.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ trace/generated-tracers.h @@ -19,6 +19,7 @@ def generate(events, backend, group): header = "trace/control.h" out('/* This file is autogenerated by tracetool, do not edit. */', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '', '#ifndef TRACE_%s_GENERATED_TRACERS_H' % group.upper(), '#define TRACE_%s_GENERATED_TRACERS_H' % group.upper(), @@ -39,11 +40,6 @@ def generate(events, backend, group): enabled = 0 else: enabled = 1 - if "tcg-exec" in e.properties: - # a single define for the two "sub-events" - out('#define TRACE_%(name)s_ENABLED %(enabled)d', - name=e.original.name.upper(), - enabled=enabled) out('#define TRACE_%s_ENABLED %d' % (e.name.upper(), enabled)) backend.generate_begin(events, group) @@ -59,33 +55,24 @@ def generate(events, backend, group): out(' false)') - # tracer without checks out('', 'static inline void %(api)s(%(args)s)', '{', - api=e.api(e.QEMU_TRACE_NOCHECK), + api=e.api(), args=e.args) if "disable" not in e.properties: - backend.generate(e, group) - + backend.generate(e, group, check_trace_event_get_state=False) + + if backend.check_trace_event_get_state: + event_id = 'TRACE_' + e.name.upper() + cond = "trace_event_get_state(%s)" % event_id + out(' if (%(cond)s) {', + cond=cond) + backend.generate(e, group, check_trace_event_get_state=True) + out(' }') out('}') - cond = "true" - - out('', - 'static inline void %(api)s(%(args)s)', - '{', - ' if (%(cond)s) {', - ' %(api_nocheck)s(%(names)s);', - ' }', - '}', - api=e.api(), - api_nocheck=e.api(e.QEMU_TRACE_NOCHECK), - args=e.args, - names=", ".join(e.args.names()), - cond=cond) - backend.generate_end(events, group) out('#endif /* 
TRACE_%s_GENERATED_TRACERS_H */' % group.upper()) diff --git a/scripts/tracetool/format/log_stap.py b/scripts/tracetool/format/log_stap.py index b49afababd676..259303a189df6 100644 --- a/scripts/tracetool/format/log_stap.py +++ b/scripts/tracetool/format/log_stap.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Generate .stp file that printfs log messages (DTrace with SystemTAP only). @@ -18,8 +18,6 @@ from tracetool.backend.simple import is_string from tracetool.format.stap import stap_escape -def global_var_name(name): - return probeprefix().replace(".", "_") + "_" + name STATE_SKIP = 0 STATE_LITERAL = 1 @@ -88,6 +86,7 @@ def c_fmt_to_stap(fmt): def generate(events, backend, group): out('/* This file is autogenerated by tracetool, do not edit. */', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '') for event_id, e in enumerate(events): diff --git a/scripts/tracetool/format/rs.py b/scripts/tracetool/format/rs.py new file mode 100644 index 0000000000000..32ac4e5977048 --- /dev/null +++ b/scripts/tracetool/format/rs.py @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +""" +trace-DIR.rs +""" + +__author__ = "Tanish Desai " +__copyright__ = "Copyright 2025, Tanish Desai " +__license__ = "GPL version 2 or (at your option) any later version" + +__maintainer__ = "Stefan Hajnoczi" +__email__ = "stefanha@redhat.com" + + +from tracetool import out + + +def generate(events, backend, group): + out('// SPDX-License-Identifier: GPL-2.0-or-later', + '// This file is @generated by tracetool, do not edit.', + '', + '#[allow(unused_imports)]', + 'use std::ffi::c_char;', + '#[allow(unused_imports)]', + 'use util::bindings;', + '', + '#[inline(always)]', + 'fn trace_event_state_is_enabled(dstate: u16) -> bool {', + ' (unsafe { trace_events_enabled_count }) != 0 && dstate != 0', + '}', + '', + 'extern "C" {', + ' static mut trace_events_enabled_count: u32;', + '}',) + + out('extern "C" {') + + for e in events: + out(' 
static mut %s: u16;' % e.api(e.QEMU_DSTATE)) + out('}') + + backend.generate_begin(events, group) + + for e in events: + out('', + '#[inline(always)]', + '#[allow(dead_code)]', + 'pub fn %(api)s(%(args)s)', + '{', + api=e.api(e.QEMU_TRACE), + args=e.args.rust_decl()) + + if "disable" not in e.properties: + backend.generate(e, group, check_trace_event_get_state=False) + if backend.check_trace_event_get_state: + event_id = 'TRACE_' + e.name.upper() + out(' if trace_event_state_is_enabled(unsafe { _%(event_id)s_DSTATE}) {', + event_id = event_id, + api=e.api()) + backend.generate(e, group, check_trace_event_get_state=True) + out(' }') + out('}') + + backend.generate_end(events, group) diff --git a/scripts/tracetool/format/simpletrace_stap.py b/scripts/tracetool/format/simpletrace_stap.py index 4f4633b4e689c..c7bde97a8556d 100644 --- a/scripts/tracetool/format/simpletrace_stap.py +++ b/scripts/tracetool/format/simpletrace_stap.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Generate .stp file that outputs simpletrace binary traces (DTrace with SystemTAP only). @@ -17,11 +17,10 @@ from tracetool.backend.simple import is_string from tracetool.format.stap import stap_escape -def global_var_name(name): - return probeprefix().replace(".", "_") + "_" + name def generate(events, backend, group): out('/* This file is autogenerated by tracetool, do not edit. */', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '') for event_id, e in enumerate(events): diff --git a/scripts/tracetool/format/stap.py b/scripts/tracetool/format/stap.py index a218b0445c9b1..285c9203ba707 100644 --- a/scripts/tracetool/format/stap.py +++ b/scripts/tracetool/format/stap.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ Generate .stp file (DTrace with SystemTAP only). 
@@ -38,6 +38,7 @@ def generate(events, backend, group): if "disable" not in e.properties] out('/* This file is autogenerated by tracetool, do not edit. */', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '') for e in events: diff --git a/scripts/tracetool/format/ust_events_c.py b/scripts/tracetool/format/ust_events_c.py index deced9533ddfa..074226bfd3766 100644 --- a/scripts/tracetool/format/ust_events_c.py +++ b/scripts/tracetool/format/ust_events_c.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ trace/generated-ust.c @@ -20,6 +20,7 @@ def generate(events, backend, group): if "disabled" not in e.properties] out('/* This file is autogenerated by tracetool, do not edit. */', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '', '#include "qemu/osdep.h"', '', diff --git a/scripts/tracetool/format/ust_events_h.py b/scripts/tracetool/format/ust_events_h.py index b99fe6896bafa..cee7970a40368 100644 --- a/scripts/tracetool/format/ust_events_h.py +++ b/scripts/tracetool/format/ust_events_h.py @@ -1,4 +1,4 @@ -# -*- coding: utf-8 -*- +# SPDX-License-Identifier: GPL-2.0-or-later """ trace/generated-ust-provider.h @@ -25,6 +25,7 @@ def generate(events, backend, group): include = "trace-ust.h" out('/* This file is autogenerated by tracetool, do not edit. 
*/', + '/* SPDX-License-Identifier: GPL-2.0-or-later */', '', '#undef TRACEPOINT_PROVIDER', '#define TRACEPOINT_PROVIDER qemu', diff --git a/scripts/update-linux-headers.sh b/scripts/update-linux-headers.sh index b43b8ef75a632..844d9cb9f5e43 100755 --- a/scripts/update-linux-headers.sh +++ b/scripts/update-linux-headers.sh @@ -90,6 +90,7 @@ cp_portable() { -e 's/]*\)>/"standard-headers\/linux\/\1"/' \ -e "$arch_cmd" \ -e 's/__bitwise//' \ + -e 's/__counted_by(\w*)//' \ -e 's/__attribute__((packed))/QEMU_PACKED/' \ -e 's/__inline__/inline/' \ -e 's/__BITS_PER_LONG/HOST_LONG_BITS/' \ @@ -156,11 +157,6 @@ EOF cp "$hdrdir/include/asm/unistd_32.h" "$output/linux-headers/asm-s390/" cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-s390/" fi - if [ $arch = arm ]; then - cp "$hdrdir/include/asm/unistd-eabi.h" "$output/linux-headers/asm-arm/" - cp "$hdrdir/include/asm/unistd-oabi.h" "$output/linux-headers/asm-arm/" - cp "$hdrdir/include/asm/unistd-common.h" "$output/linux-headers/asm-arm/" - fi if [ $arch = arm64 ]; then cp "$hdrdir/include/asm/sve_context.h" "$output/linux-headers/asm-arm64/" cp "$hdrdir/include/asm/unistd_64.h" "$output/linux-headers/asm-arm64/" @@ -200,7 +196,7 @@ rm -rf "$output/linux-headers/linux" mkdir -p "$output/linux-headers/linux" for header in const.h stddef.h kvm.h vfio.h vfio_ccw.h vfio_zdev.h vhost.h \ psci.h psp-sev.h userfaultfd.h memfd.h mman.h nvme_ioctl.h \ - vduse.h iommufd.h bits.h; do + vduse.h iommufd.h bits.h mshv.h; do cp "$hdrdir/include/linux/$header" "$output/linux-headers/linux" done diff --git a/scripts/userfaultfd-wrlat.py b/scripts/userfaultfd-wrlat.py index 0684be4e04487..a61a9abbfcb25 100755 --- a/scripts/userfaultfd-wrlat.py +++ b/scripts/userfaultfd-wrlat.py @@ -17,7 +17,6 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from __future__ import print_function from bcc import BPF from ctypes import c_ushort, c_int, c_ulonglong from time import sleep diff --git a/scsi/pr-manager-helper.c b/scsi/pr-manager-helper.c index 6b86f01b01fe5..aea751fb047d7 100644 --- a/scsi/pr-manager-helper.c +++ b/scsi/pr-manager-helper.c @@ -105,20 +105,15 @@ static int pr_manager_helper_initialize(PRManagerHelper *pr_mgr, .u.q_unix.path = path }; QIOChannelSocket *sioc = qio_channel_socket_new(); - Error *local_err = NULL; - uint32_t flags; int r; assert(!pr_mgr->ioc); qio_channel_set_name(QIO_CHANNEL(sioc), "pr-manager-helper"); - qio_channel_socket_connect_sync(sioc, - &saddr, - &local_err); + r = qio_channel_socket_connect_sync(sioc, &saddr, errp); g_free(path); - if (local_err) { + if (r < 0) { object_unref(OBJECT(sioc)); - error_propagate(errp, local_err); return -ENOTCONN; } diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index b69dd982d6a0b..074b4db472076 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -733,8 +733,11 @@ static void coroutine_fn prh_co_entry(void *opaque) uint32_t flags; int r; - qio_channel_set_blocking(QIO_CHANNEL(client->ioc), - false, NULL); + if (!qio_channel_set_blocking(QIO_CHANNEL(client->ioc), + false, &local_err)) { + goto out; + } + qio_channel_set_follow_coroutine_ctx(QIO_CHANNEL(client->ioc), true); /* A very simple negotiation for future extensibility. 
No features @@ -786,6 +789,7 @@ static void coroutine_fn prh_co_entry(void *opaque) } } +out: if (local_err) { if (verbose == 0) { error_free(local_err); @@ -794,7 +798,6 @@ static void coroutine_fn prh_co_entry(void *opaque) } } -out: object_unref(OBJECT(client->ioc)); g_free(client); } diff --git a/semihosting/arm-compat-semi-stub.c b/semihosting/arm-compat-semi-stub.c new file mode 100644 index 0000000000000..bfa3681e2676d --- /dev/null +++ b/semihosting/arm-compat-semi-stub.c @@ -0,0 +1,19 @@ +/* + * Stubs for platforms different from ARM + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "semihosting/semihost.h" +#include + +bool semihosting_arm_compatible(void) +{ + return false; +} + +void semihosting_arm_compatible_init(void) +{ + g_assert_not_reached(); +} diff --git a/semihosting/arm-compat-semi.c b/semihosting/arm-compat-semi.c index 86e5260e504be..61001267965ad 100644 --- a/semihosting/arm-compat-semi.c +++ b/semihosting/arm-compat-semi.c @@ -100,6 +100,13 @@ static int gdb_open_modeflags[12] = { GDB_O_RDWR | GDB_O_CREAT | GDB_O_APPEND, }; +/* + * For ARM semihosting, we have a separate structure for routing + * data for the console which is outside the guest fd address space. + */ +static GuestFD console_in_gf; +static GuestFD console_out_gf; + #ifndef CONFIG_USER_ONLY /** @@ -115,7 +122,7 @@ static int gdb_open_modeflags[12] = { */ typedef struct LayoutInfo { - target_ulong rambase; + vaddr rambase; size_t ramsize; hwaddr heapbase; hwaddr heaplimit; @@ -166,8 +173,7 @@ static LayoutInfo common_semi_find_bases(CPUState *cs) #endif -#include "cpu.h" -#include "common-semi-target.h" +#include "semihosting/common-semi.h" /* * Read the input value from the argument block; fail the semihosting @@ -207,7 +213,7 @@ static LayoutInfo common_semi_find_bases(CPUState *cs) * global, and we assume that the guest takes care of avoiding any races. 
*/ #ifndef CONFIG_USER_ONLY -static target_ulong syscall_err; +static uint64_t syscall_err; #include "semihosting/uaccess.h" #endif @@ -253,8 +259,8 @@ static void common_semi_rw_cb(CPUState *cs, uint64_t ret, int err) { /* Recover the original length from the third argument. */ CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); - target_ulong args = common_semi_arg(cs, 1); - target_ulong arg2; + uint64_t args = common_semi_arg(cs, 1); + uint64_t arg2; GET_ARG(2); if (err) { @@ -293,9 +299,9 @@ static void common_semi_seek_cb(CPUState *cs, uint64_t ret, int err) * is defined by GDB's remote protocol and is not target-specific.) * We put this on the guest's stack just below SP. */ -static target_ulong common_semi_flen_buf(CPUState *cs) +static uint64_t common_semi_flen_buf(CPUState *cs) { - target_ulong sp = common_semi_stack_bottom(cs); + vaddr sp = common_semi_stack_bottom(cs); return sp - 64; } @@ -352,6 +358,25 @@ static const uint8_t featurefile_data[] = { SH_EXT_EXIT_EXTENDED | SH_EXT_STDOUT_STDERR, /* Feature byte 0 */ }; +bool semihosting_arm_compatible(void) +{ + return true; +} + +void semihosting_arm_compatible_init(void) +{ + /* For ARM-compat, the console is in a separate namespace. */ + if (use_gdb_syscalls()) { + console_in_gf.type = GuestFDGDB; + console_in_gf.hostfd = 0; + console_out_gf.type = GuestFDGDB; + console_out_gf.hostfd = 2; + } else { + console_in_gf.type = GuestFDConsole; + console_out_gf.type = GuestFDConsole; + } +} + /* * Do a semihosting call. 
* @@ -363,9 +388,9 @@ static const uint8_t featurefile_data[] = { void do_common_semihosting(CPUState *cs) { CPUArchState *env = cpu_env(cs); - target_ulong args; - target_ulong arg0, arg1, arg2, arg3; - target_ulong ul_ret; + uint64_t args; + uint64_t arg0, arg1, arg2, arg3; + uint64_t ul_ret; char * s; int nr; int64_t elapsed; @@ -436,7 +461,7 @@ void do_common_semihosting(CPUState *cs) case TARGET_SYS_WRITEC: /* - * FIXME: the byte to be written is in a target_ulong slot, + * FIXME: the byte to be written is in a uint64_t slot, * which means this is wrong for a big-endian guest. */ semihost_sys_write_gf(cs, common_semi_dead_cb, @@ -475,10 +500,13 @@ void do_common_semihosting(CPUState *cs) break; case TARGET_SYS_ISERROR: + { GET_ARG(0); - common_semi_set_ret(cs, (target_long)arg0 < 0); + bool ret = is_64bit_semihosting(env) ? + (int64_t)arg0 < 0 : (int32_t)arg0 < 0; + common_semi_set_ret(cs, ret); break; - + } case TARGET_SYS_ISTTY: GET_ARG(0); semihost_sys_isatty(cs, common_semi_istty_cb, arg0); @@ -662,11 +690,11 @@ void do_common_semihosting(CPUState *cs) case TARGET_SYS_HEAPINFO: { - target_ulong retvals[4]; + uint64_t retvals[4]; int i; #ifdef CONFIG_USER_ONLY TaskState *ts = get_task_state(cs); - target_ulong limit; + static abi_ulong heapbase, heaplimit; #else LayoutInfo info = common_semi_find_bases(cs); #endif @@ -678,25 +706,25 @@ void do_common_semihosting(CPUState *cs) * Some C libraries assume the heap immediately follows .bss, so * allocate it using sbrk. */ - if (!ts->heap_limit) { - abi_ulong ret; - - ts->heap_base = do_brk(0); - limit = ts->heap_base + COMMON_SEMI_HEAP_SIZE; + if (!heaplimit) { + heapbase = do_brk(0); /* Try a big heap, and reduce the size if that fails. 
*/ - for (;;) { - ret = do_brk(limit); + for (abi_ulong size = COMMON_SEMI_HEAP_SIZE; ; size >>= 1) { + abi_ulong limit = heapbase + size; + abi_ulong ret = do_brk(limit); if (ret >= limit) { + heaplimit = limit; break; } - limit = (ts->heap_base >> 1) + (limit >> 1); } - ts->heap_limit = limit; } - - retvals[0] = ts->heap_base; - retvals[1] = ts->heap_limit; - retvals[2] = ts->stack_base; + retvals[0] = heapbase; + retvals[1] = heaplimit; + /* + * Note that semihosting is *not* thread aware. + * Always return the stack base of the main thread. + */ + retvals[2] = ts->info->start_stack; retvals[3] = 0; /* Stack limit. */ #else retvals[0] = info.heapbase; /* Heap Base */ @@ -728,7 +756,8 @@ void do_common_semihosting(CPUState *cs) { uint32_t ret; - if (common_semi_sys_exit_extended(cs, nr)) { + if (nr == TARGET_SYS_EXIT_EXTENDED || + common_semi_sys_exit_is_extended(cs)) { /* * The A64 version of SYS_EXIT takes a parameter block, * so the application-exit type can return a subcode which @@ -759,7 +788,7 @@ void do_common_semihosting(CPUState *cs) case TARGET_SYS_ELAPSED: elapsed = get_clock() - clock_start; - if (sizeof(target_ulong) == 8) { + if (is_64bit_semihosting(env)) { if (SET_ARG(0, elapsed)) { goto do_fault; } diff --git a/semihosting/guestfd.c b/semihosting/guestfd.c index d3241434c516f..e8f236c690c44 100644 --- a/semihosting/guestfd.c +++ b/semihosting/guestfd.c @@ -12,35 +12,20 @@ #include "gdbstub/syscalls.h" #include "semihosting/semihost.h" #include "semihosting/guestfd.h" -#ifndef CONFIG_USER_ONLY -#include CONFIG_DEVICES -#endif static GArray *guestfd_array; -#ifdef CONFIG_ARM_COMPATIBLE_SEMIHOSTING -GuestFD console_in_gf; -GuestFD console_out_gf; -#endif - void qemu_semihosting_guestfd_init(void) { /* New entries zero-initialized, i.e. type GuestFDUnused */ guestfd_array = g_array_new(FALSE, TRUE, sizeof(GuestFD)); -#ifdef CONFIG_ARM_COMPATIBLE_SEMIHOSTING - /* For ARM-compat, the console is in a separate namespace. 
*/ - if (use_gdb_syscalls()) { - console_in_gf.type = GuestFDGDB; - console_in_gf.hostfd = 0; - console_out_gf.type = GuestFDGDB; - console_out_gf.hostfd = 2; - } else { - console_in_gf.type = GuestFDConsole; - console_out_gf.type = GuestFDConsole; + if (semihosting_arm_compatible()) { + semihosting_arm_compatible_init(); + return; } -#else - /* Otherwise, the stdio file descriptors apply. */ + + /* Outside of ARM, the stdio file descriptors apply. */ guestfd_array = g_array_set_size(guestfd_array, 3); #ifndef CONFIG_USER_ONLY if (!use_gdb_syscalls()) { @@ -54,7 +39,6 @@ void qemu_semihosting_guestfd_init(void) associate_guestfd(0, 0); associate_guestfd(1, 1); associate_guestfd(2, 2); -#endif } /* diff --git a/semihosting/meson.build b/semihosting/meson.build index b1ab2506c6e38..99f10e2e2bbbd 100644 --- a/semihosting/meson.build +++ b/semihosting/meson.build @@ -1,17 +1,21 @@ -specific_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files( - 'guestfd.c', - 'syscalls.c', -)) - common_ss.add(when: 'CONFIG_SEMIHOSTING', if_false: files('stubs-all.c')) -user_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files('user.c')) +user_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files( + 'user.c', + 'guestfd.c')) system_ss.add(when: 'CONFIG_SEMIHOSTING', if_true: files( 'config.c', 'console.c', + 'guestfd.c', 'uaccess.c', + 'syscalls.c', ), if_false: files( 'stubs-system.c', )) +system_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', + if_true: files('arm-compat-semi.c'), + if_false: files('arm-compat-semi-stub.c')) -specific_ss.add(when: ['CONFIG_ARM_COMPATIBLE_SEMIHOSTING'], +specific_ss.add(when: ['CONFIG_SEMIHOSTING', 'CONFIG_USER_ONLY'], + if_true: files('syscalls.c')) +specific_ss.add(when: ['CONFIG_ARM_COMPATIBLE_SEMIHOSTING', 'CONFIG_USER_ONLY'], + if_true: files('arm-compat-semi.c')) diff --git a/semihosting/syscalls.c b/semihosting/syscalls.c index f6451d9bb0e65..20f155f869a26 100644 --- a/semihosting/syscalls.c +++ b/semihosting/syscalls.c @@ -8,7 +8,6 @@ #include 
"qemu/osdep.h" #include "qemu/log.h" -#include "cpu.h" #include "gdbstub/syscalls.h" #include "semihosting/guestfd.h" #include "semihosting/syscalls.h" @@ -23,7 +22,7 @@ /* * Validate or compute the length of the string (including terminator). */ -static int validate_strlen(CPUState *cs, target_ulong str, target_ulong tlen) +static int validate_strlen(CPUState *cs, vaddr str, uint64_t tlen) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); char c; @@ -52,7 +51,7 @@ static int validate_strlen(CPUState *cs, target_ulong str, target_ulong tlen) } static int validate_lock_user_string(char **pstr, CPUState *cs, - target_ulong tstr, target_ulong tlen) + vaddr tstr, uint64_t tlen) { int ret = validate_strlen(cs, tstr, tlen); CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); @@ -72,7 +71,7 @@ static int validate_lock_user_string(char **pstr, CPUState *cs, * big-endian. Until we do something with gdb, also produce the * same big-endian result from the host. */ -static int copy_stat_to_user(CPUState *cs, target_ulong addr, +static int copy_stat_to_user(CPUState *cs, vaddr addr, const struct stat *s) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); @@ -129,7 +128,7 @@ static void gdb_open_cb(CPUState *cs, uint64_t ret, int err) } static void gdb_open(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, + vaddr fname, uint64_t fname_len, int gdb_flags, int mode) { int len = validate_strlen(cs, fname, fname_len); @@ -140,7 +139,7 @@ static void gdb_open(CPUState *cs, gdb_syscall_complete_cb complete, gdb_open_complete = complete; gdb_do_syscall(gdb_open_cb, "open,%s,%x,%x", - (uint64_t)fname, (uint32_t)len, + (vaddr)fname, (uint32_t)len, (uint32_t)gdb_flags, (uint32_t)mode); } @@ -151,17 +150,17 @@ static void gdb_close(CPUState *cs, gdb_syscall_complete_cb complete, } static void gdb_read(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { 
gdb_do_syscall(complete, "read,%x,%lx,%lx", - (uint32_t)gf->hostfd, (uint64_t)buf, (uint64_t)len); + (uint32_t)gf->hostfd, (vaddr)buf, (uint64_t)len); } static void gdb_write(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { gdb_do_syscall(complete, "write,%x,%lx,%lx", - (uint32_t)gf->hostfd, (uint64_t)buf, (uint64_t)len); + (uint32_t)gf->hostfd, (vaddr)buf, (uint64_t)len); } static void gdb_lseek(CPUState *cs, gdb_syscall_complete_cb complete, @@ -178,15 +177,15 @@ static void gdb_isatty(CPUState *cs, gdb_syscall_complete_cb complete, } static void gdb_fstat(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong addr) + GuestFD *gf, vaddr addr) { gdb_do_syscall(complete, "fstat,%x,%lx", - (uint32_t)gf->hostfd, (uint64_t)addr); + (uint32_t)gf->hostfd, (vaddr)addr); } static void gdb_stat(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, - target_ulong addr) + vaddr fname, uint64_t fname_len, + vaddr addr) { int len = validate_strlen(cs, fname, fname_len); if (len < 0) { @@ -195,11 +194,11 @@ static void gdb_stat(CPUState *cs, gdb_syscall_complete_cb complete, } gdb_do_syscall(complete, "stat,%s,%lx", - (uint64_t)fname, (uint32_t)len, (uint64_t)addr); + (vaddr)fname, (uint32_t)len, (vaddr)addr); } static void gdb_remove(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len) + vaddr fname, uint64_t fname_len) { int len = validate_strlen(cs, fname, fname_len); if (len < 0) { @@ -207,12 +206,12 @@ static void gdb_remove(CPUState *cs, gdb_syscall_complete_cb complete, return; } - gdb_do_syscall(complete, "unlink,%s", (uint64_t)fname, (uint32_t)len); + gdb_do_syscall(complete, "unlink,%s", (vaddr)fname, (uint32_t)len); } static void gdb_rename(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong oname, target_ulong oname_len, - target_ulong nname, target_ulong 
nname_len) + vaddr oname, uint64_t oname_len, + vaddr nname, uint64_t nname_len) { int olen, nlen; @@ -228,12 +227,12 @@ static void gdb_rename(CPUState *cs, gdb_syscall_complete_cb complete, } gdb_do_syscall(complete, "rename,%s,%s", - (uint64_t)oname, (uint32_t)olen, - (uint64_t)nname, (uint32_t)nlen); + (vaddr)oname, (uint32_t)olen, + (vaddr)nname, (uint32_t)nlen); } static void gdb_system(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong cmd, target_ulong cmd_len) + vaddr cmd, uint64_t cmd_len) { int len = validate_strlen(cs, cmd, cmd_len); if (len < 0) { @@ -241,14 +240,14 @@ static void gdb_system(CPUState *cs, gdb_syscall_complete_cb complete, return; } - gdb_do_syscall(complete, "system,%s", (uint64_t)cmd, (uint32_t)len); + gdb_do_syscall(complete, "system,%s", (vaddr)cmd, (uint32_t)len); } static void gdb_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong tv_addr, target_ulong tz_addr) + vaddr tv_addr, vaddr tz_addr) { gdb_do_syscall(complete, "gettimeofday,%lx,%lx", - (uint64_t)tv_addr, (uint64_t)tz_addr); + (vaddr)tv_addr, (vaddr)tz_addr); } /* @@ -256,7 +255,7 @@ static void gdb_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, */ static void host_open(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, + vaddr fname, uint64_t fname_len, int gdb_flags, int mode) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); @@ -316,7 +315,7 @@ static void host_close(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_read(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); void *ptr = lock_user(VERIFY_WRITE, buf, len, 0); @@ -337,7 +336,7 @@ static void host_read(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_write(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong 
len) + GuestFD *gf, vaddr buf, uint64_t len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); void *ptr = lock_user(VERIFY_READ, buf, len, 1); @@ -395,7 +394,7 @@ static void host_flen(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_fstat(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong addr) + GuestFD *gf, vaddr addr) { struct stat buf; int ret; @@ -410,8 +409,8 @@ static void host_fstat(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_stat(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, - target_ulong addr) + vaddr fname, uint64_t fname_len, + vaddr addr) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); struct stat buf; @@ -440,7 +439,7 @@ static void host_stat(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_remove(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len) + vaddr fname, uint64_t fname_len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); char *p; @@ -458,8 +457,8 @@ static void host_remove(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_rename(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong oname, target_ulong oname_len, - target_ulong nname, target_ulong nname_len) + vaddr oname, uint64_t oname_len, + vaddr nname, uint64_t nname_len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); char *ostr, *nstr; @@ -484,7 +483,7 @@ static void host_rename(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_system(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong cmd, target_ulong cmd_len) + vaddr cmd, uint64_t cmd_len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); char *p; @@ -502,7 +501,7 @@ static void host_system(CPUState *cs, gdb_syscall_complete_cb complete, } static void host_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong tv_addr, target_ulong tz_addr) + vaddr tv_addr, vaddr tz_addr) { 
CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); struct gdb_timeval *p; @@ -547,10 +546,10 @@ static void host_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, */ static void staticfile_read(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); - target_ulong rest = gf->staticfile.len - gf->staticfile.off; + uint64_t rest = gf->staticfile.len - gf->staticfile.off; void *ptr; if (len > rest) { @@ -605,7 +604,7 @@ static void staticfile_flen(CPUState *cs, gdb_syscall_complete_cb complete, */ static void console_read(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); char *ptr; @@ -622,7 +621,7 @@ static void console_read(CPUState *cs, gdb_syscall_complete_cb complete, } static void console_write(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { CPUArchState *env G_GNUC_UNUSED = cpu_env(cs); char *ptr = lock_user(VERIFY_READ, buf, len, 1); @@ -638,7 +637,7 @@ static void console_write(CPUState *cs, gdb_syscall_complete_cb complete, } static void console_fstat(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong addr) + GuestFD *gf, vaddr addr) { static const struct stat tty_buf = { .st_mode = 020666, /* S_IFCHR, ugo+rw */ @@ -683,7 +682,7 @@ static void console_poll_one(CPUState *cs, gdb_syscall_complete_cb complete, */ void semihost_sys_open(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, + vaddr fname, uint64_t fname_len, int gdb_flags, int mode) { if (use_gdb_syscalls()) { @@ -719,7 +718,7 @@ void semihost_sys_close(CPUState *cs, gdb_syscall_complete_cb complete, int fd) } void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb 
complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { /* * Bound length for 64-bit guests on 32-bit hosts, not overflowing ssize_t. @@ -748,7 +747,7 @@ void semihost_sys_read_gf(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete, - int fd, target_ulong buf, target_ulong len) + int fd, vaddr buf, uint64_t len) { GuestFD *gf = get_guestfd(fd); @@ -760,7 +759,7 @@ void semihost_sys_read(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete, - GuestFD *gf, target_ulong buf, target_ulong len) + GuestFD *gf, vaddr buf, uint64_t len) { /* * Bound length for 64-bit guests on 32-bit hosts, not overflowing ssize_t. @@ -790,7 +789,7 @@ void semihost_sys_write_gf(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_write(CPUState *cs, gdb_syscall_complete_cb complete, - int fd, target_ulong buf, target_ulong len) + int fd, vaddr buf, uint64_t len) { GuestFD *gf = get_guestfd(fd); @@ -856,7 +855,7 @@ void semihost_sys_isatty(CPUState *cs, gdb_syscall_complete_cb complete, int fd) void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb, gdb_syscall_complete_cb flen_cb, int fd, - target_ulong fstat_addr) + vaddr fstat_addr) { GuestFD *gf = get_guestfd(fd); @@ -881,7 +880,7 @@ void semihost_sys_flen(CPUState *cs, gdb_syscall_complete_cb fstat_cb, } void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete, - int fd, target_ulong addr) + int fd, vaddr addr) { GuestFD *gf = get_guestfd(fd); @@ -906,8 +905,8 @@ void semihost_sys_fstat(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len, - target_ulong addr) + vaddr fname, uint64_t fname_len, + vaddr addr) { if (use_gdb_syscalls()) { gdb_stat(cs, complete, fname, fname_len, addr); @@ -917,7 +916,7 
@@ void semihost_sys_stat(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong fname, target_ulong fname_len) + vaddr fname, uint64_t fname_len) { if (use_gdb_syscalls()) { gdb_remove(cs, complete, fname, fname_len); @@ -927,8 +926,8 @@ void semihost_sys_remove(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong oname, target_ulong oname_len, - target_ulong nname, target_ulong nname_len) + vaddr oname, uint64_t oname_len, + vaddr nname, uint64_t nname_len) { if (use_gdb_syscalls()) { gdb_rename(cs, complete, oname, oname_len, nname, nname_len); @@ -938,7 +937,7 @@ void semihost_sys_rename(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong cmd, target_ulong cmd_len) + vaddr cmd, uint64_t cmd_len) { if (use_gdb_syscalls()) { gdb_system(cs, complete, cmd, cmd_len); @@ -948,7 +947,7 @@ void semihost_sys_system(CPUState *cs, gdb_syscall_complete_cb complete, } void semihost_sys_gettimeofday(CPUState *cs, gdb_syscall_complete_cb complete, - target_ulong tv_addr, target_ulong tz_addr) + vaddr tv_addr, vaddr tz_addr) { if (use_gdb_syscalls()) { gdb_gettimeofday(cs, complete, tv_addr, tz_addr); diff --git a/stubs/cpu-destroy-address-spaces.c b/stubs/cpu-destroy-address-spaces.c new file mode 100644 index 0000000000000..dc6813f5bd128 --- /dev/null +++ b/stubs/cpu-destroy-address-spaces.c @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "exec/cpu-common.h" + +/* + * user-mode CPUs never create address spaces with + * cpu_address_space_init(), so the cleanup function doesn't + * need to do anything. We need this stub because cpu-common.c + * is built-once so it can't #ifndef CONFIG_USER around the + * call; the real function is in physmem.c which is system-only. 
+ */ +void cpu_destroy_address_spaces(CPUState *cpu) +{ +} diff --git a/stubs/meson.build b/stubs/meson.build index cef046e6854dd..5d577467bfddf 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -55,6 +55,7 @@ endif if have_user # Symbols that are used by hw/core. stub_ss.add(files('cpu-synchronize-state.c')) + stub_ss.add(files('cpu-destroy-address-spaces.c')) # Stubs for QAPI events. Those can always be included in the build, but # they are not built at all for --disable-system builds. diff --git a/subprojects/.gitignore b/subprojects/.gitignore index f4281934ce11b..c00c847837244 100644 --- a/subprojects/.gitignore +++ b/subprojects/.gitignore @@ -6,17 +6,22 @@ /keycodemapdb /libvfio-user /slirp -/anyhow-1.0.98 -/arbitrary-int-1.2.7 -/bilge-0.2.0 -/bilge-impl-0.2.0 -/either-1.12.0 -/foreign-0.3.1 -/itertools-0.11.0 -/libc-0.2.162 -/proc-macro-error-1.0.4 -/proc-macro-error-attr-1.0.4 -/proc-macro2-1.0.84 -/quote-1.0.36 -/syn-2.0.66 -/unicode-ident-1.0.12 +/anyhow-* +/arbitrary-int-* +/attrs-* +/bilge-* +/bilge-impl-* +/either-* +/foreign-* +/glib-sys-* +/itertools-* +/libc-* +/proc-macro-error-* +/proc-macro-error-attr-* +/proc-macro* +/quote-* +/syn-* +/unicode-ident-* + +# Workaround for Meson v1.9.0 https://github.com/mesonbuild/meson/issues/14948 +/.wraplock diff --git a/subprojects/attrs-0.2-rs.wrap b/subprojects/attrs-0.2-rs.wrap new file mode 100644 index 0000000000000..cd43c91d63ec7 --- /dev/null +++ b/subprojects/attrs-0.2-rs.wrap @@ -0,0 +1,7 @@ +[wrap-file] +directory = attrs-0.2.9 +source_url = https://crates.io/api/v1/crates/attrs/0.2.9/download +source_filename = attrs-0.2.9.tar.gz +source_hash = 2a207d40f43de65285f3de0509bb6cb16bc46098864fce957122bbacce327e5f +#method = cargo +patch_directory = attrs-0.2-rs diff --git a/subprojects/glib-sys-0.21-rs.wrap b/subprojects/glib-sys-0.21-rs.wrap new file mode 100644 index 0000000000000..313ced731ac86 --- /dev/null +++ b/subprojects/glib-sys-0.21-rs.wrap @@ -0,0 +1,7 @@ +[wrap-file] +directory = 
glib-sys-0.21.2 +source_url = https://crates.io/api/v1/crates/glib-sys/0.21.2/download +source_filename = glib-sys-0.21.2.tar.gz +source_hash = d09d3d0fddf7239521674e57b0465dfbd844632fec54f059f7f56112e3f927e1 +#method = cargo +patch_directory = glib-sys-0.21-rs diff --git a/subprojects/packagefiles/attrs-0.2-rs/meson.build b/subprojects/packagefiles/attrs-0.2-rs/meson.build new file mode 100644 index 0000000000000..ee575476cb2cd --- /dev/null +++ b/subprojects/packagefiles/attrs-0.2-rs/meson.build @@ -0,0 +1,33 @@ +project('attrs-0.2-rs', 'rust', + meson_version: '>=1.5.0', + version: '0.2.9', + license: 'MIT OR Apache-2.0', + default_options: []) + +subproject('proc-macro2-1-rs', required: true) +subproject('syn-2-rs', required: true) + +proc_macro2_dep = dependency('proc-macro2-1-rs', native: true) +syn_dep = dependency('syn-2-rs', native: true) + +_attrs_rs = static_library( + 'attrs', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', + ], + dependencies: [ + proc_macro2_dep, + syn_dep, + ], + native: true, +) + +attrs_dep = declare_dependency( + link_with: _attrs_rs, +) + +meson.override_dependency('attrs-0.2-rs', attrs_dep, native: true) diff --git a/subprojects/packagefiles/glib-sys-0.21-rs/meson.build b/subprojects/packagefiles/glib-sys-0.21-rs/meson.build new file mode 100644 index 0000000000000..8c5483311ed98 --- /dev/null +++ b/subprojects/packagefiles/glib-sys-0.21-rs/meson.build @@ -0,0 +1,33 @@ +project('glib-sys-0.21-rs', 'rust', + meson_version: '>=1.5.0', + version: '0.21.2', + license: 'MIT', + default_options: []) + +subproject('libc-0.2-rs', required: true) +libc_rs = dependency('libc-0.2-rs') + +_glib_sys_rs = static_library( + 'glib_sys', + files('src/lib.rs'), + gnu_symbol_visibility: 'hidden', + override_options: ['rust_std=2021', 'build.rust_std=2021'], + rust_abi: 'rust', + rust_args: [ + '--cap-lints', 'allow', 
+ '--cfg', 'feature="v2_66"', + '--cfg', 'feature="v2_64"', + '--cfg', 'feature="v2_62"', + '--cfg', 'feature="v2_60"', + '--cfg', 'feature="v2_58"', + ], + # should also link with glib; don't bother doing it here since all + # QEMU targets have it + dependencies: [libc_rs], +) + +glib_sys_dep = declare_dependency( + link_with: _glib_sys_rs, +) + +meson.override_dependency('glib-sys-0.21-rs', glib_sys_dep) diff --git a/subprojects/packagefiles/proc-macro2-1-rs/meson.build b/subprojects/packagefiles/proc-macro2-1-rs/meson.build index 5759df3ecc9b0..ba7de07029271 100644 --- a/subprojects/packagefiles/proc-macro2-1-rs/meson.build +++ b/subprojects/packagefiles/proc-macro2-1-rs/meson.build @@ -1,6 +1,6 @@ project('proc-macro2-1-rs', 'rust', meson_version: '>=1.5.0', - version: '1.0.84', + version: '1.0.95', license: 'MIT OR Apache-2.0', default_options: []) diff --git a/subprojects/packagefiles/syn-2-rs/meson.build b/subprojects/packagefiles/syn-2-rs/meson.build index a0094174084ec..3e6dc318a9c7b 100644 --- a/subprojects/packagefiles/syn-2-rs/meson.build +++ b/subprojects/packagefiles/syn-2-rs/meson.build @@ -1,6 +1,6 @@ project('syn-2-rs', 'rust', meson_version: '>=1.5.0', - version: '2.0.66', + version: '2.0.104', license: 'MIT OR Apache-2.0', default_options: []) diff --git a/subprojects/proc-macro2-1-rs.wrap b/subprojects/proc-macro2-1-rs.wrap index 6c9369f0df3f3..0f06cd8e111f8 100644 --- a/subprojects/proc-macro2-1-rs.wrap +++ b/subprojects/proc-macro2-1-rs.wrap @@ -1,8 +1,8 @@ [wrap-file] -directory = proc-macro2-1.0.84 -source_url = https://crates.io/api/v1/crates/proc-macro2/1.0.84/download -source_filename = proc-macro2-1.0.84.0.tar.gz -source_hash = ec96c6a92621310b51366f1e28d05ef11489516e93be030060e5fc12024a49d6 +directory = proc-macro2-1.0.95 +source_url = https://crates.io/api/v1/crates/proc-macro2/1.0.95/download +source_filename = proc-macro2-1.0.95.0.tar.gz +source_hash = 02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778 #method = cargo 
patch_directory = proc-macro2-1-rs diff --git a/subprojects/syn-2-rs.wrap b/subprojects/syn-2-rs.wrap index d79cf750fb492..1e5e9d9fb6e32 100644 --- a/subprojects/syn-2-rs.wrap +++ b/subprojects/syn-2-rs.wrap @@ -1,8 +1,8 @@ [wrap-file] -directory = syn-2.0.66 -source_url = https://crates.io/api/v1/crates/syn/2.0.66/download -source_filename = syn-2.0.66.0.tar.gz -source_hash = c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5 +directory = syn-2.0.104 +source_url = https://crates.io/api/v1/crates/syn/2.0.104/download +source_filename = syn-2.0.104.0.tar.gz +source_hash = 17b6f705963418cdb9927482fa304bc562ece2fdd4f616084c50b7023b435a40 #method = cargo patch_directory = syn-2-rs diff --git a/system/cpu-timers.c b/system/cpu-timers.c index cb35fa62b8a41..9919b46230f1c 100644 --- a/system/cpu-timers.c +++ b/system/cpu-timers.c @@ -246,14 +246,14 @@ void qemu_timer_notify_cb(void *opaque, QEMUClockType type) if (qemu_in_vcpu_thread()) { /* - * A CPU is currently running; kick it back out to the + * A CPU is currently running; send it out of the * tcg_cpu_exec() loop so it will recalculate its * icount deadline immediately. */ - qemu_cpu_kick(current_cpu); + cpu_exit(current_cpu); } else if (first_cpu) { /* - * qemu_cpu_kick is not enough to kick a halted CPU out of + * cpu_exit() is not enough to kick a halted CPU out of * qemu_tcg_wait_io_event. async_run_on_cpu, instead, * causes cpu_thread_is_idle to return false. This way, * handle_icount_deadline can run. diff --git a/system/cpus.c b/system/cpus.c index 256723558d0c8..aa7bfcf56e5c5 100644 --- a/system/cpus.c +++ b/system/cpus.c @@ -254,9 +254,15 @@ int64_t cpus_get_elapsed_ticks(void) return cpu_get_ticks(); } +void cpu_set_interrupt(CPUState *cpu, int mask) +{ + /* Pairs with cpu_test_interrupt(). 
*/ + qatomic_or(&cpu->interrupt_request, mask); +} + void generic_handle_interrupt(CPUState *cpu, int mask) { - cpu->interrupt_request |= mask; + cpu_set_interrupt(cpu, mask); if (!qemu_cpu_is_self(cpu)) { qemu_cpu_kick(cpu); @@ -444,7 +450,7 @@ static void qemu_cpu_stop(CPUState *cpu, bool exit) qemu_cond_broadcast(&qemu_pause_cond); } -void qemu_wait_io_event_common(CPUState *cpu) +void qemu_process_cpu_events_common(CPUState *cpu) { qatomic_set_mb(&cpu->thread_kicked, false); if (cpu->stop) { @@ -453,10 +459,11 @@ void qemu_wait_io_event_common(CPUState *cpu) process_queued_cpu_work(cpu); } -void qemu_wait_io_event(CPUState *cpu) +void qemu_process_cpu_events(CPUState *cpu) { bool slept = false; + qatomic_set(&cpu->exit_request, false); while (cpu_thread_is_idle(cpu)) { if (!slept) { slept = true; @@ -468,7 +475,7 @@ void qemu_wait_io_event(CPUState *cpu) qemu_plugin_vcpu_resume_cb(cpu); } - qemu_wait_io_event_common(cpu); + qemu_process_cpu_events_common(cpu); } void cpus_kick_thread(CPUState *cpu) @@ -598,7 +605,7 @@ void cpu_pause(CPUState *cpu) qemu_cpu_stop(cpu, true); } else { cpu->stop = true; - qemu_cpu_kick(cpu); + cpu_exit(cpu); } } @@ -638,6 +645,7 @@ void pause_all_vcpus(void) while (!all_vcpus_paused()) { qemu_cond_wait(&qemu_pause_cond, &bql); + /* FIXME: is this needed? 
*/ CPU_FOREACH(cpu) { qemu_cpu_kick(cpu); } @@ -666,7 +674,7 @@ void cpu_remove_sync(CPUState *cpu) { cpu->stop = true; cpu->unplug = true; - qemu_cpu_kick(cpu); + cpu_exit(cpu); bql_unlock(); qemu_thread_join(cpu->thread); bql_lock(); diff --git a/system/globals.c b/system/globals.c index 9640c9511e9aa..98f9876d5d4fc 100644 --- a/system/globals.c +++ b/system/globals.c @@ -52,7 +52,6 @@ bool vga_interface_created; Chardev *parallel_hds[MAX_PARALLEL_PORTS]; QEMUOptionRom option_rom[MAX_OPTION_ROMS]; int nb_option_roms; -int old_param; const char *qemu_name; unsigned int nb_prom_envs; const char *prom_envs[MAX_PROM_ENVS]; diff --git a/system/memory.c b/system/memory.c index 56465479406f4..8b84661ae36c5 100644 --- a/system/memory.c +++ b/system/memory.c @@ -25,6 +25,7 @@ #include "qemu/target-info.h" #include "qom/object.h" #include "trace.h" +#include "system/physmem.h" #include "system/ram_addr.h" #include "system/kvm.h" #include "system/runstate.h" @@ -1796,16 +1797,37 @@ static void memory_region_finalize(Object *obj) { MemoryRegion *mr = MEMORY_REGION(obj); - assert(!mr->container); - - /* We know the region is not visible in any address space (it - * does not have a container and cannot be a root either because - * it has no references, so we can blindly clear mr->enabled. - * memory_region_set_enabled instead could trigger a transaction - * and cause an infinite loop. + /* + * Each memory region (that can be freed) must have an owner, and it + * always has the same lifecycle of its owner. It means when reaching + * here, the memory region's owner's refcount is zero. + * + * Here it is possible that the MR has: + * + * (1) mr->container set, which means this MR is a subregion of a + * container MR. In this case they must share the same owner as the + * container (otherwise the container should have kept a refcount + * of this MR's owner). 
+ * + * (2) mr->subregions non-empty, which means this MR is a container of + * one or more other MRs (which might have the the owner as this + * MR, or a different owner). + * + * We know the MR, or any MR that is attached to this one as either + * container or children, is not visible in any address space, because + * otherwise the address space should have taken at least one refcount + * of this MR's owner. So we can blindly clear mr->enabled. + * + * memory_region_set_enabled instead could trigger a transaction and + * cause an infinite loop. */ mr->enabled = false; memory_region_transaction_begin(); + if (mr->container) { + /* Must share the owner; see above comments */ + assert(mr->container->owner == mr->owner); + memory_region_del_subregion(mr->container, mr); + } while (!QTAILQ_EMPTY(&mr->subregions)) { MemoryRegion *subregion = QTAILQ_FIRST(&mr->subregions); memory_region_del_subregion(mr, subregion); @@ -2023,13 +2045,9 @@ void memory_region_notify_iommu_one(IOMMUNotifier *notifier, return; } - if (notifier->notifier_flags & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) { - /* Crop (iova, addr_mask) to range */ - tmp.iova = MAX(tmp.iova, notifier->start); - tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova; - } else { - assert(entry->iova >= notifier->start && entry_end <= notifier->end); - } + /* Crop (iova, addr_mask) to range */ + tmp.iova = MAX(tmp.iova, notifier->start); + tmp.addr_mask = MIN(entry_end, notifier->end) - tmp.iova; if (event->type & notifier->notifier_flags) { notifier->notify(notifier, &tmp); @@ -2254,7 +2272,7 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size) { assert(mr->ram_block); - cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, + physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, size, memory_region_get_dirty_log_mask(mr)); } @@ -2358,7 +2376,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snapshot; 
assert(mr->ram_block); memory_region_sync_dirty_bitmap(mr, false); - snapshot = cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); + snapshot = physical_memory_snapshot_and_clear_dirty(mr, addr, size, client); memory_global_after_dirty_log_sync(); return snapshot; } @@ -2367,7 +2385,7 @@ bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *sna hwaddr addr, hwaddr size) { assert(mr->ram_block); - return cpu_physical_memory_snapshot_get_dirty(snap, + return physical_memory_snapshot_get_dirty(snap, memory_region_get_ram_addr(mr) + addr, size); } @@ -2405,7 +2423,7 @@ void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr, hwaddr size, unsigned client) { assert(mr->ram_block); - cpu_physical_memory_test_and_clear_dirty( + physical_memory_test_and_clear_dirty( memory_region_get_ram_addr(mr) + addr, size, client); } @@ -2546,6 +2564,21 @@ void memory_region_clear_flush_coalesced(MemoryRegion *mr) } } +void memory_region_enable_lockless_io(MemoryRegion *mr) +{ + mr->lockless_io = true; + /* + * reentrancy_guard has per device scope, that when enabled + * will effectively prevent concurrent access to device's IO + * MemoryRegion(s) by not calling accessor callback. + * + * Turn it off for lock-less IO enabled devices, to allow + * concurrent IO. + * TODO: remove this when reentrancy_guard becomes per transaction. 
+ */ + mr->disable_reentrancy_guard = true; +} + void memory_region_add_eventfd(MemoryRegion *mr, hwaddr addr, unsigned size, @@ -2625,7 +2658,10 @@ static void memory_region_update_container_subregions(MemoryRegion *subregion) memory_region_transaction_begin(); - memory_region_ref(subregion); + if (mr->owner != subregion->owner) { + memory_region_ref(subregion); + } + QTAILQ_FOREACH(other, &mr->subregions, subregions_link) { if (subregion->priority >= other->priority) { QTAILQ_INSERT_BEFORE(other, subregion, subregions_link); @@ -2683,7 +2719,11 @@ void memory_region_del_subregion(MemoryRegion *mr, assert(alias->mapped_via_alias >= 0); } QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link); - memory_region_unref(subregion); + + if (mr->owner != subregion->owner) { + memory_region_unref(subregion); + } + memory_region_update_pending |= mr->enabled && subregion->enabled; memory_region_transaction_commit(); } @@ -3235,7 +3275,14 @@ static void do_address_space_destroy(AddressSpace *as) memory_region_unref(as->root); } -void address_space_destroy(AddressSpace *as) +static void do_address_space_destroy_free(AddressSpace *as) +{ + do_address_space_destroy(as); + g_free(as); +} + +/* Detach address space from global view, notify all listeners */ +static void address_space_detach(AddressSpace *as) { MemoryRegion *root = as->root; @@ -3250,9 +3297,20 @@ void address_space_destroy(AddressSpace *as) * values to expire before freeing the data. 
*/ as->root = root; +} + +void address_space_destroy(AddressSpace *as) +{ + address_space_detach(as); call_rcu(as, do_address_space_destroy, rcu); } +void address_space_destroy_free(AddressSpace *as) +{ + address_space_detach(as); + call_rcu(as, do_address_space_destroy_free, rcu); +} + static const char *memory_region_type(MemoryRegion *mr) { if (mr->alias) { diff --git a/system/memory_ldst.c.inc b/system/memory_ldst.c.inc index 7f32d3d9ff39d..333da209d1abc 100644 --- a/system/memory_ldst.c.inc +++ b/system/memory_ldst.c.inc @@ -287,7 +287,7 @@ void glue(address_space_stl_notdirty, SUFFIX)(ARG1_DECL, dirty_log_mask = memory_region_get_dirty_log_mask(mr); dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); - cpu_physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, + physical_memory_set_dirty_range(memory_region_get_ram_addr(mr) + addr, 4, dirty_log_mask); r = MEMTX_OK; } diff --git a/system/physmem.c b/system/physmem.c index 130c148ffb5c1..a340ca3e61663 100644 --- a/system/physmem.c +++ b/system/physmem.c @@ -43,6 +43,8 @@ #include "system/kvm.h" #include "system/tcg.h" #include "system/qtest.h" +#include "system/physmem.h" +#include "system/ramblock.h" #include "qemu/timer.h" #include "qemu/config-file.h" #include "qemu/error-report.h" @@ -165,13 +167,11 @@ static bool ram_is_cpr_compatible(RAMBlock *rb); * CPUAddressSpace: all the information a CPU needs about an AddressSpace * @cpu: the CPU whose AddressSpace this is * @as: the AddressSpace itself - * @memory_dispatch: its dispatch pointer (cached, RCU protected) * @tcg_as_listener: listener for tracking changes to the AddressSpace */ typedef struct CPUAddressSpace { CPUState *cpu; AddressSpace *as; - struct AddressSpaceDispatch *memory_dispatch; MemoryListener tcg_as_listener; } CPUAddressSpace; @@ -692,7 +692,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, IOMMUTLBEntry iotlb; int iommu_idx; hwaddr addr = orig_addr; - AddressSpaceDispatch *d = 
cpu->cpu_ases[asidx].memory_dispatch; + AddressSpaceDispatch *d = address_space_to_dispatch(cpu->cpu_ases[asidx].as); for (;;) { section = address_space_translate_internal(d, addr, &addr, plen, false); @@ -753,7 +753,7 @@ MemoryRegionSection *iotlb_to_section(CPUState *cpu, { int asidx = cpu_asidx_from_attrs(cpu, attrs); CPUAddressSpace *cpuas = &cpu->cpu_ases[asidx]; - AddressSpaceDispatch *d = cpuas->memory_dispatch; + AddressSpaceDispatch *d = address_space_to_dispatch(cpuas->as); int section_index = index & ~TARGET_PAGE_MASK; MemoryRegionSection *ret; @@ -795,12 +795,8 @@ void cpu_address_space_init(CPUState *cpu, int asidx, cpu->as = as; } - /* KVM cannot currently support multiple address spaces. */ - assert(asidx == 0 || !kvm_enabled()); - if (!cpu->cpu_ases) { cpu->cpu_ases = g_new0(CPUAddressSpace, cpu->num_ases); - cpu->cpu_ases_count = cpu->num_ases; } newas = &cpu->cpu_ases[asidx]; @@ -814,32 +810,29 @@ void cpu_address_space_init(CPUState *cpu, int asidx, } } -void cpu_address_space_destroy(CPUState *cpu, int asidx) +void cpu_destroy_address_spaces(CPUState *cpu) { CPUAddressSpace *cpuas; + int asidx; assert(cpu->cpu_ases); - assert(asidx >= 0 && asidx < cpu->num_ases); - /* KVM cannot currently support multiple address spaces. 
*/ - assert(asidx == 0 || !kvm_enabled()); - - cpuas = &cpu->cpu_ases[asidx]; - if (tcg_enabled()) { - memory_listener_unregister(&cpuas->tcg_as_listener); - } - address_space_destroy(cpuas->as); - g_free_rcu(cpuas->as, rcu); + /* convenience alias just points to some cpu_ases[n] */ + cpu->as = NULL; - if (asidx == 0) { - /* reset the convenience alias for address space 0 */ - cpu->as = NULL; + for (asidx = 0; asidx < cpu->num_ases; asidx++) { + cpuas = &cpu->cpu_ases[asidx]; + if (!cpuas->as) { + /* This index was never initialized; no deinit needed */ + continue; + } + if (tcg_enabled()) { + memory_listener_unregister(&cpuas->tcg_as_listener); + } + g_clear_pointer(&cpuas->as, address_space_destroy_free); } - if (--cpu->cpu_ases_count == 0) { - g_free(cpu->cpu_ases); - cpu->cpu_ases = NULL; - } + g_clear_pointer(&cpu->cpu_ases, g_free); } AddressSpace *cpu_get_address_space(CPUState *cpu, int asidx) @@ -907,8 +900,197 @@ void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length) } } +void physical_memory_dirty_bits_cleared(ram_addr_t start, ram_addr_t length) +{ + if (tcg_enabled()) { + tlb_reset_dirty_range_all(start, length); + } +} + +static bool physical_memory_get_dirty(ram_addr_t start, ram_addr_t length, + unsigned client) +{ + DirtyMemoryBlocks *blocks; + unsigned long end, page; + unsigned long idx, offset, base; + bool dirty = false; + + assert(client < DIRTY_MEMORY_NUM); + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + + WITH_RCU_READ_LOCK_GUARD() { + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); + + idx = page / DIRTY_MEMORY_BLOCK_SIZE; + offset = page % DIRTY_MEMORY_BLOCK_SIZE; + base = page - offset; + while (page < end) { + unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE); + unsigned long num = next - base; + unsigned long found = find_next_bit(blocks->blocks[idx], + num, offset); + if (found < num) { + dirty = true; + break; + } + + page = next; + idx++; + 
offset = 0; + base += DIRTY_MEMORY_BLOCK_SIZE; + } + } + + return dirty; +} + +bool physical_memory_get_dirty_flag(ram_addr_t addr, unsigned client) +{ + return physical_memory_get_dirty(addr, 1, client); +} + +bool physical_memory_is_clean(ram_addr_t addr) +{ + bool vga = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_VGA); + bool code = physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_CODE); + bool migration = + physical_memory_get_dirty_flag(addr, DIRTY_MEMORY_MIGRATION); + return !(vga && code && migration); +} + +static bool physical_memory_all_dirty(ram_addr_t start, ram_addr_t length, + unsigned client) +{ + DirtyMemoryBlocks *blocks; + unsigned long end, page; + unsigned long idx, offset, base; + bool dirty = true; + + assert(client < DIRTY_MEMORY_NUM); + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + + RCU_READ_LOCK_GUARD(); + + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); + + idx = page / DIRTY_MEMORY_BLOCK_SIZE; + offset = page % DIRTY_MEMORY_BLOCK_SIZE; + base = page - offset; + while (page < end) { + unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE); + unsigned long num = next - base; + unsigned long found = find_next_zero_bit(blocks->blocks[idx], + num, offset); + if (found < num) { + dirty = false; + break; + } + + page = next; + idx++; + offset = 0; + base += DIRTY_MEMORY_BLOCK_SIZE; + } + + return dirty; +} + +uint8_t physical_memory_range_includes_clean(ram_addr_t start, + ram_addr_t length, + uint8_t mask) +{ + uint8_t ret = 0; + + if (mask & (1 << DIRTY_MEMORY_VGA) && + !physical_memory_all_dirty(start, length, DIRTY_MEMORY_VGA)) { + ret |= (1 << DIRTY_MEMORY_VGA); + } + if (mask & (1 << DIRTY_MEMORY_CODE) && + !physical_memory_all_dirty(start, length, DIRTY_MEMORY_CODE)) { + ret |= (1 << DIRTY_MEMORY_CODE); + } + if (mask & (1 << DIRTY_MEMORY_MIGRATION) && + !physical_memory_all_dirty(start, length, DIRTY_MEMORY_MIGRATION)) { + ret |= (1 << 
DIRTY_MEMORY_MIGRATION); + } + return ret; +} + +void physical_memory_set_dirty_flag(ram_addr_t addr, unsigned client) +{ + unsigned long page, idx, offset; + DirtyMemoryBlocks *blocks; + + assert(client < DIRTY_MEMORY_NUM); + + page = addr >> TARGET_PAGE_BITS; + idx = page / DIRTY_MEMORY_BLOCK_SIZE; + offset = page % DIRTY_MEMORY_BLOCK_SIZE; + + RCU_READ_LOCK_GUARD(); + + blocks = qatomic_rcu_read(&ram_list.dirty_memory[client]); + + set_bit_atomic(offset, blocks->blocks[idx]); +} + +void physical_memory_set_dirty_range(ram_addr_t start, ram_addr_t length, + uint8_t mask) +{ + DirtyMemoryBlocks *blocks[DIRTY_MEMORY_NUM]; + unsigned long end, page; + unsigned long idx, offset, base; + int i; + + if (!mask && !xen_enabled()) { + return; + } + + end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS; + page = start >> TARGET_PAGE_BITS; + + WITH_RCU_READ_LOCK_GUARD() { + for (i = 0; i < DIRTY_MEMORY_NUM; i++) { + blocks[i] = qatomic_rcu_read(&ram_list.dirty_memory[i]); + } + + idx = page / DIRTY_MEMORY_BLOCK_SIZE; + offset = page % DIRTY_MEMORY_BLOCK_SIZE; + base = page - offset; + while (page < end) { + unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE); + + if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) { + bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx], + offset, next - page); + } + if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) { + bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx], + offset, next - page); + } + if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) { + bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx], + offset, next - page); + } + + page = next; + idx++; + offset = 0; + base += DIRTY_MEMORY_BLOCK_SIZE; + } + } + + if (xen_enabled()) { + xen_hvm_modified_memory(start, length); + } +} + /* Note: start and end must be within the same ram block. 
*/ -bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, +bool physical_memory_test_and_clear_dirty(ram_addr_t start, ram_addr_t length, unsigned client) { @@ -950,13 +1132,20 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start, } if (dirty) { - cpu_physical_memory_dirty_bits_cleared(start, length); + physical_memory_dirty_bits_cleared(start, length); } return dirty; } -DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty +static void physical_memory_clear_dirty_range(ram_addr_t addr, ram_addr_t length) +{ + physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_MIGRATION); + physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_VGA); + physical_memory_test_and_clear_dirty(addr, length, DIRTY_MEMORY_CODE); +} + +DirtyBitmapSnapshot *physical_memory_snapshot_and_clear_dirty (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client) { DirtyMemoryBlocks *blocks; @@ -1003,14 +1192,14 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty } } - cpu_physical_memory_dirty_bits_cleared(start, length); + physical_memory_dirty_bits_cleared(start, length); memory_region_clear_dirty_bitmap(mr, offset, length); return snap; } -bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, +bool physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, ram_addr_t start, ram_addr_t length) { @@ -1031,6 +1220,109 @@ bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap, return false; } +uint64_t physical_memory_set_dirty_lebitmap(unsigned long *bitmap, + ram_addr_t start, + ram_addr_t pages) +{ + unsigned long i, j; + unsigned long page_number, c, nbits; + hwaddr addr; + ram_addr_t ram_addr; + uint64_t num_dirty = 0; + unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; + unsigned long hpratio = qemu_real_host_page_size() / TARGET_PAGE_SIZE; + unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); + + /* start address is aligned at the start of a word? 
*/ + if ((((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) && + (hpratio == 1)) { + unsigned long **blocks[DIRTY_MEMORY_NUM]; + unsigned long idx; + unsigned long offset; + long k; + long nr = BITS_TO_LONGS(pages); + + idx = (start >> TARGET_PAGE_BITS) / DIRTY_MEMORY_BLOCK_SIZE; + offset = BIT_WORD((start >> TARGET_PAGE_BITS) % + DIRTY_MEMORY_BLOCK_SIZE); + + WITH_RCU_READ_LOCK_GUARD() { + for (i = 0; i < DIRTY_MEMORY_NUM; i++) { + blocks[i] = + qatomic_rcu_read(&ram_list.dirty_memory[i])->blocks; + } + + for (k = 0; k < nr; k++) { + if (bitmap[k]) { + unsigned long temp = leul_to_cpu(bitmap[k]); + + nbits = ctpopl(temp); + qatomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp); + + if (global_dirty_tracking) { + qatomic_or( + &blocks[DIRTY_MEMORY_MIGRATION][idx][offset], + temp); + if (unlikely( + global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { + total_dirty_pages += nbits; + } + } + + num_dirty += nbits; + + if (tcg_enabled()) { + qatomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], + temp); + } + } + + if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) { + offset = 0; + idx++; + } + } + } + + if (xen_enabled()) { + xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS); + } + } else { + uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL + : DIRTY_CLIENTS_NOCODE; + + if (!global_dirty_tracking) { + clients &= ~(1 << DIRTY_MEMORY_MIGRATION); + } + + /* + * bitmap-traveling is faster than memory-traveling (for addr...) + * especially when most of the memory is not dirty. 
+ */ + for (i = 0; i < len; i++) { + if (bitmap[i] != 0) { + c = leul_to_cpu(bitmap[i]); + nbits = ctpopl(c); + if (unlikely(global_dirty_tracking & GLOBAL_DIRTY_DIRTY_RATE)) { + total_dirty_pages += nbits; + } + num_dirty += nbits; + do { + j = ctzl(c); + c &= ~(1ul << j); + page_number = (i * HOST_LONG_BITS + j) * hpratio; + addr = page_number * TARGET_PAGE_SIZE; + ram_addr = start + addr; + physical_memory_set_dirty_range(ram_addr, + TARGET_PAGE_SIZE * hpratio, clients); + } while (c != 0); + } + } + } + + return num_dirty; +} + static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end, uint16_t section); static subpage_t *subpage_init(FlatView *fv, hwaddr base); @@ -1787,9 +2079,9 @@ int qemu_ram_resize(RAMBlock *block, ram_addr_t newsize, Error **errp) ram_block_notify_resize(block->host, oldsize, newsize); } - cpu_physical_memory_clear_dirty_range(block->offset, block->used_length); + physical_memory_clear_dirty_range(block->offset, block->used_length); block->used_length = newsize; - cpu_physical_memory_set_dirty_range(block->offset, block->used_length, + physical_memory_set_dirty_range(block->offset, block->used_length, DIRTY_CLIENTS_ALL); memory_region_set_size(block->mr, unaligned_size); if (block->resized) { @@ -1811,7 +2103,7 @@ void qemu_ram_msync(RAMBlock *block, ram_addr_t start, ram_addr_t length) #ifdef CONFIG_LIBPMEM /* The lack of support for pmem should not block the sync */ - if (ramblock_is_pmem(block)) { + if (ram_block_is_pmem(block)) { void *addr = ramblock_ptr(block, start); pmem_persist(addr, length); return; @@ -1994,7 +2286,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) ram_list.version++; qemu_mutex_unlock_ramlist(); - cpu_physical_memory_set_dirty_range(new_block->offset, + physical_memory_set_dirty_range(new_block->offset, new_block->used_length, DIRTY_CLIENTS_ALL); @@ -2780,9 +3072,6 @@ static void tcg_log_global_after_sync(MemoryListener *listener) static void tcg_commit_cpu(CPUState *cpu, 
run_on_cpu_data data) { - CPUAddressSpace *cpuas = data.host_ptr; - - cpuas->memory_dispatch = address_space_to_dispatch(cpuas->as); tlb_flush(cpu); } @@ -2798,11 +3087,7 @@ static void tcg_commit(MemoryListener *listener) cpu = cpuas->cpu; /* - * Defer changes to as->memory_dispatch until the cpu is quiescent. - * Otherwise we race between (1) other cpu threads and (2) ongoing - * i/o for the current cpu thread, with data cached by mmu_lookup(). - * - * In addition, queueing the work function will kick the cpu back to + * Queueing the work function will kick the cpu back to * the main loop, which will end the RCU critical section and reclaim * the memory data structures. * @@ -2850,19 +3135,19 @@ static void invalidate_and_set_dirty(MemoryRegion *mr, hwaddr addr, addr += ramaddr; /* No early return if dirty_log_mask is or becomes 0, because - * cpu_physical_memory_set_dirty_range will still call + * physical_memory_set_dirty_range will still call * xen_modified_memory. */ if (dirty_log_mask) { dirty_log_mask = - cpu_physical_memory_range_includes_clean(addr, length, dirty_log_mask); + physical_memory_range_includes_clean(addr, length, dirty_log_mask); } if (dirty_log_mask & (1 << DIRTY_MEMORY_CODE)) { assert(tcg_enabled()); tb_invalidate_phys_range(NULL, addr, addr + length - 1); dirty_log_mask &= ~(1 << DIRTY_MEMORY_CODE); } - cpu_physical_memory_set_dirty_range(addr, length, dirty_log_mask); + physical_memory_set_dirty_range(addr, length, dirty_log_mask); } void memory_region_flush_rom_device(MemoryRegion *mr, hwaddr addr, hwaddr size) @@ -2909,7 +3194,7 @@ bool prepare_mmio_access(MemoryRegion *mr) { bool release_lock = false; - if (!bql_locked()) { + if (!bql_locked() && !mr->lockless_io) { bql_lock(); release_lock = true; } @@ -3036,7 +3321,7 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, l = len; mr = flatview_translate(fv, addr, &mr_addr, &l, true, attrs); - if (!flatview_access_allowed(mr, attrs, addr, len)) { + if 
(!flatview_access_allowed(mr, attrs, mr_addr, l)) { return MEMTX_ACCESS_ERROR; } return flatview_write_continue(fv, addr, attrs, buf, len, @@ -3127,7 +3412,7 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr, l = len; mr = flatview_translate(fv, addr, &mr_addr, &l, false, attrs); - if (!flatview_access_allowed(mr, attrs, addr, len)) { + if (!flatview_access_allowed(mr, attrs, mr_addr, l)) { return MEMTX_ACCESS_ERROR; } return flatview_read_continue(fv, addr, attrs, buf, len, @@ -3194,68 +3479,45 @@ MemTxResult address_space_set(AddressSpace *as, hwaddr addr, return error; } -void cpu_physical_memory_rw(hwaddr addr, void *buf, - hwaddr len, bool is_write) +void cpu_physical_memory_read(hwaddr addr, void *buf, hwaddr len) { - address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED, - buf, len, is_write); + address_space_read(&address_space_memory, addr, + MEMTXATTRS_UNSPECIFIED, buf, len); } -enum write_rom_type { - WRITE_DATA, - FLUSH_CACHE, -}; - -static inline MemTxResult address_space_write_rom_internal(AddressSpace *as, - hwaddr addr, - MemTxAttrs attrs, - const void *ptr, - hwaddr len, - enum write_rom_type type) +void cpu_physical_memory_write(hwaddr addr, const void *buf, hwaddr len) { - hwaddr l; - uint8_t *ram_ptr; - hwaddr addr1; - MemoryRegion *mr; - const uint8_t *buf = ptr; + address_space_write(&address_space_memory, addr, + MEMTXATTRS_UNSPECIFIED, buf, len); +} +/* used for ROM loading : can write in RAM and ROM */ +MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, + MemTxAttrs attrs, + const void *buf, hwaddr len) +{ RCU_READ_LOCK_GUARD(); while (len > 0) { - l = len; - mr = address_space_translate(as, addr, &addr1, &l, true, attrs); + hwaddr addr1, l = len; + MemoryRegion *mr = address_space_translate(as, addr, &addr1, &l, + true, attrs); if (!memory_region_supports_direct_access(mr)) { l = memory_access_size(mr, l, addr1); } else { /* ROM/RAM case */ - ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); - 
switch (type) { - case WRITE_DATA: - memcpy(ram_ptr, buf, l); - invalidate_and_set_dirty(mr, addr1, l); - break; - case FLUSH_CACHE: - flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); - break; - } + void *ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); + memcpy(ram_ptr, buf, l); + invalidate_and_set_dirty(mr, addr1, l); } len -= l; - buf += l; addr += l; + buf += l; } return MEMTX_OK; } -/* used for ROM loading : can write in RAM and ROM */ -MemTxResult address_space_write_rom(AddressSpace *as, hwaddr addr, - MemTxAttrs attrs, - const void *buf, hwaddr len) -{ - return address_space_write_rom_internal(as, addr, attrs, - buf, len, WRITE_DATA); -} - -void cpu_flush_icache_range(hwaddr start, hwaddr len) +void address_space_flush_icache_range(AddressSpace *as, hwaddr addr, hwaddr len) { /* * This function should do the same thing as an icache flush that was @@ -3267,9 +3529,22 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len) return; } - address_space_write_rom_internal(&address_space_memory, - start, MEMTXATTRS_UNSPECIFIED, - NULL, len, FLUSH_CACHE); + RCU_READ_LOCK_GUARD(); + while (len > 0) { + hwaddr addr1, l = len; + MemoryRegion *mr = address_space_translate(as, addr, &addr1, &l, true, + MEMTXATTRS_UNSPECIFIED); + + if (!memory_region_supports_direct_access(mr)) { + l = memory_access_size(mr, l, addr1); + } else { + /* ROM/RAM case */ + void *ram_ptr = qemu_map_ram_ptr(mr->ram_block, addr1); + flush_idcache_range((uintptr_t)ram_ptr, (uintptr_t)ram_ptr, l); + } + len -= l; + addr += l; + } } /* @@ -3385,6 +3660,17 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, return flatview_access_valid(fv, addr, len, is_write, attrs); } +bool address_space_is_io(AddressSpace *as, hwaddr addr) +{ + MemoryRegion *mr; + + RCU_READ_LOCK_GUARD(); + mr = address_space_translate(as, addr, &addr, NULL, false, + MEMTXATTRS_UNSPECIFIED); + + return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); +} + static hwaddr 
flatview_extend_translation(FlatView *fv, hwaddr addr, hwaddr target_len, @@ -3779,19 +4065,6 @@ int cpu_memory_rw_debug(CPUState *cpu, vaddr addr, return 0; } -bool cpu_physical_memory_is_io(hwaddr phys_addr) -{ - MemoryRegion*mr; - hwaddr l = 1; - - RCU_READ_LOCK_GUARD(); - mr = address_space_translate(&address_space_memory, - phys_addr, &phys_addr, &l, false, - MEMTXATTRS_UNSPECIFIED); - - return !(memory_region_is_ram(mr) || memory_region_is_romd(mr)); -} - int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) { RAMBlock *block; @@ -3808,18 +4081,18 @@ int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque) } /* - * Unmap pages of memory from start to start+length such that + * Unmap pages of memory from offset to offset+length such that * they a) read as 0, b) Trigger whatever fault mechanism * the OS provides for postcopy. * The pages must be unmapped by the end of the function. * Returns: 0 on success, none-0 on failure * */ -int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) +int ram_block_discard_range(RAMBlock *rb, uint64_t offset, size_t length) { int ret = -1; - uint8_t *host_startaddr = rb->host + start; + uint8_t *host_startaddr = rb->host + offset; if (!QEMU_PTR_IS_ALIGNED(host_startaddr, rb->page_size)) { error_report("%s: Unaligned start address: %p", @@ -3827,7 +4100,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) goto err; } - if ((start + length) <= rb->max_length) { + if ((offset + length) <= rb->max_length) { bool need_madvise, need_fallocate; if (!QEMU_IS_ALIGNED(length, rb->page_size)) { error_report("%s: Unaligned length: %zx", __func__, length); @@ -3878,11 +4151,11 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) } ret = fallocate(rb->fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start + rb->fd_offset, length); + offset + rb->fd_offset, length); if (ret) { ret = -errno; error_report("%s: Failed to fallocate %s:%" PRIx64 "+%" PRIx64 - " 
+%zx (%d)", __func__, rb->idstr, start, + " +%zx (%d)", __func__, rb->idstr, offset, rb->fd_offset, length, ret); goto err; } @@ -3890,7 +4163,7 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) ret = -ENOSYS; error_report("%s: fallocate not available/file" "%s:%" PRIx64 "+%" PRIx64 " +%zx (%d)", __func__, - rb->idstr, start, rb->fd_offset, length, ret); + rb->idstr, offset, rb->fd_offset, length, ret); goto err; #endif } @@ -3910,13 +4183,13 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) ret = -errno; error_report("%s: Failed to discard range " "%s:%" PRIx64 " +%zx (%d)", - __func__, rb->idstr, start, length, ret); + __func__, rb->idstr, offset, length, ret); goto err; } #else ret = -ENOSYS; error_report("%s: MADVISE not available %s:%" PRIx64 " +%zx (%d)", - __func__, rb->idstr, start, length, ret); + __func__, rb->idstr, offset, length, ret); goto err; #endif } @@ -3924,14 +4197,14 @@ int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length) need_madvise, need_fallocate, ret); } else { error_report("%s: Overrun block '%s' (%" PRIu64 "/%zx/" RAM_ADDR_FMT")", - __func__, rb->idstr, start, length, rb->max_length); + __func__, rb->idstr, offset, length, rb->max_length); } err: return ret; } -int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, +int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t offset, size_t length) { int ret = -1; @@ -3939,23 +4212,23 @@ int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start, #ifdef CONFIG_FALLOCATE_PUNCH_HOLE /* ignore fd_offset with guest_memfd */ ret = fallocate(rb->guest_memfd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, - start, length); + offset, length); if (ret) { ret = -errno; error_report("%s: Failed to fallocate %s:%" PRIx64 " +%zx (%d)", - __func__, rb->idstr, start, length, ret); + __func__, rb->idstr, offset, length, ret); } #else ret = -ENOSYS; error_report("%s: fallocate not available %s:%" PRIx64 " +%zx 
(%d)", - __func__, rb->idstr, start, length, ret); + __func__, rb->idstr, offset, length, ret); #endif return ret; } -bool ramblock_is_pmem(RAMBlock *rb) +bool ram_block_is_pmem(RAMBlock *rb) { return rb->flags & RAM_PMEM; } diff --git a/system/qdev-monitor.c b/system/qdev-monitor.c index 2ac92d0a076cb..ec4a2394ceb31 100644 --- a/system/qdev-monitor.c +++ b/system/qdev-monitor.c @@ -73,9 +73,9 @@ typedef struct QDevAlias /* Please keep this table sorted by typename. */ static const QDevAlias qdev_alias_table[] = { - { "AC97", "ac97" }, /* -soundhw name */ + { "AC97", "ac97" }, { "e1000", "e1000-82540em" }, - { "ES1370", "es1370" }, /* -soundhw name */ + { "ES1370", "es1370" }, { "ich9-ahci", "ahci" }, { "lsi53c895a", "lsi" }, { "virtio-9p-device", "virtio-9p", QEMU_ARCH_VIRTIO_MMIO }, diff --git a/system/runstate.c b/system/runstate.c index 6178b0091a3e1..32467aa882543 100644 --- a/system/runstate.c +++ b/system/runstate.c @@ -76,9 +76,6 @@ typedef struct { } RunStateTransition; static const RunStateTransition runstate_transitions_def[] = { - { RUN_STATE_PRELAUNCH, RUN_STATE_INMIGRATE }, - { RUN_STATE_PRELAUNCH, RUN_STATE_SUSPENDED }, - { RUN_STATE_DEBUG, RUN_STATE_RUNNING }, { RUN_STATE_DEBUG, RUN_STATE_FINISH_MIGRATE }, { RUN_STATE_DEBUG, RUN_STATE_PRELAUNCH }, @@ -118,6 +115,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_PRELAUNCH, RUN_STATE_RUNNING }, { RUN_STATE_PRELAUNCH, RUN_STATE_FINISH_MIGRATE }, { RUN_STATE_PRELAUNCH, RUN_STATE_INMIGRATE }, + { RUN_STATE_PRELAUNCH, RUN_STATE_SUSPENDED }, { RUN_STATE_FINISH_MIGRATE, RUN_STATE_RUNNING }, { RUN_STATE_FINISH_MIGRATE, RUN_STATE_PAUSED }, diff --git a/system/vl.c b/system/vl.c index 3b7057e6c669f..17bbc092c8777 100644 --- a/system/vl.c +++ b/system/vl.c @@ -1672,7 +1672,8 @@ static MachineClass *select_machine(QDict *qdict, Error **errp) { ERRP_GUARD(); const char *machine_type = qdict_get_try_str(qdict, "type"); - g_autoptr(GSList) machines = object_class_get_list(TYPE_MACHINE, 
false); + g_autoptr(GSList) machines = object_class_get_list(target_machine_typename(), + false); MachineClass *machine_class = NULL; if (machine_type) { @@ -3077,7 +3078,7 @@ void qemu_init(int argc, char **argv) model = g_strdup(qdict_get_str(dict, "model")); qdict_del(dict, "model"); if (is_help_option(model)) { - show_valid_soundhw(); + audio_print_available_models(); exit(0); } } @@ -3524,10 +3525,6 @@ void qemu_init(int argc, char **argv) prom_envs[nb_prom_envs] = optarg; nb_prom_envs++; break; - case QEMU_OPTION_old_param: - warn_report("-old-param is deprecated"); - old_param = 1; - break; case QEMU_OPTION_rtc: opts = qemu_opts_parse_noisily(qemu_find_opts("rtc"), optarg, false); @@ -3820,7 +3817,7 @@ void qemu_init(int argc, char **argv) migration_object_init(); /* parse features once if machine provides default cpu_type */ - current_machine->cpu_type = machine_class_default_cpu_type(machine_class); + current_machine->cpu_type = machine_default_cpu_type(current_machine); if (cpu_option) { current_machine->cpu_type = parse_cpu_option(cpu_option); } @@ -3841,6 +3838,8 @@ void qemu_init(int argc, char **argv) } qemu_init_displays(); accel_setup_post(current_machine); - os_setup_post(); + if (migrate_mode() != MIG_MODE_CPR_EXEC) { + os_setup_post(); + } resume_mux_open(); } diff --git a/target-info-stub.c b/target-info-stub.c index ca0caa3686c3a..d96d8249c1dd8 100644 --- a/target-info-stub.c +++ b/target-info-stub.c @@ -12,6 +12,10 @@ #include "hw/boards.h" #include "cpu.h" +/* Validate correct placement of CPUArchState. 
*/ +QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0); +QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState)); + static const TargetInfo target_info_stub = { .target_name = TARGET_NAME, .target_arch = SYS_EMU_TARGET__MAX, diff --git a/target/alpha/cpu.c b/target/alpha/cpu.c index bf1787a69ddc8..932cddac055ce 100644 --- a/target/alpha/cpu.c +++ b/target/alpha/cpu.c @@ -86,10 +86,10 @@ static bool alpha_cpu_has_work(CPUState *cs) assume that if a CPU really wants to stay asleep, it will mask interrupts at the chipset level, which will prevent these bits from being set in the first place. */ - return cs->interrupt_request & (CPU_INTERRUPT_HARD - | CPU_INTERRUPT_TIMER - | CPU_INTERRUPT_SMP - | CPU_INTERRUPT_MCHK); + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD + | CPU_INTERRUPT_TIMER + | CPU_INTERRUPT_SMP + | CPU_INTERRUPT_MCHK); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/alpha/helper.h b/target/alpha/helper.h index d60f208703163..954a5c8294cf5 100644 --- a/target/alpha/helper.h +++ b/target/alpha/helper.h @@ -90,9 +90,9 @@ DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64) #if !defined (CONFIG_USER_ONLY) DEF_HELPER_FLAGS_1(tbia, TCG_CALL_NO_RWG, void, env) DEF_HELPER_FLAGS_2(tbis, TCG_CALL_NO_RWG, void, env, i64) -DEF_HELPER_FLAGS_1(tb_flush, TCG_CALL_NO_RWG, void, env) DEF_HELPER_1(halt, void, i64) +DEF_HELPER_1(whami, i64, env) DEF_HELPER_FLAGS_0(get_vmtime, TCG_CALL_NO_RWG, i64) DEF_HELPER_FLAGS_0(get_walltime, TCG_CALL_NO_RWG, i64) diff --git a/target/alpha/machine.c b/target/alpha/machine.c index 5f302b166da61..6828b123ca104 100644 --- a/target/alpha/machine.c +++ b/target/alpha/machine.c @@ -25,8 +25,8 @@ static const VMStateInfo vmstate_fpcr = { }; static const VMStateField vmstate_env_fields[] = { - VMSTATE_UINTTL_ARRAY(ir, CPUAlphaState, 31), - VMSTATE_UINTTL_ARRAY(fir, CPUAlphaState, 31), + VMSTATE_UINT64_ARRAY(ir, CPUAlphaState, 31), + VMSTATE_UINT64_ARRAY(fir, CPUAlphaState, 31), /* Save the architecture value of the 
fpcr, not the internally expanded version. Since this architecture value does not exist in memory to be stored, this requires a but of hoop @@ -41,27 +41,27 @@ static const VMStateField vmstate_env_fields[] = { .flags = VMS_SINGLE, .offset = 0 }, - VMSTATE_UINTTL(pc, CPUAlphaState), - VMSTATE_UINTTL(unique, CPUAlphaState), - VMSTATE_UINTTL(lock_addr, CPUAlphaState), - VMSTATE_UINTTL(lock_value, CPUAlphaState), + VMSTATE_UINT64(pc, CPUAlphaState), + VMSTATE_UINT64(unique, CPUAlphaState), + VMSTATE_UINT64(lock_addr, CPUAlphaState), + VMSTATE_UINT64(lock_value, CPUAlphaState), VMSTATE_UINT32(flags, CPUAlphaState), VMSTATE_UINT32(pcc_ofs, CPUAlphaState), - VMSTATE_UINTTL(trap_arg0, CPUAlphaState), - VMSTATE_UINTTL(trap_arg1, CPUAlphaState), - VMSTATE_UINTTL(trap_arg2, CPUAlphaState), + VMSTATE_UINT64(trap_arg0, CPUAlphaState), + VMSTATE_UINT64(trap_arg1, CPUAlphaState), + VMSTATE_UINT64(trap_arg2, CPUAlphaState), - VMSTATE_UINTTL(exc_addr, CPUAlphaState), - VMSTATE_UINTTL(palbr, CPUAlphaState), - VMSTATE_UINTTL(ptbr, CPUAlphaState), - VMSTATE_UINTTL(vptptr, CPUAlphaState), - VMSTATE_UINTTL(sysval, CPUAlphaState), - VMSTATE_UINTTL(usp, CPUAlphaState), + VMSTATE_UINT64(exc_addr, CPUAlphaState), + VMSTATE_UINT64(palbr, CPUAlphaState), + VMSTATE_UINT64(ptbr, CPUAlphaState), + VMSTATE_UINT64(vptptr, CPUAlphaState), + VMSTATE_UINT64(sysval, CPUAlphaState), + VMSTATE_UINT64(usp, CPUAlphaState), - VMSTATE_UINTTL_ARRAY(shadow, CPUAlphaState, 8), - VMSTATE_UINTTL_ARRAY(scratch, CPUAlphaState, 24), + VMSTATE_UINT64_ARRAY(shadow, CPUAlphaState, 8), + VMSTATE_UINT64_ARRAY(scratch, CPUAlphaState, 24), VMSTATE_END_OF_LIST() }; diff --git a/target/alpha/sys_helper.c b/target/alpha/sys_helper.c index 51e32544287a8..0e0a619975bd0 100644 --- a/target/alpha/sys_helper.c +++ b/target/alpha/sys_helper.c @@ -20,7 +20,6 @@ #include "qemu/osdep.h" #include "cpu.h" #include "exec/cputlb.h" -#include "exec/tb-flush.h" #include "exec/helper-proto.h" #include "system/runstate.h" #include 
"system/system.h" @@ -38,11 +37,6 @@ void helper_tbis(CPUAlphaState *env, uint64_t p) tlb_flush_page(env_cpu(env), p); } -void helper_tb_flush(CPUAlphaState *env) -{ - tb_flush(env_cpu(env)); -} - void helper_halt(uint64_t restart) { if (restart) { @@ -73,3 +67,8 @@ void helper_set_alarm(CPUAlphaState *env, uint64_t expire) timer_del(cpu->alarm_timer); } } + +uint64_t HELPER(whami)(CPUAlphaState *env) +{ + return env_cpu(env)->cpu_index; +} diff --git a/target/alpha/translate.c b/target/alpha/translate.c index cebab0318cfff..b1d8a4eb80ae6 100644 --- a/target/alpha/translate.c +++ b/target/alpha/translate.c @@ -48,8 +48,6 @@ struct DisasContext { #ifdef CONFIG_USER_ONLY MemOp unalign; -#else - uint64_t palbr; #endif uint32_t tbflags; int mem_idx; @@ -438,18 +436,18 @@ static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, return DISAS_NEXT; } -static void gen_goto_tb(DisasContext *ctx, int idx, int32_t disp) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, int32_t disp) { if (translator_use_goto_tb(&ctx->base, ctx->base.pc_next + disp)) { /* With PCREL, PC must always be up-to-date. 
*/ if (ctx->pcrel) { gen_pc_disp(ctx, cpu_pc, disp); - tcg_gen_goto_tb(idx); + tcg_gen_goto_tb(tb_slot_idx); } else { - tcg_gen_goto_tb(idx); + tcg_gen_goto_tb(tb_slot_idx); gen_pc_disp(ctx, cpu_pc, disp); } - tcg_gen_exit_tb(ctx->base.tb, idx); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { gen_pc_disp(ctx, cpu_pc, disp); tcg_gen_lookup_and_goto_ptr(); @@ -1128,8 +1126,7 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) break; case 0x3C: /* WHAMI */ - tcg_gen_ld32s_i64(ctx->ir[IR_V0], tcg_env, - -offsetof(AlphaCPU, env) + offsetof(CPUState, cpu_index)); + gen_helper_whami(ctx->ir[IR_V0], tcg_env); break; case 0x3E: @@ -1155,7 +1152,6 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) #else { TCGv tmp = tcg_temp_new(); - uint64_t entry; gen_pc_disp(ctx, tmp, 0); if (ctx->tbflags & ENV_FLAG_PAL_MODE) { @@ -1165,12 +1161,11 @@ static DisasJumpType gen_call_pal(DisasContext *ctx, int palcode) } tcg_gen_st_i64(tmp, tcg_env, offsetof(CPUAlphaState, exc_addr)); - entry = ctx->palbr; - entry += (palcode & 0x80 - ? 0x2000 + (palcode - 0x80) * 64 - : 0x1000 + palcode * 64); - - tcg_gen_movi_i64(cpu_pc, entry); + tcg_gen_ld_i64(cpu_pc, tcg_env, offsetof(CPUAlphaState, palbr)); + tcg_gen_addi_i64(cpu_pc, cpu_pc, + palcode & 0x80 + ? 0x2000 + (palcode - 0x80) * 64 + : 0x1000 + palcode * 64); return DISAS_PC_UPDATED; } #endif @@ -1292,11 +1287,7 @@ static DisasJumpType gen_mtpr(DisasContext *ctx, TCGv vb, int regno) case 7: /* PALBR */ tcg_gen_st_i64(vb, tcg_env, offsetof(CPUAlphaState, palbr)); - /* Changing the PAL base register implies un-chaining all of the TBs - that ended with a CALL_PAL. Since the base register usually only - changes during boot, flushing everything works well. */ - gen_helper_tb_flush(tcg_env); - return DISAS_PC_STALE; + break; case 32 ... 39: /* Accessing the "non-shadow" general registers. 
*/ @@ -2874,7 +2865,6 @@ static void alpha_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) ctx->ir = cpu_std_ir; ctx->unalign = (ctx->tbflags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN); #else - ctx->palbr = env->palbr; ctx->ir = (ctx->tbflags & ENV_FLAG_PAL_MODE ? cpu_pal_ir : cpu_std_ir); #endif diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index 20c70c7d6bb20..a788376d1d3e8 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -17,24 +17,12 @@ #include "qemu/main-loop.h" #include "system/tcg.h" #include "target/arm/multiprocessing.h" - -#ifndef DEBUG_ARM_POWERCTL -#define DEBUG_ARM_POWERCTL 0 -#endif - -#define DPRINTF(fmt, args...) \ - do { \ - if (DEBUG_ARM_POWERCTL) { \ - fprintf(stderr, "[ARM]%s: " fmt , __func__, ##args); \ - } \ - } while (0) +#include "trace.h" CPUState *arm_get_cpu_by_id(uint64_t id) { CPUState *cpu; - DPRINTF("cpu %" PRId64 "\n", id); - CPU_FOREACH(cpu) { ARMCPU *armcpu = ARM_CPU(cpu); @@ -102,9 +90,9 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, assert(bql_locked()); - DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 - "\n", cpuid, target_el, target_aa64 ? "aarch64" : "aarch32", entry, - context_id); + trace_arm_powerctl_set_cpu_on(cpuid, target_el, + target_aa64 ? 
"aarch64" : "aarch32", + entry, context_id); /* requested EL level need to be in the 1 to 3 range */ assert((target_el > 0) && (target_el < 4)); @@ -208,6 +196,8 @@ int arm_set_cpu_on_and_reset(uint64_t cpuid) assert(bql_locked()); + trace_arm_powerctl_set_cpu_on_and_reset(cpuid); + /* Retrieve the cpu we are powering up */ target_cpu_state = arm_get_cpu_by_id(cpuid); if (!target_cpu_state) { @@ -261,7 +251,7 @@ int arm_set_cpu_off(uint64_t cpuid) assert(bql_locked()); - DPRINTF("cpu %" PRId64 "\n", cpuid); + trace_arm_powerctl_set_cpu_off(cpuid); /* change to the cpu we are powering up */ target_cpu_state = arm_get_cpu_by_id(cpuid); @@ -297,7 +287,7 @@ int arm_reset_cpu(uint64_t cpuid) assert(bql_locked()); - DPRINTF("cpu %" PRId64 "\n", cpuid); + trace_arm_powerctl_set_cpu_off(cpuid); /* change to the cpu we are resetting */ target_cpu_state = arm_get_cpu_by_id(cpuid); diff --git a/target/arm/common-semi-target.h b/target/arm/common-semi-target.c similarity index 59% rename from target/arm/common-semi-target.h rename to target/arm/common-semi-target.c index da51f2d7f540d..2b77ce9c17b65 100644 --- a/target/arm/common-semi-target.h +++ b/target/arm/common-semi-target.c @@ -7,12 +7,12 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef TARGET_ARM_COMMON_SEMI_TARGET_H -#define TARGET_ARM_COMMON_SEMI_TARGET_H - +#include "qemu/osdep.h" +#include "cpu.h" +#include "semihosting/common-semi.h" #include "target/arm/cpu-qom.h" -static inline target_ulong common_semi_arg(CPUState *cs, int argno) +uint64_t common_semi_arg(CPUState *cs, int argno) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; @@ -23,7 +23,7 @@ static inline target_ulong common_semi_arg(CPUState *cs, int argno) } } -static inline void common_semi_set_ret(CPUState *cs, target_ulong ret) +void common_semi_set_ret(CPUState *cs, uint64_t ret) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; @@ -34,27 +34,25 @@ static inline void common_semi_set_ret(CPUState *cs, target_ulong ret) } } 
-static inline bool common_semi_sys_exit_extended(CPUState *cs, int nr) +bool common_semi_sys_exit_is_extended(CPUState *cs) { - return nr == TARGET_SYS_EXIT_EXTENDED || is_a64(cpu_env(cs)); + return is_a64(cpu_env(cs)); } -static inline bool is_64bit_semihosting(CPUArchState *env) +bool is_64bit_semihosting(CPUArchState *env) { return is_a64(env); } -static inline target_ulong common_semi_stack_bottom(CPUState *cs) +uint64_t common_semi_stack_bottom(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; return is_a64(env) ? env->xregs[31] : env->regs[13]; } -static inline bool common_semi_has_synccache(CPUArchState *env) +bool common_semi_has_synccache(CPUArchState *env) { /* Ok for A64, invalid for A32/T32 */ return is_a64(env); } - -#endif diff --git a/target/arm/cpregs-gcs.c b/target/arm/cpregs-gcs.c new file mode 100644 index 0000000000000..1ed52a211a66b --- /dev/null +++ b/target/arm/cpregs-gcs.c @@ -0,0 +1,156 @@ +/* + * QEMU ARM CP Register GCS regiters and instructions + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" +#include "exec/icount.h" +#include "hw/irq.h" +#include "cpu.h" +#include "cpu-features.h" +#include "cpregs.h" +#include "internals.h" + + +static CPAccessResult access_gcs(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) < 3 + && arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_GCSEN)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_gcs_el0(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 0 && !(env->cp15.gcscr_el[0] & GCSCRE0_NTR)) { + return CP_ACCESS_TRAP_EL1; + } + return access_gcs(env, ri, isread); +} + +static void gcspr_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + /* + * Bits [2:0] are RES0, so we might as well clear them now, + * rather than upon each usage a-la GetCurrentGCSPointer. 
+ */ + raw_write(env, ri, value & ~7); +} + +static CPAccessResult access_gcspushm(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + int el = arm_current_el(env); + if (!(env->cp15.gcscr_el[el] & GCSCR_PUSHMEN)) { + return CP_ACCESS_TRAP_BIT | (el ? el : 1); + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_gcspushx(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* Trap if lock taken, and enabled. */ + if (!(env->pstate & PSTATE_EXLOCK)) { + int el = arm_current_el(env); + if (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN) { + return CP_ACCESS_EXLOCK; + } + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_gcspopcx(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* Trap if lock not taken, and enabled. */ + if (env->pstate & PSTATE_EXLOCK) { + int el = arm_current_el(env); + if (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN) { + return CP_ACCESS_EXLOCK; + } + } + return CP_ACCESS_OK; +} + +static const ARMCPRegInfo gcs_reginfo[] = { + { .name = "GCSCRE0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 2, + .access = PL1_RW, .accessfn = access_gcs, .fgt = FGT_NGCS_EL0, + .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[0]) }, + { .name = "GCSCR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 0, + .access = PL1_RW, .accessfn = access_gcs, .fgt = FGT_NGCS_EL1, + .nv2_redirect_offset = 0x8d0 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 5, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 5, 0), + .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[1]) }, + { .name = "GCSCR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 5, .opc2 = 0, + .access = PL2_RW, .accessfn = access_gcs, + .fieldoffset = offsetof(CPUARMState, cp15.gcscr_el[2]) }, + { .name = "GCSCR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 5, .opc2 = 0, + .access = PL3_RW, + .fieldoffset = 
offsetof(CPUARMState, cp15.gcscr_el[3]) }, + + { .name = "GCSPR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 3, .crn = 2, .crm = 5, .opc2 = 1, + .access = PL0_R | PL1_W, .accessfn = access_gcs_el0, + .fgt = FGT_NGCS_EL0, .writefn = gcspr_write, + .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[0]) }, + { .name = "GCSPR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 5, .opc2 = 1, + .access = PL1_RW, .accessfn = access_gcs, + .fgt = FGT_NGCS_EL1, .writefn = gcspr_write, + .nv2_redirect_offset = 0x8c0 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 5, 1), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 5, 1), + .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[1]) }, + { .name = "GCSPR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .crn = 2, .crm = 5, .opc2 = 1, + .access = PL2_RW, .accessfn = access_gcs, .writefn = gcspr_write, + .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[2]) }, + { .name = "GCSPR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .crn = 2, .crm = 5, .opc2 = 1, + .access = PL3_RW, .writefn = gcspr_write, + .fieldoffset = offsetof(CPUARMState, cp15.gcspr_el[2]) }, + + { .name = "GCSPUSHM", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 0, + .access = PL0_W, .accessfn = access_gcspushm, + .fgt = FGT_NGCSPUSHM_EL1, .type = ARM_CP_GCSPUSHM }, + { .name = "GCSPOPM", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 1, + .access = PL0_R, .type = ARM_CP_GCSPOPM }, + { .name = "GCSSS1", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 2, + .access = PL0_W, .type = ARM_CP_GCSSS1 }, + { .name = "GCSSS2", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 7, .opc2 = 3, + .access = PL0_R, .type = ARM_CP_GCSSS2 }, + { .name = "GCSPUSHX", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 4, + .access = PL1_W, .accessfn = 
access_gcspushx, .fgt = FGT_NGCSEPP, + .type = ARM_CP_GCSPUSHX }, + { .name = "GCSPOPCX", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 5, + .access = PL1_W, .accessfn = access_gcspopcx, .fgt = FGT_NGCSEPP, + .type = ARM_CP_GCSPOPCX }, + { .name = "GCSPOPX", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 7, .opc2 = 6, + .access = PL1_W, .type = ARM_CP_GCSPOPX }, +}; + +void define_gcs_cpregs(ARMCPU *cpu) +{ + if (cpu_isar_feature(aa64_gcs, cpu)) { + define_arm_cp_regs(cpu, gcs_reginfo); + } +} diff --git a/target/arm/cpregs-pmu.c b/target/arm/cpregs-pmu.c index 0f295b1376cc9..31c01eddc8768 100644 --- a/target/arm/cpregs-pmu.c +++ b/target/arm/cpregs-pmu.c @@ -228,22 +228,27 @@ static bool event_supported(uint16_t number) return supported_event_map[number] != UNSUPPORTED_EVENT; } -static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, - bool isread) +static CPAccessResult do_pmreg_access(CPUARMState *env, bool is_pmcr) { /* * Performance monitor registers user accessibility is controlled - * by PMUSERENR. MDCR_EL2.TPM and MDCR_EL3.TPM allow configurable + * by PMUSERENR. MDCR_EL2.TPM/TPMCR and MDCR_EL3.TPM allow configurable * trapping to EL2 or EL3 for other accesses. 
*/ int el = arm_current_el(env); - uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); if (el == 0 && !(env->cp15.c9_pmuserenr & 1)) { return CP_ACCESS_TRAP_EL1; } - if (el < 2 && (mdcr_el2 & MDCR_TPM)) { - return CP_ACCESS_TRAP_EL2; + if (el < 2) { + uint64_t mdcr_el2 = arm_mdcr_el2_eff(env); + + if (mdcr_el2 & MDCR_TPM) { + return CP_ACCESS_TRAP_EL2; + } + if (is_pmcr && (mdcr_el2 & MDCR_TPMCR)) { + return CP_ACCESS_TRAP_EL2; + } } if (el < 3 && (env->cp15.mdcr_el3 & MDCR_TPM)) { return CP_ACCESS_TRAP_EL3; @@ -252,6 +257,19 @@ static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, return CP_ACCESS_OK; } +static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + return do_pmreg_access(env, false); +} + +static CPAccessResult pmreg_access_pmcr(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + return do_pmreg_access(env, true); +} + static CPAccessResult pmreg_access_xevcntr(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) @@ -1067,11 +1085,6 @@ static const ARMCPRegInfo v7_pm_reginfo[] = { .fgt = FGT_PMSELR_EL0, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr), .writefn = pmselr_write, .raw_writefn = raw_write, }, - { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, - .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, - .fgt = FGT_PMCCNTR_EL0, - .readfn = pmccntr_read, .writefn = pmccntr_write32, - .accessfn = pmreg_access_ccntr }, { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 13, .opc2 = 0, .access = PL0_RW, .accessfn = pmreg_access_ccntr, @@ -1192,14 +1205,14 @@ void define_pm_cpregs(ARMCPU *cpu) .fgt = FGT_PMCR_EL0, .type = ARM_CP_IO | ARM_CP_ALIAS, .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr), - .accessfn = pmreg_access, + .accessfn = pmreg_access_pmcr, .readfn = pmcr_read, .raw_readfn = raw_read, .writefn = pmcr_write, .raw_writefn = raw_write, }; const ARMCPRegInfo pmcr64 = { .name = 
"PMCR_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 3, .crn = 9, .crm = 12, .opc2 = 0, - .access = PL0_RW, .accessfn = pmreg_access, + .access = PL0_RW, .accessfn = pmreg_access_pmcr, .fgt = FGT_PMCR_EL0, .type = ARM_CP_IO, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr), @@ -1211,6 +1224,23 @@ void define_pm_cpregs(ARMCPU *cpu) define_one_arm_cp_reg(cpu, &pmcr); define_one_arm_cp_reg(cpu, &pmcr64); define_arm_cp_regs(cpu, v7_pm_reginfo); + /* + * 32-bit AArch32 PMCCNTR. We don't expose this to GDB if the + * new-in-v8 PMUv3 64-bit AArch32 PMCCNTR register is implemented + * (as that will provide the GDB user's view of "PMCCNTR"). + */ + ARMCPRegInfo pmccntr = { + .name = "PMCCNTR", + .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0, + .access = PL0_RW, .accessfn = pmreg_access_ccntr, + .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO, + .fgt = FGT_PMCCNTR_EL0, + .readfn = pmccntr_read, .writefn = pmccntr_write32, + }; + if (arm_feature(env, ARM_FEATURE_V8)) { + pmccntr.type |= ARM_CP_NO_GDB; + } + define_one_arm_cp_reg(cpu, &pmccntr); for (unsigned i = 0, pmcrn = pmu_num_counters(env); i < pmcrn; i++) { g_autofree char *pmevcntr_name = g_strdup_printf("PMEVCNTR%d", i); @@ -1276,6 +1306,13 @@ void define_pm_cpregs(ARMCPU *cpu) .access = PL0_R, .accessfn = pmreg_access, .type = ARM_CP_CONST, .fgt = FGT_PMCEIDN_EL0, .resetvalue = cpu->pmceid1 }, + /* AArch32 64-bit PMCCNTR view: added in PMUv3 with Armv8 */ + { .name = "PMCCNTR", .state = ARM_CP_STATE_AA32, + .cp = 15, .crm = 9, .opc1 = 0, + .access = PL0_RW, .accessfn = pmreg_access_ccntr, .resetvalue = 0, + .type = ARM_CP_ALIAS | ARM_CP_IO | ARM_CP_64BIT, + .fgt = FGT_PMCCNTR_EL0, .readfn = pmccntr_read, + .writefn = pmccntr_write, }, }; define_arm_cp_regs(cpu, v8_pm_reginfo); } diff --git a/target/arm/cpregs.h b/target/arm/cpregs.h index c9506aa6d5744..763de5e051c3d 100644 --- a/target/arm/cpregs.h +++ b/target/arm/cpregs.h @@ -22,6 +22,7 @@ #define TARGET_ARM_CPREGS_H #include "hw/registerfields.h" 
+#include "exec/memop.h" #include "target/arm/kvm-consts.h" #include "cpu.h" @@ -46,6 +47,14 @@ enum { ARM_CP_DC_ZVA = 0x0005, ARM_CP_DC_GVA = 0x0006, ARM_CP_DC_GZVA = 0x0007, + /* Special: gcs instructions */ + ARM_CP_GCSPUSHM = 0x0008, + ARM_CP_GCSPOPM = 0x0009, + ARM_CP_GCSPUSHX = 0x000a, + ARM_CP_GCSPOPX = 0x000b, + ARM_CP_GCSPOPCX = 0x000c, + ARM_CP_GCSSS1 = 0x000d, + ARM_CP_GCSSS2 = 0x000e, /* Flag: reads produce resetvalue; writes ignored. */ ARM_CP_CONST = 1 << 4, @@ -135,6 +144,11 @@ enum { * identically to the normal one, other than FGT trapping handling.) */ ARM_CP_ADD_TLBI_NXS = 1 << 21, + /* + * Flag: even though this sysreg has opc1 == 4 or 5, it + * should not trap to EL2 when HCR_EL2.NV is set. + */ + ARM_CP_NV_NO_TRAP = 1 << 22, }; /* @@ -174,16 +188,20 @@ enum { * add a bit to distinguish between secure and non-secure cpregs in the * hashtable. */ -#define CP_REG_NS_SHIFT 29 -#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT) +#define CP_REG_AA32_NS_SHIFT 29 +#define CP_REG_AA32_NS_MASK (1 << CP_REG_AA32_NS_SHIFT) + +/* Distinguish 32-bit and 64-bit views of AArch32 system registers. 
*/ +#define CP_REG_AA32_64BIT_SHIFT 15 +#define CP_REG_AA32_64BIT_MASK (1 << CP_REG_AA32_64BIT_SHIFT) #define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2) \ - ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) | \ - ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2)) + (((ns) << CP_REG_AA32_NS_SHIFT) | \ + ((is64) << CP_REG_AA32_64BIT_SHIFT) | \ + ((cp) << 16) | ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2)) -#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \ - (CP_REG_AA64_MASK | \ - ((cp) << CP_REG_ARM_COPROC_SHIFT) | \ +#define ENCODE_AA64_CP_REG(op0, op1, crn, crm, op2) \ + (CP_REG_AA64_MASK | CP_REG_ARM64_SYSREG | \ ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ @@ -201,14 +219,14 @@ static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid) cpregid |= CP_REG_AA64_MASK; } else { if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) { - cpregid |= (1 << 15); + cpregid |= CP_REG_AA32_64BIT_MASK; } /* * KVM is always non-secure so add the NS flag on AArch32 register * entries. */ - cpregid |= 1 << CP_REG_NS_SHIFT; + cpregid |= CP_REG_AA32_NS_MASK; } return cpregid; } @@ -225,8 +243,8 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid) kvmid = cpregid & ~CP_REG_AA64_MASK; kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64; } else { - kvmid = cpregid & ~(1 << 15); - if (cpregid & (1 << 15)) { + kvmid = cpregid & ~CP_REG_AA32_64BIT_MASK; + if (cpregid & CP_REG_AA32_64BIT_MASK) { kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM; } else { kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM; @@ -346,6 +364,14 @@ typedef enum CPAccessResult { * specified target EL. */ CP_ACCESS_UNDEFINED = (2 << 2), + + /* + * Access fails with EXLOCK, a GCS exception syndrome. + * These traps are always to the current execution EL, + * which is the same as the usual target EL because + * they cannot occur from EL0. 
+ */ + CP_ACCESS_EXLOCK = (3 << 2), } CPAccessResult; /* Indexes into fgt_read[] */ @@ -408,10 +434,19 @@ FIELD(HFGRTR_EL2, ERXPFGCTL_EL1, 47, 1) FIELD(HFGRTR_EL2, ERXPFGCDN_EL1, 48, 1) FIELD(HFGRTR_EL2, ERXADDR_EL1, 49, 1) FIELD(HFGRTR_EL2, NACCDATA_EL1, 50, 1) -/* 51-53: RES0 */ +/* 51: RES0 */ +FIELD(HFGRTR_EL2, NGCS_EL0, 52, 1) +FIELD(HFGRTR_EL2, NGCS_EL1, 53, 1) FIELD(HFGRTR_EL2, NSMPRI_EL1, 54, 1) FIELD(HFGRTR_EL2, NTPIDR2_EL0, 55, 1) -/* 56-63: RES0 */ +FIELD(HFGRTR_EL2, NRCWMASK_EL1, 56, 1) +FIELD(HFGRTR_EL2, NPIRE0_EL1, 57, 1) +FIELD(HFGRTR_EL2, NPIR_EL1, 58, 1) +FIELD(HFGRTR_EL2, NPOR_EL0, 59, 1) +FIELD(HFGRTR_EL2, NPOR_EL1, 60, 1) +FIELD(HFGRTR_EL2, NS2POR_EL1, 61, 1) +FIELD(HFGRTR_EL2, NMAIR2_EL1, 62, 1) +FIELD(HFGRTR_EL2, NAMAIR2_EL1, 63, 1) /* These match HFGRTR but bits for RO registers are RES0 */ FIELD(HFGWTR_EL2, AFSR0_EL1, 0, 1) @@ -452,8 +487,18 @@ FIELD(HFGWTR_EL2, ERXPFGCTL_EL1, 47, 1) FIELD(HFGWTR_EL2, ERXPFGCDN_EL1, 48, 1) FIELD(HFGWTR_EL2, ERXADDR_EL1, 49, 1) FIELD(HFGWTR_EL2, NACCDATA_EL1, 50, 1) +FIELD(HFGWTR_EL2, NGCS_EL0, 52, 1) +FIELD(HFGWTR_EL2, NGCS_EL1, 53, 1) FIELD(HFGWTR_EL2, NSMPRI_EL1, 54, 1) FIELD(HFGWTR_EL2, NTPIDR2_EL0, 55, 1) +FIELD(HFGWTR_EL2, NRCWMASK_EL1, 56, 1) +FIELD(HFGWTR_EL2, NPIRE0_EL1, 57, 1) +FIELD(HFGWTR_EL2, NPIR_EL1, 58, 1) +FIELD(HFGWTR_EL2, NPOR_EL0, 59, 1) +FIELD(HFGWTR_EL2, NPOR_EL1, 60, 1) +FIELD(HFGWTR_EL2, NS2POR_EL1, 61, 1) +FIELD(HFGWTR_EL2, NMAIR2_EL1, 62, 1) +FIELD(HFGWTR_EL2, NAMAIR2_EL1, 63, 1) FIELD(HFGITR_EL2, ICIALLUIS, 0, 1) FIELD(HFGITR_EL2, ICIALLU, 1, 1) @@ -512,6 +557,11 @@ FIELD(HFGITR_EL2, SVC_EL1, 53, 1) FIELD(HFGITR_EL2, DCCVAC, 54, 1) FIELD(HFGITR_EL2, NBRBINJ, 55, 1) FIELD(HFGITR_EL2, NBRBIALL, 56, 1) +FIELD(HFGITR_EL2, NGCSPUSHM_EL1, 57, 1) +FIELD(HFGITR_EL2, NGCSSTR_EL1, 58, 1) +FIELD(HFGITR_EL2, NGCSEPP, 59, 1) +FIELD(HFGITR_EL2, COSPRCTX, 60, 1) +FIELD(HFGITR_EL2, ATS1E1A, 62, 1) FIELD(HDFGRTR_EL2, DBGBCRN_EL1, 0, 1) FIELD(HDFGRTR_EL2, DBGBVRN_EL1, 1, 1) @@ -750,8 +800,12 @@ 
typedef enum FGTBit { DO_BIT(HFGRTR, VBAR_EL1), DO_BIT(HFGRTR, ICC_IGRPENN_EL1), DO_BIT(HFGRTR, ERRIDR_EL1), + DO_REV_BIT(HFGRTR, NGCS_EL0), + DO_REV_BIT(HFGRTR, NGCS_EL1), DO_REV_BIT(HFGRTR, NSMPRI_EL1), DO_REV_BIT(HFGRTR, NTPIDR2_EL0), + DO_REV_BIT(HFGRTR, NPIRE0_EL1), + DO_REV_BIT(HFGRTR, NPIR_EL1), /* Trap bits in HDFGRTR_EL2 / HDFGWTR_EL2, starting from bit 0. */ DO_BIT(HDFGRTR, DBGBCRN_EL1), @@ -830,6 +884,9 @@ typedef enum FGTBit { DO_BIT(HFGITR, DVPRCTX), DO_BIT(HFGITR, CPPRCTX), DO_BIT(HFGITR, DCCVAC), + DO_REV_BIT(HFGITR, NGCSPUSHM_EL1), + DO_REV_BIT(HFGITR, NGCSEPP), + DO_BIT(HFGITR, ATS1E1A), } FGTBit; #undef DO_BIT @@ -841,15 +898,15 @@ typedef struct ARMCPRegInfo ARMCPRegInfo; * Access functions for coprocessor registers. These cannot fail and * may not raise exceptions. */ -typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque); -typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque, +typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *ri); +typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value); /* Access permission check functions for coprocessor registers. */ typedef CPAccessResult CPAccessFn(CPUARMState *env, - const ARMCPRegInfo *opaque, + const ARMCPRegInfo *ri, bool isread); /* Hook function for register reset */ -typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque); +typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *ri); #define CP_ANY 0xff @@ -907,11 +964,19 @@ struct ARMCPRegInfo { uint32_t nv2_redirect_offset; /* - * The opaque pointer passed to define_arm_cp_regs_with_opaque() when - * this register was defined: can be used to hand data through to the - * register read/write functions, since they are passed the ARMCPRegInfo*. + * With VHE, with E2H, at EL2, access to this EL0/EL1 reg redirects + * to the EL2 reg with the specified key. + */ + uint32_t vhe_redir_to_el2; + + /* + * For VHE. 
Before registration, this field holds the key for an + * EL02/EL12 reg to be created to point back to this EL0/EL1 reg. + * After registration, this field is set only on the EL02/EL12 reg + * and points back to the EL02/EL12 reg for redirection with E2H. */ - void *opaque; + uint32_t vhe_redir_to_el01; + /* * Value of this register, if it is ARM_CP_CONST. Otherwise, if * fieldoffset is non-zero, the reset value of the register. @@ -979,52 +1044,17 @@ struct ARMCPRegInfo { * fieldoffset is 0 then no reset will be done. */ CPResetFn *resetfn; - - /* - * "Original" readfn, writefn, accessfn. - * For ARMv8.1-VHE register aliases, we overwrite the read/write - * accessor functions of various EL1/EL0 to perform the runtime - * check for which sysreg should actually be modified, and then - * forwards the operation. Before overwriting the accessors, - * the original function is copied here, so that accesses that - * really do go to the EL1/EL0 version proceed normally. - * (The corresponding EL2 register is linked via opaque.) - */ - CPReadFn *orig_readfn; - CPWriteFn *orig_writefn; - CPAccessFn *orig_accessfn; }; -/* - * Macros which are lvalues for the field in CPUARMState for the - * ARMCPRegInfo *ri. 
- */ -#define CPREG_FIELD32(env, ri) \ - (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) -#define CPREG_FIELD64(env, ri) \ - (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) - -void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, const ARMCPRegInfo *reg, - void *opaque); +void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs); +void define_arm_cp_regs_len(ARMCPU *cpu, const ARMCPRegInfo *regs, size_t len); -static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs) -{ - define_one_arm_cp_reg_with_opaque(cpu, regs, NULL); -} - -void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, - void *opaque, size_t len); - -#define define_arm_cp_regs_with_opaque(CPU, REGS, OPAQUE) \ - do { \ - QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0); \ - define_arm_cp_regs_with_opaque_len(CPU, REGS, OPAQUE, \ - ARRAY_SIZE(REGS)); \ +#define define_arm_cp_regs(CPU, REGS) \ + do { \ + QEMU_BUILD_BUG_ON(ARRAY_SIZE(REGS) == 0); \ + define_arm_cp_regs_len(CPU, REGS, ARRAY_SIZE(REGS)); \ } while (0) -#define define_arm_cp_regs(CPU, REGS) \ - define_arm_cp_regs_with_opaque(CPU, REGS, NULL) - const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp); /* @@ -1075,15 +1105,16 @@ void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value); * CPResetFn that does nothing, for use if no reset is required even * if fieldoffset is non zero. */ -void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque); +void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *ri); /* - * Return true if this reginfo struct's field in the cpu state struct - * is 64 bits wide. + * Return MO_32 if the field in CPUARMState is uint32_t or + * MO_64 if the field in CPUARMState is uint64_t. 
*/ -static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri) +static inline MemOp cpreg_field_type(const ARMCPRegInfo *ri) { - return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT); + return (ri->state == ARM_CP_STATE_AA64 || (ri->type & ARM_CP_64BIT) + ? MO_64 : MO_32); } static inline bool cp_access_ok(int current_el, @@ -1143,7 +1174,7 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri) * means that the right set of registers is exactly those where * the opc1 field is 4 or 5. (You can see this also in the assert * we do that the opc1 field and the permissions mask line up in - * define_one_arm_cp_reg_with_opaque().) + * define_one_arm_cp_reg().) * Checking the opc1 field is easier for us and avoids the problem * that we do not consistently use the right architectural names * for all sysregs, since we treat the name field as largely for debug. @@ -1152,12 +1183,17 @@ static inline bool arm_cpreg_traps_in_nv(const ARMCPRegInfo *ri) * fragile to future new sysregs, but this seems the least likely * to break. * - * In particular, note that the released sysreg XML defines that - * the FEAT_MEC sysregs and instructions do not follow this FEAT_NV - * trapping rule, so we will need to add an ARM_CP_* flag to indicate - * "register does not trap on NV" to handle those if/when we implement - * FEAT_MEC. + * In particular, note that the FEAT_MEC sysregs and instructions + * are exceptions to this trapping rule, so they are marked as + * ARM_CP_NV_NO_TRAP to indicate that they should not be trapped + * to EL2. (They are an exception because the FEAT_MEC sysregs UNDEF + * unless in Realm, and Realm is not expected to be virtualized.) 
*/ + + if (ri->type & ARM_CP_NV_NO_TRAP) { + return false; + } + return ri->opc1 == 4 || ri->opc1 == 5; } diff --git a/target/arm/cpu-features.h b/target/arm/cpu-features.h index 5876162428af2..37f1eca3af62d 100644 --- a/target/arm/cpu-features.h +++ b/target/arm/cpu-features.h @@ -25,6 +25,421 @@ #include "cpu.h" #include "cpu-sysregs.h" +/* + * System register ID fields. + */ +FIELD(CLIDR_EL1, CTYPE1, 0, 3) +FIELD(CLIDR_EL1, CTYPE2, 3, 3) +FIELD(CLIDR_EL1, CTYPE3, 6, 3) +FIELD(CLIDR_EL1, CTYPE4, 9, 3) +FIELD(CLIDR_EL1, CTYPE5, 12, 3) +FIELD(CLIDR_EL1, CTYPE6, 15, 3) +FIELD(CLIDR_EL1, CTYPE7, 18, 3) +FIELD(CLIDR_EL1, LOUIS, 21, 3) +FIELD(CLIDR_EL1, LOC, 24, 3) +FIELD(CLIDR_EL1, LOUU, 27, 3) +FIELD(CLIDR_EL1, ICB, 30, 3) + +/* When FEAT_CCIDX is implemented */ +FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3) +FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21) +FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24) + +/* When FEAT_CCIDX is not implemented */ +FIELD(CCSIDR_EL1, LINESIZE, 0, 3) +FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10) +FIELD(CCSIDR_EL1, NUMSETS, 13, 15) + +FIELD(CTR_EL0, IMINLINE, 0, 4) +FIELD(CTR_EL0, L1IP, 14, 2) +FIELD(CTR_EL0, DMINLINE, 16, 4) +FIELD(CTR_EL0, ERG, 20, 4) +FIELD(CTR_EL0, CWG, 24, 4) +FIELD(CTR_EL0, IDC, 28, 1) +FIELD(CTR_EL0, DIC, 29, 1) +FIELD(CTR_EL0, TMINLINE, 32, 6) + +FIELD(MIDR_EL1, REVISION, 0, 4) +FIELD(MIDR_EL1, PARTNUM, 4, 12) +FIELD(MIDR_EL1, ARCHITECTURE, 16, 4) +FIELD(MIDR_EL1, VARIANT, 20, 4) +FIELD(MIDR_EL1, IMPLEMENTER, 24, 8) + +FIELD(ID_ISAR0, SWAP, 0, 4) +FIELD(ID_ISAR0, BITCOUNT, 4, 4) +FIELD(ID_ISAR0, BITFIELD, 8, 4) +FIELD(ID_ISAR0, CMPBRANCH, 12, 4) +FIELD(ID_ISAR0, COPROC, 16, 4) +FIELD(ID_ISAR0, DEBUG, 20, 4) +FIELD(ID_ISAR0, DIVIDE, 24, 4) + +FIELD(ID_ISAR1, ENDIAN, 0, 4) +FIELD(ID_ISAR1, EXCEPT, 4, 4) +FIELD(ID_ISAR1, EXCEPT_AR, 8, 4) +FIELD(ID_ISAR1, EXTEND, 12, 4) +FIELD(ID_ISAR1, IFTHEN, 16, 4) +FIELD(ID_ISAR1, IMMEDIATE, 20, 4) +FIELD(ID_ISAR1, INTERWORK, 24, 4) +FIELD(ID_ISAR1, JAZELLE, 28, 4) + +FIELD(ID_ISAR2, LOADSTORE, 0, 
4) +FIELD(ID_ISAR2, MEMHINT, 4, 4) +FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4) +FIELD(ID_ISAR2, MULT, 12, 4) +FIELD(ID_ISAR2, MULTS, 16, 4) +FIELD(ID_ISAR2, MULTU, 20, 4) +FIELD(ID_ISAR2, PSR_AR, 24, 4) +FIELD(ID_ISAR2, REVERSAL, 28, 4) + +FIELD(ID_ISAR3, SATURATE, 0, 4) +FIELD(ID_ISAR3, SIMD, 4, 4) +FIELD(ID_ISAR3, SVC, 8, 4) +FIELD(ID_ISAR3, SYNCHPRIM, 12, 4) +FIELD(ID_ISAR3, TABBRANCH, 16, 4) +FIELD(ID_ISAR3, T32COPY, 20, 4) +FIELD(ID_ISAR3, TRUENOP, 24, 4) +FIELD(ID_ISAR3, T32EE, 28, 4) + +FIELD(ID_ISAR4, UNPRIV, 0, 4) +FIELD(ID_ISAR4, WITHSHIFTS, 4, 4) +FIELD(ID_ISAR4, WRITEBACK, 8, 4) +FIELD(ID_ISAR4, SMC, 12, 4) +FIELD(ID_ISAR4, BARRIER, 16, 4) +FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4) +FIELD(ID_ISAR4, PSR_M, 24, 4) +FIELD(ID_ISAR4, SWP_FRAC, 28, 4) + +FIELD(ID_ISAR5, SEVL, 0, 4) +FIELD(ID_ISAR5, AES, 4, 4) +FIELD(ID_ISAR5, SHA1, 8, 4) +FIELD(ID_ISAR5, SHA2, 12, 4) +FIELD(ID_ISAR5, CRC32, 16, 4) +FIELD(ID_ISAR5, RDM, 24, 4) +FIELD(ID_ISAR5, VCMA, 28, 4) + +FIELD(ID_ISAR6, JSCVT, 0, 4) +FIELD(ID_ISAR6, DP, 4, 4) +FIELD(ID_ISAR6, FHM, 8, 4) +FIELD(ID_ISAR6, SB, 12, 4) +FIELD(ID_ISAR6, SPECRES, 16, 4) +FIELD(ID_ISAR6, BF16, 20, 4) +FIELD(ID_ISAR6, I8MM, 24, 4) + +FIELD(ID_MMFR0, VMSA, 0, 4) +FIELD(ID_MMFR0, PMSA, 4, 4) +FIELD(ID_MMFR0, OUTERSHR, 8, 4) +FIELD(ID_MMFR0, SHARELVL, 12, 4) +FIELD(ID_MMFR0, TCM, 16, 4) +FIELD(ID_MMFR0, AUXREG, 20, 4) +FIELD(ID_MMFR0, FCSE, 24, 4) +FIELD(ID_MMFR0, INNERSHR, 28, 4) + +FIELD(ID_MMFR1, L1HVDVA, 0, 4) +FIELD(ID_MMFR1, L1UNIVA, 4, 4) +FIELD(ID_MMFR1, L1HVDSW, 8, 4) +FIELD(ID_MMFR1, L1UNISW, 12, 4) +FIELD(ID_MMFR1, L1HVD, 16, 4) +FIELD(ID_MMFR1, L1UNI, 20, 4) +FIELD(ID_MMFR1, L1TSTCLN, 24, 4) +FIELD(ID_MMFR1, BPRED, 28, 4) + +FIELD(ID_MMFR2, L1HVDFG, 0, 4) +FIELD(ID_MMFR2, L1HVDBG, 4, 4) +FIELD(ID_MMFR2, L1HVDRNG, 8, 4) +FIELD(ID_MMFR2, HVDTLB, 12, 4) +FIELD(ID_MMFR2, UNITLB, 16, 4) +FIELD(ID_MMFR2, MEMBARR, 20, 4) +FIELD(ID_MMFR2, WFISTALL, 24, 4) +FIELD(ID_MMFR2, HWACCFLG, 28, 4) + +FIELD(ID_MMFR3, CMAINTVA, 0, 4) 
+FIELD(ID_MMFR3, CMAINTSW, 4, 4) +FIELD(ID_MMFR3, BPMAINT, 8, 4) +FIELD(ID_MMFR3, MAINTBCST, 12, 4) +FIELD(ID_MMFR3, PAN, 16, 4) +FIELD(ID_MMFR3, COHWALK, 20, 4) +FIELD(ID_MMFR3, CMEMSZ, 24, 4) +FIELD(ID_MMFR3, SUPERSEC, 28, 4) + +FIELD(ID_MMFR4, SPECSEI, 0, 4) +FIELD(ID_MMFR4, AC2, 4, 4) +FIELD(ID_MMFR4, XNX, 8, 4) +FIELD(ID_MMFR4, CNP, 12, 4) +FIELD(ID_MMFR4, HPDS, 16, 4) +FIELD(ID_MMFR4, LSM, 20, 4) +FIELD(ID_MMFR4, CCIDX, 24, 4) +FIELD(ID_MMFR4, EVT, 28, 4) + +FIELD(ID_MMFR5, ETS, 0, 4) +FIELD(ID_MMFR5, NTLBPA, 4, 4) + +FIELD(ID_PFR0, STATE0, 0, 4) +FIELD(ID_PFR0, STATE1, 4, 4) +FIELD(ID_PFR0, STATE2, 8, 4) +FIELD(ID_PFR0, STATE3, 12, 4) +FIELD(ID_PFR0, CSV2, 16, 4) +FIELD(ID_PFR0, AMU, 20, 4) +FIELD(ID_PFR0, DIT, 24, 4) +FIELD(ID_PFR0, RAS, 28, 4) + +FIELD(ID_PFR1, PROGMOD, 0, 4) +FIELD(ID_PFR1, SECURITY, 4, 4) +FIELD(ID_PFR1, MPROGMOD, 8, 4) +FIELD(ID_PFR1, VIRTUALIZATION, 12, 4) +FIELD(ID_PFR1, GENTIMER, 16, 4) +FIELD(ID_PFR1, SEC_FRAC, 20, 4) +FIELD(ID_PFR1, VIRT_FRAC, 24, 4) +FIELD(ID_PFR1, GIC, 28, 4) + +FIELD(ID_PFR2, CSV3, 0, 4) +FIELD(ID_PFR2, SSBS, 4, 4) +FIELD(ID_PFR2, RAS_FRAC, 8, 4) + +FIELD(ID_AA64ISAR0, AES, 4, 4) +FIELD(ID_AA64ISAR0, SHA1, 8, 4) +FIELD(ID_AA64ISAR0, SHA2, 12, 4) +FIELD(ID_AA64ISAR0, CRC32, 16, 4) +FIELD(ID_AA64ISAR0, ATOMIC, 20, 4) +FIELD(ID_AA64ISAR0, TME, 24, 4) +FIELD(ID_AA64ISAR0, RDM, 28, 4) +FIELD(ID_AA64ISAR0, SHA3, 32, 4) +FIELD(ID_AA64ISAR0, SM3, 36, 4) +FIELD(ID_AA64ISAR0, SM4, 40, 4) +FIELD(ID_AA64ISAR0, DP, 44, 4) +FIELD(ID_AA64ISAR0, FHM, 48, 4) +FIELD(ID_AA64ISAR0, TS, 52, 4) +FIELD(ID_AA64ISAR0, TLB, 56, 4) +FIELD(ID_AA64ISAR0, RNDR, 60, 4) + +FIELD(ID_AA64ISAR1, DPB, 0, 4) +FIELD(ID_AA64ISAR1, APA, 4, 4) +FIELD(ID_AA64ISAR1, API, 8, 4) +FIELD(ID_AA64ISAR1, JSCVT, 12, 4) +FIELD(ID_AA64ISAR1, FCMA, 16, 4) +FIELD(ID_AA64ISAR1, LRCPC, 20, 4) +FIELD(ID_AA64ISAR1, GPA, 24, 4) +FIELD(ID_AA64ISAR1, GPI, 28, 4) +FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) +FIELD(ID_AA64ISAR1, SB, 36, 4) +FIELD(ID_AA64ISAR1, SPECRES, 40, 4) 
+FIELD(ID_AA64ISAR1, BF16, 44, 4) +FIELD(ID_AA64ISAR1, DGH, 48, 4) +FIELD(ID_AA64ISAR1, I8MM, 52, 4) +FIELD(ID_AA64ISAR1, XS, 56, 4) +FIELD(ID_AA64ISAR1, LS64, 60, 4) + +FIELD(ID_AA64ISAR2, WFXT, 0, 4) +FIELD(ID_AA64ISAR2, RPRES, 4, 4) +FIELD(ID_AA64ISAR2, GPA3, 8, 4) +FIELD(ID_AA64ISAR2, APA3, 12, 4) +FIELD(ID_AA64ISAR2, MOPS, 16, 4) +FIELD(ID_AA64ISAR2, BC, 20, 4) +FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4) +FIELD(ID_AA64ISAR2, CLRBHB, 28, 4) +FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4) +FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4) +FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4) +FIELD(ID_AA64ISAR2, RPRFM, 48, 4) +FIELD(ID_AA64ISAR2, CSSC, 52, 4) +FIELD(ID_AA64ISAR2, LUT, 56, 4) +FIELD(ID_AA64ISAR2, ATS1A, 60, 4) + +FIELD(ID_AA64PFR0, EL0, 0, 4) +FIELD(ID_AA64PFR0, EL1, 4, 4) +FIELD(ID_AA64PFR0, EL2, 8, 4) +FIELD(ID_AA64PFR0, EL3, 12, 4) +FIELD(ID_AA64PFR0, FP, 16, 4) +FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) +FIELD(ID_AA64PFR0, GIC, 24, 4) +FIELD(ID_AA64PFR0, RAS, 28, 4) +FIELD(ID_AA64PFR0, SVE, 32, 4) +FIELD(ID_AA64PFR0, SEL2, 36, 4) +FIELD(ID_AA64PFR0, MPAM, 40, 4) +FIELD(ID_AA64PFR0, AMU, 44, 4) +FIELD(ID_AA64PFR0, DIT, 48, 4) +FIELD(ID_AA64PFR0, RME, 52, 4) +FIELD(ID_AA64PFR0, CSV2, 56, 4) +FIELD(ID_AA64PFR0, CSV3, 60, 4) + +FIELD(ID_AA64PFR1, BT, 0, 4) +FIELD(ID_AA64PFR1, SSBS, 4, 4) +FIELD(ID_AA64PFR1, MTE, 8, 4) +FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4) +FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4) +FIELD(ID_AA64PFR1, SME, 24, 4) +FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4) +FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4) +FIELD(ID_AA64PFR1, NMI, 36, 4) +FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4) +FIELD(ID_AA64PFR1, GCS, 44, 4) +FIELD(ID_AA64PFR1, THE, 48, 4) +FIELD(ID_AA64PFR1, MTEX, 52, 4) +FIELD(ID_AA64PFR1, DF2, 56, 4) +FIELD(ID_AA64PFR1, PFAR, 60, 4) + +FIELD(ID_AA64PFR2, MTEPERM, 0, 4) +FIELD(ID_AA64PFR2, MTESTOREONLY, 4, 4) +FIELD(ID_AA64PFR2, MTEFAR, 8, 4) +FIELD(ID_AA64PFR2, FPMR, 32, 4) + +FIELD(ID_AA64MMFR0, PARANGE, 0, 4) +FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) +FIELD(ID_AA64MMFR0, BIGEND, 8, 4) 
+FIELD(ID_AA64MMFR0, SNSMEM, 12, 4) +FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4) +FIELD(ID_AA64MMFR0, TGRAN16, 20, 4) +FIELD(ID_AA64MMFR0, TGRAN64, 24, 4) +FIELD(ID_AA64MMFR0, TGRAN4, 28, 4) +FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4) +FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4) +FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4) +FIELD(ID_AA64MMFR0, EXS, 44, 4) +FIELD(ID_AA64MMFR0, FGT, 56, 4) +FIELD(ID_AA64MMFR0, ECV, 60, 4) + +FIELD(ID_AA64MMFR1, HAFDBS, 0, 4) +FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4) +FIELD(ID_AA64MMFR1, VH, 8, 4) +FIELD(ID_AA64MMFR1, HPDS, 12, 4) +FIELD(ID_AA64MMFR1, LO, 16, 4) +FIELD(ID_AA64MMFR1, PAN, 20, 4) +FIELD(ID_AA64MMFR1, SPECSEI, 24, 4) +FIELD(ID_AA64MMFR1, XNX, 28, 4) +FIELD(ID_AA64MMFR1, TWED, 32, 4) +FIELD(ID_AA64MMFR1, ETS, 36, 4) +FIELD(ID_AA64MMFR1, HCX, 40, 4) +FIELD(ID_AA64MMFR1, AFP, 44, 4) +FIELD(ID_AA64MMFR1, NTLBPA, 48, 4) +FIELD(ID_AA64MMFR1, TIDCP1, 52, 4) +FIELD(ID_AA64MMFR1, CMOW, 56, 4) +FIELD(ID_AA64MMFR1, ECBHB, 60, 4) + +FIELD(ID_AA64MMFR2, CNP, 0, 4) +FIELD(ID_AA64MMFR2, UAO, 4, 4) +FIELD(ID_AA64MMFR2, LSM, 8, 4) +FIELD(ID_AA64MMFR2, IESB, 12, 4) +FIELD(ID_AA64MMFR2, VARANGE, 16, 4) +FIELD(ID_AA64MMFR2, CCIDX, 20, 4) +FIELD(ID_AA64MMFR2, NV, 24, 4) +FIELD(ID_AA64MMFR2, ST, 28, 4) +FIELD(ID_AA64MMFR2, AT, 32, 4) +FIELD(ID_AA64MMFR2, IDS, 36, 4) +FIELD(ID_AA64MMFR2, FWB, 40, 4) +FIELD(ID_AA64MMFR2, TTL, 48, 4) +FIELD(ID_AA64MMFR2, BBM, 52, 4) +FIELD(ID_AA64MMFR2, EVT, 56, 4) +FIELD(ID_AA64MMFR2, E0PD, 60, 4) + +FIELD(ID_AA64MMFR3, TCRX, 0, 4) +FIELD(ID_AA64MMFR3, SCTLRX, 4, 4) +FIELD(ID_AA64MMFR3, S1PIE, 8, 4) +FIELD(ID_AA64MMFR3, S2PIE, 12, 4) +FIELD(ID_AA64MMFR3, S1POE, 16, 4) +FIELD(ID_AA64MMFR3, S2POE, 20, 4) +FIELD(ID_AA64MMFR3, AIE, 24, 4) +FIELD(ID_AA64MMFR3, MEC, 28, 4) +FIELD(ID_AA64MMFR3, D128, 32, 4) +FIELD(ID_AA64MMFR3, D128_2, 36, 4) +FIELD(ID_AA64MMFR3, SNERR, 40, 4) +FIELD(ID_AA64MMFR3, ANERR, 44, 4) +FIELD(ID_AA64MMFR3, SDERR, 52, 4) +FIELD(ID_AA64MMFR3, ADERR, 56, 4) +FIELD(ID_AA64MMFR3, SPEC_FPACC, 60, 4) + +FIELD(ID_AA64DFR0, 
DEBUGVER, 0, 4) +FIELD(ID_AA64DFR0, TRACEVER, 4, 4) +FIELD(ID_AA64DFR0, PMUVER, 8, 4) +FIELD(ID_AA64DFR0, BRPS, 12, 4) +FIELD(ID_AA64DFR0, PMSS, 16, 4) +FIELD(ID_AA64DFR0, WRPS, 20, 4) +FIELD(ID_AA64DFR0, SEBEP, 24, 4) +FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) +FIELD(ID_AA64DFR0, PMSVER, 32, 4) +FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) +FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) +FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4) +FIELD(ID_AA64DFR0, MTPMU, 48, 4) +FIELD(ID_AA64DFR0, BRBE, 52, 4) +FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4) +FIELD(ID_AA64DFR0, HPMN0, 60, 4) + +FIELD(ID_AA64ZFR0, SVEVER, 0, 4) +FIELD(ID_AA64ZFR0, AES, 4, 4) +FIELD(ID_AA64ZFR0, BITPERM, 16, 4) +FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4) +FIELD(ID_AA64ZFR0, B16B16, 24, 4) +FIELD(ID_AA64ZFR0, SHA3, 32, 4) +FIELD(ID_AA64ZFR0, SM4, 40, 4) +FIELD(ID_AA64ZFR0, I8MM, 44, 4) +FIELD(ID_AA64ZFR0, F32MM, 52, 4) +FIELD(ID_AA64ZFR0, F64MM, 56, 4) + +FIELD(ID_AA64SMFR0, F32F32, 32, 1) +FIELD(ID_AA64SMFR0, BI32I32, 33, 1) +FIELD(ID_AA64SMFR0, B16F32, 34, 1) +FIELD(ID_AA64SMFR0, F16F32, 35, 1) +FIELD(ID_AA64SMFR0, I8I32, 36, 4) +FIELD(ID_AA64SMFR0, F16F16, 42, 1) +FIELD(ID_AA64SMFR0, B16B16, 43, 1) +FIELD(ID_AA64SMFR0, I16I32, 44, 4) +FIELD(ID_AA64SMFR0, F64F64, 48, 1) +FIELD(ID_AA64SMFR0, I16I64, 52, 4) +FIELD(ID_AA64SMFR0, SMEVER, 56, 4) +FIELD(ID_AA64SMFR0, FA64, 63, 1) + +FIELD(ID_DFR0, COPDBG, 0, 4) +FIELD(ID_DFR0, COPSDBG, 4, 4) +FIELD(ID_DFR0, MMAPDBG, 8, 4) +FIELD(ID_DFR0, COPTRC, 12, 4) +FIELD(ID_DFR0, MMAPTRC, 16, 4) +FIELD(ID_DFR0, MPROFDBG, 20, 4) +FIELD(ID_DFR0, PERFMON, 24, 4) +FIELD(ID_DFR0, TRACEFILT, 28, 4) + +FIELD(ID_DFR1, MTPMU, 0, 4) +FIELD(ID_DFR1, HPMN0, 4, 4) + +FIELD(DBGDIDR, SE_IMP, 12, 1) +FIELD(DBGDIDR, NSUHD_IMP, 14, 1) +FIELD(DBGDIDR, VERSION, 16, 4) +FIELD(DBGDIDR, CTX_CMPS, 20, 4) +FIELD(DBGDIDR, BRPS, 24, 4) +FIELD(DBGDIDR, WRPS, 28, 4) + +FIELD(DBGDEVID, PCSAMPLE, 0, 4) +FIELD(DBGDEVID, WPADDRMASK, 4, 4) +FIELD(DBGDEVID, BPADDRMASK, 8, 4) +FIELD(DBGDEVID, VECTORCATCH, 12, 4) +FIELD(DBGDEVID, VIRTEXTNS, 
16, 4) +FIELD(DBGDEVID, DOUBLELOCK, 20, 4) +FIELD(DBGDEVID, AUXREGS, 24, 4) +FIELD(DBGDEVID, CIDMASK, 28, 4) + +FIELD(DBGDEVID1, PCSROFFSET, 0, 4) + +FIELD(MVFR0, SIMDREG, 0, 4) +FIELD(MVFR0, FPSP, 4, 4) +FIELD(MVFR0, FPDP, 8, 4) +FIELD(MVFR0, FPTRAP, 12, 4) +FIELD(MVFR0, FPDIVIDE, 16, 4) +FIELD(MVFR0, FPSQRT, 20, 4) +FIELD(MVFR0, FPSHVEC, 24, 4) +FIELD(MVFR0, FPROUND, 28, 4) + +FIELD(MVFR1, FPFTZ, 0, 4) +FIELD(MVFR1, FPDNAN, 4, 4) +FIELD(MVFR1, SIMDLS, 8, 4) /* A-profile only */ +FIELD(MVFR1, SIMDINT, 12, 4) /* A-profile only */ +FIELD(MVFR1, SIMDSP, 16, 4) /* A-profile only */ +FIELD(MVFR1, SIMDHP, 20, 4) /* A-profile only */ +FIELD(MVFR1, MVE, 8, 4) /* M-profile only */ +FIELD(MVFR1, FP16, 20, 4) /* M-profile only */ +FIELD(MVFR1, FPHP, 24, 4) +FIELD(MVFR1, SIMDFMAC, 28, 4) + +FIELD(MVFR2, SIMDMISC, 0, 4) +FIELD(MVFR2, FPMISC, 4, 4) + /* * Naming convention for isar_feature functions: * Functions which test 32-bit ID registers should have _aa32_ in @@ -406,9 +821,14 @@ static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) return FIELD_EX64_IDREG(id, ID_AA64ISAR0, CRC32) != 0; } -static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) +static inline bool isar_feature_aa64_lse(const ARMISARegisters *id) { - return FIELD_EX64_IDREG(id, ID_AA64ISAR0, ATOMIC) != 0; + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, ATOMIC) >= 2; +} + +static inline bool isar_feature_aa64_lse128(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR0, ATOMIC) >= 3; } static inline bool isar_feature_aa64_rdm(const ARMISARegisters *id) @@ -604,11 +1024,21 @@ static inline bool isar_feature_aa64_rpres(const ARMISARegisters *id) return FIELD_EX64_IDREG(id, ID_AA64ISAR2, RPRES); } +static inline bool isar_feature_aa64_cssc(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, CSSC) != 0; +} + static inline bool isar_feature_aa64_lut(const ARMISARegisters *id) { return FIELD_EX64_IDREG(id, ID_AA64ISAR2, LUT); } +static inline 
bool isar_feature_aa64_ats1a(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64ISAR2, ATS1A); +} + static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically. */ @@ -661,6 +1091,11 @@ static inline bool isar_feature_aa64_rme(const ARMISARegisters *id) return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) != 0; } +static inline bool isar_feature_aa64_rme_gpc2(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64PFR0, RME) >= 2; +} + static inline bool isar_feature_aa64_dit(const ARMISARegisters *id) { return FIELD_EX64_IDREG(id, ID_AA64PFR0, DIT) != 0; @@ -714,6 +1149,11 @@ static inline bool isar_feature_aa64_nmi(const ARMISARegisters *id) return FIELD_EX64_IDREG(id, ID_AA64PFR1, NMI) != 0; } +static inline bool isar_feature_aa64_gcs(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64PFR1, GCS) != 0; +} + static inline bool isar_feature_aa64_tgran4_lpa2(const ARMISARegisters *id) { return FIELD_SEX64_IDREG(id, ID_AA64MMFR0, TGRAN4) >= 1; @@ -904,6 +1344,31 @@ static inline bool isar_feature_aa64_nv2(const ARMISARegisters *id) return FIELD_EX64_IDREG(id, ID_AA64MMFR2, NV) >= 2; } +static inline bool isar_feature_aa64_tcr2(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR3, TCRX) != 0; +} + +static inline bool isar_feature_aa64_sctlr2(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR3, SCTLRX) != 0; +} + +static inline bool isar_feature_aa64_s1pie(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR3, S1PIE) != 0; +} + +static inline bool isar_feature_aa64_s2pie(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR3, S2PIE) != 0; +} + +static inline bool isar_feature_aa64_mec(const ARMISARegisters *id) +{ + return FIELD_EX64_IDREG(id, ID_AA64MMFR3, MEC) != 0; +} + static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id) { return FIELD_EX64_IDREG(id, ID_AA64DFR0, 
PMUVER) >= 4 && diff --git a/target/arm/cpu-irq.c b/target/arm/cpu-irq.c new file mode 100644 index 0000000000000..fe514cc93af88 --- /dev/null +++ b/target/arm/cpu-irq.c @@ -0,0 +1,381 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* + * QEMU ARM CPU - interrupt_request handling + * + * Copyright (c) 2003-2025 QEMU contributors + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "accel/tcg/cpu-ops.h" +#include "internals.h" + +#ifdef CONFIG_TCG +static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, + unsigned int target_el, + unsigned int cur_el, bool secure, + uint64_t hcr_el2) +{ + CPUARMState *env = cpu_env(cs); + bool pstate_unmasked; + bool unmasked = false; + bool allIntMask = false; + + /* + * Don't take exceptions if they target a lower EL. + * This check should catch any exceptions that would not be taken + * but left pending. + */ + if (cur_el > target_el) { + return false; + } + + if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) && + env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) { + allIntMask = env->pstate & PSTATE_ALLINT || + ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) && + (env->pstate & PSTATE_SP)); + } + + switch (excp_idx) { + case EXCP_NMI: + pstate_unmasked = !allIntMask; + break; + + case EXCP_VINMI: + if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) { + /* VINMIs are only taken when hypervized. */ + return false; + } + return !allIntMask; + case EXCP_VFNMI: + if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) { + /* VFNMIs are only taken when hypervized. */ + return false; + } + return !allIntMask; + case EXCP_FIQ: + pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask); + break; + + case EXCP_IRQ: + pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask); + break; + + case EXCP_VFIQ: + if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) { + /* VFIQs are only taken when hypervized. 
*/ + return false; + } + return !(env->daif & PSTATE_F) && (!allIntMask); + case EXCP_VIRQ: + if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) { + /* VIRQs are only taken when hypervized. */ + return false; + } + return !(env->daif & PSTATE_I) && (!allIntMask); + case EXCP_VSERR: + if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) { + /* VIRQs are only taken when hypervized. */ + return false; + } + return !(env->daif & PSTATE_A); + default: + g_assert_not_reached(); + } + + /* + * Use the target EL, current execution state and SCR/HCR settings to + * determine whether the corresponding CPSR bit is used to mask the + * interrupt. + */ + if ((target_el > cur_el) && (target_el != 1)) { + /* Exceptions targeting a higher EL may not be maskable */ + if (arm_feature(env, ARM_FEATURE_AARCH64)) { + switch (target_el) { + case 2: + /* + * According to ARM DDI 0487H.a, an interrupt can be masked + * when HCR_E2H and HCR_TGE are both set regardless of the + * current Security state. Note that we need to revisit this + * part again once we need to support NMI. + */ + if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { + unmasked = true; + } + break; + case 3: + /* Interrupt cannot be masked when the target EL is 3 */ + unmasked = true; + break; + default: + g_assert_not_reached(); + } + } else { + /* + * The old 32-bit-only environment has a more complicated + * masking setup. HCR and SCR bits not only affect interrupt + * routing but also change the behaviour of masking. + */ + bool hcr, scr; + + switch (excp_idx) { + case EXCP_FIQ: + /* + * If FIQs are routed to EL3 or EL2 then there are cases where + * we override the CPSR.F in determining if the exception is + * masked or not. If neither of these are set then we fall back + * to the CPSR.F setting otherwise we further assess the state + * below. 
+ */ + hcr = hcr_el2 & HCR_FMO; + scr = (env->cp15.scr_el3 & SCR_FIQ); + + /* + * When EL3 is 32-bit, the SCR.FW bit controls whether the + * CPSR.F bit masks FIQ interrupts when taken in non-secure + * state. If SCR.FW is set then FIQs can be masked by CPSR.F + * when non-secure but only when FIQs are only routed to EL3. + */ + scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr); + break; + case EXCP_IRQ: + /* + * When EL3 execution state is 32-bit, if HCR.IMO is set then + * we may override the CPSR.I masking when in non-secure state. + * The SCR.IRQ setting has already been taken into consideration + * when setting the target EL, so it does not have a further + * affect here. + */ + hcr = hcr_el2 & HCR_IMO; + scr = false; + break; + default: + g_assert_not_reached(); + } + + if ((scr || hcr) && !secure) { + unmasked = true; + } + } + } + + /* + * The PSTATE bits only mask the interrupt if we have not overridden the + * ability above. + */ + return unmasked || pstate_unmasked; +} + +bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + CPUARMState *env = cpu_env(cs); + uint32_t cur_el = arm_current_el(env); + bool secure = arm_is_secure(env); + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + uint32_t target_el; + uint32_t excp_idx; + + /* The prioritization of interrupts is IMPLEMENTATION DEFINED. 
*/ + + if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) && + (arm_sctlr(env, cur_el) & SCTLR_NMI)) { + if (interrupt_request & CPU_INTERRUPT_NMI) { + excp_idx = EXCP_NMI; + target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + if (interrupt_request & CPU_INTERRUPT_VINMI) { + excp_idx = EXCP_VINMI; + target_el = 1; + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + if (interrupt_request & CPU_INTERRUPT_VFNMI) { + excp_idx = EXCP_VFNMI; + target_el = 1; + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + } else { + /* + * NMI disabled: interrupts with superpriority are handled + * as if they didn't have it + */ + if (interrupt_request & CPU_INTERRUPT_NMI) { + interrupt_request |= CPU_INTERRUPT_HARD; + } + if (interrupt_request & CPU_INTERRUPT_VINMI) { + interrupt_request |= CPU_INTERRUPT_VIRQ; + } + if (interrupt_request & CPU_INTERRUPT_VFNMI) { + interrupt_request |= CPU_INTERRUPT_VFIQ; + } + } + + if (interrupt_request & CPU_INTERRUPT_FIQ) { + excp_idx = EXCP_FIQ; + target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + if (interrupt_request & CPU_INTERRUPT_HARD) { + excp_idx = EXCP_IRQ; + target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + if (interrupt_request & CPU_INTERRUPT_VIRQ) { + excp_idx = EXCP_VIRQ; + target_el = 1; + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + if (interrupt_request & CPU_INTERRUPT_VFIQ) { + excp_idx = EXCP_VFIQ; + target_el = 1; + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + goto found; + } + } + if 
(interrupt_request & CPU_INTERRUPT_VSERR) { + excp_idx = EXCP_VSERR; + target_el = 1; + if (arm_excp_unmasked(cs, excp_idx, target_el, + cur_el, secure, hcr_el2)) { + /* Taking a virtual abort clears HCR_EL2.VSE */ + env->cp15.hcr_el2 &= ~HCR_VSE; + cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR); + goto found; + } + } + return false; + + found: + cs->exception_index = excp_idx; + env->exception.target_el = target_el; + cs->cc->tcg_ops->do_interrupt(cs); + return true; +} +#endif /* CONFIG_TCG */ + +void arm_cpu_update_virq(ARMCPU *cpu) +{ + /* + * Update the interrupt level for VIRQ, which is the logical OR of + * the HCR_EL2.VI bit and the input line level from the GIC. + */ + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) && + !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) || + (env->irq_line_state & CPU_INTERRUPT_VIRQ); + + if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) { + if (new_state) { + cpu_interrupt(cs, CPU_INTERRUPT_VIRQ); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ); + } + } +} + +void arm_cpu_update_vfiq(ARMCPU *cpu) +{ + /* + * Update the interrupt level for VFIQ, which is the logical OR of + * the HCR_EL2.VF bit and the input line level from the GIC. + */ + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) && + !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) || + (env->irq_line_state & CPU_INTERRUPT_VFIQ); + + if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) { + if (new_state) { + cpu_interrupt(cs, CPU_INTERRUPT_VFIQ); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ); + } + } +} + +void arm_cpu_update_vinmi(ARMCPU *cpu) +{ + /* + * Update the interrupt level for VINMI, which is the logical OR of + * the HCRX_EL2.VINMI bit and the input line level from the GIC. 
+ */ + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) && + (arm_hcrx_el2_eff(env) & HCRX_VINMI)) || + (env->irq_line_state & CPU_INTERRUPT_VINMI); + + if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) { + if (new_state) { + cpu_interrupt(cs, CPU_INTERRUPT_VINMI); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI); + } + } +} + +void arm_cpu_update_vfnmi(ARMCPU *cpu) +{ + /* + * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit. + */ + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) && + (arm_hcrx_el2_eff(env) & HCRX_VFNMI); + + if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) { + if (new_state) { + cpu_interrupt(cs, CPU_INTERRUPT_VFNMI); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI); + } + } +} + +void arm_cpu_update_vserr(ARMCPU *cpu) +{ + /* + * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit. 
+ */ + CPUARMState *env = &cpu->env; + CPUState *cs = CPU(cpu); + + bool new_state = env->cp15.hcr_el2 & HCR_VSE; + + if (new_state != cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) { + if (new_state) { + cpu_interrupt(cs, CPU_INTERRUPT_VSERR); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR); + } + } +} + diff --git a/target/arm/cpu-sysregs.h.inc b/target/arm/cpu-sysregs.h.inc index f48a9daa7c14c..2bb2861c62344 100644 --- a/target/arm/cpu-sysregs.h.inc +++ b/target/arm/cpu-sysregs.h.inc @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-or-later */ DEF(ID_AA64PFR0_EL1, 3, 0, 0, 4, 0) DEF(ID_AA64PFR1_EL1, 3, 0, 0, 4, 1) +DEF(ID_AA64PFR2_EL1, 3, 0, 0, 4, 2) DEF(ID_AA64SMFR0_EL1, 3, 0, 0, 4, 5) DEF(ID_AA64DFR0_EL1, 3, 0, 0, 5, 0) DEF(ID_AA64DFR1_EL1, 3, 0, 0, 5, 1) diff --git a/target/arm/cpu.c b/target/arm/cpu.c index e2b2337399cf5..3b556f1404ed1 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -52,6 +52,8 @@ #include "target/arm/cpu-qom.h" #include "target/arm/gtimer.h" +#include "trace.h" + static void arm_cpu_set_pc(CPUState *cs, vaddr value) { ARMCPU *cpu = ARM_CPU(cs); @@ -142,11 +144,11 @@ static bool arm_cpu_has_work(CPUState *cs) ARMCPU *cpu = ARM_CPU(cs); return (cpu->power_state != PSCI_OFF) - && cs->interrupt_request & - (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD - | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI - | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR - | CPU_INTERRUPT_EXITTB); + && cpu_test_interrupt(cs, + CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD + | CPU_INTERRUPT_NMI | CPU_INTERRUPT_VINMI | CPU_INTERRUPT_VFNMI + | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ | CPU_INTERRUPT_VSERR + | CPU_INTERRUPT_EXITTB); } #endif /* !CONFIG_USER_ONLY */ @@ -192,14 +194,8 @@ static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque) * This is basically only used for fields in non-core coprocessors * (like the pxa2xx ones). 
*/ - if (!ri->fieldoffset) { - return; - } - - if (cpreg_field_is_64bit(ri)) { - CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue; - } else { - CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue; + if (ri->fieldoffset) { + raw_write(&cpu->env, ri, ri->resetvalue); } } @@ -231,6 +227,8 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj); CPUARMState *env = &cpu->env; + trace_arm_cpu_reset(arm_cpu_mp_affinity(cpu)); + if (acc->parent_phases.hold) { acc->parent_phases.hold(obj, type); } @@ -247,10 +245,6 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) cpu->power_state = cs->start_powered_off ? PSCI_OFF : PSCI_ON; - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q'; - } - if (arm_feature(env, ARM_FEATURE_AARCH64)) { /* 64 bit CPUs always start in 64 bit mode */ env->aarch64 = true; @@ -317,6 +311,10 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) env->cp15.mdscr_el1 |= 1 << 12; /* Enable FEAT_MOPS */ env->cp15.sctlr_el[1] |= SCTLR_MSCEN; + /* For Linux, GCSPR_EL0 is always readable. */ + if (cpu_isar_feature(aa64_gcs, cpu)) { + env->cp15.gcscr_el[0] = GCSCRE0_NTR; + } #else /* Reset into the highest available EL */ if (arm_feature(env, ARM_FEATURE_EL3)) { @@ -349,11 +347,6 @@ static void arm_cpu_reset_hold(Object *obj, ResetType type) env->uncached_cpsr = ARM_CPU_MODE_USR; /* For user mode we must enable access to coprocessors */ env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30; - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - env->cp15.c15_cpar = 3; - } else if (arm_feature(env, ARM_FEATURE_XSCALE)) { - env->cp15.c15_cpar = 1; - } #else /* @@ -589,6 +582,8 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el) bool have_el3 = arm_feature(env, ARM_FEATURE_EL3); bool have_el2 = arm_feature(env, ARM_FEATURE_EL2); + trace_arm_emulate_firmware_reset(arm_cpu_mp_affinity(cpu), target_el); + /* * Check we have the EL we're aiming for. 
If that is the * highest implemented EL, then cpu_reset has already done @@ -644,6 +639,22 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el) if (cpu_isar_feature(aa64_fgt, cpu)) { env->cp15.scr_el3 |= SCR_FGTEN; } + if (cpu_isar_feature(aa64_gcs, cpu)) { + env->cp15.scr_el3 |= SCR_GCSEN; + } + if (cpu_isar_feature(aa64_tcr2, cpu)) { + env->cp15.scr_el3 |= SCR_TCR2EN; + } + if (cpu_isar_feature(aa64_sctlr2, cpu)) { + env->cp15.scr_el3 |= SCR_SCTLR2EN; + } + if (cpu_isar_feature(aa64_s1pie, cpu) || + cpu_isar_feature(aa64_s2pie, cpu)) { + env->cp15.scr_el3 |= SCR_PIEN; + } + if (cpu_isar_feature(aa64_mec, cpu)) { + env->cp15.scr_el3 |= SCR_MECEN; + } } if (target_el == 2) { @@ -680,376 +691,6 @@ void arm_emulate_firmware_reset(CPUState *cpustate, int target_el) } -#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) - -static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx, - unsigned int target_el, - unsigned int cur_el, bool secure, - uint64_t hcr_el2) -{ - CPUARMState *env = cpu_env(cs); - bool pstate_unmasked; - bool unmasked = false; - bool allIntMask = false; - - /* - * Don't take exceptions if they target a lower EL. - * This check should catch any exceptions that would not be taken - * but left pending. - */ - if (cur_el > target_el) { - return false; - } - - if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) && - env->cp15.sctlr_el[target_el] & SCTLR_NMI && cur_el == target_el) { - allIntMask = env->pstate & PSTATE_ALLINT || - ((env->cp15.sctlr_el[target_el] & SCTLR_SPINTMASK) && - (env->pstate & PSTATE_SP)); - } - - switch (excp_idx) { - case EXCP_NMI: - pstate_unmasked = !allIntMask; - break; - - case EXCP_VINMI: - if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) { - /* VINMIs are only taken when hypervized. */ - return false; - } - return !allIntMask; - case EXCP_VFNMI: - if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) { - /* VFNMIs are only taken when hypervized. 
*/ - return false; - } - return !allIntMask; - case EXCP_FIQ: - pstate_unmasked = (!(env->daif & PSTATE_F)) && (!allIntMask); - break; - - case EXCP_IRQ: - pstate_unmasked = (!(env->daif & PSTATE_I)) && (!allIntMask); - break; - - case EXCP_VFIQ: - if (!(hcr_el2 & HCR_FMO) || (hcr_el2 & HCR_TGE)) { - /* VFIQs are only taken when hypervized. */ - return false; - } - return !(env->daif & PSTATE_F) && (!allIntMask); - case EXCP_VIRQ: - if (!(hcr_el2 & HCR_IMO) || (hcr_el2 & HCR_TGE)) { - /* VIRQs are only taken when hypervized. */ - return false; - } - return !(env->daif & PSTATE_I) && (!allIntMask); - case EXCP_VSERR: - if (!(hcr_el2 & HCR_AMO) || (hcr_el2 & HCR_TGE)) { - /* VIRQs are only taken when hypervized. */ - return false; - } - return !(env->daif & PSTATE_A); - default: - g_assert_not_reached(); - } - - /* - * Use the target EL, current execution state and SCR/HCR settings to - * determine whether the corresponding CPSR bit is used to mask the - * interrupt. - */ - if ((target_el > cur_el) && (target_el != 1)) { - /* Exceptions targeting a higher EL may not be maskable */ - if (arm_feature(env, ARM_FEATURE_AARCH64)) { - switch (target_el) { - case 2: - /* - * According to ARM DDI 0487H.a, an interrupt can be masked - * when HCR_E2H and HCR_TGE are both set regardless of the - * current Security state. Note that we need to revisit this - * part again once we need to support NMI. - */ - if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) { - unmasked = true; - } - break; - case 3: - /* Interrupt cannot be masked when the target EL is 3 */ - unmasked = true; - break; - default: - g_assert_not_reached(); - } - } else { - /* - * The old 32-bit-only environment has a more complicated - * masking setup. HCR and SCR bits not only affect interrupt - * routing but also change the behaviour of masking. 
- */ - bool hcr, scr; - - switch (excp_idx) { - case EXCP_FIQ: - /* - * If FIQs are routed to EL3 or EL2 then there are cases where - * we override the CPSR.F in determining if the exception is - * masked or not. If neither of these are set then we fall back - * to the CPSR.F setting otherwise we further assess the state - * below. - */ - hcr = hcr_el2 & HCR_FMO; - scr = (env->cp15.scr_el3 & SCR_FIQ); - - /* - * When EL3 is 32-bit, the SCR.FW bit controls whether the - * CPSR.F bit masks FIQ interrupts when taken in non-secure - * state. If SCR.FW is set then FIQs can be masked by CPSR.F - * when non-secure but only when FIQs are only routed to EL3. - */ - scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr); - break; - case EXCP_IRQ: - /* - * When EL3 execution state is 32-bit, if HCR.IMO is set then - * we may override the CPSR.I masking when in non-secure state. - * The SCR.IRQ setting has already been taken into consideration - * when setting the target EL, so it does not have a further - * affect here. - */ - hcr = hcr_el2 & HCR_IMO; - scr = false; - break; - default: - g_assert_not_reached(); - } - - if ((scr || hcr) && !secure) { - unmasked = true; - } - } - } - - /* - * The PSTATE bits only mask the interrupt if we have not overridden the - * ability above. - */ - return unmasked || pstate_unmasked; -} - -static bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request) -{ - CPUARMState *env = cpu_env(cs); - uint32_t cur_el = arm_current_el(env); - bool secure = arm_is_secure(env); - uint64_t hcr_el2 = arm_hcr_el2_eff(env); - uint32_t target_el; - uint32_t excp_idx; - - /* The prioritization of interrupts is IMPLEMENTATION DEFINED. 
*/ - - if (cpu_isar_feature(aa64_nmi, env_archcpu(env)) && - (arm_sctlr(env, cur_el) & SCTLR_NMI)) { - if (interrupt_request & CPU_INTERRUPT_NMI) { - excp_idx = EXCP_NMI; - target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - if (interrupt_request & CPU_INTERRUPT_VINMI) { - excp_idx = EXCP_VINMI; - target_el = 1; - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - if (interrupt_request & CPU_INTERRUPT_VFNMI) { - excp_idx = EXCP_VFNMI; - target_el = 1; - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - } else { - /* - * NMI disabled: interrupts with superpriority are handled - * as if they didn't have it - */ - if (interrupt_request & CPU_INTERRUPT_NMI) { - interrupt_request |= CPU_INTERRUPT_HARD; - } - if (interrupt_request & CPU_INTERRUPT_VINMI) { - interrupt_request |= CPU_INTERRUPT_VIRQ; - } - if (interrupt_request & CPU_INTERRUPT_VFNMI) { - interrupt_request |= CPU_INTERRUPT_VFIQ; - } - } - - if (interrupt_request & CPU_INTERRUPT_FIQ) { - excp_idx = EXCP_FIQ; - target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - if (interrupt_request & CPU_INTERRUPT_HARD) { - excp_idx = EXCP_IRQ; - target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure); - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - if (interrupt_request & CPU_INTERRUPT_VIRQ) { - excp_idx = EXCP_VIRQ; - target_el = 1; - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - if (interrupt_request & CPU_INTERRUPT_VFIQ) { - excp_idx = EXCP_VFIQ; - target_el = 1; - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - goto found; - } - } - if 
(interrupt_request & CPU_INTERRUPT_VSERR) { - excp_idx = EXCP_VSERR; - target_el = 1; - if (arm_excp_unmasked(cs, excp_idx, target_el, - cur_el, secure, hcr_el2)) { - /* Taking a virtual abort clears HCR_EL2.VSE */ - env->cp15.hcr_el2 &= ~HCR_VSE; - cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR); - goto found; - } - } - return false; - - found: - cs->exception_index = excp_idx; - env->exception.target_el = target_el; - cs->cc->tcg_ops->do_interrupt(cs); - return true; -} - -#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */ - -void arm_cpu_update_virq(ARMCPU *cpu) -{ - /* - * Update the interrupt level for VIRQ, which is the logical OR of - * the HCR_EL2.VI bit and the input line level from the GIC. - */ - CPUARMState *env = &cpu->env; - CPUState *cs = CPU(cpu); - - bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) && - !(arm_hcrx_el2_eff(env) & HCRX_VINMI)) || - (env->irq_line_state & CPU_INTERRUPT_VIRQ); - - if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) { - if (new_state) { - cpu_interrupt(cs, CPU_INTERRUPT_VIRQ); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ); - } - } -} - -void arm_cpu_update_vfiq(ARMCPU *cpu) -{ - /* - * Update the interrupt level for VFIQ, which is the logical OR of - * the HCR_EL2.VF bit and the input line level from the GIC. - */ - CPUARMState *env = &cpu->env; - CPUState *cs = CPU(cpu); - - bool new_state = ((arm_hcr_el2_eff(env) & HCR_VF) && - !(arm_hcrx_el2_eff(env) & HCRX_VFNMI)) || - (env->irq_line_state & CPU_INTERRUPT_VFIQ); - - if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) { - if (new_state) { - cpu_interrupt(cs, CPU_INTERRUPT_VFIQ); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ); - } - } -} - -void arm_cpu_update_vinmi(ARMCPU *cpu) -{ - /* - * Update the interrupt level for VINMI, which is the logical OR of - * the HCRX_EL2.VINMI bit and the input line level from the GIC. 
- */ - CPUARMState *env = &cpu->env; - CPUState *cs = CPU(cpu); - - bool new_state = ((arm_hcr_el2_eff(env) & HCR_VI) && - (arm_hcrx_el2_eff(env) & HCRX_VINMI)) || - (env->irq_line_state & CPU_INTERRUPT_VINMI); - - if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VINMI) != 0)) { - if (new_state) { - cpu_interrupt(cs, CPU_INTERRUPT_VINMI); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_VINMI); - } - } -} - -void arm_cpu_update_vfnmi(ARMCPU *cpu) -{ - /* - * Update the interrupt level for VFNMI, which is the HCRX_EL2.VFNMI bit. - */ - CPUARMState *env = &cpu->env; - CPUState *cs = CPU(cpu); - - bool new_state = (arm_hcr_el2_eff(env) & HCR_VF) && - (arm_hcrx_el2_eff(env) & HCRX_VFNMI); - - if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFNMI) != 0)) { - if (new_state) { - cpu_interrupt(cs, CPU_INTERRUPT_VFNMI); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_VFNMI); - } - } -} - -void arm_cpu_update_vserr(ARMCPU *cpu) -{ - /* - * Update the interrupt level for VSERR, which is the HCR_EL2.VSE bit. - */ - CPUARMState *env = &cpu->env; - CPUState *cs = CPU(cpu); - - bool new_state = env->cp15.hcr_el2 & HCR_VSE; - - if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VSERR) != 0)) { - if (new_state) { - cpu_interrupt(cs, CPU_INTERRUPT_VSERR); - } else { - cpu_reset_interrupt(cs, CPU_INTERRUPT_VSERR); - } - } -} - #ifndef CONFIG_USER_ONLY static void arm_cpu_set_irq(void *opaque, int irq, int level) { @@ -1192,7 +833,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) { ARMCPU *cpu = ARM_CPU(cs); CPUARMState *env = &cpu->env; - uint32_t psr = pstate_read(env); + uint64_t psr = pstate_read(env); int i, j; int el = arm_current_el(env); uint64_t hcr = arm_hcr_el2_eff(env); @@ -1214,7 +855,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) } else { ns_status = ""; } - qemu_fprintf(f, "PSTATE=%08x %c%c%c%c %sEL%d%c", + qemu_fprintf(f, "PSTATE=%016" PRIx64 " %c%c%c%c %sEL%d%c", psr, psr & PSTATE_N ? 
'N' : '-', psr & PSTATE_Z ? 'Z' : '-', @@ -1231,7 +872,7 @@ static void aarch64_cpu_dump_state(CPUState *cs, FILE *f, int flags) (FIELD_EX64(env->svcr, SVCR, SM) ? 'S' : '-')); } if (cpu_isar_feature(aa64_bti, cpu)) { - qemu_fprintf(f, " BTYPE=%d", (psr & PSTATE_BTYPE) >> 10); + qemu_fprintf(f, " BTYPE=%d", (int)(psr & PSTATE_BTYPE) >> 10); } qemu_fprintf(f, "%s%s%s", (hcr & HCR_NV) ? " NV" : "", @@ -2253,14 +1894,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } - /* - * We rely on no XScale CPU having VFP so we can use the same bits in the - * TB flags field for VECSTRIDE and XSCALE_CPAR. - */ - assert(arm_feature(env, ARM_FEATURE_AARCH64) || - !cpu_isar_feature(aa32_vfp_simd, cpu) || - !arm_feature(env, ARM_FEATURE_XSCALE)); - #ifndef CONFIG_USER_ONLY { int pagebits; @@ -2617,14 +2250,10 @@ static const Property arm_cpu_properties[] = { static const gchar *arm_gdb_arch_name(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); - CPUARMState *env = &cpu->env; if (arm_gdbstub_is_aarch64(cpu)) { return "aarch64"; } - if (arm_feature(env, ARM_FEATURE_IWMMXT)) { - return "iwmmxt"; - } return "arm"; } diff --git a/target/arm/cpu.h b/target/arm/cpu.h index dc9b6dce4c922..bf221e6f97318 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -33,6 +33,7 @@ #include "target/arm/multiprocessing.h" #include "target/arm/gtimer.h" #include "target/arm/cpu-sysregs.h" +#include "target/arm/mmuidx.h" #define EXCP_UDEF 1 /* undefined instruction */ #define EXCP_SWI 2 /* software interrupt */ @@ -267,7 +268,7 @@ typedef struct CPUArchState { uint64_t xregs[32]; uint64_t pc; /* PSTATE isn't an architectural register for ARMv8. However, it is - * convenient for us to assemble the underlying state into a 32 bit format + * convenient for us to assemble the underlying state into a 64 bit format * identical to the architectural format used for the SPSR. (This is also * what the Linux kernel's 'pstate' field in signal handlers and KVM's * 'pstate' register are.) 
Of the PSTATE bits: @@ -279,7 +280,7 @@ typedef struct CPUArchState { * SM and ZA are kept in env->svcr * all other bits are stored in their correct places in env->pstate */ - uint32_t pstate; + uint64_t pstate; bool aarch64; /* True if CPU is in aarch64 state; inverse of PSTATE.nRW */ bool thumb; /* True if CPU is in thumb mode; cpsr[5] */ @@ -337,10 +338,10 @@ typedef struct CPUArchState { }; uint64_t sctlr_el[4]; }; + uint64_t sctlr2_el[4]; /* Extension to System control register. */ uint64_t vsctlr; /* Virtualization System control register. */ uint64_t cpacr_el1; /* Architectural feature access control register */ uint64_t cptr_el[4]; /* ARMv8 feature trap registers */ - uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */ uint64_t sder; /* Secure debug enable register. */ uint32_t nsacr; /* Non-secure access control register. */ union { /* MMU translation table base 0. */ @@ -365,8 +366,12 @@ typedef struct CPUArchState { uint64_t vsttbr_el2; /* Secure Virtualization Translation Table. */ /* MMU translation table base control. */ uint64_t tcr_el[4]; + uint64_t tcr2_el[3]; uint64_t vtcr_el2; /* Virtualization Translation Control. */ uint64_t vstcr_el2; /* Secure Virtualization Translation Control. */ + uint64_t pir_el[4]; /* PIRE0_EL1, PIR_EL1, PIR_EL2, PIR_EL3 */ + uint64_t pire0_el2; + uint64_t s2pir_el2; uint32_t c2_data; /* MPU data cacheable bits. */ uint32_t c2_insn; /* MPU instruction cacheable bits. */ union { /* MMU domain access control register @@ -511,7 +516,6 @@ typedef struct CPUArchState { uint64_t cntvoff_el2; /* Counter Virtual Offset register */ uint64_t cntpoff_el2; /* Counter Physical Offset register */ ARMGenericTimer c14_timer[NUM_GTIMERS]; - uint32_t c15_cpar; /* XScale Coprocessor Access Register */ uint32_t c15_ticonfig; /* TI925T configuration byte. */ uint32_t c15_i_max; /* Maximum D-cache dirty line index. */ uint32_t c15_i_min; /* Minimum D-cache dirty line index. 
*/ @@ -576,6 +580,18 @@ typedef struct CPUArchState { /* NV2 register */ uint64_t vncr_el2; + + uint64_t gcscr_el[4]; /* GCSCRE0_EL1, GCSCR_EL[123] */ + uint64_t gcspr_el[4]; /* GCSPR_EL[0123] */ + + /* MEC registers */ + uint64_t mecid_p0_el2; + uint64_t mecid_a0_el2; + uint64_t mecid_p1_el2; + uint64_t mecid_a1_el2; + uint64_t mecid_rl_a_el3; + uint64_t vmecid_p_el2; + uint64_t vmecid_a_el2; } cp15; struct { @@ -630,13 +646,10 @@ typedef struct CPUArchState { * entry process. */ struct { - uint32_t syndrome; /* AArch64 format syndrome register */ - uint32_t fsr; /* AArch32 format fault status register info */ + uint64_t syndrome; /* AArch64 format syndrome register */ uint64_t vaddress; /* virtual addr associated with exception, if any */ + uint32_t fsr; /* AArch32 format fault status register info */ uint32_t target_el; /* EL the exception should be targeted for */ - /* If we implement EL2 we will also need to store information - * about the intermediate physical address for stage 2 faults. - */ } exception; /* Information associated with an SError */ @@ -697,14 +710,6 @@ typedef struct CPUArchState { */ uint64_t exclusive_high; - /* iwMMXt coprocessor state. 
*/ - struct { - uint64_t regs[16]; - uint64_t val; - - uint32_t cregs[16]; - } iwmmxt; - struct { ARMPACKey apia; ARMPACKey apib; @@ -933,6 +938,7 @@ struct ArchCPU { DynamicGDBFeatureInfo dyn_sysreg_feature; DynamicGDBFeatureInfo dyn_svereg_feature; + DynamicGDBFeatureInfo dyn_smereg_feature; DynamicGDBFeatureInfo dyn_m_systemreg_feature; DynamicGDBFeatureInfo dyn_m_secextreg_feature; @@ -1420,6 +1426,19 @@ void pmu_init(ARMCPU *cpu); #define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */ #define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */ +#define SCTLR2_EMEC (1ULL << 1) /* FEAT_MEC */ +#define SCTLR2_NMEA (1ULL << 2) /* FEAT_DoubleFault2 */ +#define SCTLR2_ENADERR (1ULL << 3) /* FEAT_ADERR */ +#define SCTLR2_ENANERR (1ULL << 4) /* FEAT_ANERR */ +#define SCTLR2_EASE (1ULL << 5) /* FEAT_DoubleFault2 */ +#define SCTLR2_ENIDCP128 (1ULL << 6) /* FEAT_SYSREG128 */ +#define SCTLR2_ENPACM (1ULL << 7) /* FEAT_PAuth_LR */ +#define SCTLR2_ENPACM0 (1ULL << 8) /* FEAT_PAuth_LR */ +#define SCTLR2_CPTA (1ULL << 9) /* FEAT_CPA2 */ +#define SCTLR2_CPTA0 (1ULL << 10) /* FEAT_CPA2 */ +#define SCTLR2_CPTM (1ULL << 11) /* FEAT_CPA2 */ +#define SCTLR2_CPTM0 (1ULL << 12) /* FEAT_CAP2 */ + #define CPSR_M (0x1fU) #define CPSR_T (1U << 5) #define CPSR_F (1U << 6) @@ -1492,6 +1511,7 @@ void pmu_init(ARMCPU *cpu); #define PSTATE_C (1U << 29) #define PSTATE_Z (1U << 30) #define PSTATE_N (1U << 31) +#define PSTATE_EXLOCK (1ULL << 34) #define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V) #define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F) #define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF | PSTATE_BTYPE) @@ -1528,7 +1548,7 @@ static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler) * interprocessing, so we don't attempt to sync with the cpsr state used by * the 32 bit decoder. 
*/ -static inline uint32_t pstate_read(CPUARMState *env) +static inline uint64_t pstate_read(CPUARMState *env) { int ZF; @@ -1538,7 +1558,7 @@ static inline uint32_t pstate_read(CPUARMState *env) | env->pstate | env->daif | (env->btype << 10); } -static inline void pstate_write(CPUARMState *env, uint32_t val) +static inline void pstate_write(CPUARMState *env, uint64_t val) { env->ZF = (~val) & PSTATE_Z; env->NF = val; @@ -1710,11 +1730,24 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask) #define SCR_ENAS0 (1ULL << 36) #define SCR_ADEN (1ULL << 37) #define SCR_HXEN (1ULL << 38) +#define SCR_GCSEN (1ULL << 39) #define SCR_TRNDR (1ULL << 40) #define SCR_ENTP2 (1ULL << 41) +#define SCR_TCR2EN (1ULL << 43) +#define SCR_SCTLR2EN (1ULL << 44) +#define SCR_PIEN (1ULL << 45) #define SCR_GPF (1ULL << 48) +#define SCR_MECEN (1ULL << 49) #define SCR_NSE (1ULL << 62) +/* GCSCR_ELx fields */ +#define GCSCR_PCRSEL (1ULL << 0) +#define GCSCR_RVCHKEN (1ULL << 5) +#define GCSCR_EXLOCKEN (1ULL << 6) +#define GCSCR_PUSHMEN (1ULL << 8) +#define GCSCR_STREN (1ULL << 9) +#define GCSCRE0_NTR (1ULL << 10) + /* Return the current FPSCR value. */ uint32_t vfp_get_fpscr(CPUARMState *env); void vfp_set_fpscr(CPUARMState *env, uint32_t val); @@ -1848,16 +1881,6 @@ enum arm_cpu_mode { /* QEMU-internal value meaning "FPSCR, but we care only about NZCV" */ #define QEMU_VFP_FPSCR_NZCV 0xffff -/* iwMMXt coprocessor control registers. */ -#define ARM_IWMMXT_wCID 0 -#define ARM_IWMMXT_wCon 1 -#define ARM_IWMMXT_wCSSF 2 -#define ARM_IWMMXT_wCASF 3 -#define ARM_IWMMXT_wCGR0 8 -#define ARM_IWMMXT_wCGR1 9 -#define ARM_IWMMXT_wCGR2 10 -#define ARM_IWMMXT_wCGR3 11 - /* V7M CCR bits */ FIELD(V7M_CCR, NONBASETHRDENA, 0, 1) FIELD(V7M_CCR, USERSETMPEND, 1, 1) @@ -1996,424 +2019,20 @@ FIELD(V7M_VPR, P0, 0, 16) FIELD(V7M_VPR, MASK01, 16, 4) FIELD(V7M_VPR, MASK23, 20, 4) -/* - * System register ID fields. 
- */ -FIELD(CLIDR_EL1, CTYPE1, 0, 3) -FIELD(CLIDR_EL1, CTYPE2, 3, 3) -FIELD(CLIDR_EL1, CTYPE3, 6, 3) -FIELD(CLIDR_EL1, CTYPE4, 9, 3) -FIELD(CLIDR_EL1, CTYPE5, 12, 3) -FIELD(CLIDR_EL1, CTYPE6, 15, 3) -FIELD(CLIDR_EL1, CTYPE7, 18, 3) -FIELD(CLIDR_EL1, LOUIS, 21, 3) -FIELD(CLIDR_EL1, LOC, 24, 3) -FIELD(CLIDR_EL1, LOUU, 27, 3) -FIELD(CLIDR_EL1, ICB, 30, 3) - -/* When FEAT_CCIDX is implemented */ -FIELD(CCSIDR_EL1, CCIDX_LINESIZE, 0, 3) -FIELD(CCSIDR_EL1, CCIDX_ASSOCIATIVITY, 3, 21) -FIELD(CCSIDR_EL1, CCIDX_NUMSETS, 32, 24) - -/* When FEAT_CCIDX is not implemented */ -FIELD(CCSIDR_EL1, LINESIZE, 0, 3) -FIELD(CCSIDR_EL1, ASSOCIATIVITY, 3, 10) -FIELD(CCSIDR_EL1, NUMSETS, 13, 15) - -FIELD(CTR_EL0, IMINLINE, 0, 4) -FIELD(CTR_EL0, L1IP, 14, 2) -FIELD(CTR_EL0, DMINLINE, 16, 4) -FIELD(CTR_EL0, ERG, 20, 4) -FIELD(CTR_EL0, CWG, 24, 4) -FIELD(CTR_EL0, IDC, 28, 1) -FIELD(CTR_EL0, DIC, 29, 1) -FIELD(CTR_EL0, TMINLINE, 32, 6) - -FIELD(MIDR_EL1, REVISION, 0, 4) -FIELD(MIDR_EL1, PARTNUM, 4, 12) -FIELD(MIDR_EL1, ARCHITECTURE, 16, 4) -FIELD(MIDR_EL1, VARIANT, 20, 4) -FIELD(MIDR_EL1, IMPLEMENTER, 24, 8) - -FIELD(ID_ISAR0, SWAP, 0, 4) -FIELD(ID_ISAR0, BITCOUNT, 4, 4) -FIELD(ID_ISAR0, BITFIELD, 8, 4) -FIELD(ID_ISAR0, CMPBRANCH, 12, 4) -FIELD(ID_ISAR0, COPROC, 16, 4) -FIELD(ID_ISAR0, DEBUG, 20, 4) -FIELD(ID_ISAR0, DIVIDE, 24, 4) - -FIELD(ID_ISAR1, ENDIAN, 0, 4) -FIELD(ID_ISAR1, EXCEPT, 4, 4) -FIELD(ID_ISAR1, EXCEPT_AR, 8, 4) -FIELD(ID_ISAR1, EXTEND, 12, 4) -FIELD(ID_ISAR1, IFTHEN, 16, 4) -FIELD(ID_ISAR1, IMMEDIATE, 20, 4) -FIELD(ID_ISAR1, INTERWORK, 24, 4) -FIELD(ID_ISAR1, JAZELLE, 28, 4) - -FIELD(ID_ISAR2, LOADSTORE, 0, 4) -FIELD(ID_ISAR2, MEMHINT, 4, 4) -FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4) -FIELD(ID_ISAR2, MULT, 12, 4) -FIELD(ID_ISAR2, MULTS, 16, 4) -FIELD(ID_ISAR2, MULTU, 20, 4) -FIELD(ID_ISAR2, PSR_AR, 24, 4) -FIELD(ID_ISAR2, REVERSAL, 28, 4) - -FIELD(ID_ISAR3, SATURATE, 0, 4) -FIELD(ID_ISAR3, SIMD, 4, 4) -FIELD(ID_ISAR3, SVC, 8, 4) -FIELD(ID_ISAR3, SYNCHPRIM, 12, 4) -FIELD(ID_ISAR3, 
TABBRANCH, 16, 4) -FIELD(ID_ISAR3, T32COPY, 20, 4) -FIELD(ID_ISAR3, TRUENOP, 24, 4) -FIELD(ID_ISAR3, T32EE, 28, 4) - -FIELD(ID_ISAR4, UNPRIV, 0, 4) -FIELD(ID_ISAR4, WITHSHIFTS, 4, 4) -FIELD(ID_ISAR4, WRITEBACK, 8, 4) -FIELD(ID_ISAR4, SMC, 12, 4) -FIELD(ID_ISAR4, BARRIER, 16, 4) -FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4) -FIELD(ID_ISAR4, PSR_M, 24, 4) -FIELD(ID_ISAR4, SWP_FRAC, 28, 4) - -FIELD(ID_ISAR5, SEVL, 0, 4) -FIELD(ID_ISAR5, AES, 4, 4) -FIELD(ID_ISAR5, SHA1, 8, 4) -FIELD(ID_ISAR5, SHA2, 12, 4) -FIELD(ID_ISAR5, CRC32, 16, 4) -FIELD(ID_ISAR5, RDM, 24, 4) -FIELD(ID_ISAR5, VCMA, 28, 4) - -FIELD(ID_ISAR6, JSCVT, 0, 4) -FIELD(ID_ISAR6, DP, 4, 4) -FIELD(ID_ISAR6, FHM, 8, 4) -FIELD(ID_ISAR6, SB, 12, 4) -FIELD(ID_ISAR6, SPECRES, 16, 4) -FIELD(ID_ISAR6, BF16, 20, 4) -FIELD(ID_ISAR6, I8MM, 24, 4) - -FIELD(ID_MMFR0, VMSA, 0, 4) -FIELD(ID_MMFR0, PMSA, 4, 4) -FIELD(ID_MMFR0, OUTERSHR, 8, 4) -FIELD(ID_MMFR0, SHARELVL, 12, 4) -FIELD(ID_MMFR0, TCM, 16, 4) -FIELD(ID_MMFR0, AUXREG, 20, 4) -FIELD(ID_MMFR0, FCSE, 24, 4) -FIELD(ID_MMFR0, INNERSHR, 28, 4) - -FIELD(ID_MMFR1, L1HVDVA, 0, 4) -FIELD(ID_MMFR1, L1UNIVA, 4, 4) -FIELD(ID_MMFR1, L1HVDSW, 8, 4) -FIELD(ID_MMFR1, L1UNISW, 12, 4) -FIELD(ID_MMFR1, L1HVD, 16, 4) -FIELD(ID_MMFR1, L1UNI, 20, 4) -FIELD(ID_MMFR1, L1TSTCLN, 24, 4) -FIELD(ID_MMFR1, BPRED, 28, 4) - -FIELD(ID_MMFR2, L1HVDFG, 0, 4) -FIELD(ID_MMFR2, L1HVDBG, 4, 4) -FIELD(ID_MMFR2, L1HVDRNG, 8, 4) -FIELD(ID_MMFR2, HVDTLB, 12, 4) -FIELD(ID_MMFR2, UNITLB, 16, 4) -FIELD(ID_MMFR2, MEMBARR, 20, 4) -FIELD(ID_MMFR2, WFISTALL, 24, 4) -FIELD(ID_MMFR2, HWACCFLG, 28, 4) - -FIELD(ID_MMFR3, CMAINTVA, 0, 4) -FIELD(ID_MMFR3, CMAINTSW, 4, 4) -FIELD(ID_MMFR3, BPMAINT, 8, 4) -FIELD(ID_MMFR3, MAINTBCST, 12, 4) -FIELD(ID_MMFR3, PAN, 16, 4) -FIELD(ID_MMFR3, COHWALK, 20, 4) -FIELD(ID_MMFR3, CMEMSZ, 24, 4) -FIELD(ID_MMFR3, SUPERSEC, 28, 4) - -FIELD(ID_MMFR4, SPECSEI, 0, 4) -FIELD(ID_MMFR4, AC2, 4, 4) -FIELD(ID_MMFR4, XNX, 8, 4) -FIELD(ID_MMFR4, CNP, 12, 4) -FIELD(ID_MMFR4, HPDS, 16, 4) 
-FIELD(ID_MMFR4, LSM, 20, 4) -FIELD(ID_MMFR4, CCIDX, 24, 4) -FIELD(ID_MMFR4, EVT, 28, 4) - -FIELD(ID_MMFR5, ETS, 0, 4) -FIELD(ID_MMFR5, NTLBPA, 4, 4) - -FIELD(ID_PFR0, STATE0, 0, 4) -FIELD(ID_PFR0, STATE1, 4, 4) -FIELD(ID_PFR0, STATE2, 8, 4) -FIELD(ID_PFR0, STATE3, 12, 4) -FIELD(ID_PFR0, CSV2, 16, 4) -FIELD(ID_PFR0, AMU, 20, 4) -FIELD(ID_PFR0, DIT, 24, 4) -FIELD(ID_PFR0, RAS, 28, 4) - -FIELD(ID_PFR1, PROGMOD, 0, 4) -FIELD(ID_PFR1, SECURITY, 4, 4) -FIELD(ID_PFR1, MPROGMOD, 8, 4) -FIELD(ID_PFR1, VIRTUALIZATION, 12, 4) -FIELD(ID_PFR1, GENTIMER, 16, 4) -FIELD(ID_PFR1, SEC_FRAC, 20, 4) -FIELD(ID_PFR1, VIRT_FRAC, 24, 4) -FIELD(ID_PFR1, GIC, 28, 4) - -FIELD(ID_PFR2, CSV3, 0, 4) -FIELD(ID_PFR2, SSBS, 4, 4) -FIELD(ID_PFR2, RAS_FRAC, 8, 4) - -FIELD(ID_AA64ISAR0, AES, 4, 4) -FIELD(ID_AA64ISAR0, SHA1, 8, 4) -FIELD(ID_AA64ISAR0, SHA2, 12, 4) -FIELD(ID_AA64ISAR0, CRC32, 16, 4) -FIELD(ID_AA64ISAR0, ATOMIC, 20, 4) -FIELD(ID_AA64ISAR0, TME, 24, 4) -FIELD(ID_AA64ISAR0, RDM, 28, 4) -FIELD(ID_AA64ISAR0, SHA3, 32, 4) -FIELD(ID_AA64ISAR0, SM3, 36, 4) -FIELD(ID_AA64ISAR0, SM4, 40, 4) -FIELD(ID_AA64ISAR0, DP, 44, 4) -FIELD(ID_AA64ISAR0, FHM, 48, 4) -FIELD(ID_AA64ISAR0, TS, 52, 4) -FIELD(ID_AA64ISAR0, TLB, 56, 4) -FIELD(ID_AA64ISAR0, RNDR, 60, 4) - -FIELD(ID_AA64ISAR1, DPB, 0, 4) -FIELD(ID_AA64ISAR1, APA, 4, 4) -FIELD(ID_AA64ISAR1, API, 8, 4) -FIELD(ID_AA64ISAR1, JSCVT, 12, 4) -FIELD(ID_AA64ISAR1, FCMA, 16, 4) -FIELD(ID_AA64ISAR1, LRCPC, 20, 4) -FIELD(ID_AA64ISAR1, GPA, 24, 4) -FIELD(ID_AA64ISAR1, GPI, 28, 4) -FIELD(ID_AA64ISAR1, FRINTTS, 32, 4) -FIELD(ID_AA64ISAR1, SB, 36, 4) -FIELD(ID_AA64ISAR1, SPECRES, 40, 4) -FIELD(ID_AA64ISAR1, BF16, 44, 4) -FIELD(ID_AA64ISAR1, DGH, 48, 4) -FIELD(ID_AA64ISAR1, I8MM, 52, 4) -FIELD(ID_AA64ISAR1, XS, 56, 4) -FIELD(ID_AA64ISAR1, LS64, 60, 4) - -FIELD(ID_AA64ISAR2, WFXT, 0, 4) -FIELD(ID_AA64ISAR2, RPRES, 4, 4) -FIELD(ID_AA64ISAR2, GPA3, 8, 4) -FIELD(ID_AA64ISAR2, APA3, 12, 4) -FIELD(ID_AA64ISAR2, MOPS, 16, 4) -FIELD(ID_AA64ISAR2, BC, 20, 4) 
-FIELD(ID_AA64ISAR2, PAC_FRAC, 24, 4) -FIELD(ID_AA64ISAR2, CLRBHB, 28, 4) -FIELD(ID_AA64ISAR2, SYSREG_128, 32, 4) -FIELD(ID_AA64ISAR2, SYSINSTR_128, 36, 4) -FIELD(ID_AA64ISAR2, PRFMSLC, 40, 4) -FIELD(ID_AA64ISAR2, RPRFM, 48, 4) -FIELD(ID_AA64ISAR2, CSSC, 52, 4) -FIELD(ID_AA64ISAR2, LUT, 56, 4) -FIELD(ID_AA64ISAR2, ATS1A, 60, 4) - -FIELD(ID_AA64PFR0, EL0, 0, 4) -FIELD(ID_AA64PFR0, EL1, 4, 4) -FIELD(ID_AA64PFR0, EL2, 8, 4) -FIELD(ID_AA64PFR0, EL3, 12, 4) -FIELD(ID_AA64PFR0, FP, 16, 4) -FIELD(ID_AA64PFR0, ADVSIMD, 20, 4) -FIELD(ID_AA64PFR0, GIC, 24, 4) -FIELD(ID_AA64PFR0, RAS, 28, 4) -FIELD(ID_AA64PFR0, SVE, 32, 4) -FIELD(ID_AA64PFR0, SEL2, 36, 4) -FIELD(ID_AA64PFR0, MPAM, 40, 4) -FIELD(ID_AA64PFR0, AMU, 44, 4) -FIELD(ID_AA64PFR0, DIT, 48, 4) -FIELD(ID_AA64PFR0, RME, 52, 4) -FIELD(ID_AA64PFR0, CSV2, 56, 4) -FIELD(ID_AA64PFR0, CSV3, 60, 4) - -FIELD(ID_AA64PFR1, BT, 0, 4) -FIELD(ID_AA64PFR1, SSBS, 4, 4) -FIELD(ID_AA64PFR1, MTE, 8, 4) -FIELD(ID_AA64PFR1, RAS_FRAC, 12, 4) -FIELD(ID_AA64PFR1, MPAM_FRAC, 16, 4) -FIELD(ID_AA64PFR1, SME, 24, 4) -FIELD(ID_AA64PFR1, RNDR_TRAP, 28, 4) -FIELD(ID_AA64PFR1, CSV2_FRAC, 32, 4) -FIELD(ID_AA64PFR1, NMI, 36, 4) -FIELD(ID_AA64PFR1, MTE_FRAC, 40, 4) -FIELD(ID_AA64PFR1, GCS, 44, 4) -FIELD(ID_AA64PFR1, THE, 48, 4) -FIELD(ID_AA64PFR1, MTEX, 52, 4) -FIELD(ID_AA64PFR1, DF2, 56, 4) -FIELD(ID_AA64PFR1, PFAR, 60, 4) - -FIELD(ID_AA64MMFR0, PARANGE, 0, 4) -FIELD(ID_AA64MMFR0, ASIDBITS, 4, 4) -FIELD(ID_AA64MMFR0, BIGEND, 8, 4) -FIELD(ID_AA64MMFR0, SNSMEM, 12, 4) -FIELD(ID_AA64MMFR0, BIGENDEL0, 16, 4) -FIELD(ID_AA64MMFR0, TGRAN16, 20, 4) -FIELD(ID_AA64MMFR0, TGRAN64, 24, 4) -FIELD(ID_AA64MMFR0, TGRAN4, 28, 4) -FIELD(ID_AA64MMFR0, TGRAN16_2, 32, 4) -FIELD(ID_AA64MMFR0, TGRAN64_2, 36, 4) -FIELD(ID_AA64MMFR0, TGRAN4_2, 40, 4) -FIELD(ID_AA64MMFR0, EXS, 44, 4) -FIELD(ID_AA64MMFR0, FGT, 56, 4) -FIELD(ID_AA64MMFR0, ECV, 60, 4) - -FIELD(ID_AA64MMFR1, HAFDBS, 0, 4) -FIELD(ID_AA64MMFR1, VMIDBITS, 4, 4) -FIELD(ID_AA64MMFR1, VH, 8, 4) -FIELD(ID_AA64MMFR1, HPDS, 
12, 4) -FIELD(ID_AA64MMFR1, LO, 16, 4) -FIELD(ID_AA64MMFR1, PAN, 20, 4) -FIELD(ID_AA64MMFR1, SPECSEI, 24, 4) -FIELD(ID_AA64MMFR1, XNX, 28, 4) -FIELD(ID_AA64MMFR1, TWED, 32, 4) -FIELD(ID_AA64MMFR1, ETS, 36, 4) -FIELD(ID_AA64MMFR1, HCX, 40, 4) -FIELD(ID_AA64MMFR1, AFP, 44, 4) -FIELD(ID_AA64MMFR1, NTLBPA, 48, 4) -FIELD(ID_AA64MMFR1, TIDCP1, 52, 4) -FIELD(ID_AA64MMFR1, CMOW, 56, 4) -FIELD(ID_AA64MMFR1, ECBHB, 60, 4) - -FIELD(ID_AA64MMFR2, CNP, 0, 4) -FIELD(ID_AA64MMFR2, UAO, 4, 4) -FIELD(ID_AA64MMFR2, LSM, 8, 4) -FIELD(ID_AA64MMFR2, IESB, 12, 4) -FIELD(ID_AA64MMFR2, VARANGE, 16, 4) -FIELD(ID_AA64MMFR2, CCIDX, 20, 4) -FIELD(ID_AA64MMFR2, NV, 24, 4) -FIELD(ID_AA64MMFR2, ST, 28, 4) -FIELD(ID_AA64MMFR2, AT, 32, 4) -FIELD(ID_AA64MMFR2, IDS, 36, 4) -FIELD(ID_AA64MMFR2, FWB, 40, 4) -FIELD(ID_AA64MMFR2, TTL, 48, 4) -FIELD(ID_AA64MMFR2, BBM, 52, 4) -FIELD(ID_AA64MMFR2, EVT, 56, 4) -FIELD(ID_AA64MMFR2, E0PD, 60, 4) - -FIELD(ID_AA64MMFR3, TCRX, 0, 4) -FIELD(ID_AA64MMFR3, SCTLRX, 4, 4) -FIELD(ID_AA64MMFR3, S1PIE, 8, 4) -FIELD(ID_AA64MMFR3, S2PIE, 12, 4) -FIELD(ID_AA64MMFR3, S1POE, 16, 4) -FIELD(ID_AA64MMFR3, S2POE, 20, 4) -FIELD(ID_AA64MMFR3, AIE, 24, 4) -FIELD(ID_AA64MMFR3, MEC, 28, 4) -FIELD(ID_AA64MMFR3, D128, 32, 4) -FIELD(ID_AA64MMFR3, D128_2, 36, 4) -FIELD(ID_AA64MMFR3, SNERR, 40, 4) -FIELD(ID_AA64MMFR3, ANERR, 44, 4) -FIELD(ID_AA64MMFR3, SDERR, 52, 4) -FIELD(ID_AA64MMFR3, ADERR, 56, 4) -FIELD(ID_AA64MMFR3, SPEC_FPACC, 60, 4) - -FIELD(ID_AA64DFR0, DEBUGVER, 0, 4) -FIELD(ID_AA64DFR0, TRACEVER, 4, 4) -FIELD(ID_AA64DFR0, PMUVER, 8, 4) -FIELD(ID_AA64DFR0, BRPS, 12, 4) -FIELD(ID_AA64DFR0, PMSS, 16, 4) -FIELD(ID_AA64DFR0, WRPS, 20, 4) -FIELD(ID_AA64DFR0, SEBEP, 24, 4) -FIELD(ID_AA64DFR0, CTX_CMPS, 28, 4) -FIELD(ID_AA64DFR0, PMSVER, 32, 4) -FIELD(ID_AA64DFR0, DOUBLELOCK, 36, 4) -FIELD(ID_AA64DFR0, TRACEFILT, 40, 4) -FIELD(ID_AA64DFR0, TRACEBUFFER, 44, 4) -FIELD(ID_AA64DFR0, MTPMU, 48, 4) -FIELD(ID_AA64DFR0, BRBE, 52, 4) -FIELD(ID_AA64DFR0, EXTTRCBUFF, 56, 4) -FIELD(ID_AA64DFR0, 
HPMN0, 60, 4) - -FIELD(ID_AA64ZFR0, SVEVER, 0, 4) -FIELD(ID_AA64ZFR0, AES, 4, 4) -FIELD(ID_AA64ZFR0, BITPERM, 16, 4) -FIELD(ID_AA64ZFR0, BFLOAT16, 20, 4) -FIELD(ID_AA64ZFR0, B16B16, 24, 4) -FIELD(ID_AA64ZFR0, SHA3, 32, 4) -FIELD(ID_AA64ZFR0, SM4, 40, 4) -FIELD(ID_AA64ZFR0, I8MM, 44, 4) -FIELD(ID_AA64ZFR0, F32MM, 52, 4) -FIELD(ID_AA64ZFR0, F64MM, 56, 4) - -FIELD(ID_AA64SMFR0, F32F32, 32, 1) -FIELD(ID_AA64SMFR0, BI32I32, 33, 1) -FIELD(ID_AA64SMFR0, B16F32, 34, 1) -FIELD(ID_AA64SMFR0, F16F32, 35, 1) -FIELD(ID_AA64SMFR0, I8I32, 36, 4) -FIELD(ID_AA64SMFR0, F16F16, 42, 1) -FIELD(ID_AA64SMFR0, B16B16, 43, 1) -FIELD(ID_AA64SMFR0, I16I32, 44, 4) -FIELD(ID_AA64SMFR0, F64F64, 48, 1) -FIELD(ID_AA64SMFR0, I16I64, 52, 4) -FIELD(ID_AA64SMFR0, SMEVER, 56, 4) -FIELD(ID_AA64SMFR0, FA64, 63, 1) - -FIELD(ID_DFR0, COPDBG, 0, 4) -FIELD(ID_DFR0, COPSDBG, 4, 4) -FIELD(ID_DFR0, MMAPDBG, 8, 4) -FIELD(ID_DFR0, COPTRC, 12, 4) -FIELD(ID_DFR0, MMAPTRC, 16, 4) -FIELD(ID_DFR0, MPROFDBG, 20, 4) -FIELD(ID_DFR0, PERFMON, 24, 4) -FIELD(ID_DFR0, TRACEFILT, 28, 4) - -FIELD(ID_DFR1, MTPMU, 0, 4) -FIELD(ID_DFR1, HPMN0, 4, 4) - -FIELD(DBGDIDR, SE_IMP, 12, 1) -FIELD(DBGDIDR, NSUHD_IMP, 14, 1) -FIELD(DBGDIDR, VERSION, 16, 4) -FIELD(DBGDIDR, CTX_CMPS, 20, 4) -FIELD(DBGDIDR, BRPS, 24, 4) -FIELD(DBGDIDR, WRPS, 28, 4) - -FIELD(DBGDEVID, PCSAMPLE, 0, 4) -FIELD(DBGDEVID, WPADDRMASK, 4, 4) -FIELD(DBGDEVID, BPADDRMASK, 8, 4) -FIELD(DBGDEVID, VECTORCATCH, 12, 4) -FIELD(DBGDEVID, VIRTEXTNS, 16, 4) -FIELD(DBGDEVID, DOUBLELOCK, 20, 4) -FIELD(DBGDEVID, AUXREGS, 24, 4) -FIELD(DBGDEVID, CIDMASK, 28, 4) - -FIELD(DBGDEVID1, PCSROFFSET, 0, 4) - -FIELD(MVFR0, SIMDREG, 0, 4) -FIELD(MVFR0, FPSP, 4, 4) -FIELD(MVFR0, FPDP, 8, 4) -FIELD(MVFR0, FPTRAP, 12, 4) -FIELD(MVFR0, FPDIVIDE, 16, 4) -FIELD(MVFR0, FPSQRT, 20, 4) -FIELD(MVFR0, FPSHVEC, 24, 4) -FIELD(MVFR0, FPROUND, 28, 4) - -FIELD(MVFR1, FPFTZ, 0, 4) -FIELD(MVFR1, FPDNAN, 4, 4) -FIELD(MVFR1, SIMDLS, 8, 4) /* A-profile only */ -FIELD(MVFR1, SIMDINT, 12, 4) /* A-profile only */ 
-FIELD(MVFR1, SIMDSP, 16, 4) /* A-profile only */ -FIELD(MVFR1, SIMDHP, 20, 4) /* A-profile only */ -FIELD(MVFR1, MVE, 8, 4) /* M-profile only */ -FIELD(MVFR1, FP16, 20, 4) /* M-profile only */ -FIELD(MVFR1, FPHP, 24, 4) -FIELD(MVFR1, SIMDFMAC, 28, 4) - -FIELD(MVFR2, SIMDMISC, 0, 4) -FIELD(MVFR2, FPMISC, 4, 4) - FIELD(GPCCR, PPS, 0, 3) +FIELD(GPCCR, RLPAD, 5, 1) +FIELD(GPCCR, NSPAD, 6, 1) +FIELD(GPCCR, SPAD, 7, 1) FIELD(GPCCR, IRGN, 8, 2) FIELD(GPCCR, ORGN, 10, 2) FIELD(GPCCR, SH, 12, 2) FIELD(GPCCR, PGS, 14, 2) FIELD(GPCCR, GPC, 16, 1) FIELD(GPCCR, GPCP, 17, 1) +FIELD(GPCCR, TBGPCD, 18, 1) +FIELD(GPCCR, NSO, 19, 1) FIELD(GPCCR, L0GPTSZ, 20, 4) +FIELD(GPCCR, APPSAA, 24, 1) FIELD(MFAR, FPA, 12, 40) FIELD(MFAR, NSE, 62, 1) @@ -2427,8 +2046,6 @@ QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK); */ enum arm_features { ARM_FEATURE_AUXCR, /* ARM1026 Auxiliary control register. */ - ARM_FEATURE_XSCALE, /* Intel XScale extensions. */ - ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension. 
*/ ARM_FEATURE_V6, ARM_FEATURE_V6K, ARM_FEATURE_V7, @@ -2629,6 +2246,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env) */ uint64_t arm_hcr_el2_eff_secstate(CPUARMState *env, ARMSecuritySpace space); uint64_t arm_hcr_el2_eff(CPUARMState *env); +uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env); uint64_t arm_hcrx_el2_eff(CPUARMState *env); /* @@ -2708,212 +2326,6 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync); #define TYPE_ARM_HOST_CPU "host-" TYPE_ARM_CPU -/* ARM has the following "translation regimes" (as the ARM ARM calls them): - * - * If EL3 is 64-bit: - * + NonSecure EL1 & 0 stage 1 - * + NonSecure EL1 & 0 stage 2 - * + NonSecure EL2 - * + NonSecure EL2 & 0 (ARMv8.1-VHE) - * + Secure EL1 & 0 stage 1 - * + Secure EL1 & 0 stage 2 (FEAT_SEL2) - * + Secure EL2 (FEAT_SEL2) - * + Secure EL2 & 0 (FEAT_SEL2) - * + Realm EL1 & 0 stage 1 (FEAT_RME) - * + Realm EL1 & 0 stage 2 (FEAT_RME) - * + Realm EL2 (FEAT_RME) - * + EL3 - * If EL3 is 32-bit: - * + NonSecure PL1 & 0 stage 1 - * + NonSecure PL1 & 0 stage 2 - * + NonSecure PL2 - * + Secure PL1 & 0 - * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.) - * - * For QEMU, an mmu_idx is not quite the same as a translation regime because: - * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes, - * because they may differ in access permissions even if the VA->PA map is - * the same - * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2 - * translation, which means that we have one mmu_idx that deals with two - * concatenated translation regimes [this sort of combined s1+2 TLB is - * architecturally permitted] - * 3. we don't need to allocate an mmu_idx to translations that we won't be - * handling via the TLB. The only way to do a stage 1 translation without - * the immediate stage 2 translation is via the ATS or AT system insns, - * which can be slow-pathed and always do a page table walk. 
- * The only use of stage 2 translations is either as part of an s1+2 - * lookup or when loading the descriptors during a stage 1 page table walk, - * and in both those cases we don't use the TLB. - * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3" - * translation regimes, because they map reasonably well to each other - * and they can't both be active at the same time. - * 5. we want to be able to use the TLB for accesses done as part of a - * stage1 page table walk, rather than having to walk the stage2 page - * table over and over. - * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access - * Never (PAN) bit within PSTATE. - * 7. we fold together most secure and non-secure regimes for A-profile, - * because there are no banked system registers for aarch64, so the - * process of switching between secure and non-secure is - * already heavyweight. - * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure, - * because both are in use simultaneously for Secure EL2. - * - * This gives us the following list of cases: - * - * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2) - * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2) - * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN) - * EL0 EL2&0 - * EL2 EL2&0 - * EL2 EL2&0 +PAN - * EL2 (aka NS PL2) - * EL3 (aka AArch32 S PL1 PL1&0) - * AArch32 S PL0 PL1&0 (we call this EL30_0) - * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN) - * Stage2 Secure - * Stage2 NonSecure - * plus one TLB per Physical address space: S, NS, Realm, Root - * - * for a total of 16 different mmu_idx. - * - * R profile CPUs have an MPU, but can use the same set of MMU indexes - * as A profile. They only need to distinguish EL0 and EL1 (and - * EL2 for cores like the Cortex-R52). - * - * M profile CPUs are rather different as they do not have a true MMU. 
- * They have the following different MMU indexes: - * User - * Privileged - * User, execution priority negative (ie the MPU HFNMIENA bit may apply) - * Privileged, execution priority negative (ditto) - * If the CPU supports the v8M Security Extension then there are also: - * Secure User - * Secure Privileged - * Secure User, execution priority negative - * Secure Privileged, execution priority negative - * - * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code - * are not quite the same -- different CPU types (most notably M profile - * vs A/R profile) would like to use MMU indexes with different semantics, - * but since we don't ever need to use all of those in a single CPU we - * can avoid having to set NB_MMU_MODES to "total number of A profile MMU - * modes + total number of M profile MMU modes". The lower bits of - * ARMMMUIdx are the core TLB mmu index, and the higher bits are always - * the same for any particular CPU. - * Variables of type ARMMUIdx are always full values, and the core - * index values are in variables of type 'int'. - * - * Our enumeration includes at the end some entries which are not "true" - * mmu_idx values in that they don't have corresponding TLBs and are only - * valid for doing slow path page table walks. - * - * The constant names here are patterned after the general style of the names - * of the AT/ATS operations. - * The values used are carefully arranged to make mmu_idx => EL lookup easy. - * For M profile we arrange them to have a bit for priv, a bit for negpri - * and a bit for secure. 
- */ -#define ARM_MMU_IDX_A 0x10 /* A profile */ -#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */ -#define ARM_MMU_IDX_M 0x40 /* M profile */ - -/* Meanings of the bits for M profile mmu idx values */ -#define ARM_MMU_IDX_M_PRIV 0x1 -#define ARM_MMU_IDX_M_NEGPRI 0x2 -#define ARM_MMU_IDX_M_S 0x4 /* Secure */ - -#define ARM_MMU_IDX_TYPE_MASK \ - (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB) -#define ARM_MMU_IDX_COREIDX_MASK 0xf - -typedef enum ARMMMUIdx { - /* - * A-profile. - */ - ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A, - ARMMMUIdx_E20_0 = 1 | ARM_MMU_IDX_A, - ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A, - ARMMMUIdx_E20_2 = 3 | ARM_MMU_IDX_A, - ARMMMUIdx_E10_1_PAN = 4 | ARM_MMU_IDX_A, - ARMMMUIdx_E20_2_PAN = 5 | ARM_MMU_IDX_A, - ARMMMUIdx_E2 = 6 | ARM_MMU_IDX_A, - ARMMMUIdx_E3 = 7 | ARM_MMU_IDX_A, - ARMMMUIdx_E30_0 = 8 | ARM_MMU_IDX_A, - ARMMMUIdx_E30_3_PAN = 9 | ARM_MMU_IDX_A, - - /* - * Used for second stage of an S12 page table walk, or for descriptor - * loads during first stage of an S1 page table walk. Note that both - * are in use simultaneously for SecureEL2: the security state for - * the S2 ptw is selected by the NS bit from the S1 ptw. - */ - ARMMMUIdx_Stage2_S = 10 | ARM_MMU_IDX_A, - ARMMMUIdx_Stage2 = 11 | ARM_MMU_IDX_A, - - /* TLBs with 1-1 mapping to the physical address spaces. */ - ARMMMUIdx_Phys_S = 12 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_NS = 13 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_Root = 14 | ARM_MMU_IDX_A, - ARMMMUIdx_Phys_Realm = 15 | ARM_MMU_IDX_A, - - /* - * These are not allocated TLBs and are used only for AT system - * instructions or for the first stage of an S12 page table walk. - */ - ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB, - ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB, - ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB, - - /* - * M-profile. 
- */ - ARMMMUIdx_MUser = ARM_MMU_IDX_M, - ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV, - ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI, - ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI, - ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S, - ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S, - ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S, - ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S, -} ARMMMUIdx; - -/* - * Bit macros for the core-mmu-index values for each index, - * for use when calling tlb_flush_by_mmuidx() and friends. - */ -#define TO_CORE_BIT(NAME) \ - ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK) - -typedef enum ARMMMUIdxBit { - TO_CORE_BIT(E10_0), - TO_CORE_BIT(E20_0), - TO_CORE_BIT(E10_1), - TO_CORE_BIT(E10_1_PAN), - TO_CORE_BIT(E2), - TO_CORE_BIT(E20_2), - TO_CORE_BIT(E20_2_PAN), - TO_CORE_BIT(E3), - TO_CORE_BIT(E30_0), - TO_CORE_BIT(E30_3_PAN), - TO_CORE_BIT(Stage2), - TO_CORE_BIT(Stage2_S), - - TO_CORE_BIT(MUser), - TO_CORE_BIT(MPriv), - TO_CORE_BIT(MUserNegPri), - TO_CORE_BIT(MPrivNegPri), - TO_CORE_BIT(MSUser), - TO_CORE_BIT(MSPriv), - TO_CORE_BIT(MSUserNegPri), - TO_CORE_BIT(MSPrivNegPri), -} ARMMMUIdxBit; - -#undef TO_CORE_BIT - -#define MMU_USER_IDX 0 - /* Indexes used when registering address spaces with cpu_address_space_init */ typedef enum ARMASIdx { ARMASIdx_NS = 0, @@ -3008,13 +2420,6 @@ FIELD(TBFLAG_AM32, THUMB, 23, 1) /* Not cached. */ */ FIELD(TBFLAG_A32, VECLEN, 0, 3) /* Not cached. */ FIELD(TBFLAG_A32, VECSTRIDE, 3, 2) /* Not cached. */ -/* - * We store the bottom two bits of the CPAR as TB flags and handle - * checks on the other bits at runtime. This shares the same bits as - * VECSTRIDE, which is OK as no XScale CPU has VFP. - * Not cached, because VECLEN+VECSTRIDE are not cached. - */ -FIELD(TBFLAG_A32, XSCALE_CPAR, 5, 2) FIELD(TBFLAG_A32, VFPEN, 7, 1) /* Partially cached, minus FPEXC. 
*/ FIELD(TBFLAG_A32, SCTLR__B, 8, 1) /* Cannot overlap with SCTLR_B */ FIELD(TBFLAG_A32, HSTR_ACTIVE, 9, 1) @@ -3076,13 +2481,15 @@ FIELD(TBFLAG_A64, ATA0, 31, 1) FIELD(TBFLAG_A64, NV, 32, 1) FIELD(TBFLAG_A64, NV1, 33, 1) FIELD(TBFLAG_A64, NV2, 34, 1) -/* Set if FEAT_NV2 RAM accesses use the EL2&0 translation regime */ -FIELD(TBFLAG_A64, NV2_MEM_E20, 35, 1) +FIELD(TBFLAG_A64, E2H, 35, 1) /* Set if FEAT_NV2 RAM accesses are big-endian */ FIELD(TBFLAG_A64, NV2_MEM_BE, 36, 1) FIELD(TBFLAG_A64, AH, 37, 1) /* FPCR.AH */ FIELD(TBFLAG_A64, NEP, 38, 1) /* FPCR.NEP */ FIELD(TBFLAG_A64, ZT0EXC_EL, 39, 2) +FIELD(TBFLAG_A64, GCS_EN, 41, 1) +FIELD(TBFLAG_A64, GCS_RVCEN, 42, 1) +FIELD(TBFLAG_A64, GCSSTR_EL, 43, 2) /* * Helpers for using the above. Note that only the A64 accessors use @@ -3235,7 +2642,6 @@ extern const uint64_t pred_esz_masks[5]; */ #define PAGE_BTI PAGE_TARGET_1 #define PAGE_MTE PAGE_TARGET_2 -#define PAGE_TARGET_STICKY PAGE_MTE /* We associate one allocation tag per 16 bytes, the minimum. 
*/ #define LOG2_TAG_GRANULE 4 diff --git a/target/arm/debug_helper.c b/target/arm/debug_helper.c index 69fb1d0d9ff8d..579516e154142 100644 --- a/target/arm/debug_helper.c +++ b/target/arm/debug_helper.c @@ -940,6 +940,13 @@ static void dbgclaimclr_write(CPUARMState *env, const ARMCPRegInfo *ri, env->cp15.dbgclaim &= ~(value & 0xFF); } +static CPAccessResult access_bogus(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + /* Always UNDEF, as if this cpreg didn't exist */ + return CP_ACCESS_UNDEFINED; +} + static const ARMCPRegInfo debug_cp_reginfo[] = { /* * DBGDRAR, DBGDSAR: always RAZ since we don't implement memory mapped @@ -988,11 +995,42 @@ static const ARMCPRegInfo debug_cp_reginfo[] = { .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_RW, .accessfn = access_tdcc, .type = ARM_CP_CONST, .resetvalue = 0 }, - /* DBGDTRTX_EL0/DBGDTRRX_EL0 depend on direction */ - { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_BOTH, .cp = 14, + /* Architecturally DBGDTRTX is named DBGDTRRX when used for reads */ + { .name = "DBGDTRTX_EL0", .state = ARM_CP_STATE_AA64, .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0, .access = PL0_RW, .accessfn = access_tdcc, .type = ARM_CP_CONST, .resetvalue = 0 }, + { .name = "DBGDTRTX", .state = ARM_CP_STATE_AA32, .cp = 14, + .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, + .access = PL0_RW, .accessfn = access_tdcc, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* This is AArch64-only and is a combination of DBGDTRTX and DBGDTRRX */ + { .name = "DBGDTR_EL0", .state = ARM_CP_STATE_AA64, + .opc0 = 2, .opc1 = 3, .crn = 0, .crm = 4, .opc2 = 0, + .access = PL0_RW, .accessfn = access_tdcc, + .type = ARM_CP_CONST, .resetvalue = 0 }, + /* + * This is not a real AArch32 register. We used to incorrectly expose + * this due to a QEMU bug; to avoid breaking migration compatibility we + * need to continue to provide it so that we don't fail the inbound + * migration when it tells us about a sysreg that we don't have. 
+ * We set an always-fails .accessfn, which means that the guest doesn't + * actually see this register (it will always UNDEF, identically to if + * there were no cpreg definition for it other than that we won't print + * a LOG_UNIMP message about it), and we set the ARM_CP_NO_GDB flag so the + * gdbstub won't see it either. + * (We can't just set .access = 0, because add_cpreg_to_hashtable() + * helpfully ignores cpregs which aren't accessible to the highest + * implemented EL.) + * + * TODO: implement a system for being able to describe "this register + * can be ignored if it appears in the inbound stream"; then we can + * remove this temporary hack. + */ + { .name = "BOGUS_DBGDTR_EL0", .state = ARM_CP_STATE_AA32, + .cp = 14, .opc1 = 3, .crn = 0, .crm = 5, .opc2 = 0, + .access = PL0_RW, .accessfn = access_bogus, + .type = ARM_CP_CONST | ARM_CP_NO_GDB, .resetvalue = 0 }, /* * OSECCR_EL1 provides a mechanism for an operating system * to access the contents of EDECCR. EDECCR is not implemented though, diff --git a/target/arm/el2-stubs.c b/target/arm/el2-stubs.c new file mode 100644 index 0000000000000..972023c337fd5 --- /dev/null +++ b/target/arm/el2-stubs.c @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +/* QEMU ARM CPU - user-mode emulation stubs for EL2 interrupts + * + * These should not really be needed, but CP registers for EL2 + * are not elided by user-mode emulation and they call these + * functions. Leave them as stubs until it's cleaned up. 
+ */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internals.h" + +void arm_cpu_update_virq(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void arm_cpu_update_vfiq(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void arm_cpu_update_vinmi(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void arm_cpu_update_vfnmi(ARMCPU *cpu) +{ + g_assert_not_reached(); +} + +void arm_cpu_update_vserr(ARMCPU *cpu) +{ + g_assert_not_reached(); +} diff --git a/target/arm/gdbstub.c b/target/arm/gdbstub.c index ce4497ad7c3e4..8d2229f5192db 100644 --- a/target/arm/gdbstub.c +++ b/target/arm/gdbstub.c @@ -247,10 +247,20 @@ static int arm_gdb_get_sysreg(CPUState *cs, GByteArray *buf, int reg) key = cpu->dyn_sysreg_feature.data.cpregs.keys[reg]; ri = get_arm_cp_reginfo(cpu->cp_regs, key); if (ri) { - if (cpreg_field_is_64bit(ri)) { + switch (cpreg_field_type(ri)) { + case MO_64: + if (ri->vhe_redir_to_el2 && + (arm_hcr_el2_eff(env) & HCR_E2H) && + arm_current_el(env) == 2) { + ri = get_arm_cp_reginfo(cpu->cp_regs, ri->vhe_redir_to_el2); + } else if (ri->vhe_redir_to_el01) { + ri = get_arm_cp_reginfo(cpu->cp_regs, ri->vhe_redir_to_el01); + } return gdb_get_reg64(buf, (uint64_t)read_raw_cp_reg(env, ri)); - } else { + case MO_32: return gdb_get_reg32(buf, (uint32_t)read_raw_cp_reg(env, ri)); + default: + g_assert_not_reached(); } } return 0; @@ -527,7 +537,8 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) * registers so we don't need to include both. 
*/ #ifdef TARGET_AARCH64 - if (isar_feature_aa64_sve(&cpu->isar)) { + if (isar_feature_aa64_sve(&cpu->isar) || + isar_feature_aa64_sme(&cpu->isar)) { GDBFeature *feature = arm_gen_dynamic_svereg_feature(cs, cs->gdb_num_regs); gdb_register_coprocessor(cs, aarch64_gdb_get_sve_reg, aarch64_gdb_set_sve_reg, feature, 0); @@ -537,6 +548,13 @@ void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu) gdb_find_static_feature("aarch64-fpu.xml"), 0); } + + if (isar_feature_aa64_sme(&cpu->isar)) { + GDBFeature *sme_feature = + arm_gen_dynamic_smereg_feature(cs, cs->gdb_num_regs); + gdb_register_coprocessor(cs, aarch64_gdb_get_sme_reg, + aarch64_gdb_set_sme_reg, sme_feature, 0); + } /* * Note that we report pauth information via the feature name * org.gnu.gdb.aarch64.pauth_v2, not org.gnu.gdb.aarch64.pauth. diff --git a/target/arm/gdbstub64.c b/target/arm/gdbstub64.c index 64ee9b3b56791..65d6bbe65fb9b 100644 --- a/target/arm/gdbstub64.c +++ b/target/arm/gdbstub64.c @@ -47,6 +47,7 @@ int aarch64_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) case 32: return gdb_get_reg64(mem_buf, env->pc); case 33: + /* pstate is now a 64-bit value; can we simply adjust the xml? */ return gdb_get_reg32(mem_buf, pstate_read(env)); } /* Unknown register. */ @@ -75,6 +76,7 @@ int aarch64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) return 8; case 33: /* CPSR */ + /* pstate is now a 64-bit value; can we simply adjust the xml? */ pstate_write(env, tmp); return 4; } @@ -115,8 +117,22 @@ int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg) /* 128 bit FP register */ { uint64_t *q = aa64_vfp_qreg(env, reg); - q[0] = ldq_le_p(buf); - q[1] = ldq_le_p(buf + 8); + + /* + * On the wire these are target-endian 128 bit values. + * In the CPU state these are host-order uint64_t values + * with the least-significant one first. This means they're + * the other way around for target_big_endian() (which is + * only true for us for aarch64_be-linux-user). 
+ */ + if (target_big_endian()) { + q[1] = ldq_p(buf); + q[0] = ldq_p(buf + 8); + } else{ + q[0] = ldq_p(buf); + q[1] = ldq_p(buf + 8); + } + return 16; } case 32: @@ -192,10 +208,17 @@ int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) case 0 ... 31: { int vq, len = 0; - uint64_t *p = (uint64_t *) buf; for (vq = 0; vq < cpu->sve_max_vq; vq++) { - env->vfp.zregs[reg].d[vq * 2 + 1] = *p++; - env->vfp.zregs[reg].d[vq * 2] = *p++; + if (target_big_endian()) { + env->vfp.zregs[reg].d[vq * 2 + 1] = ldq_p(buf); + buf += 8; + env->vfp.zregs[reg].d[vq * 2] = ldq_p(buf); + } else{ + env->vfp.zregs[reg].d[vq * 2] = ldq_p(buf); + buf += 8; + env->vfp.zregs[reg].d[vq * 2 + 1] = ldq_p(buf); + } + buf += 8; len += 16; } return len; @@ -210,9 +233,9 @@ int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) { int preg = reg - 34; int vq, len = 0; - uint64_t *p = (uint64_t *) buf; for (vq = 0; vq < cpu->sve_max_vq; vq = vq + 4) { - env->vfp.pregs[preg].p[vq / 4] = *p++; + env->vfp.pregs[preg].p[vq / 4] = ldq_p(buf); + buf += 8; len += 8; } return len; @@ -228,6 +251,90 @@ int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg) return 0; } +int aarch64_gdb_get_sme_reg(CPUState *cs, GByteArray *buf, int reg) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + switch (reg) { + case 0: /* svg register */ + { + int vq = 0; + if (FIELD_EX64(env->svcr, SVCR, SM)) { + vq = sve_vqm1_for_el_sm(env, arm_current_el(env), + FIELD_EX64(env->svcr, SVCR, SM)) + 1; + } + /* svg = vector granules (2 * vector quardwords) in streaming mode */ + return gdb_get_reg64(buf, vq * 2); + } + case 1: /* svcr register */ + return gdb_get_reg64(buf, env->svcr); + case 2: /* za register */ + { + int len = 0; + int vq = cpu->sme_max_vq; + int svl = vq * 16; + for (int i = 0; i < svl; i++) { + for (int q = 0; q < vq; q++) { + len += gdb_get_reg128(buf, + env->za_state.za[i].d[q * 2 + 1], + env->za_state.za[i].d[q * 2]); + } + } + return len; + } + default: + /* 
gdbstub asked for something out of range */ + qemu_log_mask(LOG_UNIMP, "%s: out of range register %d", __func__, reg); + break; + } + + return 0; +} + +int aarch64_gdb_set_sme_reg(CPUState *cs, uint8_t *buf, int reg) +{ + ARMCPU *cpu = ARM_CPU(cs); + CPUARMState *env = &cpu->env; + + switch (reg) { + case 0: /* svg register */ + /* cannot set svg via gdbstub */ + return 8; + case 1: /* svcr register */ + aarch64_set_svcr(env, ldq_le_p(buf), + R_SVCR_SM_MASK | R_SVCR_ZA_MASK); + return 8; + case 2: /* za register */ + { + int len = 0; + int vq = cpu->sme_max_vq; + int svl = vq * 16; + for (int i = 0; i < svl; i++) { + for (int q = 0; q < vq; q++) { + if (target_big_endian()) { + env->za_state.za[i].d[q * 2 + 1] = ldq_p(buf); + buf += 8; + env->za_state.za[i].d[q * 2] = ldq_p(buf); + } else{ + env->za_state.za[i].d[q * 2] = ldq_p(buf); + buf += 8; + env->za_state.za[i].d[q * 2 + 1] = ldq_p(buf); + } + buf += 8; + len += 16; + } + } + return len; + } + default: + /* gdbstub asked for something out of range */ + break; + } + + return 0; +} + int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg) { ARMCPU *cpu = ARM_CPU(cs); @@ -392,6 +499,41 @@ GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cs, int base_reg) return &cpu->dyn_svereg_feature.desc; } +GDBFeature *arm_gen_dynamic_smereg_feature(CPUState *cs, int base_reg) +{ + ARMCPU *cpu = ARM_CPU(cs); + int vq = cpu->sme_max_vq; + int svl = vq * 16; + GDBFeatureBuilder builder; + int reg = 0; + + gdb_feature_builder_init(&builder, &cpu->dyn_smereg_feature.desc, + "org.gnu.gdb.aarch64.sme", "sme-registers.xml", + base_reg); + + + /* Create the sme_bv vector type. */ + gdb_feature_builder_append_tag( + &builder, "", + svl); + + /* Create the sme_bvv vector type. */ + gdb_feature_builder_append_tag( + &builder, "", + svl); + + /* Define the svg, svcr, and za registers. 
*/ + + gdb_feature_builder_append_reg(&builder, "svg", 64, reg++, "int", NULL); + gdb_feature_builder_append_reg(&builder, "svcr", 64, reg++, "int", NULL); + gdb_feature_builder_append_reg(&builder, "za", svl * svl * 8, reg++, + "sme_bvv", NULL); + + gdb_feature_builder_end(&builder); + + return &cpu->dyn_smereg_feature.desc; +} + #ifdef CONFIG_USER_ONLY int aarch64_gdb_get_tag_ctl_reg(CPUState *cs, GByteArray *buf, int reg) { diff --git a/target/arm/helper.c b/target/arm/helper.c index 0c1299ff841f3..167f2909b3fe0 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -40,26 +40,57 @@ static void switch_mode(CPUARMState *env, int mode); +int compare_u64(const void *a, const void *b) +{ + if (*(uint64_t *)a > *(uint64_t *)b) { + return 1; + } + if (*(uint64_t *)a < *(uint64_t *)b) { + return -1; + } + return 0; +} + +/* + * Macros which are lvalues for the field in CPUARMState for the + * ARMCPRegInfo *ri. + */ +#define CPREG_FIELD32(env, ri) \ + (*(uint32_t *)((char *)(env) + (ri)->fieldoffset)) +#define CPREG_FIELD64(env, ri) \ + (*(uint64_t *)((char *)(env) + (ri)->fieldoffset)) + uint64_t raw_read(CPUARMState *env, const ARMCPRegInfo *ri) { assert(ri->fieldoffset); - if (cpreg_field_is_64bit(ri)) { + switch (cpreg_field_type(ri)) { + case MO_64: return CPREG_FIELD64(env, ri); - } else { + case MO_32: return CPREG_FIELD32(env, ri); + default: + g_assert_not_reached(); } } void raw_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { assert(ri->fieldoffset); - if (cpreg_field_is_64bit(ri)) { + switch (cpreg_field_type(ri)) { + case MO_64: CPREG_FIELD64(env, ri) = value; - } else { + break; + case MO_32: CPREG_FIELD32(env, ri) = value; + break; + default: + g_assert_not_reached(); } } +#undef CPREG_FIELD32 +#undef CPREG_FIELD64 + static void *raw_ptr(CPUARMState *env, const ARMCPRegInfo *ri) { return (char *)env + ri->fieldoffset; @@ -198,11 +229,11 @@ bool write_list_to_cpustate(ARMCPU *cpu) return ok; } -static void add_cpreg_to_list(gpointer 
key, gpointer opaque) +static void add_cpreg_to_list(gpointer key, gpointer value, gpointer opaque) { ARMCPU *cpu = opaque; uint32_t regidx = (uintptr_t)key; - const ARMCPRegInfo *ri = get_arm_cp_reginfo(cpu->cp_regs, regidx); + const ARMCPRegInfo *ri = value; if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { cpu->cpreg_indexes[cpu->cpreg_array_len] = cpreg_to_kvm_id(regidx); @@ -211,61 +242,49 @@ static void add_cpreg_to_list(gpointer key, gpointer opaque) } } -static void count_cpreg(gpointer key, gpointer opaque) +static void count_cpreg(gpointer key, gpointer value, gpointer opaque) { ARMCPU *cpu = opaque; - const ARMCPRegInfo *ri; - - ri = g_hash_table_lookup(cpu->cp_regs, key); + const ARMCPRegInfo *ri = value; if (!(ri->type & (ARM_CP_NO_RAW | ARM_CP_ALIAS))) { cpu->cpreg_array_len++; } } -static gint cpreg_key_compare(gconstpointer a, gconstpointer b, gpointer d) -{ - uint64_t aidx = cpreg_to_kvm_id((uintptr_t)a); - uint64_t bidx = cpreg_to_kvm_id((uintptr_t)b); - - if (aidx > bidx) { - return 1; - } - if (aidx < bidx) { - return -1; - } - return 0; -} - void init_cpreg_list(ARMCPU *cpu) { /* * Initialise the cpreg_tuples[] array based on the cp_regs hash. * Note that we require cpreg_tuples[] to be sorted by key ID. 
*/ - GList *keys; int arraylen; - keys = g_hash_table_get_keys(cpu->cp_regs); - keys = g_list_sort_with_data(keys, cpreg_key_compare, NULL); - cpu->cpreg_array_len = 0; - - g_list_foreach(keys, count_cpreg, cpu); + g_hash_table_foreach(cpu->cp_regs, count_cpreg, cpu); arraylen = cpu->cpreg_array_len; - cpu->cpreg_indexes = g_new(uint64_t, arraylen); - cpu->cpreg_values = g_new(uint64_t, arraylen); - cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); - cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); - cpu->cpreg_vmstate_array_len = cpu->cpreg_array_len; + if (arraylen) { + cpu->cpreg_indexes = g_new(uint64_t, arraylen); + cpu->cpreg_values = g_new(uint64_t, arraylen); + cpu->cpreg_vmstate_indexes = g_new(uint64_t, arraylen); + cpu->cpreg_vmstate_values = g_new(uint64_t, arraylen); + } else { + cpu->cpreg_indexes = NULL; + cpu->cpreg_values = NULL; + cpu->cpreg_vmstate_indexes = NULL; + cpu->cpreg_vmstate_values = NULL; + } + cpu->cpreg_vmstate_array_len = arraylen; cpu->cpreg_array_len = 0; - g_list_foreach(keys, add_cpreg_to_list, cpu); + g_hash_table_foreach(cpu->cp_regs, add_cpreg_to_list, cpu); assert(cpu->cpreg_array_len == arraylen); - g_list_free(keys); + if (arraylen) { + qsort(cpu->cpreg_indexes, arraylen, sizeof(uint64_t), compare_u64); + } } bool arm_pan_enabled(CPUARMState *env) @@ -401,7 +420,9 @@ int alle1_tlbmask(CPUARMState *env) */ return (ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_1_GCS | ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_E10_0_GCS | ARMMMUIdxBit_Stage2 | ARMMMUIdxBit_Stage2_S); } @@ -435,6 +456,8 @@ static const ARMCPRegInfo cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_CONTEXTIDR_EL1, .nv2_redirect_offset = 0x108 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 1), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 1), .secure = ARM_CP_SECSTATE_NS, .fieldoffset = offsetof(CPUARMState, cp15.contextidr_el[1]), .resetvalue = 0, .writefn = contextidr_write, 
.raw_writefn = raw_write, }, @@ -652,9 +675,11 @@ static const ARMCPRegInfo v6_cp_reginfo[] = { */ { .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, }, - { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3, + { .name = "CPACR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2, .accessfn = cpacr_access, .fgt = FGT_CPACR_EL1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 1, 2), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 2), .nv2_redirect_offset = 0x100 | NV2_REDIR_NV1, .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.cpacr_el1), .resetfn = cpacr_reset, .writefn = cpacr_write, .readfn = cpacr_read }, @@ -741,6 +766,22 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) if (cpu_isar_feature(aa64_ecv, cpu)) { valid_mask |= SCR_ECVEN; } + if (cpu_isar_feature(aa64_gcs, cpu)) { + valid_mask |= SCR_GCSEN; + } + if (cpu_isar_feature(aa64_tcr2, cpu)) { + valid_mask |= SCR_TCR2EN; + } + if (cpu_isar_feature(aa64_sctlr2, cpu)) { + valid_mask |= SCR_SCTLR2EN; + } + if (cpu_isar_feature(aa64_s1pie, cpu) || + cpu_isar_feature(aa64_s2pie, cpu)) { + valid_mask |= SCR_PIEN; + } + if (cpu_isar_feature(aa64_mec, cpu)) { + valid_mask |= SCR_MECEN; + } } else { valid_mask &= ~(SCR_RW | SCR_ST); if (cpu_isar_feature(aa32_ras, cpu)) { @@ -775,12 +816,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) */ if (changed & (SCR_NS | SCR_NSE)) { tlb_flush_by_mmuidx(env_cpu(env), (ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_E10_0_GCS | ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_0_GCS | ARMMMUIdxBit_E10_1 | - ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E10_1_PAN | + ARMMMUIdxBit_E10_1_GCS | + ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E2)); + ARMMMUIdxBit_E20_2_GCS | + ARMMMUIdxBit_E2 | + ARMMMUIdxBit_E2_GCS)); } } @@ -833,40 +879,40 @@ static uint64_t isr_read(CPUARMState *env, const 
ARMCPRegInfo *ri) uint64_t ret = 0; if (hcr_el2 & HCR_IMO) { - if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_VIRQ)) { ret |= CPSR_I; } - if (cs->interrupt_request & CPU_INTERRUPT_VINMI) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_VINMI)) { ret |= ISR_IS; ret |= CPSR_I; } } else { - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) { ret |= CPSR_I; } - if (cs->interrupt_request & CPU_INTERRUPT_NMI) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) { ret |= ISR_IS; ret |= CPSR_I; } } if (hcr_el2 & HCR_FMO) { - if (cs->interrupt_request & CPU_INTERRUPT_VFIQ) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFIQ)) { ret |= CPSR_F; } - if (cs->interrupt_request & CPU_INTERRUPT_VFNMI) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_VFNMI)) { ret |= ISR_FS; ret |= CPSR_F; } } else { - if (cs->interrupt_request & CPU_INTERRUPT_FIQ) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_FIQ)) { ret |= CPSR_F; } } if (hcr_el2 & HCR_AMO) { - if (cs->interrupt_request & CPU_INTERRUPT_VSERR) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_VSERR)) { ret |= CPSR_A; } } @@ -931,12 +977,16 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AFSR0_EL1, .nv2_redirect_offset = 0x128 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 0), .type = ARM_CP_CONST, .resetvalue = 0 }, { .name = "AFSR1_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 1, .opc2 = 1, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AFSR1_EL1, .nv2_redirect_offset = 0x130 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 1, 1), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 1, 1), .type = ARM_CP_CONST, .resetvalue = 0 }, /* * MAIR can just read-as-written because we don't implement caches @@ -947,6 +997,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = { 
.access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_MAIR_EL1, .nv2_redirect_offset = 0x140 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 0), .fieldoffset = offsetof(CPUARMState, cp15.mair_el[1]), .resetvalue = 0 }, { .name = "MAIR_EL3", .state = ARM_CP_STATE_AA64, @@ -1056,7 +1108,7 @@ static const ARMCPRegInfo v6k_cp_reginfo[] = { .resetvalue = 0 }, }; -static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *opaque) +static void arm_gt_cntfrq_reset(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); @@ -1993,9 +2045,11 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = { .resetfn = arm_gt_cntfrq_reset, }, /* overall control: mostly access permissions */ - { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH, + { .name = "CNTKCTL_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0, .access = PL1_RW, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 14, 1, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 14, 1, 0), .fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl), .resetvalue = 0, }, @@ -2725,7 +2779,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { /* If the ASID changes (with a 64-bit write), we must flush the TLB. 
*/ - if (cpreg_field_is_64bit(ri) && + if (cpreg_field_type(ri) == MO_64 && extract64(raw_read(env, ri) ^ value, 48, 16) != 0) { ARMCPU *cpu = env_archcpu(env); tlb_flush(CPU(cpu)); @@ -2746,7 +2800,9 @@ static void vmsa_tcr_ttbr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, (arm_hcr_el2_eff(env) & HCR_E2H)) { uint16_t mask = ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; + ARMMMUIdxBit_E20_2_GCS | + ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_0_GCS; tlb_flush_by_mmuidx(env_cpu(env), mask); } raw_write(env, ri, value); @@ -2786,6 +2842,8 @@ static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_FAR_EL1, .nv2_redirect_offset = 0x220 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 6, 0, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 6, 0, 0), .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]), .resetvalue = 0, }, }; @@ -2796,12 +2854,16 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_ESR_EL1, .nv2_redirect_offset = 0x138 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 2, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 2, 0), .fieldoffset = offsetof(CPUARMState, cp15.esr_el[1]), .resetvalue = 0, }, { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 2, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TTBR0_EL1, .nv2_redirect_offset = 0x200 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 0), .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr0_s), offsetof(CPUARMState, cp15.ttbr0_ns) } }, @@ -2810,6 +2872,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TTBR1_EL1, .nv2_redirect_offset = 0x210 | NV2_REDIR_NV1, + 
.vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 1), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 1), .writefn = vmsa_ttbr_write, .resetvalue = 0, .raw_writefn = raw_write, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.ttbr1_s), offsetof(CPUARMState, cp15.ttbr1_ns) } }, @@ -2818,6 +2882,8 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = { .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_TCR_EL1, .nv2_redirect_offset = 0x120 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 2, 0, 2), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 2), .writefn = vmsa_tcr_el12_write, .raw_writefn = raw_write, .resetvalue = 0, @@ -2862,8 +2928,12 @@ static void omap_threadid_write(CPUARMState *env, const ARMCPRegInfo *ri, static void omap_wfi_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { +#ifdef CONFIG_USER_ONLY + g_assert_not_reached(); +#else /* Wait-for-interrupt (deprecated) */ cpu_interrupt(env_cpu(env), CPU_INTERRUPT_HALT); +#endif } static void omap_cachemaint_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2917,39 +2987,6 @@ static const ARMCPRegInfo omap_cp_reginfo[] = { .type = ARM_CP_CONST | ARM_CP_OVERRIDE, .resetvalue = 0 }, }; -static void xscale_cpar_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - env->cp15.c15_cpar = value & 0x3fff; -} - -static const ARMCPRegInfo xscale_cp_reginfo[] = { - { .name = "XSCALE_CPAR", - .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0, .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c15_cpar), .resetvalue = 0, - .writefn = xscale_cpar_write, }, - { .name = "XSCALE_AUXCR", - .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 1, .access = PL1_RW, - .fieldoffset = offsetof(CPUARMState, cp15.c1_xscaleauxcr), - .resetvalue = 0, }, - /* - * XScale specific cache-lockdown: since we have no cache we NOP these - * and hope the guest does not really rely on cache behaviour. 
- */ - { .name = "XSCALE_LOCK_ICACHE_LINE", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0, - .access = PL1_W, .type = ARM_CP_NOP }, - { .name = "XSCALE_UNLOCK_ICACHE", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NOP }, - { .name = "XSCALE_DCACHE_LOCK", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 0, - .access = PL1_RW, .type = ARM_CP_NOP }, - { .name = "XSCALE_UNLOCK_DCACHE", - .cp = 15, .opc1 = 0, .crn = 9, .crm = 2, .opc2 = 1, - .access = PL1_W, .type = ARM_CP_NOP }, -}; - static const ARMCPRegInfo dummy_c15_cp_reginfo[] = { /* * RAZ/WI the whole crn=15 space, when we don't have a more specific @@ -3052,12 +3089,14 @@ static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri) } static const ARMCPRegInfo lpae_cp_reginfo[] = { - /* NOP AMAIR0/1 */ - { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH, + /* AMAIR0 is mapped to AMAIR_EL1[31:0] */ + { .name = "AMAIR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_AMAIR_EL1, .nv2_redirect_offset = 0x148 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 3, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 3, 0), .type = ARM_CP_CONST, .resetvalue = 0 }, /* AMAIR1 is mapped to AMAIR_EL1[63:32] */ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1, @@ -3340,16 +3379,6 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, /* This may enable/disable the MMU, so do a TLB flush. */ tlb_flush(CPU(cpu)); - - if (tcg_enabled() && ri->type & ARM_CP_SUPPRESS_TB_END) { - /* - * Normally we would always end the TB on an SCTLR write; see the - * comment in ARMCPRegInfo sctlr initialization below for why Xscale - * is special. Setting ARM_CP_SUPPRESS_TB_END also stops the rebuild - * of hflags from the translator, so do it here. 
- */ - arm_rebuild_hflags(env); - } } static void mdcr_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3397,15 +3426,71 @@ static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, } } +static CPAccessResult access_nv1_with_nvx(uint64_t hcr_nv) +{ + return hcr_nv == (HCR_NV | HCR_NV1) ? CP_ACCESS_TRAP_EL2 : CP_ACCESS_OK; +} + static CPAccessResult access_nv1(CPUARMState *env, const ARMCPRegInfo *ri, bool isread) { if (arm_current_el(env) == 1) { - uint64_t hcr_nv = arm_hcr_el2_eff(env) & (HCR_NV | HCR_NV1 | HCR_NV2); + return access_nv1_with_nvx(arm_hcr_el2_nvx_eff(env)); + } + return CP_ACCESS_OK; +} - if (hcr_nv == (HCR_NV | HCR_NV1)) { - return CP_ACCESS_TRAP_EL2; +static CPAccessResult access_nv1_or_exlock_el1(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) == 1) { + uint64_t nvx = arm_hcr_el2_nvx_eff(env); + + if (!isread && + (env->pstate & PSTATE_EXLOCK) && + (env->cp15.gcscr_el[1] & GCSCR_EXLOCKEN) && + !(nvx & HCR_NV1)) { + return CP_ACCESS_EXLOCK; } + return access_nv1_with_nvx(nvx); + } + + /* + * At EL2, since VHE redirection is done at translation time, + * el_is_in_host is always false here, so EXLOCK does not apply. + */ + return CP_ACCESS_OK; +} + +static CPAccessResult access_exlock_el2(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + int el = arm_current_el(env); + + if (el == 3) { + return CP_ACCESS_OK; + } + + /* + * Access to the EL2 register from EL1 means NV is set, and + * EXLOCK has priority over an NV1 trap to EL2. 
+ */ + if (!isread && + (env->pstate & PSTATE_EXLOCK) && + (env->cp15.gcscr_el[el] & GCSCR_EXLOCKEN)) { + return CP_ACCESS_EXLOCK; + } + return CP_ACCESS_OK; +} + +static CPAccessResult access_exlock_el3(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + if (!isread && + (env->pstate & PSTATE_EXLOCK) && + (env->cp15.gcscr_el[3] & GCSCR_EXLOCKEN)) { + return CP_ACCESS_EXLOCK; } return CP_ACCESS_OK; } @@ -3581,14 +3666,18 @@ static const ARMCPRegInfo v8_cp_reginfo[] = { { .name = "ELR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 1, - .access = PL1_RW, .accessfn = access_nv1, + .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1, .nv2_redirect_offset = 0x230 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 1), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 1), .fieldoffset = offsetof(CPUARMState, elr_el[1]) }, { .name = "SPSR_EL1", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 0, .opc2 = 0, - .access = PL1_RW, .accessfn = access_nv1, + .access = PL1_RW, .accessfn = access_nv1_or_exlock_el1, .nv2_redirect_offset = 0x160 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 4, 0, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 4, 0, 0), .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_SVC]) }, /* * We rely on the access checks not allowing the guest to write to the @@ -3728,7 +3817,8 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask) value &= valid_mask; /* RW is RAO/WI if EL1 is AArch64 only */ - if (!cpu_isar_feature(aa64_aa32_el1, cpu)) { + if (arm_feature(env, ARM_FEATURE_AARCH64) && + !cpu_isar_feature(aa64_aa32_el1, cpu)) { value |= HCR_RW; } @@ -3873,6 +3963,16 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env) return arm_hcr_el2_eff_secstate(env, arm_security_space_below_el3(env)); } +uint64_t arm_hcr_el2_nvx_eff(CPUARMState *env) +{ + uint64_t hcr = arm_hcr_el2_eff(env); + 
+ if (!(hcr & HCR_NV)) { + return 0; /* CONSTRAINED UNPREDICTABLE wrt NV1 */ + } + return hcr & (HCR_NV2 | HCR_NV1 | HCR_NV); +} + /* * Corresponds to ARM pseudocode function ELIsInHost(). */ @@ -3907,23 +4007,27 @@ static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = env_archcpu(env); uint64_t valid_mask = 0; - /* FEAT_MOPS adds MSCEn and MCE2 */ if (cpu_isar_feature(aa64_mops, cpu)) { valid_mask |= HCRX_MSCEN | HCRX_MCE2; } - - /* FEAT_NMI adds TALLINT, VINMI and VFNMI */ if (cpu_isar_feature(aa64_nmi, cpu)) { valid_mask |= HCRX_TALLINT | HCRX_VINMI | HCRX_VFNMI; } - /* FEAT_CMOW adds CMOW */ if (cpu_isar_feature(aa64_cmow, cpu)) { valid_mask |= HCRX_CMOW; } - /* FEAT_XS adds FGTnXS, FnXS */ if (cpu_isar_feature(aa64_xs, cpu)) { valid_mask |= HCRX_FGTNXS | HCRX_FNXS; } + if (cpu_isar_feature(aa64_tcr2, cpu)) { + valid_mask |= HCRX_TCR2EN; + } + if (cpu_isar_feature(aa64_sctlr2, cpu)) { + valid_mask |= HCRX_SCTLR2EN; + } + if (cpu_isar_feature(aa64_gcs, cpu)) { + valid_mask |= HCRX_GCSEN; + } /* Clear RES0 bits. */ env->cp15.hcrx_el2 = value & valid_mask; @@ -3981,11 +4085,22 @@ uint64_t arm_hcrx_el2_eff(CPUARMState *env) * This may need to be revisited for future bits. */ if (!arm_is_el2_enabled(env)) { + ARMCPU *cpu = env_archcpu(env); uint64_t hcrx = 0; - if (cpu_isar_feature(aa64_mops, env_archcpu(env))) { - /* MSCEn behaves as 1 if EL2 is not enabled */ + + /* Bits which whose effective value is 1 if el2 not enabled. 
*/ + if (cpu_isar_feature(aa64_mops, cpu)) { hcrx |= HCRX_MSCEN; } + if (cpu_isar_feature(aa64_tcr2, cpu)) { + hcrx |= HCRX_TCR2EN; + } + if (cpu_isar_feature(aa64_sctlr2, cpu)) { + hcrx |= HCRX_SCTLR2EN; + } + if (cpu_isar_feature(aa64_gcs, cpu)) { + hcrx |= HCRX_GCSEN; + } return hcrx; } if (arm_feature(env, ARM_FEATURE_EL3) && !(env->cp15.scr_el3 & SCR_HXEN)) { @@ -4043,7 +4158,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "ELR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 1, - .access = PL2_RW, + .access = PL2_RW, .accessfn = access_exlock_el2, .fieldoffset = offsetof(CPUARMState, elr_el[2]) }, { .name = "ESR_EL2", .state = ARM_CP_STATE_BOTH, .type = ARM_CP_NV2_REDIRECT, @@ -4061,7 +4176,7 @@ static const ARMCPRegInfo el2_cp_reginfo[] = { { .name = "SPSR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS | ARM_CP_NV2_REDIRECT, .opc0 = 3, .opc1 = 4, .crn = 4, .crm = 0, .opc2 = 0, - .access = PL2_RW, + .access = PL2_RW, .accessfn = access_exlock_el2, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_HYP]) }, { .name = "VBAR_EL2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 4, .crn = 12, .crm = 0, .opc2 = 0, @@ -4343,7 +4458,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { { .name = "ELR_EL3", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 1, - .access = PL3_RW, + .access = PL3_RW, .accessfn = access_exlock_el3, .fieldoffset = offsetof(CPUARMState, elr_el[3]) }, { .name = "ESR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 5, .crm = 2, .opc2 = 0, @@ -4354,7 +4469,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = { { .name = "SPSR_EL3", .state = ARM_CP_STATE_AA64, .type = ARM_CP_ALIAS, .opc0 = 3, .opc1 = 6, .crn = 4, .crm = 0, .opc2 = 0, - .access = PL3_RW, + .access = PL3_RW, .accessfn = access_exlock_el3, .fieldoffset = offsetof(CPUARMState, banked_spsr[BANK_MON]) }, { .name = 
"VBAR_EL3", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 6, .crn = 12, .crm = 0, .opc2 = 0, @@ -4422,235 +4537,6 @@ static CPAccessResult access_el1nvvct(CPUARMState *env, const ARMCPRegInfo *ri, return e2h_access(env, ri, isread); } -/* Test if system register redirection is to occur in the current state. */ -static bool redirect_for_e2h(CPUARMState *env) -{ - return arm_current_el(env) == 2 && (arm_hcr_el2_eff(env) & HCR_E2H); -} - -static uint64_t el2_e2h_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - CPReadFn *readfn; - - if (redirect_for_e2h(env)) { - /* Switch to the saved EL2 version of the register. */ - ri = ri->opaque; - readfn = ri->readfn; - } else { - readfn = ri->orig_readfn; - } - if (readfn == NULL) { - readfn = raw_read; - } - return readfn(env, ri); -} - -static void el2_e2h_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - CPWriteFn *writefn; - - if (redirect_for_e2h(env)) { - /* Switch to the saved EL2 version of the register. */ - ri = ri->opaque; - writefn = ri->writefn; - } else { - writefn = ri->orig_writefn; - } - if (writefn == NULL) { - writefn = raw_write; - } - writefn(env, ri, value); -} - -static uint64_t el2_e2h_e12_read(CPUARMState *env, const ARMCPRegInfo *ri) -{ - /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ - return ri->orig_readfn(env, ri->opaque); -} - -static void el2_e2h_e12_write(CPUARMState *env, const ARMCPRegInfo *ri, - uint64_t value) -{ - /* Pass the EL1 register accessor its ri, not the EL12 alias ri */ - return ri->orig_writefn(env, ri->opaque, value); -} - -static CPAccessResult el2_e2h_e12_access(CPUARMState *env, - const ARMCPRegInfo *ri, - bool isread) -{ - if (arm_current_el(env) == 1) { - /* - * This must be a FEAT_NV access (will either trap or redirect - * to memory). None of the registers with _EL12 aliases want to - * apply their trap controls for this kind of access, so don't - * call the orig_accessfn or do the "UNDEF when E2H is 0" check. 
- */ - return CP_ACCESS_OK; - } - /* FOO_EL12 aliases only exist when E2H is 1; otherwise they UNDEF */ - if (!(arm_hcr_el2_eff(env) & HCR_E2H)) { - return CP_ACCESS_UNDEFINED; - } - if (ri->orig_accessfn) { - return ri->orig_accessfn(env, ri->opaque, isread); - } - return CP_ACCESS_OK; -} - -static void define_arm_vh_e2h_redirects_aliases(ARMCPU *cpu) -{ - struct E2HAlias { - uint32_t src_key, dst_key, new_key; - const char *src_name, *dst_name, *new_name; - bool (*feature)(const ARMISARegisters *id); - }; - -#define K(op0, op1, crn, crm, op2) \ - ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) - - static const struct E2HAlias aliases[] = { - { K(3, 0, 1, 0, 0), K(3, 4, 1, 0, 0), K(3, 5, 1, 0, 0), - "SCTLR", "SCTLR_EL2", "SCTLR_EL12" }, - { K(3, 0, 1, 0, 2), K(3, 4, 1, 1, 2), K(3, 5, 1, 0, 2), - "CPACR", "CPTR_EL2", "CPACR_EL12" }, - { K(3, 0, 2, 0, 0), K(3, 4, 2, 0, 0), K(3, 5, 2, 0, 0), - "TTBR0_EL1", "TTBR0_EL2", "TTBR0_EL12" }, - { K(3, 0, 2, 0, 1), K(3, 4, 2, 0, 1), K(3, 5, 2, 0, 1), - "TTBR1_EL1", "TTBR1_EL2", "TTBR1_EL12" }, - { K(3, 0, 2, 0, 2), K(3, 4, 2, 0, 2), K(3, 5, 2, 0, 2), - "TCR_EL1", "TCR_EL2", "TCR_EL12" }, - { K(3, 0, 4, 0, 0), K(3, 4, 4, 0, 0), K(3, 5, 4, 0, 0), - "SPSR_EL1", "SPSR_EL2", "SPSR_EL12" }, - { K(3, 0, 4, 0, 1), K(3, 4, 4, 0, 1), K(3, 5, 4, 0, 1), - "ELR_EL1", "ELR_EL2", "ELR_EL12" }, - { K(3, 0, 5, 1, 0), K(3, 4, 5, 1, 0), K(3, 5, 5, 1, 0), - "AFSR0_EL1", "AFSR0_EL2", "AFSR0_EL12" }, - { K(3, 0, 5, 1, 1), K(3, 4, 5, 1, 1), K(3, 5, 5, 1, 1), - "AFSR1_EL1", "AFSR1_EL2", "AFSR1_EL12" }, - { K(3, 0, 5, 2, 0), K(3, 4, 5, 2, 0), K(3, 5, 5, 2, 0), - "ESR_EL1", "ESR_EL2", "ESR_EL12" }, - { K(3, 0, 6, 0, 0), K(3, 4, 6, 0, 0), K(3, 5, 6, 0, 0), - "FAR_EL1", "FAR_EL2", "FAR_EL12" }, - { K(3, 0, 10, 2, 0), K(3, 4, 10, 2, 0), K(3, 5, 10, 2, 0), - "MAIR_EL1", "MAIR_EL2", "MAIR_EL12" }, - { K(3, 0, 10, 3, 0), K(3, 4, 10, 3, 0), K(3, 5, 10, 3, 0), - "AMAIR0", "AMAIR_EL2", "AMAIR_EL12" }, - { K(3, 0, 12, 0, 0), K(3, 4, 12, 0, 
0), K(3, 5, 12, 0, 0), - "VBAR", "VBAR_EL2", "VBAR_EL12" }, - { K(3, 0, 13, 0, 1), K(3, 4, 13, 0, 1), K(3, 5, 13, 0, 1), - "CONTEXTIDR_EL1", "CONTEXTIDR_EL2", "CONTEXTIDR_EL12" }, - { K(3, 0, 14, 1, 0), K(3, 4, 14, 1, 0), K(3, 5, 14, 1, 0), - "CNTKCTL", "CNTHCTL_EL2", "CNTKCTL_EL12" }, - - /* - * Note that redirection of ZCR is mentioned in the description - * of ZCR_EL2, and aliasing in the description of ZCR_EL1, but - * not in the summary table. - */ - { K(3, 0, 1, 2, 0), K(3, 4, 1, 2, 0), K(3, 5, 1, 2, 0), - "ZCR_EL1", "ZCR_EL2", "ZCR_EL12", isar_feature_aa64_sve }, - { K(3, 0, 1, 2, 6), K(3, 4, 1, 2, 6), K(3, 5, 1, 2, 6), - "SMCR_EL1", "SMCR_EL2", "SMCR_EL12", isar_feature_aa64_sme }, - - { K(3, 0, 5, 6, 0), K(3, 4, 5, 6, 0), K(3, 5, 5, 6, 0), - "TFSR_EL1", "TFSR_EL2", "TFSR_EL12", isar_feature_aa64_mte }, - - { K(3, 0, 13, 0, 7), K(3, 4, 13, 0, 7), K(3, 5, 13, 0, 7), - "SCXTNUM_EL1", "SCXTNUM_EL2", "SCXTNUM_EL12", - isar_feature_aa64_scxtnum }, - - /* TODO: ARMv8.2-SPE -- PMSCR_EL2 */ - /* TODO: ARMv8.4-Trace -- TRFCR_EL2 */ - }; -#undef K - - size_t i; - - for (i = 0; i < ARRAY_SIZE(aliases); i++) { - const struct E2HAlias *a = &aliases[i]; - ARMCPRegInfo *src_reg, *dst_reg, *new_reg; - bool ok; - - if (a->feature && !a->feature(&cpu->isar)) { - continue; - } - - src_reg = g_hash_table_lookup(cpu->cp_regs, - (gpointer)(uintptr_t)a->src_key); - dst_reg = g_hash_table_lookup(cpu->cp_regs, - (gpointer)(uintptr_t)a->dst_key); - g_assert(src_reg != NULL); - g_assert(dst_reg != NULL); - - /* Cross-compare names to detect typos in the keys. */ - g_assert(strcmp(src_reg->name, a->src_name) == 0); - g_assert(strcmp(dst_reg->name, a->dst_name) == 0); - - /* None of the core system registers use opaque; we will. */ - g_assert(src_reg->opaque == NULL); - - /* Create alias before redirection so we dup the right data. 
*/ - new_reg = g_memdup(src_reg, sizeof(ARMCPRegInfo)); - - new_reg->name = a->new_name; - new_reg->type |= ARM_CP_ALIAS; - /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ - new_reg->access &= PL2_RW | PL3_RW; - /* The new_reg op fields are as per new_key, not the target reg */ - new_reg->crn = (a->new_key & CP_REG_ARM64_SYSREG_CRN_MASK) - >> CP_REG_ARM64_SYSREG_CRN_SHIFT; - new_reg->crm = (a->new_key & CP_REG_ARM64_SYSREG_CRM_MASK) - >> CP_REG_ARM64_SYSREG_CRM_SHIFT; - new_reg->opc0 = (a->new_key & CP_REG_ARM64_SYSREG_OP0_MASK) - >> CP_REG_ARM64_SYSREG_OP0_SHIFT; - new_reg->opc1 = (a->new_key & CP_REG_ARM64_SYSREG_OP1_MASK) - >> CP_REG_ARM64_SYSREG_OP1_SHIFT; - new_reg->opc2 = (a->new_key & CP_REG_ARM64_SYSREG_OP2_MASK) - >> CP_REG_ARM64_SYSREG_OP2_SHIFT; - new_reg->opaque = src_reg; - new_reg->orig_readfn = src_reg->readfn ?: raw_read; - new_reg->orig_writefn = src_reg->writefn ?: raw_write; - new_reg->orig_accessfn = src_reg->accessfn; - if (!new_reg->raw_readfn) { - new_reg->raw_readfn = raw_read; - } - if (!new_reg->raw_writefn) { - new_reg->raw_writefn = raw_write; - } - new_reg->readfn = el2_e2h_e12_read; - new_reg->writefn = el2_e2h_e12_write; - new_reg->accessfn = el2_e2h_e12_access; - - /* - * If the _EL1 register is redirected to memory by FEAT_NV2, - * then it shares the offset with the _EL12 register, - * and which one is redirected depends on HCR_EL2.NV1. 
- */ - if (new_reg->nv2_redirect_offset) { - assert(new_reg->nv2_redirect_offset & NV2_REDIR_NV1); - new_reg->nv2_redirect_offset &= ~NV2_REDIR_NV1; - new_reg->nv2_redirect_offset |= NV2_REDIR_NO_NV1; - } - - ok = g_hash_table_insert(cpu->cp_regs, - (gpointer)(uintptr_t)a->new_key, new_reg); - g_assert(ok); - - src_reg->opaque = dst_reg; - src_reg->orig_readfn = src_reg->readfn ?: raw_read; - src_reg->orig_writefn = src_reg->writefn ?: raw_write; - if (!src_reg->raw_readfn) { - src_reg->raw_readfn = raw_read; - } - if (!src_reg->raw_writefn) { - src_reg->raw_writefn = raw_write; - } - src_reg->readfn = el2_e2h_read; - src_reg->writefn = el2_e2h_write; - } -} #endif static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -4943,6 +4829,8 @@ static const ARMCPRegInfo zcr_reginfo[] = { { .name = "ZCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 0, .nv2_redirect_offset = 0x1e0 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 0), .access = PL1_RW, .type = ARM_CP_SVE, .fieldoffset = offsetof(CPUARMState, vfp.zcr_el[1]), .writefn = zcr_write, .raw_writefn = raw_write }, @@ -5088,6 +4976,8 @@ static const ARMCPRegInfo sme_reginfo[] = { { .name = "SMCR_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 2, .opc2 = 6, .nv2_redirect_offset = 0x1f0 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 2, 6), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 2, 6), .access = PL1_RW, .type = ARM_CP_SME, .fieldoffset = offsetof(CPUARMState, vfp.smcr_el[1]), .writefn = smcr_write, .raw_writefn = raw_write }, @@ -5134,6 +5024,11 @@ static void gpccr_write(CPUARMState *env, const ARMCPRegInfo *ri, R_GPCCR_ORGN_MASK | R_GPCCR_SH_MASK | R_GPCCR_PGS_MASK | R_GPCCR_GPC_MASK | R_GPCCR_GPCP_MASK; + if (cpu_isar_feature(aa64_rme_gpc2, env_archcpu(env))) { + rw_mask |= R_GPCCR_APPSAA_MASK | R_GPCCR_NSO_MASK | + 
R_GPCCR_SPAD_MASK | R_GPCCR_NSPAD_MASK | R_GPCCR_RLPAD_MASK; + } + env->cp15.gpccr_el3 = (value & rw_mask) | (env->cp15.gpccr_el3 & ~rw_mask); } @@ -5196,6 +5091,96 @@ static const ARMCPRegInfo nmi_reginfo[] = { .resetfn = arm_cp_reset_ignore }, }; +static CPAccessResult mecid_access(CPUARMState *env, + const ARMCPRegInfo *ri, bool isread) +{ + int el = arm_current_el(env); + + if (el == 2) { + if (arm_security_space(env) != ARMSS_Realm) { + return CP_ACCESS_UNDEFINED; + } + + if (!(env->cp15.scr_el3 & SCR_MECEN)) { + return CP_ACCESS_TRAP_EL3; + } + } + + return CP_ACCESS_OK; +} + +static void mecid_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + value = extract64(value, 0, MECID_WIDTH); + raw_write(env, ri, value); +} + +static CPAccessResult cipae_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + switch (arm_security_space(env)) { + case ARMSS_Root: /* EL3 */ + case ARMSS_Realm: /* Realm EL2 */ + return CP_ACCESS_OK; + default: + return CP_ACCESS_UNDEFINED; + } +} + +static const ARMCPRegInfo mec_reginfo[] = { + { .name = "MECIDR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 7, .crn = 10, .crm = 8, + .access = PL2_R, .type = ARM_CP_CONST | ARM_CP_NV_NO_TRAP, + .resetvalue = MECID_WIDTH - 1 }, + { .name = "MECID_P0_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 8, + .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP, + .accessfn = mecid_access, .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.mecid_p0_el2) }, + { .name = "MECID_A0_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 8, + .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP, + .accessfn = mecid_access, .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.mecid_a0_el2) }, + { .name = "MECID_P1_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 8, + .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP, + .accessfn = 
mecid_access, .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.mecid_p1_el2) }, + { .name = "MECID_A1_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 8, + .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP, + .accessfn = mecid_access, .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.mecid_a1_el2) }, + { .name = "MECID_RL_A_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .opc2 = 1, .crn = 10, .crm = 10, + .access = PL3_RW, .accessfn = mecid_access, + .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.mecid_rl_a_el3) }, + { .name = "VMECID_P_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 0, .crn = 10, .crm = 9, + .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP, + .accessfn = mecid_access, .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.vmecid_p_el2) }, + { .name = "VMECID_A_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 1, .crn = 10, .crm = 9, + .access = PL2_RW, .type = ARM_CP_NV_NO_TRAP, + .accessfn = mecid_access, .writefn = mecid_write, + .fieldoffset = offsetof(CPUARMState, cp15.vmecid_a_el2) }, + { .name = "DC_CIPAE", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 0, + .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP, + .accessfn = cipae_access }, +}; + +static const ARMCPRegInfo mec_mte_reginfo[] = { + { .name = "DC_CIGDPAE", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 14, .opc2 = 7, + .access = PL2_W, .type = ARM_CP_NOP | ARM_CP_NV_NO_TRAP, + .accessfn = cipae_access }, +}; + #ifndef CONFIG_USER_ONLY /* * We don't know until after realize whether there's a GICv3 @@ -5209,7 +5194,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) uint64_t pfr1 = GET_IDREG(&cpu->isar, ID_PFR1); if (env->gicv3state) { - pfr1 |= 1 << 28; + pfr1 = FIELD_DP64(pfr1, ID_PFR1, GIC, 1); } return pfr1; } @@ -5220,7 +5205,7 @@ static uint64_t 
id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) uint64_t pfr0 = GET_IDREG(&cpu->isar, ID_AA64PFR0); if (env->gicv3state) { - pfr0 |= 1 << 24; + pfr0 = FIELD_DP64(pfr0, ID_AA64PFR0, GIC, 1); } return pfr0; } @@ -5396,7 +5381,7 @@ static const ARMCPRegInfo rndr_reginfo[] = { .access = PL0_R, .readfn = rndr_readfn }, }; -static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque, +static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { #ifdef CONFIG_TCG @@ -5533,6 +5518,8 @@ static const ARMCPRegInfo mte_reginfo[] = { .opc0 = 3, .opc1 = 0, .crn = 5, .crm = 6, .opc2 = 0, .access = PL1_RW, .accessfn = access_tfsr_el1, .nv2_redirect_offset = 0x190 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 5, 6, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 5, 6, 0), .fieldoffset = offsetof(CPUARMState, cp15.tfsr_el[1]) }, { .name = "TFSR_EL2", .state = ARM_CP_STATE_AA64, .type = ARM_CP_NV2_REDIRECT, @@ -5708,6 +5695,8 @@ static const ARMCPRegInfo scxtnum_reginfo[] = { .access = PL1_RW, .accessfn = access_scxtnum_el1, .fgt = FGT_SCXTNUM_EL1, .nv2_redirect_offset = 0x188 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 13, 0, 7), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 13, 0, 7), .fieldoffset = offsetof(CPUARMState, scxtnum_el[1]) }, { .name = "SCXTNUM_EL2", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 4, .crn = 13, .crm = 0, .opc2 = 7, @@ -5994,27 +5983,233 @@ static const ARMCPRegInfo actlr2_hactlr2_reginfo[] = { .resetvalue = 0 }, }; -void register_cp_regs_for_features(ARMCPU *cpu) +static CPAccessResult sctlr2_el2_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) { - /* Register all the coprocessor registers based on feature bits */ - CPUARMState *env = &cpu->env; - ARMISARegisters *isar = &cpu->isar; - - if (arm_feature(env, ARM_FEATURE_M)) { - /* M profile has no coprocessor registers */ - return; + if (arm_current_el(env) < 3 + && arm_feature(env, 
ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_SCTLR2EN)) { + return CP_ACCESS_TRAP_EL3; } + return CP_ACCESS_OK; +} - define_arm_cp_regs(cpu, cp_reginfo); - if (!arm_feature(env, ARM_FEATURE_V8)) { - /* - * Must go early as it is full of wildcards that may be - * overridden by later definitions. - */ - define_arm_cp_regs(cpu, not_v8_cp_reginfo); +static CPAccessResult sctlr2_el1_access(CPUARMState *env, + const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult ret = access_tvm_trvm(env, ri, isread); + if (ret != CP_ACCESS_OK) { + return ret; } - -#ifndef CONFIG_USER_ONLY + if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_SCTLR2EN)) { + return CP_ACCESS_TRAP_EL2; + } + return sctlr2_el2_access(env, ri, isread); +} + +static void sctlr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t valid_mask = 0; + + value &= valid_mask; + raw_write(env, ri, value); +} + +static void sctlr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t valid_mask = 0; + + if (cpu_isar_feature(aa64_mec, env_archcpu(env))) { + valid_mask |= SCTLR2_EMEC; + } + value &= valid_mask; + raw_write(env, ri, value); +} + +static void sctlr2_el3_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + uint64_t valid_mask = 0; + + if (cpu_isar_feature(aa64_mec, env_archcpu(env))) { + valid_mask |= SCTLR2_EMEC; + } + value &= valid_mask; + raw_write(env, ri, value); +} + +static const ARMCPRegInfo sctlr2_reginfo[] = { + { .name = "SCTLR2_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 1, .crm = 0, + .access = PL1_RW, .accessfn = sctlr2_el1_access, + .writefn = sctlr2_el1_write, .fgt = FGT_SCTLR_EL1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 3), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 3), + .nv2_redirect_offset = 0x278 | NV2_REDIR_NV1, + .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[1]) }, + { .name = "SCTLR2_EL2", .state = ARM_CP_STATE_AA64, + 
.opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 1, .crm = 0, + .access = PL2_RW, .accessfn = sctlr2_el2_access, + .writefn = sctlr2_el2_write, + .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[2]) }, + { .name = "SCTLR2_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 1, .crm = 0, + .access = PL3_RW, .writefn = sctlr2_el3_write, + .fieldoffset = offsetof(CPUARMState, cp15.sctlr2_el[3]) }, +}; + +static CPAccessResult tcr2_el2_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_current_el(env) < 3 + && arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_TCR2EN)) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult tcr2_el1_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult ret = access_tvm_trvm(env, ri, isread); + if (ret != CP_ACCESS_OK) { + return ret; + } + if (arm_current_el(env) < 2 && !(arm_hcrx_el2_eff(env) & HCRX_TCR2EN)) { + return CP_ACCESS_TRAP_EL2; + } + return tcr2_el2_access(env, ri, isread); +} + +static void tcr2_el1_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + uint64_t valid_mask = 0; + + if (cpu_isar_feature(aa64_s1pie, cpu)) { + valid_mask |= TCR2_PIE; + } + value &= valid_mask; + raw_write(env, ri, value); +} + +static void tcr2_el2_write(CPUARMState *env, const ARMCPRegInfo *ri, + uint64_t value) +{ + ARMCPU *cpu = env_archcpu(env); + uint64_t valid_mask = 0; + + if (cpu_isar_feature(aa64_s1pie, cpu)) { + valid_mask |= TCR2_PIE; + } + if (cpu_isar_feature(aa64_mec, env_archcpu(env))) { + valid_mask |= TCR2_AMEC0 | TCR2_AMEC1; + } + value &= valid_mask; + raw_write(env, ri, value); +} + +static const ARMCPRegInfo tcr2_reginfo[] = { + { .name = "TCR2_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 2, .crm = 0, + .access = PL1_RW, .accessfn = tcr2_el1_access, + .writefn = tcr2_el1_write, .fgt = FGT_TCR_EL1, + .vhe_redir_to_el2 = 
ENCODE_AA64_CP_REG(3, 4, 2, 0, 3), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 2, 0, 3), + .nv2_redirect_offset = 0x270 | NV2_REDIR_NV1, + .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[1]) }, + { .name = "TCR2_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 2, .crm = 0, + .access = PL2_RW, .accessfn = tcr2_el2_access, + .writefn = tcr2_el2_write, + .fieldoffset = offsetof(CPUARMState, cp15.tcr2_el[2]) }, +}; + +static CPAccessResult pien_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + if (arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_PIEN) + && arm_current_el(env) < 3) { + return CP_ACCESS_TRAP_EL3; + } + return CP_ACCESS_OK; +} + +static CPAccessResult pien_el1_access(CPUARMState *env, const ARMCPRegInfo *ri, + bool isread) +{ + CPAccessResult ret = access_tvm_trvm(env, ri, isread); + if (ret == CP_ACCESS_OK) { + ret = pien_access(env, ri, isread); + } + return ret; +} + +static const ARMCPRegInfo s1pie_reginfo[] = { + { .name = "PIR_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .opc2 = 3, .crn = 10, .crm = 2, + .access = PL1_RW, .accessfn = pien_el1_access, + .fgt = FGT_NPIR_EL1, .nv2_redirect_offset = 0x2a0 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 3), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 3), + .fieldoffset = offsetof(CPUARMState, cp15.pir_el[1]) }, + { .name = "PIR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 3, .crn = 10, .crm = 2, + .access = PL2_RW, .accessfn = pien_access, + .fieldoffset = offsetof(CPUARMState, cp15.pir_el[2]) }, + { .name = "PIR_EL3", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 6, .opc2 = 3, .crn = 10, .crm = 2, + .access = PL3_RW, + .fieldoffset = offsetof(CPUARMState, cp15.pir_el[3]) }, + { .name = "PIRE0_EL1", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 0, .opc2 = 2, .crn = 10, .crm = 2, + .access = PL1_RW, .accessfn = pien_el1_access, + .fgt = FGT_NPIRE0_EL1, 
.nv2_redirect_offset = 0x290 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 10, 2, 2), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 10, 2, 2), + .fieldoffset = offsetof(CPUARMState, cp15.pir_el[0]) }, + { .name = "PIRE0_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 2, .crn = 10, .crm = 2, + .access = PL2_RW, .accessfn = pien_access, + .fieldoffset = offsetof(CPUARMState, cp15.pire0_el2) }, +}; + +static const ARMCPRegInfo s2pie_reginfo[] = { + { .name = "S2PIR_EL2", .state = ARM_CP_STATE_AA64, + .opc0 = 3, .opc1 = 4, .opc2 = 5, .crn = 10, .crm = 2, + .access = PL2_RW, .accessfn = pien_access, + .nv2_redirect_offset = 0x2b0, + .fieldoffset = offsetof(CPUARMState, cp15.s2pir_el2) }, +}; + +void register_cp_regs_for_features(ARMCPU *cpu) +{ + /* Register all the coprocessor registers based on feature bits */ + CPUARMState *env = &cpu->env; + ARMISARegisters *isar = &cpu->isar; + + if (arm_feature(env, ARM_FEATURE_M)) { + /* M profile has no coprocessor registers */ + return; + } + + define_arm_cp_regs(cpu, cp_reginfo); + if (!arm_feature(env, ARM_FEATURE_V8)) { + /* + * Must go early as it is full of wildcards that may be + * overridden by later definitions. 
+ */ + define_arm_cp_regs(cpu, not_v8_cp_reginfo); + } + +#ifndef CONFIG_USER_ONLY if (tcg_enabled()) { define_tlb_insn_regs(cpu); define_at_insn_regs(cpu); @@ -6176,11 +6371,11 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, .resetvalue = GET_IDREG(isar, ID_AA64PFR1)}, - { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, + { .name = "ID_AA64PFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = 0 }, + .resetvalue = GET_IDREG(isar, ID_AA64PFR2)}, { .name = "ID_AA64PFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -6408,6 +6603,8 @@ void register_cp_regs_for_features(ARMCPU *cpu) R_ID_AA64PFR1_SSBS_MASK | R_ID_AA64PFR1_MTE_MASK | R_ID_AA64PFR1_SME_MASK }, + { .name = "ID_AA64PFR2_EL1", + .exported_bits = 0 }, { .name = "ID_AA64PFR*_EL1_RESERVED", .is_glob = true }, { .name = "ID_AA64ZFR0_EL1", @@ -6753,9 +6950,6 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_STRONGARM)) { define_arm_cp_regs(cpu, strongarm_cp_reginfo); } - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - define_arm_cp_regs(cpu, xscale_cp_reginfo); - } if (arm_feature(env, ARM_FEATURE_DUMMY_C15_REGS)) { define_arm_cp_regs(cpu, dummy_c15_cp_reginfo); } @@ -7078,12 +7272,14 @@ void register_cp_regs_for_features(ARMCPU *cpu) if (arm_feature(env, ARM_FEATURE_VBAR)) { static const ARMCPRegInfo vbar_cp_reginfo[] = { - { .name = "VBAR", .state = ARM_CP_STATE_BOTH, + { .name = "VBAR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL1_RW, .writefn = vbar_write, .accessfn = access_nv1, .fgt = FGT_VBAR_EL1, .nv2_redirect_offset = 0x250 | NV2_REDIR_NV1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 12, 0, 0), + .vhe_redir_to_el01 = 
ENCODE_AA64_CP_REG(3, 5, 12, 0, 0), .bank_fieldoffsets = { offsetof(CPUARMState, cp15.vbar_s), offsetof(CPUARMState, cp15.vbar_ns) }, .resetvalue = 0 }, @@ -7094,24 +7290,18 @@ void register_cp_regs_for_features(ARMCPU *cpu) /* Generic registers whose values depend on the implementation */ { ARMCPRegInfo sctlr = { - .name = "SCTLR", .state = ARM_CP_STATE_BOTH, + .name = "SCTLR_EL1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 0, .access = PL1_RW, .accessfn = access_tvm_trvm, .fgt = FGT_SCTLR_EL1, + .vhe_redir_to_el2 = ENCODE_AA64_CP_REG(3, 4, 1, 0, 0), + .vhe_redir_to_el01 = ENCODE_AA64_CP_REG(3, 5, 1, 0, 0), .nv2_redirect_offset = 0x110 | NV2_REDIR_NV1, .bank_fieldoffsets = { offsetof(CPUARMState, cp15.sctlr_s), offsetof(CPUARMState, cp15.sctlr_ns) }, .writefn = sctlr_write, .resetvalue = cpu->reset_sctlr, .raw_writefn = raw_write, }; - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - /* - * Normally we would always end the TB on an SCTLR write, but Linux - * arch/arm/mach-pxa/sleep.S expects two instructions following - * an MMU enable to execute from cache. Imitate this behaviour. 
- */ - sctlr.type |= ARM_CP_SUPPRESS_TB_END; - } define_one_arm_cp_reg(cpu, &sctlr); if (arm_feature(env, ARM_FEATURE_PMSA) && @@ -7223,6 +7413,27 @@ void register_cp_regs_for_features(ARMCPU *cpu) define_arm_cp_regs(cpu, nmi_reginfo); } + if (cpu_isar_feature(aa64_sctlr2, cpu)) { + define_arm_cp_regs(cpu, sctlr2_reginfo); + } + + if (cpu_isar_feature(aa64_tcr2, cpu)) { + define_arm_cp_regs(cpu, tcr2_reginfo); + } + + if (cpu_isar_feature(aa64_s1pie, cpu)) { + define_arm_cp_regs(cpu, s1pie_reginfo); + } + if (cpu_isar_feature(aa64_s2pie, cpu)) { + define_arm_cp_regs(cpu, s2pie_reginfo); + } + if (cpu_isar_feature(aa64_mec, cpu)) { + define_arm_cp_regs(cpu, mec_reginfo); + if (cpu_isar_feature(aa64_mte, cpu)) { + define_arm_cp_regs(cpu, mec_mte_reginfo); + } + } + if (cpu_isar_feature(any_predinv, cpu)) { define_arm_cp_regs(cpu, predinv_reginfo); } @@ -7232,61 +7443,41 @@ void register_cp_regs_for_features(ARMCPU *cpu) } define_pm_cpregs(cpu); + define_gcs_cpregs(cpu); +} -#ifndef CONFIG_USER_ONLY - /* - * Register redirections and aliases must be done last, - * after the registers from the other extensions have been defined. - */ - if (arm_feature(env, ARM_FEATURE_EL2) && cpu_isar_feature(aa64_vh, cpu)) { - define_arm_vh_e2h_redirects_aliases(cpu); +/* + * Copy a ARMCPRegInfo structure, allocating it along with the name + * and an optional suffix to the name. + */ +static ARMCPRegInfo *alloc_cpreg(const ARMCPRegInfo *in, const char *suffix) +{ + const char *name = in->name; + size_t name_len = strlen(name); + size_t suff_len = suffix ? 
strlen(suffix) : 0; + ARMCPRegInfo *out = g_malloc(sizeof(*in) + name_len + suff_len + 1); + char *p = (char *)(out + 1); + + *out = *in; + out->name = p; + + memcpy(p, name, name_len + 1); + if (suffix) { + memcpy(p + name_len, suffix, suff_len + 1); } -#endif + return out; } /* - * Private utility function for define_one_arm_cp_reg_with_opaque(): + * Private utility function for define_one_arm_cp_reg(): * add a single reginfo struct to the hash table. */ -static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, - void *opaque, CPState state, - CPSecureState secstate, - int crm, int opc1, int opc2, - const char *name) +static void add_cpreg_to_hashtable(ARMCPU *cpu, ARMCPRegInfo *r, + CPState state, CPSecureState secstate, + uint32_t key) { CPUARMState *env = &cpu->env; - uint32_t key; - ARMCPRegInfo *r2; - bool is64 = r->type & ARM_CP_64BIT; bool ns = secstate & ARM_CP_SECSTATE_NS; - int cp = r->cp; - size_t name_len; - bool make_const; - - switch (state) { - case ARM_CP_STATE_AA32: - /* We assume it is a cp15 register if the .cp field is left unset. */ - if (cp == 0 && r->state == ARM_CP_STATE_BOTH) { - cp = 15; - } - key = ENCODE_CP_REG(cp, is64, ns, r->crn, crm, opc1, opc2); - break; - case ARM_CP_STATE_AA64: - /* - * To allow abbreviation of ARMCPRegInfo definitions, we treat - * cp == 0 as equivalent to the value for "standard guest-visible - * sysreg". STATE_BOTH definitions are also always "standard sysreg" - * in their AArch64 view (the .cp value may be non-zero for the - * benefit of the AArch32 view). - */ - if (cp == 0 || r->state == ARM_CP_STATE_BOTH) { - cp = CP_REG_ARM64_SYSREG_CP; - } - key = ENCODE_AA64_CP_REG(cp, r->crn, crm, r->opc0, opc1, opc2); - break; - default: - g_assert_not_reached(); - } /* Overriding of an existing definition must be explicitly requested. 
*/ if (!(r->type & ARM_CP_OVERRIDE)) { @@ -7296,84 +7487,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, } } - /* - * Eliminate registers that are not present because the EL is missing. - * Doing this here makes it easier to put all registers for a given - * feature into the same ARMCPRegInfo array and define them all at once. - */ - make_const = false; - if (arm_feature(env, ARM_FEATURE_EL3)) { - /* - * An EL2 register without EL2 but with EL3 is (usually) RES0. - * See rule RJFFP in section D1.1.3 of DDI0487H.a. - */ - int min_el = ctz32(r->access) / 2; - if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) { - if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { - return; - } - make_const = !(r->type & ARM_CP_EL3_NO_EL2_KEEP); - } - } else { - CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2) - ? PL2_RW : PL1_RW); - if ((r->access & max_el) == 0) { - return; - } - } - - /* Combine cpreg and name into one allocation. */ - name_len = strlen(name) + 1; - r2 = g_malloc(sizeof(*r2) + name_len); - *r2 = *r; - r2->name = memcpy(r2 + 1, name, name_len); - - /* - * Update fields to match the instantiation, overwiting wildcards - * such as CP_ANY, ARM_CP_STATE_BOTH, or ARM_CP_SECSTATE_BOTH. - */ - r2->cp = cp; - r2->crm = crm; - r2->opc1 = opc1; - r2->opc2 = opc2; - r2->state = state; - r2->secure = secstate; - if (opaque) { - r2->opaque = opaque; - } - - if (make_const) { - /* This should not have been a very special register to begin. */ - int old_special = r2->type & ARM_CP_SPECIAL_MASK; - assert(old_special == 0 || old_special == ARM_CP_NOP); - /* - * Set the special function to CONST, retaining the other flags. - * This is important for e.g. ARM_CP_SVE so that we still - * take the SVE trap if CPTR_EL3.EZ == 0. - */ - r2->type = (r2->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; - /* - * Usually, these registers become RES0, but there are a few - * special cases like VPIDR_EL2 which have a constant non-zero - * value with writes ignored. 
- */ - if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { - r2->resetvalue = 0; - } - /* - * ARM_CP_CONST has precedence, so removing the callbacks and - * offsets are not strictly necessary, but it is potentially - * less confusing to debug later. - */ - r2->readfn = NULL; - r2->writefn = NULL; - r2->raw_readfn = NULL; - r2->raw_writefn = NULL; - r2->resetfn = NULL; - r2->fieldoffset = 0; - r2->bank_fieldoffsets[0] = 0; - r2->bank_fieldoffsets[1] = 0; - } else { + { bool isbanked = r->bank_fieldoffsets[0] && r->bank_fieldoffsets[1]; if (isbanked) { @@ -7382,7 +7496,7 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, * Overwriting fieldoffset as the array is only used to define * banked registers but later only fieldoffset is used. */ - r2->fieldoffset = r->bank_fieldoffsets[ns]; + r->fieldoffset = r->bank_fieldoffsets[ns]; } if (state == ARM_CP_STATE_AA32) { if (isbanked) { @@ -7399,54 +7513,187 @@ static void add_cpreg_to_hashtable(ARMCPU *cpu, const ARMCPRegInfo *r, */ if ((r->state == ARM_CP_STATE_BOTH && ns) || (arm_feature(env, ARM_FEATURE_V8) && !ns)) { - r2->type |= ARM_CP_ALIAS; + r->type |= ARM_CP_ALIAS; } } else if ((secstate != r->secure) && !ns) { /* * The register is not banked so we only want to allow * migration of the non-secure instance. */ - r2->type |= ARM_CP_ALIAS; - } - - if (HOST_BIG_ENDIAN && - r->state == ARM_CP_STATE_BOTH && r2->fieldoffset) { - r2->fieldoffset += sizeof(uint32_t); + r->type |= ARM_CP_ALIAS; } } } /* - * By convention, for wildcarded registers only the first - * entry is used for migration; the others are marked as - * ALIAS so we don't try to transfer the register - * multiple times. Special registers (ie NOP/WFI) are - * never migratable and not even raw-accessible. + * For 32-bit AArch32 regs shared with 64-bit AArch64 regs, + * adjust the field offset for endianness. This had to be + * delayed until banked registers were resolved. 
*/ - if (r2->type & ARM_CP_SPECIAL_MASK) { - r2->type |= ARM_CP_NO_RAW; + if (HOST_BIG_ENDIAN && + state == ARM_CP_STATE_AA32 && + r->state == ARM_CP_STATE_BOTH && + r->fieldoffset) { + r->fieldoffset += sizeof(uint32_t); } - if (((r->crm == CP_ANY) && crm != 0) || - ((r->opc1 == CP_ANY) && opc1 != 0) || - ((r->opc2 == CP_ANY) && opc2 != 0)) { - r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; + + /* + * Special registers (ie NOP/WFI) are never migratable and + * are not even raw-accessible. + */ + if (r->type & ARM_CP_SPECIAL_MASK) { + r->type |= ARM_CP_NO_RAW; } + /* + * Update fields to match the instantiation, overwiting wildcards + * such as ARM_CP_STATE_BOTH or ARM_CP_SECSTATE_BOTH. + */ + r->state = state; + r->secure = secstate; + /* * Check that raw accesses are either forbidden or handled. Note that * we can't assert this earlier because the setup of fieldoffset for * banked registers has to be done first. */ - if (!(r2->type & ARM_CP_NO_RAW)) { - assert(!raw_accessors_invalid(r2)); + if (!(r->type & ARM_CP_NO_RAW)) { + assert(!raw_accessors_invalid(r)); } - g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r2); + g_hash_table_insert(cpu->cp_regs, (gpointer)(uintptr_t)key, r); +} + +static void add_cpreg_to_hashtable_aa32(ARMCPU *cpu, ARMCPRegInfo *r) +{ + /* + * Under AArch32 CP registers can be common + * (same for secure and non-secure world) or banked. 
+ */ + ARMCPRegInfo *r_s; + bool is64 = r->type & ARM_CP_64BIT; + uint32_t key = ENCODE_CP_REG(r->cp, is64, 0, r->crn, + r->crm, r->opc1, r->opc2); + + assert(!(r->type & ARM_CP_ADD_TLBI_NXS)); /* aa64 only */ + r->vhe_redir_to_el2 = 0; + r->vhe_redir_to_el01 = 0; + + switch (r->secure) { + case ARM_CP_SECSTATE_NS: + key |= CP_REG_AA32_NS_MASK; + /* fall through */ + case ARM_CP_SECSTATE_S: + add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32, r->secure, key); + break; + case ARM_CP_SECSTATE_BOTH: + r_s = alloc_cpreg(r, "_S"); + add_cpreg_to_hashtable(cpu, r_s, ARM_CP_STATE_AA32, + ARM_CP_SECSTATE_S, key); + + key |= CP_REG_AA32_NS_MASK; + add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA32, + ARM_CP_SECSTATE_NS, key); + break; + default: + g_assert_not_reached(); + } } +static void add_cpreg_to_hashtable_aa64(ARMCPU *cpu, ARMCPRegInfo *r) +{ + uint32_t key = ENCODE_AA64_CP_REG(r->opc0, r->opc1, + r->crn, r->crm, r->opc2); + + if ((r->type & ARM_CP_ADD_TLBI_NXS) && + cpu_isar_feature(aa64_xs, cpu)) { + /* + * This is a TLBI insn which has an NXS variant. The + * NXS variant is at the same encoding except that + * crn is +1, and has the same behaviour except for + * fine-grained trapping. Add the NXS insn here and + * then fall through to add the normal register. + * add_cpreg_to_hashtable() copies the cpreg struct + * and name that it is passed, so it's OK to use + * a local struct here. 
+ */ + ARMCPRegInfo *nxs_ri = alloc_cpreg(r, "NXS"); + uint32_t nxs_key; + + assert(nxs_ri->crn < 0xf); + nxs_ri->crn++; + /* Also increment the CRN field inside the key value */ + nxs_key = key + (1 << CP_REG_ARM64_SYSREG_CRN_SHIFT); + if (nxs_ri->fgt) { + nxs_ri->fgt |= R_FGT_NXS_MASK; + } + + add_cpreg_to_hashtable(cpu, nxs_ri, ARM_CP_STATE_AA64, + ARM_CP_SECSTATE_NS, nxs_key); + } + + if (!r->vhe_redir_to_el01) { + assert(!r->vhe_redir_to_el2); + } else if (!arm_feature(&cpu->env, ARM_FEATURE_EL2) || + !cpu_isar_feature(aa64_vh, cpu)) { + r->vhe_redir_to_el2 = 0; + r->vhe_redir_to_el01 = 0; + } else { + /* Create the FOO_EL12 alias. */ + ARMCPRegInfo *r2 = alloc_cpreg(r, "2"); + uint32_t key2 = r->vhe_redir_to_el01; + + /* + * Clear EL1 redirection on the FOO_EL1 reg; + * Clear EL2 redirection on the FOO_EL12 reg; + * Install redirection from FOO_EL12 back to FOO_EL1. + */ + r->vhe_redir_to_el01 = 0; + r2->vhe_redir_to_el2 = 0; + r2->vhe_redir_to_el01 = key; + + r2->type |= ARM_CP_ALIAS | ARM_CP_NO_RAW; + /* Remove PL1/PL0 access, leaving PL2/PL3 R/W in place. */ + r2->access &= PL2_RW | PL3_RW; + /* The new_reg op fields are as per new_key, not the target reg */ + r2->crn = (key2 & CP_REG_ARM64_SYSREG_CRN_MASK) + >> CP_REG_ARM64_SYSREG_CRN_SHIFT; + r2->crm = (key2 & CP_REG_ARM64_SYSREG_CRM_MASK) + >> CP_REG_ARM64_SYSREG_CRM_SHIFT; + r2->opc0 = (key2 & CP_REG_ARM64_SYSREG_OP0_MASK) + >> CP_REG_ARM64_SYSREG_OP0_SHIFT; + r2->opc1 = (key2 & CP_REG_ARM64_SYSREG_OP1_MASK) + >> CP_REG_ARM64_SYSREG_OP1_SHIFT; + r2->opc2 = (key2 & CP_REG_ARM64_SYSREG_OP2_MASK) + >> CP_REG_ARM64_SYSREG_OP2_SHIFT; + + /* Non-redirected access to this register will abort. */ + r2->readfn = NULL; + r2->writefn = NULL; + r2->raw_readfn = NULL; + r2->raw_writefn = NULL; + r2->accessfn = NULL; + r2->fieldoffset = 0; + + /* + * If the _EL1 register is redirected to memory by FEAT_NV2, + * then it shares the offset with the _EL12 register, + * and which one is redirected depends on HCR_EL2.NV1. 
+ */ + if (r2->nv2_redirect_offset) { + assert(r2->nv2_redirect_offset & NV2_REDIR_NV1); + r2->nv2_redirect_offset &= ~NV2_REDIR_NV1; + r2->nv2_redirect_offset |= NV2_REDIR_NO_NV1; + } + add_cpreg_to_hashtable(cpu, r2, ARM_CP_STATE_AA64, + ARM_CP_SECSTATE_NS, key2); + } + + add_cpreg_to_hashtable(cpu, r, ARM_CP_STATE_AA64, + ARM_CP_SECSTATE_NS, key); +} -void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, - const ARMCPRegInfo *r, void *opaque) +void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *r) { /* * Define implementations of coprocessor registers. @@ -7472,21 +7719,27 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, * bits; the ARM_CP_64BIT* flag applies only to the AArch32 view of * the register, if any. */ - int crm, opc1, opc2; int crmmin = (r->crm == CP_ANY) ? 0 : r->crm; int crmmax = (r->crm == CP_ANY) ? 15 : r->crm; int opc1min = (r->opc1 == CP_ANY) ? 0 : r->opc1; int opc1max = (r->opc1 == CP_ANY) ? 7 : r->opc1; int opc2min = (r->opc2 == CP_ANY) ? 0 : r->opc2; int opc2max = (r->opc2 == CP_ANY) ? 7 : r->opc2; - CPState state; + int cp = r->cp; + ARMCPRegInfo r_const; + CPUARMState *env = &cpu->env; - /* 64 bit registers have only CRm and Opc1 fields */ - assert(!((r->type & ARM_CP_64BIT) && (r->opc2 || r->crn))); + /* + * AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless. + * Moreover, the encoding test just following in general prevents + * shared encoding so ARM_CP_STATE_BOTH won't work either. + */ + assert(r->state == ARM_CP_STATE_AA32 || !(r->type & ARM_CP_64BIT)); + /* AArch32 64-bit registers have only CRm and Opc1 fields. 
*/ + assert(!(r->type & ARM_CP_64BIT) || !(r->opc2 || r->crn)); /* op0 only exists in the AArch64 encodings */ - assert((r->state != ARM_CP_STATE_AA32) || (r->opc0 == 0)); - /* AArch64 regs are all 64 bit so ARM_CP_64BIT is meaningless */ - assert((r->state != ARM_CP_STATE_AA64) || !(r->type & ARM_CP_64BIT)); + assert(r->state != ARM_CP_STATE_AA32 || r->opc0 == 0); + /* * This API is only for Arm's system coprocessors (14 and 15) or * (M-profile or v7A-and-earlier only) for implementation defined @@ -7497,21 +7750,25 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, */ switch (r->state) { case ARM_CP_STATE_BOTH: - /* 0 has a special meaning, but otherwise the same rules as AA32. */ - if (r->cp == 0) { + /* + * If the cp field is left unset, assume cp15. + * Otherwise apply the same rules as AA32. + */ + if (cp == 0) { + cp = 15; break; } /* fall through */ case ARM_CP_STATE_AA32: if (arm_feature(&cpu->env, ARM_FEATURE_V8) && !arm_feature(&cpu->env, ARM_FEATURE_M)) { - assert(r->cp >= 14 && r->cp <= 15); + assert(cp >= 14 && cp <= 15); } else { - assert(r->cp < 8 || (r->cp >= 14 && r->cp <= 15)); + assert(cp < 8 || (cp >= 14 && cp <= 15)); } break; case ARM_CP_STATE_AA64: - assert(r->cp == 0 || r->cp == CP_REG_ARM64_SYSREG_CP); + assert(cp == 0); break; default: g_assert_not_reached(); @@ -7576,75 +7833,104 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, } } - for (crm = crmmin; crm <= crmmax; crm++) { - for (opc1 = opc1min; opc1 <= opc1max; opc1++) { - for (opc2 = opc2min; opc2 <= opc2max; opc2++) { - for (state = ARM_CP_STATE_AA32; - state <= ARM_CP_STATE_AA64; state++) { - if (r->state != state && r->state != ARM_CP_STATE_BOTH) { - continue; - } - if ((r->type & ARM_CP_ADD_TLBI_NXS) && - cpu_isar_feature(aa64_xs, cpu)) { - /* - * This is a TLBI insn which has an NXS variant. The - * NXS variant is at the same encoding except that - * crn is +1, and has the same behaviour except for - * fine-grained trapping. 
Add the NXS insn here and - * then fall through to add the normal register. - * add_cpreg_to_hashtable() copies the cpreg struct - * and name that it is passed, so it's OK to use - * a local struct here. - */ - ARMCPRegInfo nxs_ri = *r; - g_autofree char *name = g_strdup_printf("%sNXS", r->name); - - assert(state == ARM_CP_STATE_AA64); - assert(nxs_ri.crn < 0xf); - nxs_ri.crn++; - if (nxs_ri.fgt) { - nxs_ri.fgt |= R_FGT_NXS_MASK; - } - add_cpreg_to_hashtable(cpu, &nxs_ri, opaque, state, - ARM_CP_SECSTATE_NS, - crm, opc1, opc2, name); - } - if (state == ARM_CP_STATE_AA32) { - /* - * Under AArch32 CP registers can be common - * (same for secure and non-secure world) or banked. - */ - char *name; - - switch (r->secure) { - case ARM_CP_SECSTATE_S: - case ARM_CP_SECSTATE_NS: - add_cpreg_to_hashtable(cpu, r, opaque, state, - r->secure, crm, opc1, opc2, - r->name); - break; - case ARM_CP_SECSTATE_BOTH: - name = g_strdup_printf("%s_S", r->name); - add_cpreg_to_hashtable(cpu, r, opaque, state, - ARM_CP_SECSTATE_S, - crm, opc1, opc2, name); - g_free(name); - add_cpreg_to_hashtable(cpu, r, opaque, state, - ARM_CP_SECSTATE_NS, - crm, opc1, opc2, r->name); - break; - default: - g_assert_not_reached(); - } - } else { - /* - * AArch64 registers get mapped to non-secure instance - * of AArch32 - */ - add_cpreg_to_hashtable(cpu, r, opaque, state, - ARM_CP_SECSTATE_NS, - crm, opc1, opc2, r->name); - } + /* + * Eliminate registers that are not present because the EL is missing. + * Doing this here makes it easier to put all registers for a given + * feature into the same ARMCPRegInfo array and define them all at once. + */ + if (arm_feature(env, ARM_FEATURE_EL3)) { + /* + * An EL2 register without EL2 but with EL3 is (usually) RES0. + * See rule RJFFP in section D1.1.3 of DDI0487H.a. 
+ */ + int min_el = ctz32(r->access) / 2; + if (min_el == 2 && !arm_feature(env, ARM_FEATURE_EL2)) { + if (r->type & ARM_CP_EL3_NO_EL2_UNDEF) { + return; + } + if (!(r->type & ARM_CP_EL3_NO_EL2_KEEP)) { + /* This should not have been a very special register. */ + int old_special = r->type & ARM_CP_SPECIAL_MASK; + assert(old_special == 0 || old_special == ARM_CP_NOP); + + r_const = *r; + + /* + * Set the special function to CONST, retaining the other flags. + * This is important for e.g. ARM_CP_SVE so that we still + * take the SVE trap if CPTR_EL3.EZ == 0. + */ + r_const.type = (r->type & ~ARM_CP_SPECIAL_MASK) | ARM_CP_CONST; + /* + * Usually, these registers become RES0, but there are a few + * special cases like VPIDR_EL2 which have a constant non-zero + * value with writes ignored. + */ + if (!(r->type & ARM_CP_EL3_NO_EL2_C_NZ)) { + r_const.resetvalue = 0; + } + /* + * ARM_CP_CONST has precedence, so removing the callbacks and + * offsets are not strictly necessary, but it is potentially + * less confusing to debug later. + */ + r_const.readfn = NULL; + r_const.writefn = NULL; + r_const.raw_readfn = NULL; + r_const.raw_writefn = NULL; + r_const.resetfn = NULL; + r_const.fieldoffset = 0; + r_const.bank_fieldoffsets[0] = 0; + r_const.bank_fieldoffsets[1] = 0; + + r = &r_const; + } + } + } else { + CPAccessRights max_el = (arm_feature(env, ARM_FEATURE_EL2) + ? PL2_RW : PL1_RW); + if ((r->access & max_el) == 0) { + return; + } + } + + for (int crm = crmmin; crm <= crmmax; crm++) { + for (int opc1 = opc1min; opc1 <= opc1max; opc1++) { + for (int opc2 = opc2min; opc2 <= opc2max; opc2++) { + ARMCPRegInfo *r2 = alloc_cpreg(r, NULL); + ARMCPRegInfo *r3; + + /* + * By convention, for wildcarded registers only the first + * entry is used for migration; the others are marked as + * ALIAS so we don't try to transfer the register + * multiple times. 
+ */ + if (crm != crmmin || opc1 != opc1min || opc2 != opc2min) { + r2->type |= ARM_CP_ALIAS | ARM_CP_NO_GDB; + } + + /* Overwrite CP_ANY with the instantiation. */ + r2->crm = crm; + r2->opc1 = opc1; + r2->opc2 = opc2; + + switch (r->state) { + case ARM_CP_STATE_AA32: + add_cpreg_to_hashtable_aa32(cpu, r2); + break; + case ARM_CP_STATE_AA64: + add_cpreg_to_hashtable_aa64(cpu, r2); + break; + case ARM_CP_STATE_BOTH: + r3 = alloc_cpreg(r2, NULL); + r2->cp = cp; + add_cpreg_to_hashtable_aa32(cpu, r2); + r3->cp = 0; + add_cpreg_to_hashtable_aa64(cpu, r3); + break; + default: + g_assert_not_reached(); } } } @@ -7652,12 +7938,10 @@ void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu, } /* Define a whole list of registers */ -void define_arm_cp_regs_with_opaque_len(ARMCPU *cpu, const ARMCPRegInfo *regs, - void *opaque, size_t len) +void define_arm_cp_regs_len(ARMCPU *cpu, const ARMCPRegInfo *regs, size_t len) { - size_t i; - for (i = 0; i < len; ++i) { - define_one_arm_cp_reg_with_opaque(cpu, regs + i, opaque); + for (size_t i = 0; i < len; ++i) { + define_one_arm_cp_reg(cpu, regs + i); } } @@ -7719,7 +8003,7 @@ uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri) return 0; } -void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque) +void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *ri) { /* Helper coprocessor reset function for do-nothing-on-reset registers */ } @@ -8786,7 +9070,7 @@ static int aarch64_regnum(CPUARMState *env, int aarch32_reg) } } -static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) +uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) { uint32_t ret = cpsr_read(env); @@ -8801,6 +9085,24 @@ static uint32_t cpsr_read_for_spsr_elx(CPUARMState *env) return ret; } +void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val) +{ + uint32_t mask; + + /* Save SPSR_ELx.SS into PSTATE. 
*/ + env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS); + val &= ~PSTATE_SS; + + /* Move DIT to the correct location for CPSR */ + if (val & PSTATE_DIT) { + val &= ~PSTATE_DIT; + val |= CPSR_DIT; + } + + mask = aarch32_cpsr_valid_mask(env->features, &env_archcpu(env)->isar); + cpsr_write(env, val, mask, CPSRWriteRaw); +} + static bool syndrome_is_sync_extabt(uint32_t syndrome) { /* Return true if this syndrome value is a synchronous external abort */ @@ -8833,8 +9135,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) CPUARMState *env = &cpu->env; unsigned int new_el = env->exception.target_el; vaddr addr = env->cp15.vbar_el[new_el]; - unsigned int new_mode = aarch64_pstate_mode(new_el, true); - unsigned int old_mode; + uint64_t new_mode = aarch64_pstate_mode(new_el, true); + uint64_t old_mode; unsigned int cur_el = arm_current_el(env); int rt; @@ -8877,8 +9179,13 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) } else { addr += 0x600; } - } else if (pstate_read(env) & PSTATE_SP) { - addr += 0x200; + } else { + if (pstate_read(env) & PSTATE_SP) { + addr += 0x200; + } + if (is_a64(env) && (env->cp15.gcscr_el[new_el] & GCSCR_EXLOCKEN)) { + new_mode |= PSTATE_EXLOCK; + } } switch (cs->exception_index) { @@ -8982,7 +9289,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) * If NV2 is disabled, change SPSR when NV,NV1 == 1,0 (I_ZJRNN) * If NV2 is enabled, change SPSR when NV is 1 (I_DBTLM) */ - old_mode = deposit32(old_mode, 2, 2, 2); + old_mode = deposit64(old_mode, 2, 2, 2); } } } else { @@ -8995,7 +9302,7 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) } env->banked_spsr[aarch64_banked_spsr_index(new_el)] = old_mode; - qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%x\n", old_mode); + qemu_log_mask(CPU_LOG_INT, "...with SPSR 0x%" PRIx64 "\n", old_mode); qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n", env->elr_el[new_el]); @@ -9049,7 +9356,8 @@ static void arm_cpu_do_interrupt_aarch64(CPUState *cs) env->pc = addr; - 
qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n", + qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 + " PSTATE 0x%" PRIx64 "\n", new_el, env->pc, pstate_read(env)); } @@ -9105,7 +9413,7 @@ void arm_cpu_do_interrupt(CPUState *cs) new_el); if (qemu_loglevel_mask(CPU_LOG_INT) && !excp_is_internal(cs->exception_index)) { - qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx32 "\n", + qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%x/0x%" PRIx64 "\n", syn_get_ec(env->exception.syndrome), env->exception.syndrome); } @@ -9147,7 +9455,7 @@ void arm_cpu_do_interrupt(CPUState *cs) arm_call_el_change_hook(cpu); if (!kvm_enabled()) { - cs->interrupt_request |= CPU_INTERRUPT_EXITTB; + cpu_set_interrupt(cs, CPU_INTERRUPT_EXITTB); } } #endif /* !CONFIG_USER_ONLY */ @@ -9295,21 +9603,34 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, bool el1_is_aa32) { uint64_t tcr = regime_tcr(env, mmu_idx); - bool epd, hpd, tsz_oob, ds, ha, hd; + bool epd, hpd, tsz_oob, ds, ha, hd, pie = false; int select, tsz, tbi, max_tsz, min_tsz, ps, sh; ARMGranuleSize gran; ARMCPU *cpu = env_archcpu(env); bool stage2 = regime_is_stage2(mmu_idx); + int r_el = regime_el(mmu_idx); if (!regime_has_2_ranges(mmu_idx)) { select = 0; tsz = extract32(tcr, 0, 6); gran = tg0_to_gran_size(extract32(tcr, 14, 2)); if (stage2) { - /* VTCR_EL2 */ - hpd = false; + /* + * Stage2 does not have hierarchical permissions. + * Thus disabling them makes things easier during ptw. 
+ */ + hpd = true; + pie = extract64(tcr, 36, 1) && cpu_isar_feature(aa64_s2pie, cpu); } else { hpd = extract32(tcr, 24, 1); + if (r_el == 3) { + pie = (extract64(tcr, 35, 1) + && cpu_isar_feature(aa64_s1pie, cpu)); + } else { + pie = ((env->cp15.tcr2_el[2] & TCR2_PIE) + && (!arm_feature(env, ARM_FEATURE_EL3) + || (env->cp15.scr_el3 & SCR_TCR2EN))); + } } epd = false; sh = extract32(tcr, 12, 2); @@ -9346,10 +9667,16 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, ds = extract64(tcr, 59, 1); if (e0pd && cpu_isar_feature(aa64_e0pd, cpu) && - regime_is_user(env, mmu_idx)) { + regime_is_user(mmu_idx)) { epd = true; } + + pie = ((env->cp15.tcr2_el[r_el] & TCR2_PIE) + && (!arm_feature(env, ARM_FEATURE_EL3) + || (env->cp15.scr_el3 & SCR_TCR2EN)) + && (r_el == 2 || (arm_hcrx_el2_eff(env) & HCRX_TCR2EN))); } + hpd |= pie; gran = sanitize_gran_size(cpu, gran, stage2); @@ -9428,6 +9755,7 @@ ARMVAParameters aa64_va_parameters(CPUARMState *env, uint64_t va, .ha = ha, .hd = ha && hd, .gran = gran, + .pie = pie, }; } @@ -9542,33 +9870,6 @@ int fp_exception_el(CPUARMState *env, int cur_el) return 0; } -/* Return the exception level we're running at if this is our mmu_idx */ -int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx) -{ - if (mmu_idx & ARM_MMU_IDX_M) { - return mmu_idx & ARM_MMU_IDX_M_PRIV; - } - - switch (mmu_idx) { - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E30_0: - return 0; - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - return 1; - case ARMMMUIdx_E2: - case ARMMMUIdx_E20_2: - case ARMMMUIdx_E20_2_PAN: - return 2; - case ARMMMUIdx_E3: - case ARMMMUIdx_E30_3_PAN: - return 3; - default: - g_assert_not_reached(); - } -} - #ifndef CONFIG_TCG ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate) { diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index c9cfcdc08bb16..0658a99a2d1fc 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -152,9 +152,6 @@ void hvf_arm_init_debug(void) 
g_array_sized_new(true, true, sizeof(HWWatchpoint), max_hw_wps); } -#define HVF_SYSREG(crn, crm, op0, op1, op2) \ - ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) - #define SYSREG_OP0_SHIFT 20 #define SYSREG_OP0_MASK 0x3 #define SYSREG_OP0(sysreg) ((sysreg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK) @@ -186,6 +183,7 @@ void hvf_arm_init_debug(void) #define SYSREG_OSLAR_EL1 SYSREG(2, 0, 1, 0, 4) #define SYSREG_OSLSR_EL1 SYSREG(2, 0, 1, 1, 4) #define SYSREG_OSDLR_EL1 SYSREG(2, 0, 1, 3, 4) +#define SYSREG_LORC_EL1 SYSREG(3, 0, 10, 4, 3) #define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1) #define SYSREG_CNTP_CTL_EL0 SYSREG(3, 3, 14, 2, 1) #define SYSREG_PMCR_EL0 SYSREG(3, 3, 9, 12, 0) @@ -396,156 +394,34 @@ static const struct hvf_reg_match hvf_fpreg_match[] = { { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) }, }; -struct hvf_sreg_match { - int reg; - uint32_t key; - uint32_t cp_idx; -}; +/* + * QEMU uses KVM system register ids in the migration format. + * Conveniently, HVF uses the same encoding of the op* and cr* parameters + * within the low 16 bits of the ids. Thus conversion between the + * formats is trivial. 
+ */ -static struct hvf_sreg_match hvf_sreg_match[] = { - { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 
2, 0, 5) }, - { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) }, - - { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) }, - { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) }, - { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) }, - { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) }, - -#ifdef SYNC_NO_RAW_REGS - /* - * The registers below are manually synced on init because they are - * marked as NO_RAW. We still list them to make number space sync easier. 
- */ - { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) }, - { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) }, - { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) }, - { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) }, -#endif - { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) }, - { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) }, - { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) }, - { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) }, - { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) }, -#ifdef SYNC_NO_MMFR0 - /* We keep the hardware MMFR0 around. HW limits are there anyway */ - { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) }, -#endif - { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) }, - { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) }, - /* Add ID_AA64MMFR3_EL1 here when HVF supports it */ - - { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) }, - { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) }, - { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) }, - { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) }, - { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) }, - { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) }, - - { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) }, - { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) }, - { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) }, - { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) }, - { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) }, - { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) }, - { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) }, - { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) }, - { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) }, - { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) }, - - { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) }, - { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) }, - { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) }, - { HV_SYS_REG_AFSR0_EL1, 
HVF_SYSREG(5, 1, 3, 0, 0) }, - { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) }, - { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) }, - { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) }, - { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) }, - { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) }, - { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) }, - { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) }, - { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) }, - { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) }, - { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) }, - { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) }, - { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) }, - { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) }, - { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) }, - { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) }, - { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) }, +#define KVMID_TO_HVF(KVM) ((KVM) & 0xffff) +#define HVF_TO_KVMID(HVF) \ + (CP_REG_ARM64 | CP_REG_SIZE_U64 | CP_REG_ARM64_SYSREG | (HVF)) + +/* Verify this at compile-time. */ + +#define DEF_SYSREG(HVF_ID, ...) 
\ + QEMU_BUILD_BUG_ON(HVF_ID != KVMID_TO_HVF(KVMID_AA64_SYS_REG64(__VA_ARGS__))); + +#include "sysreg.c.inc" + +#undef DEF_SYSREG + +#define DEF_SYSREG(HVF_ID, op0, op1, crn, crm, op2) HVF_ID, + +static const hv_sys_reg_t hvf_sreg_list[] = { +#include "sysreg.c.inc" }; +#undef DEF_SYSREG + int hvf_get_registers(CPUState *cpu) { ARMCPU *arm_cpu = ARM_CPU(cpu); @@ -553,7 +429,7 @@ int hvf_get_registers(CPUState *cpu) hv_return_t ret; uint64_t val; hv_simd_fp_uchar16_t fpval; - int i; + int i, n; for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { ret = hv_vcpu_get_reg(cpu->accel->fd, hvf_reg_match[i].reg, &val); @@ -582,14 +458,13 @@ int hvf_get_registers(CPUState *cpu) assert_hvf_ok(ret); pstate_write(env, val); - for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { - if (hvf_sreg_match[i].cp_idx == -1) { - continue; - } + for (i = 0, n = arm_cpu->cpreg_array_len; i < n; i++) { + uint64_t kvm_id = arm_cpu->cpreg_indexes[i]; + int hvf_id = KVMID_TO_HVF(kvm_id); if (cpu->accel->guest_debug_enabled) { /* Handle debug registers */ - switch (hvf_sreg_match[i].reg) { + switch (hvf_id) { case HV_SYS_REG_DBGBVR0_EL1: case HV_SYS_REG_DBGBCR0_EL1: case HV_SYS_REG_DBGWVR0_EL1: @@ -663,20 +538,22 @@ int hvf_get_registers(CPUState *cpu) * vCPU but simply keep the values from the previous * environment. 
*/ - const ARMCPRegInfo *ri; - ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_sreg_match[i].key); + uint32_t key = kvm_to_cpreg_id(kvm_id); + const ARMCPRegInfo *ri = + get_arm_cp_reginfo(arm_cpu->cp_regs, key); + val = read_raw_cp_reg(env, ri); - arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; + arm_cpu->cpreg_values[i] = val; continue; } } } - ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, &val); + ret = hv_vcpu_get_sys_reg(cpu->accel->fd, hvf_id, &val); assert_hvf_ok(ret); - arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val; + arm_cpu->cpreg_values[i] = val; } assert(write_list_to_cpustate(arm_cpu)); @@ -692,7 +569,7 @@ int hvf_put_registers(CPUState *cpu) hv_return_t ret; uint64_t val; hv_simd_fp_uchar16_t fpval; - int i; + int i, n; for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) { val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset); @@ -719,14 +596,13 @@ int hvf_put_registers(CPUState *cpu) aarch64_save_sp(env, arm_current_el(env)); assert(write_cpustate_to_list(arm_cpu, false)); - for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) { - if (hvf_sreg_match[i].cp_idx == -1) { - continue; - } + for (i = 0, n = arm_cpu->cpreg_array_len; i < n; i++) { + uint64_t kvm_id = arm_cpu->cpreg_indexes[i]; + int hvf_id = KVMID_TO_HVF(kvm_id); if (cpu->accel->guest_debug_enabled) { /* Handle debug registers */ - switch (hvf_sreg_match[i].reg) { + switch (hvf_id) { case HV_SYS_REG_DBGBVR0_EL1: case HV_SYS_REG_DBGBCR0_EL1: case HV_SYS_REG_DBGWVR0_EL1: @@ -800,8 +676,8 @@ int hvf_put_registers(CPUState *cpu) } } - val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx]; - ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_sreg_match[i].reg, val); + val = arm_cpu->cpreg_values[i]; + ret = hv_vcpu_set_sys_reg(cpu->accel->fd, hvf_id, val); assert_hvf_ok(ret); } @@ -868,6 +744,7 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) } regs[] = { { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.idregs[ID_AA64PFR0_EL1_IDX] }, { 
HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.idregs[ID_AA64PFR1_EL1_IDX] }, + /* Add ID_AA64PFR2_EL1 here when HVF supports it */ { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.idregs[ID_AA64DFR0_EL1_IDX] }, { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.idregs[ID_AA64DFR1_EL1_IDX] }, { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.idregs[ID_AA64ISAR0_EL1_IDX] }, @@ -1011,7 +888,7 @@ int hvf_arch_init_vcpu(CPUState *cpu) { ARMCPU *arm_cpu = ARM_CPU(cpu); CPUARMState *env = &arm_cpu->env; - uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match); + uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_list); uint32_t sregs_cnt = 0; uint64_t pfr; hv_return_t ret; @@ -1036,21 +913,22 @@ int hvf_arch_init_vcpu(CPUState *cpu) /* Populate cp list for all known sysregs */ for (i = 0; i < sregs_match_len; i++) { - const ARMCPRegInfo *ri; - uint32_t key = hvf_sreg_match[i].key; + hv_sys_reg_t hvf_id = hvf_sreg_list[i]; + uint64_t kvm_id = HVF_TO_KVMID(hvf_id); + uint32_t key = kvm_to_cpreg_id(kvm_id); + const ARMCPRegInfo *ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key); - ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key); if (ri) { assert(!(ri->type & ARM_CP_NO_RAW)); - hvf_sreg_match[i].cp_idx = sregs_cnt; - arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key); - } else { - hvf_sreg_match[i].cp_idx = -1; + arm_cpu->cpreg_indexes[sregs_cnt++] = kvm_id; } } arm_cpu->cpreg_array_len = sregs_cnt; arm_cpu->cpreg_vmstate_array_len = sregs_cnt; + /* cpreg tuples must be in strictly ascending order */ + qsort(arm_cpu->cpreg_indexes, sregs_cnt, sizeof(uint64_t), compare_u64); + assert(write_cpustate_to_list(arm_cpu, false)); /* Set CP_NO_RAW system registers on init */ @@ -1247,11 +1125,10 @@ static bool is_id_sysreg(uint32_t reg) static uint32_t hvf_reg2cp_reg(uint32_t reg) { - return ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, + return ENCODE_AA64_CP_REG((reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK, + (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK, (reg >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK, (reg >> 
SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK, - (reg >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK, - (reg >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK, (reg >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK); } @@ -1263,6 +1140,9 @@ static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val) ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg)); if (ri) { + if (!cp_access_ok(1, ri, true)) { + return false; + } if (ri->accessfn) { if (ri->accessfn(env, ri, true) != CP_ACCESS_OK) { return false; @@ -1273,7 +1153,7 @@ static bool hvf_sysreg_read_cp(CPUState *cpu, uint32_t reg, uint64_t *val) } else if (ri->readfn) { *val = ri->readfn(env, ri); } else { - *val = CPREG_FIELD64(env, ri); + *val = raw_read(env, ri); } trace_hvf_vgic_read(ri->name, *val); return true; @@ -1358,6 +1238,7 @@ static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint64_t *val) case SYSREG_ICC_IGRPEN0_EL1: case SYSREG_ICC_IGRPEN1_EL1: case SYSREG_ICC_PMR_EL1: + case SYSREG_ICC_RPR_EL1: case SYSREG_ICC_SGI0R_EL1: case SYSREG_ICC_SGI1R_EL1: case SYSREG_ICC_SRE_EL1: @@ -1543,6 +1424,9 @@ static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val) ri = get_arm_cp_reginfo(arm_cpu->cp_regs, hvf_reg2cp_reg(reg)); if (ri) { + if (!cp_access_ok(1, ri, false)) { + return false; + } if (ri->accessfn) { if (ri->accessfn(env, ri, false) != CP_ACCESS_OK) { return false; @@ -1551,7 +1435,7 @@ static bool hvf_sysreg_write_cp(CPUState *cpu, uint32_t reg, uint64_t val) if (ri->writefn) { ri->writefn(env, ri, val); } else { - CPREG_FIELD64(env, ri) = val; + raw_write(env, ri, val); } trace_hvf_vgic_write(ri->name, val); @@ -1650,6 +1534,9 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) case SYSREG_OSDLR_EL1: /* Dummy register */ return 0; + case SYSREG_LORC_EL1: + /* Dummy register */ + return 0; case SYSREG_ICC_AP0R0_EL1: case SYSREG_ICC_AP0R1_EL1: case SYSREG_ICC_AP0R2_EL1: @@ -1672,6 +1559,7 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) case 
SYSREG_ICC_IGRPEN0_EL1: case SYSREG_ICC_IGRPEN1_EL1: case SYSREG_ICC_PMR_EL1: + case SYSREG_ICC_RPR_EL1: case SYSREG_ICC_SGI0R_EL1: case SYSREG_ICC_SGI1R_EL1: case SYSREG_ICC_SRE_EL1: @@ -1770,13 +1658,13 @@ static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val) static int hvf_inject_interrupts(CPUState *cpu) { - if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_FIQ)) { trace_hvf_inject_fiq(); hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_FIQ, true); } - if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) { trace_hvf_inject_irq(); hv_vcpu_set_pending_interrupt(cpu->accel->fd, HV_INTERRUPT_TYPE_IRQ, true); @@ -1828,7 +1716,7 @@ static void hvf_wfi(CPUState *cpu) uint64_t nanos; uint32_t cntfrq; - if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) { /* Interrupt pending, no need to wait */ return; } @@ -2005,7 +1893,7 @@ int hvf_vcpu_exec(CPUState *cpu) uint32_t cm = (syndrome >> 8) & 0x1; uint64_t val = 0; - trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address, + trace_hvf_data_abort(hvf_exit->exception.virtual_address, hvf_exit->exception.physical_address, isv, iswrite, s1ptw, len, srt); diff --git a/target/arm/hvf/sysreg.c.inc b/target/arm/hvf/sysreg.c.inc new file mode 100644 index 0000000000000..067a8603fa785 --- /dev/null +++ b/target/arm/hvf/sysreg.c.inc @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +DEF_SYSREG(HV_SYS_REG_DBGBVR0_EL1, 2, 0, 0, 0, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR0_EL1, 2, 0, 0, 0, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR0_EL1, 2, 0, 0, 0, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR0_EL1, 2, 0, 0, 0, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR1_EL1, 2, 0, 0, 1, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR1_EL1, 2, 0, 0, 1, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR1_EL1, 2, 0, 0, 1, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR1_EL1, 2, 0, 0, 1, 7) + 
+DEF_SYSREG(HV_SYS_REG_DBGBVR2_EL1, 2, 0, 0, 2, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR2_EL1, 2, 0, 0, 2, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR2_EL1, 2, 0, 0, 2, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR2_EL1, 2, 0, 0, 2, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR3_EL1, 2, 0, 0, 3, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR3_EL1, 2, 0, 0, 3, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR3_EL1, 2, 0, 0, 3, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR3_EL1, 2, 0, 0, 3, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR4_EL1, 2, 0, 0, 4, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR4_EL1, 2, 0, 0, 4, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR4_EL1, 2, 0, 0, 4, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR4_EL1, 2, 0, 0, 4, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR5_EL1, 2, 0, 0, 5, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR5_EL1, 2, 0, 0, 5, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR5_EL1, 2, 0, 0, 5, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR5_EL1, 2, 0, 0, 5, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR6_EL1, 2, 0, 0, 6, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR6_EL1, 2, 0, 0, 6, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR6_EL1, 2, 0, 0, 6, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR6_EL1, 2, 0, 0, 6, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR7_EL1, 2, 0, 0, 7, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR7_EL1, 2, 0, 0, 7, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR7_EL1, 2, 0, 0, 7, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR7_EL1, 2, 0, 0, 7, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR8_EL1, 2, 0, 0, 8, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR8_EL1, 2, 0, 0, 8, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR8_EL1, 2, 0, 0, 8, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR8_EL1, 2, 0, 0, 8, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR9_EL1, 2, 0, 0, 9, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR9_EL1, 2, 0, 0, 9, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR9_EL1, 2, 0, 0, 9, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR9_EL1, 2, 0, 0, 9, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR10_EL1, 2, 0, 0, 10, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR10_EL1, 2, 0, 0, 10, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR10_EL1, 2, 0, 0, 10, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR10_EL1, 2, 0, 0, 10, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR11_EL1, 2, 0, 0, 11, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR11_EL1, 2, 0, 0, 11, 5) 
+DEF_SYSREG(HV_SYS_REG_DBGWVR11_EL1, 2, 0, 0, 11, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR11_EL1, 2, 0, 0, 11, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR12_EL1, 2, 0, 0, 12, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR12_EL1, 2, 0, 0, 12, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR12_EL1, 2, 0, 0, 12, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR12_EL1, 2, 0, 0, 12, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR13_EL1, 2, 0, 0, 13, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR13_EL1, 2, 0, 0, 13, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR13_EL1, 2, 0, 0, 13, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR13_EL1, 2, 0, 0, 13, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR14_EL1, 2, 0, 0, 14, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR14_EL1, 2, 0, 0, 14, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR14_EL1, 2, 0, 0, 14, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR14_EL1, 2, 0, 0, 14, 7) + +DEF_SYSREG(HV_SYS_REG_DBGBVR15_EL1, 2, 0, 0, 15, 4) +DEF_SYSREG(HV_SYS_REG_DBGBCR15_EL1, 2, 0, 0, 15, 5) +DEF_SYSREG(HV_SYS_REG_DBGWVR15_EL1, 2, 0, 0, 15, 6) +DEF_SYSREG(HV_SYS_REG_DBGWCR15_EL1, 2, 0, 0, 15, 7) + +#ifdef SYNC_NO_RAW_REGS +/* + * The registers below are manually synced on init because they are + * marked as NO_RAW. We still list them to make number space sync easier. + */ +DEF_SYSREG(HV_SYS_REG_MDCCINT_EL1, 2, 0, 0, 2, 0) +DEF_SYSREG(HV_SYS_REG_MIDR_EL1, 3, 0, 0, 0, 0) +DEF_SYSREG(HV_SYS_REG_MPIDR_EL1, 3, 0, 0, 0, 5) +DEF_SYSREG(HV_SYS_REG_ID_AA64PFR0_EL1, 3, 0, 0, 4, 0) +#endif + +DEF_SYSREG(HV_SYS_REG_ID_AA64PFR1_EL1, 3, 0, 0, 4, 1) +/* Add ID_AA64PFR2_EL1 here when HVF supports it */ +DEF_SYSREG(HV_SYS_REG_ID_AA64DFR0_EL1, 3, 0, 0, 5, 0) +DEF_SYSREG(HV_SYS_REG_ID_AA64DFR1_EL1, 3, 0, 0, 5, 1) +DEF_SYSREG(HV_SYS_REG_ID_AA64ISAR0_EL1, 3, 0, 0, 6, 0) +DEF_SYSREG(HV_SYS_REG_ID_AA64ISAR1_EL1, 3, 0, 0, 6, 1) + +#ifdef SYNC_NO_MMFR0 +/* We keep the hardware MMFR0 around. 
HW limits are there anyway */ +DEF_SYSREG(HV_SYS_REG_ID_AA64MMFR0_EL1, 3, 0, 0, 7, 0) +#endif + +DEF_SYSREG(HV_SYS_REG_ID_AA64MMFR1_EL1, 3, 0, 0, 7, 1) +DEF_SYSREG(HV_SYS_REG_ID_AA64MMFR2_EL1, 3, 0, 0, 7, 2) +/* Add ID_AA64MMFR3_EL1 here when HVF supports it */ + +DEF_SYSREG(HV_SYS_REG_MDSCR_EL1, 2, 0, 0, 2, 2) +DEF_SYSREG(HV_SYS_REG_SCTLR_EL1, 3, 0, 1, 0, 0) +DEF_SYSREG(HV_SYS_REG_CPACR_EL1, 3, 0, 1, 0, 2) +DEF_SYSREG(HV_SYS_REG_TTBR0_EL1, 3, 0, 2, 0, 0) +DEF_SYSREG(HV_SYS_REG_TTBR1_EL1, 3, 0, 2, 0, 1) +DEF_SYSREG(HV_SYS_REG_TCR_EL1, 3, 0, 2, 0, 2) + +DEF_SYSREG(HV_SYS_REG_APIAKEYLO_EL1, 3, 0, 2, 1, 0) +DEF_SYSREG(HV_SYS_REG_APIAKEYHI_EL1, 3, 0, 2, 1, 1) +DEF_SYSREG(HV_SYS_REG_APIBKEYLO_EL1, 3, 0, 2, 1, 2) +DEF_SYSREG(HV_SYS_REG_APIBKEYHI_EL1, 3, 0, 2, 1, 3) +DEF_SYSREG(HV_SYS_REG_APDAKEYLO_EL1, 3, 0, 2, 2, 0) +DEF_SYSREG(HV_SYS_REG_APDAKEYHI_EL1, 3, 0, 2, 2, 1) +DEF_SYSREG(HV_SYS_REG_APDBKEYLO_EL1, 3, 0, 2, 2, 2) +DEF_SYSREG(HV_SYS_REG_APDBKEYHI_EL1, 3, 0, 2, 2, 3) +DEF_SYSREG(HV_SYS_REG_APGAKEYLO_EL1, 3, 0, 2, 3, 0) +DEF_SYSREG(HV_SYS_REG_APGAKEYHI_EL1, 3, 0, 2, 3, 1) + +DEF_SYSREG(HV_SYS_REG_SPSR_EL1, 3, 0, 4, 0, 0) +DEF_SYSREG(HV_SYS_REG_ELR_EL1, 3, 0, 4, 0, 1) +DEF_SYSREG(HV_SYS_REG_SP_EL0, 3, 0, 4, 1, 0) +DEF_SYSREG(HV_SYS_REG_AFSR0_EL1, 3, 0, 5, 1, 0) +DEF_SYSREG(HV_SYS_REG_AFSR1_EL1, 3, 0, 5, 1, 1) +DEF_SYSREG(HV_SYS_REG_ESR_EL1, 3, 0, 5, 2, 0) +DEF_SYSREG(HV_SYS_REG_FAR_EL1, 3, 0, 6, 0, 0) +DEF_SYSREG(HV_SYS_REG_PAR_EL1, 3, 0, 7, 4, 0) +DEF_SYSREG(HV_SYS_REG_MAIR_EL1, 3, 0, 10, 2, 0) +DEF_SYSREG(HV_SYS_REG_AMAIR_EL1, 3, 0, 10, 3, 0) +DEF_SYSREG(HV_SYS_REG_VBAR_EL1, 3, 0, 12, 0, 0) +DEF_SYSREG(HV_SYS_REG_CONTEXTIDR_EL1, 3, 0, 13, 0, 1) +DEF_SYSREG(HV_SYS_REG_TPIDR_EL1, 3, 0, 13, 0, 4) +DEF_SYSREG(HV_SYS_REG_CNTKCTL_EL1, 3, 0, 14, 1, 0) +DEF_SYSREG(HV_SYS_REG_CSSELR_EL1, 3, 2, 0, 0, 0) +DEF_SYSREG(HV_SYS_REG_TPIDR_EL0, 3, 3, 13, 0, 2) +DEF_SYSREG(HV_SYS_REG_TPIDRRO_EL0, 3, 3, 13, 0, 3) +DEF_SYSREG(HV_SYS_REG_CNTV_CTL_EL0, 3, 3, 14, 3, 1) 
+DEF_SYSREG(HV_SYS_REG_CNTV_CVAL_EL0, 3, 3, 14, 3, 2) +DEF_SYSREG(HV_SYS_REG_SP_EL1, 3, 4, 4, 1, 0) diff --git a/target/arm/hvf/trace-events b/target/arm/hvf/trace-events index b49746f28d1d4..b29a995f3d305 100644 --- a/target/arm/hvf/trace-events +++ b/target/arm/hvf/trace-events @@ -2,7 +2,7 @@ hvf_unhandled_sysreg_read(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, hvf_unhandled_sysreg_write(uint64_t pc, uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2) "unhandled sysreg write at pc=0x%"PRIx64": 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d)" hvf_inject_fiq(void) "injecting FIQ" hvf_inject_irq(void) "injecting IRQ" -hvf_data_abort(uint64_t pc, uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [pc=0x%"PRIx64" va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" +hvf_data_abort(uint64_t va, uint64_t pa, bool isv, bool iswrite, bool s1ptw, uint32_t len, uint32_t srt) "data abort: [va=0x%016"PRIx64" pa=0x%016"PRIx64" isv=%d iswrite=%d s1ptw=%d len=%d srt=%d]" hvf_sysreg_read(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg read 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d) = 0x%016"PRIx64 hvf_sysreg_write(uint32_t reg, uint32_t op0, uint32_t op1, uint32_t crn, uint32_t crm, uint32_t op2, uint64_t val) "sysreg write 0x%08x (op0=%d op1=%d crn=%d crm=%d op2=%d, val=0x%016"PRIx64")" hvf_unknown_hvc(uint64_t pc, uint64_t x0) "pc=0x%"PRIx64" unknown HVC! 
0x%016"PRIx64 diff --git a/target/arm/internals.h b/target/arm/internals.h index c4765e44893e3..f539bbe58e129 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -34,6 +34,7 @@ #include "system/memory.h" #include "syndrome.h" #include "cpu-features.h" +#include "mmuidx-internal.h" /* register banks for CPU modes */ #define BANK_USRSYS 0 @@ -113,11 +114,6 @@ FIELD(DBGWCR, WT, 20, 1) FIELD(DBGWCR, MASK, 24, 5) FIELD(DBGWCR, SSCE, 29, 1) -#define VTCR_NSW (1u << 29) -#define VTCR_NSA (1u << 30) -#define VSTCR_SW VTCR_NSW -#define VSTCR_SA VTCR_NSA - /* Bit definitions for CPACR (AArch32 only) */ FIELD(CPACR, CP10, 20, 2) FIELD(CPACR, CP11, 22, 2) @@ -201,6 +197,24 @@ FIELD(CPTR_EL3, TCPAC, 31, 1) #define TTBCR_SH1 (1U << 28) #define TTBCR_EAE (1U << 31) +#define TCR2_PNCH (1ULL << 0) +#define TCR2_PIE (1ULL << 1) +#define TCR2_E0POE (1ULL << 2) +#define TCR2_POE (1ULL << 3) +#define TCR2_AIE (1ULL << 4) +#define TCR2_D128 (1ULL << 5) +#define TCR2_PTTWI (1ULL << 10) +#define TCR2_HAFT (1ULL << 11) +#define TCR2_AMEC0 (1ULL << 12) +#define TCR2_AMEC1 (1ULL << 13) +#define TCR2_DISCH0 (1ULL << 14) +#define TCR2_DISCH1 (1ULL << 15) +#define TCR2_A2 (1ULL << 16) +#define TCR2_FNG0 (1ULL << 17) +#define TCR2_FNG1 (1ULL << 18) +#define TCR2_FNGNA0 (1ULL << 20) +#define TCR2_FNGNA1 (1ULL << 21) + FIELD(VTCR, T0SZ, 0, 6) FIELD(VTCR, SL0, 6, 2) FIELD(VTCR, IRGN0, 8, 2) @@ -220,6 +234,9 @@ FIELD(VTCR, NSA, 30, 1) FIELD(VTCR, DS, 32, 1) FIELD(VTCR, SL2, 33, 1) +FIELD(VSTCR, SW, 29, 1) +FIELD(VSTCR, SA, 30, 1) + #define HCRX_ENAS0 (1ULL << 0) #define HCRX_ENALS (1ULL << 1) #define HCRX_ENASR (1ULL << 2) @@ -232,6 +249,9 @@ FIELD(VTCR, SL2, 33, 1) #define HCRX_CMOW (1ULL << 9) #define HCRX_MCE2 (1ULL << 10) #define HCRX_MSCEN (1ULL << 11) +#define HCRX_TCR2EN (1ULL << 14) +#define HCRX_SCTLR2EN (1ULL << 15) +#define HCRX_GCSEN (1ULL << 22) #define HPFAR_NS (1ULL << 63) @@ -286,14 +306,14 @@ FIELD(CNTHCTL, CNTPMASK, 19, 1) * and never returns because we will 
longjump back up to the CPU main loop. */ G_NORETURN void raise_exception(CPUARMState *env, uint32_t excp, - uint32_t syndrome, uint32_t target_el); + uint64_t syndrome, uint32_t target_el); /* * Similarly, but also use unwinding to restore cpu state. */ G_NORETURN void raise_exception_ra(CPUARMState *env, uint32_t excp, - uint32_t syndrome, uint32_t target_el, - uintptr_t ra); + uint64_t syndrome, uint32_t target_el, + uintptr_t ra); /* * For AArch64, map a given EL to an index in the banked_spsr array. @@ -734,6 +754,7 @@ struct ARMMMUFaultInfo { bool s1ptw; bool s1ns; bool ea; + bool dirtybit; /* FEAT_S1PIE, FEAT_S2PIE */ }; /** @@ -965,8 +986,6 @@ static inline ARMMMUIdx core_to_aa64_mmu_idx(int mmu_idx) return mmu_idx | ARM_MMU_IDX_A; } -int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx); - /* Return the MMU index for a v7M CPU in the specified security state */ ARMMMUIdx arm_v7m_mmu_idx_for_secstate(CPUARMState *env, bool secstate); @@ -1009,108 +1028,10 @@ static inline void arm_call_el_change_hook(ARMCPU *cpu) } } -/* - * Return true if this address translation regime has two ranges. - * Note that this will not return the correct answer for AArch32 - * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is - * never called from a context where EL3 can be AArch32. (The - * correct return value for ARMMMUIdx_E3 would be different for - * that case, so we can't just make the function return the - * correct value anyway; we would need an extra "bool e3_is_aarch32" - * argument which all the current callsites would pass as 'false'.) 
- */ -static inline bool regime_has_2_ranges(ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_Stage1_E1: - case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E20_2: - case ARMMMUIdx_E20_2_PAN: - return true; - default: - return false; - } -} - -static inline bool regime_is_pan(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_E10_1_PAN: - case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_E30_3_PAN: - return true; - default: - return false; - } -} - -static inline bool regime_is_stage2(ARMMMUIdx mmu_idx) -{ - return mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S; -} - -/* Return the exception level which controls this address translation regime */ -static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E20_2: - case ARMMMUIdx_E20_2_PAN: - case ARMMMUIdx_Stage2: - case ARMMMUIdx_Stage2_S: - case ARMMMUIdx_E2: - return 2; - case ARMMMUIdx_E3: - case ARMMMUIdx_E30_0: - case ARMMMUIdx_E30_3_PAN: - return 3; - case ARMMMUIdx_E10_0: - case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_Stage1_E1: - case ARMMMUIdx_Stage1_E1_PAN: - case ARMMMUIdx_E10_1: - case ARMMMUIdx_E10_1_PAN: - case ARMMMUIdx_MPrivNegPri: - case ARMMMUIdx_MUserNegPri: - case ARMMMUIdx_MPriv: - case ARMMMUIdx_MUser: - case ARMMMUIdx_MSPrivNegPri: - case ARMMMUIdx_MSUserNegPri: - case ARMMMUIdx_MSPriv: - case ARMMMUIdx_MSUser: - return 1; - default: - g_assert_not_reached(); - } -} - -static inline bool regime_is_user(CPUARMState *env, ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_E10_0: - case ARMMMUIdx_E20_0: - case ARMMMUIdx_E30_0: - case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_MUser: - case ARMMMUIdx_MSUser: - case ARMMMUIdx_MUserNegPri: - case ARMMMUIdx_MSUserNegPri: - return true; - default: - return false; - } -} - /* 
Return the SCTLR value which controls this address translation regime */ static inline uint64_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx) { - return env->cp15.sctlr_el[regime_el(env, mmu_idx)]; + return env->cp15.sctlr_el[regime_el(mmu_idx)]; } /* @@ -1142,13 +1063,13 @@ static inline uint64_t regime_tcr(CPUARMState *env, ARMMMUIdx mmu_idx) v |= env->cp15.vtcr_el2 & VTCR_SHARED_FIELD_MASK; return v; } - return env->cp15.tcr_el[regime_el(env, mmu_idx)]; + return env->cp15.tcr_el[regime_el(mmu_idx)]; } /* Return true if the translation regime is using LPAE format page tables */ static inline bool regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) { - int el = regime_el(env, mmu_idx); + int el = regime_el(mmu_idx); if (el == 2 || arm_el_is_aa64(env, el)) { return true; } @@ -1274,6 +1195,11 @@ static inline const char *aarch32_mode_name(uint32_t psr) return cpu_mode_names[psr & 0xf]; } +/** + * arm_cpu_exec_interrupt(): Implementation of the cpu_exec_inrerrupt hook. + */ +bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request); + /** * arm_cpu_update_virq: Update CPU_INTERRUPT_VIRQ bit in cs->interrupt_request * @@ -1355,25 +1281,6 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx); ARMMMUIdx arm_stage1_mmu_idx(CPUARMState *env); #endif -/** - * arm_mmu_idx_is_stage1_of_2: - * @mmu_idx: The ARMMMUIdx to test - * - * Return true if @mmu_idx is a NOTLB mmu_idx that is the - * first stage of a two stage regime. - */ -static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx mmu_idx) -{ - switch (mmu_idx) { - case ARMMMUIdx_Stage1_E0: - case ARMMMUIdx_Stage1_E1: - case ARMMMUIdx_Stage1_E1_PAN: - return true; - default: - return false; - } -} - static inline uint32_t aarch32_cpsr_valid_mask(uint64_t features, const ARMISARegisters *id) { @@ -1468,7 +1375,7 @@ static inline int arm_granule_bits(ARMGranuleSize gran) /* * Parameters of a given virtual address, as extracted from the - * translation control register (TCR) for a given regime. 
+ * translation controls for a given regime. */ typedef struct ARMVAParameters { unsigned tsz : 8; @@ -1483,6 +1390,7 @@ typedef struct ARMVAParameters { bool ha : 1; bool hd : 1; ARMGranuleSize gran : 2; + bool pie : 1; } ARMVAParameters; /** @@ -1553,6 +1461,13 @@ typedef struct ARMCacheAttrs { typedef struct GetPhysAddrResult { CPUTLBEntryFull f; ARMCacheAttrs cacheattrs; + /* + * For ARMMMUIdx_Stage2*, the protection installed into f.prot + * is the result for AccessType_TTW, i.e. the page table walk itself. + * The protection installed info s2prot is the one to be merged + * with the stage1 protection. + */ + int s2prot; } GetPhysAddrResult; /** @@ -1584,30 +1499,27 @@ bool get_phys_addr(CPUARMState *env, vaddr address, __attribute__((nonnull)); /** - * get_phys_addr_with_space_nogpc: get the physical address for a virtual - * address + * get_phys_addr_for_at: * @env: CPUARMState * @address: virtual address to get physical address for - * @access_type: 0 for read, 1 for write, 2 for execute - * @memop: memory operation feeding this access, or 0 for none + * @prot_check: PAGE_{READ,WRITE,EXEC}, or 0 * @mmu_idx: MMU index indicating required translation regime * @space: security space for the access * @result: set on translation success. * @fi: set to fault info if the translation fails * - * Similar to get_phys_addr, but use the given security space and don't perform - * a Granule Protection Check on the resulting address. + * Similar to get_phys_addr, but for use by AccessType_AT, i.e. + * system instructions for address translation. 
*/ -bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address, - MMUAccessType access_type, MemOp memop, - ARMMMUIdx mmu_idx, ARMSecuritySpace space, - GetPhysAddrResult *result, - ARMMMUFaultInfo *fi) +bool get_phys_addr_for_at(CPUARMState *env, vaddr address, unsigned prot_check, + ARMMMUIdx mmu_idx, ARMSecuritySpace space, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi) __attribute__((nonnull)); bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool is_secure, GetPhysAddrResult *result, + MMUAccessType access_type, unsigned prot_check, + ARMMMUIdx mmu_idx, bool is_secure, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi, uint32_t *mregion); void arm_log_exception(CPUState *cs); @@ -1623,19 +1535,13 @@ FIELD(PREDDESC, OPRSZ, 0, 6) FIELD(PREDDESC, ESZ, 6, 2) FIELD(PREDDESC, DATA, 8, 24) -/* - * The SVE simd_data field, for memory ops, contains either - * rd (5 bits) or a shift count (2 bits). - */ -#define SVE_MTEDESC_SHIFT 5 - /* Bits within a descriptor passed to the helper_mte_check* functions. 
*/ FIELD(MTEDESC, MIDX, 0, 4) FIELD(MTEDESC, TBI, 4, 2) FIELD(MTEDESC, TCMA, 6, 2) FIELD(MTEDESC, WRITE, 8, 1) FIELD(MTEDESC, ALIGN, 9, 3) -FIELD(MTEDESC, SIZEM1, 12, SIMD_DATA_BITS - SVE_MTEDESC_SHIFT - 12) /* size - 1 */ +FIELD(MTEDESC, SIZEM1, 12, 32 - 12) /* size - 1 */ bool mte_probe(CPUARMState *env, uint32_t desc, uint64_t ptr); uint64_t mte_check(CPUARMState *env, uint32_t desc, uint64_t ptr, uintptr_t ra); @@ -1808,8 +1714,11 @@ static inline uint64_t pmu_counter_mask(CPUARMState *env) } GDBFeature *arm_gen_dynamic_svereg_feature(CPUState *cpu, int base_reg); +GDBFeature *arm_gen_dynamic_smereg_feature(CPUState *cpu, int base_reg); int aarch64_gdb_get_sve_reg(CPUState *cs, GByteArray *buf, int reg); int aarch64_gdb_set_sve_reg(CPUState *cs, uint8_t *buf, int reg); +int aarch64_gdb_get_sme_reg(CPUState *cs, GByteArray *buf, int reg); +int aarch64_gdb_set_sme_reg(CPUState *cs, uint8_t *buf, int reg); int aarch64_gdb_get_fpu_reg(CPUState *cs, GByteArray *buf, int reg); int aarch64_gdb_set_fpu_reg(CPUState *cs, uint8_t *buf, int reg); int aarch64_gdb_get_pauth_reg(CPUState *cs, GByteArray *buf, int reg); @@ -1875,6 +1784,8 @@ void define_tlb_insn_regs(ARMCPU *cpu); void define_at_insn_regs(ARMCPU *cpu); /* Add the cpreg definitions for PM cpregs */ void define_pm_cpregs(ARMCPU *cpu); +/* Add the cpreg definitions for GCS cpregs */ +void define_gcs_cpregs(ARMCPU *cpu); /* Effective value of MDCR_EL2 */ static inline uint64_t arm_mdcr_el2_eff(CPUARMState *env) @@ -1986,5 +1897,13 @@ void vfp_clear_float_status_exc_flags(CPUARMState *env); */ void vfp_set_fpcr_to_host(CPUARMState *env, uint32_t val, uint32_t mask); bool arm_pan_enabled(CPUARMState *env); +uint32_t cpsr_read_for_spsr_elx(CPUARMState *env); +void cpsr_write_from_spsr_elx(CPUARMState *env, uint32_t val); + +/* Compare uint64_t for qsort and bsearch. */ +int compare_u64(const void *a, const void *b); + +/* Used in FEAT_MEC to set the MECIDWidthm1 field in the MECIDR_EL2 register. 
*/ +#define MECID_WIDTH 16 #endif diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h index c44d23dbe79c0..54ae5da7ce3ad 100644 --- a/target/arm/kvm-consts.h +++ b/target/arm/kvm-consts.h @@ -160,9 +160,6 @@ MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53); #define CP_REG_ARM64_SYSREG_OP2_MASK 0x0000000000000007 #define CP_REG_ARM64_SYSREG_OP2_SHIFT 0 -/* No kernel define but it's useful to QEMU */ -#define CP_REG_ARM64_SYSREG_CP (CP_REG_ARM64_SYSREG >> CP_REG_ARM_COPROC_SHIFT) - MISMATCH_CHECK(CP_REG_ARM64, KVM_REG_ARM64); MISMATCH_CHECK(CP_REG_ARM_COPROC_MASK, KVM_REG_ARM_COPROC_MASK); MISMATCH_CHECK(CP_REG_ARM_COPROC_SHIFT, KVM_REG_ARM_COPROC_SHIFT); @@ -180,4 +177,15 @@ MISMATCH_CHECK(CP_REG_ARM64_SYSREG_OP2_SHIFT, KVM_REG_ARM64_SYSREG_OP2_SHIFT); #undef MISMATCH_CHECK +#define KVMID_AA64_SYS_REG_(op0, op1, crn, crm, op2) \ + (CP_REG_AA64_MASK | CP_REG_ARM64_SYSREG | \ + ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) | \ + ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) | \ + ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) | \ + ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) | \ + ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT)) + +#define KVMID_AA64_SYS_REG64(op0, op1, crn, crm, op2) \ + (KVMID_AA64_SYS_REG_(op0, op1, crn, crm, op2) | CP_REG_SIZE_U64) + #endif diff --git a/target/arm/kvm.c b/target/arm/kvm.c index 667234485547a..0d57081e69fbb 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -324,6 +324,7 @@ static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) err = 0; } else { err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR1_EL1_IDX); + err |= get_host_cpu_reg(fd, ahcf, ID_AA64PFR2_EL1_IDX); err |= get_host_cpu_reg(fd, ahcf, ID_AA64SMFR0_EL1_IDX); err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR0_EL1_IDX); err |= get_host_cpu_reg(fd, ahcf, ID_AA64DFR1_EL1_IDX); @@ -718,17 +719,6 @@ void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group, memory_region_ref(kd->mr); } -static int compare_u64(const void *a, const void *b) 
-{ - if (*(uint64_t *)a > *(uint64_t *)b) { - return 1; - } - if (*(uint64_t *)a < *(uint64_t *)b) { - return -1; - } - return 0; -} - /* * cpreg_values are sorted in ascending order by KVM register ID * (see kvm_arm_init_cpreg_list). This allows us to cheaply find @@ -900,6 +890,58 @@ bool write_kvmstate_to_list(ARMCPU *cpu) return ok; } +/* pretty-print a KVM register */ +#define CP_REG_ARM64_SYSREG_OP(_reg, _op) \ + ((uint8_t)((_reg & CP_REG_ARM64_SYSREG_ ## _op ## _MASK) >> \ + CP_REG_ARM64_SYSREG_ ## _op ## _SHIFT)) + +static gchar *kvm_print_sve_register_name(uint64_t regidx) +{ + uint16_t sve_reg = regidx & 0x000000000000ffff; + + if (regidx == KVM_REG_ARM64_SVE_VLS) { + return g_strdup_printf("SVE VLS"); + } + /* zreg, preg, ffr */ + switch (sve_reg & 0xfc00) { + case 0: + return g_strdup_printf("SVE zreg n:%d slice:%d", + (sve_reg & 0x03e0) >> 5, sve_reg & 0x001f); + case 0x04: + return g_strdup_printf("SVE preg n:%d slice:%d", + (sve_reg & 0x01e0) >> 5, sve_reg & 0x001f); + case 0x06: + return g_strdup_printf("SVE ffr slice:%d", sve_reg & 0x001f); + default: + return g_strdup_printf("SVE ???"); + } +} + +static gchar *kvm_print_register_name(uint64_t regidx) +{ + switch ((regidx & KVM_REG_ARM_COPROC_MASK)) { + case KVM_REG_ARM_CORE: + return g_strdup_printf("core reg %"PRIx64, regidx); + case KVM_REG_ARM_DEMUX: + return g_strdup_printf("demuxed reg %"PRIx64, regidx); + case KVM_REG_ARM64_SYSREG: + return g_strdup_printf("op0:%d op1:%d crn:%d crm:%d op2:%d", + CP_REG_ARM64_SYSREG_OP(regidx, OP0), + CP_REG_ARM64_SYSREG_OP(regidx, OP1), + CP_REG_ARM64_SYSREG_OP(regidx, CRN), + CP_REG_ARM64_SYSREG_OP(regidx, CRM), + CP_REG_ARM64_SYSREG_OP(regidx, OP2)); + case KVM_REG_ARM_FW: + return g_strdup_printf("fw reg %d", (int)(regidx & 0xffff)); + case KVM_REG_ARM64_SVE: + return kvm_print_sve_register_name(regidx); + case KVM_REG_ARM_FW_FEAT_BMAP: + return g_strdup_printf("fw feat reg %d", (int)(regidx & 0xffff)); + default: + return g_strdup_printf("%"PRIx64, 
regidx); + } +} + bool write_list_to_kvmstate(ARMCPU *cpu, int level) { CPUState *cs = CPU(cpu); @@ -927,11 +969,45 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) g_assert_not_reached(); } if (ret) { + gchar *reg_str = kvm_print_register_name(regidx); + /* We might fail for "unknown register" and also for * "you tried to set a register which is constant with * a different value from what it actually contains". */ ok = false; + switch (ret) { + case -ENOENT: + error_report("Could not set register %s: unknown to KVM", + reg_str); + break; + case -EINVAL: + if ((regidx & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { + if (!kvm_get_one_reg(cs, regidx, &v32)) { + error_report("Could not set register %s to %x (is %x)", + reg_str, (uint32_t)cpu->cpreg_values[i], + v32); + } else { + error_report("Could not set register %s to %x", + reg_str, (uint32_t)cpu->cpreg_values[i]); + } + } else /* U64 */ { + uint64_t v64; + + if (!kvm_get_one_reg(cs, regidx, &v64)) { + error_report("Could not set register %s to %"PRIx64" (is %"PRIx64")", + reg_str, cpu->cpreg_values[i], v64); + } else { + error_report("Could not set register %s to %"PRIx64, + reg_str, cpu->cpreg_values[i]); + } + } + break; + default: + error_report("Could not set register %s: %s", + reg_str, strerror(-ret)); + } + g_free(reg_str); } } return ok; @@ -2047,7 +2123,7 @@ static int kvm_arch_put_sve(CPUState *cs) return 0; } -int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp) { uint64_t val; uint32_t fpr; @@ -2357,10 +2433,12 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) { ram_addr_t ram_addr; hwaddr paddr; + AcpiGhesState *ags; assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); - if (acpi_ghes_present() && addr) { + ags = acpi_ghes_get_state(); + if (ags && addr) { ram_addr = qemu_ram_addr_from_host(addr); if (ram_addr != RAM_ADDR_INVALID && kvm_physical_memory_addr_from_host(c->kvm_state, addr, 
&paddr)) { @@ -2378,7 +2456,8 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) */ if (code == BUS_MCEERR_AR) { kvm_cpu_synchronize_state(c); - if (!acpi_ghes_memory_errors(ACPI_HEST_SRC_ID_SEA, paddr)) { + if (!acpi_ghes_memory_errors(ags, ACPI_HEST_SRC_ID_SYNC, + paddr)) { kvm_inject_arm_sea(c); } else { error_report("failed to record the error"); diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index b4cad051551fb..6a9b6374a6df7 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -161,6 +161,14 @@ void kvm_arm_add_vcpu_properties(ARMCPU *cpu); */ void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp); +/* + * These "is some KVM subfeature enabled?" functions may be called + * when KVM support is not present, including in the user-mode + * emulators. The kvm-stub.c file is only built into the system + * emulators, so for user-mode emulation we provide "always false" + * stubs here. + */ +#ifndef CONFIG_USER_ONLY /** * kvm_arm_aarch32_supported: * @@ -197,6 +205,33 @@ bool kvm_arm_mte_supported(void); * Returns true if KVM can enable EL2 and false otherwise. 
*/ bool kvm_arm_el2_supported(void); +#else + +static inline bool kvm_arm_aarch32_supported(void) +{ + return false; +} + +static inline bool kvm_arm_pmu_supported(void) +{ + return false; +} + +static inline bool kvm_arm_sve_supported(void) +{ + return false; +} + +static inline bool kvm_arm_mte_supported(void) +{ + return false; +} + +static inline bool kvm_arm_el2_supported(void) +{ + return false; +} +#endif /** * kvm_arm_get_max_vm_ipa_size: diff --git a/target/arm/machine.c b/target/arm/machine.c index 6986915bee876..44a0cf844b0bc 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -221,26 +221,6 @@ static const VMStateDescription vmstate_vfp = { } }; -static bool iwmmxt_needed(void *opaque) -{ - ARMCPU *cpu = opaque; - CPUARMState *env = &cpu->env; - - return arm_feature(env, ARM_FEATURE_IWMMXT); -} - -static const VMStateDescription vmstate_iwmmxt = { - .name = "cpu/iwmmxt", - .version_id = 1, - .minimum_version_id = 1, - .needed = iwmmxt_needed, - .fields = (const VMStateField[]) { - VMSTATE_UINT64_ARRAY(env.iwmmxt.regs, ARMCPU, 16), - VMSTATE_UINT32_ARRAY(env.iwmmxt.cregs, ARMCPU, 16), - VMSTATE_END_OF_LIST() - } -}; - /* The expression ARM_MAX_VQ - 2 is 0 for pure AArch32 build, * and ARMPredicateReg is actively empty. This triggers errors * in the expansion of the VMSTATE macros. 
@@ -836,6 +816,80 @@ static const VMStateInfo vmstate_cpsr = { .put = put_cpsr, }; +static int get_pstate64(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field) +{ + ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; + uint64_t val = qemu_get_be64(f); + + env->aarch64 = ((val & PSTATE_nRW) == 0); + if (is_a64(env)) { + pstate_write(env, val); + } else { + cpsr_write_from_spsr_elx(env, val); + } + return 0; +} + +static int put_pstate64(QEMUFile *f, void *opaque, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; + uint64_t val; + + if (is_a64(env)) { + val = pstate_read(env); + } else { + val = cpsr_read_for_spsr_elx(env); + } + qemu_put_be64(f, val); + return 0; +} + +static bool pstate64_needed(void *opaque) +{ + ARMCPU *cpu = opaque; + CPUARMState *env = &cpu->env; + uint64_t val; + + if (arm_feature(env, ARM_FEATURE_M)) { + return false; + } + if (is_a64(env)) { + val = pstate_read(env); + } else { + val = cpsr_read_for_spsr_elx(env); + if (val & PSTATE_SS) { + return true; + } + } + return val > UINT32_MAX; +} + +static const VMStateDescription vmstate_pstate64 = { + .name = "cpu/pstate64", + .version_id = 1, + .minimum_version_id = 1, + .needed = pstate64_needed, + .fields = (const VMStateField[]) { + { + .name = "pstate64", + .version_id = 0, + .size = sizeof(uint64_t), + .info = &(const VMStateInfo) { + .name = "pstate64", + .get = get_pstate64, + .put = put_pstate64, + }, + .flags = VMS_SINGLE, + .offset = 0, + }, + VMSTATE_END_OF_LIST() + }, +}; + static int get_power(QEMUFile *f, void *opaque, size_t size, const VMStateField *field) { @@ -868,6 +922,23 @@ static const VMStateInfo vmstate_powered_off = { .put = put_power, }; +static bool syndrome64_needed(void *opaque) +{ + ARMCPU *cpu = opaque; + return cpu->env.exception.syndrome > UINT32_MAX; +} + +static const VMStateDescription vmstate_syndrome64 = { + .name = "cpu/syndrome64", + .version_id = 1, + 
.minimum_version_id = 1, + .needed = syndrome64_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT64(env.exception.syndrome, ARMCPU), + VMSTATE_END_OF_LIST() + }, +}; + static int cpu_pre_save(void *opaque) { ARMCPU *cpu = opaque; @@ -1055,6 +1126,12 @@ const VMStateDescription vmstate_arm_cpu = { VMSTATE_UINT32_ARRAY(env.regs, ARMCPU, 16), VMSTATE_UINT64_ARRAY(env.xregs, ARMCPU, 32), VMSTATE_UINT64(env.pc, ARMCPU), + /* + * If any bits are set in the upper 32 bits of cpsr/pstate, + * or if the cpu is in aa32 mode and PSTATE.SS is set, then + * the cpu/pstate64 subsection will override this with the + * full 64 bit state. + */ { .name = "cpsr", .version_id = 0, @@ -1085,7 +1162,19 @@ const VMStateDescription vmstate_arm_cpu = { VMSTATE_UINT64(env.exclusive_val, ARMCPU), VMSTATE_UINT64(env.exclusive_high, ARMCPU), VMSTATE_UNUSED(sizeof(uint64_t)), - VMSTATE_UINT32(env.exception.syndrome, ARMCPU), + /* + * If any bits are set in the upper 32 bits of syndrome, + * then the cpu/syndrome64 subsection will override this + * with the full 64 bit state. 
+ */ + { + .name = "env.exception.syndrome", + .version_id = 0, + .size = sizeof(uint32_t), + .info = &vmstate_info_uint32, + .flags = VMS_SINGLE, + .offset = offsetoflow32(ARMCPU, env.exception.syndrome), + }, VMSTATE_UINT32(env.exception.fsr, ARMCPU), VMSTATE_UINT64(env.exception.vaddress, ARMCPU), VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU), @@ -1102,7 +1191,6 @@ const VMStateDescription vmstate_arm_cpu = { }, .subsections = (const VMStateDescription * const []) { &vmstate_vfp, - &vmstate_iwmmxt, &vmstate_m, &vmstate_thumb2ee, /* pmsav7_rnr must come before pmsav7 so that we have the @@ -1119,6 +1207,8 @@ const VMStateDescription vmstate_arm_cpu = { &vmstate_serror, &vmstate_irq_line_state, &vmstate_wfxt_timer, + &vmstate_syndrome64, + &vmstate_pstate64, NULL } }; diff --git a/target/arm/meson.build b/target/arm/meson.build index 07d9271aa4d88..3df7e03654ee6 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -6,7 +6,12 @@ arm_ss.add(files( arm_ss.add(when: 'TARGET_AARCH64', if_true: files( 'cpu64.c', - 'gdbstub64.c')) + 'gdbstub64.c' +)) + +arm_common_ss.add(files( + 'mmuidx.c', +)) arm_system_ss = ss.source_set() arm_common_system_ss = ss.source_set() @@ -22,22 +27,30 @@ arm_user_ss.add(when: 'TARGET_AARCH64', if_false: files( 'cpu32-stubs.c', )) arm_user_ss.add(files( + 'cpregs-gcs.c', 'cpregs-pmu.c', 'debug_helper.c', 'helper.c', 'vfp_fpscr.c', + 'el2-stubs.c', )) +arm_user_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', + if_true: files('common-semi-target.c')) arm_common_system_ss.add(files('cpu.c')) arm_common_system_ss.add(when: 'TARGET_AARCH64', if_false: files( 'cpu32-stubs.c')) arm_common_system_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c')) arm_common_system_ss.add(when: 'CONFIG_HVF', if_false: files('hvf-stub.c')) +arm_common_system_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', + if_true: files('common-semi-target.c')) arm_common_system_ss.add(files( 'arch_dump.c', 'arm-powerctl.c', 'cortex-regs.c', + 
'cpregs-gcs.c', 'cpregs-pmu.c', + 'cpu-irq.c', 'debug_helper.c', 'helper.c', 'machine.c', diff --git a/target/arm/mmuidx-internal.h b/target/arm/mmuidx-internal.h new file mode 100644 index 0000000000000..962b0538526e1 --- /dev/null +++ b/target/arm/mmuidx-internal.h @@ -0,0 +1,113 @@ +/* + * QEMU Arm software mmu index internal definitions + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_ARM_MMUIDX_INTERNAL_H +#define TARGET_ARM_MMUIDX_INTERNAL_H + +#include "mmuidx.h" +#include "tcg/debug-assert.h" +#include "hw/registerfields.h" + + +FIELD(MMUIDXINFO, EL, 0, 2) +FIELD(MMUIDXINFO, ELVALID, 2, 1) +FIELD(MMUIDXINFO, REL, 3, 2) +FIELD(MMUIDXINFO, RELVALID, 5, 1) +FIELD(MMUIDXINFO, 2RANGES, 6, 1) +FIELD(MMUIDXINFO, PAN, 7, 1) +FIELD(MMUIDXINFO, USER, 8, 1) +FIELD(MMUIDXINFO, STAGE1, 9, 1) +FIELD(MMUIDXINFO, STAGE2, 10, 1) +FIELD(MMUIDXINFO, GCS, 11, 1) +FIELD(MMUIDXINFO, TG, 12, 5) + +extern const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8]; + +#define arm_mmuidx_is_valid(x) ((unsigned)(x) < ARRAY_SIZE(arm_mmuidx_table)) + +/* Return the exception level associated with this mmu index. */ +static inline int arm_mmu_idx_to_el(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + tcg_debug_assert(FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, ELVALID)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, EL); +} + +/* + * Return the exception level for the address translation regime + * associated with this mmu index. + */ +static inline uint32_t regime_el(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + tcg_debug_assert(FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, RELVALID)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, REL); +} + +/* + * Return true if this address translation regime has two ranges. + * Note that this will not return the correct answer for AArch32 + * Secure PL1&0 (i.e. mmu indexes E3, E30_0, E30_3_PAN), but it is + * never called from a context where EL3 can be AArch32. 
(The + * correct return value for ARMMMUIdx_E3 would be different for + * that case, so we can't just make the function return the + * correct value anyway; we would need an extra "bool e3_is_aarch32" + * argument which all the current callsites would pass as 'false'.) + */ +static inline bool regime_has_2_ranges(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, 2RANGES); +} + +/* Return true if Privileged Access Never is enabled for this mmu index. */ +static inline bool regime_is_pan(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, PAN); +} + +/* + * Return true if the exception level associated with this mmu index is 0. + * Differs from arm_mmu_idx_to_el(idx) == 0 in that this allows querying + * Stage1 and Stage2 mmu indexes. + */ +static inline bool regime_is_user(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, USER); +} + +/* Return true if this mmu index is stage 1 of a 2-stage translation. */ +static inline bool arm_mmu_idx_is_stage1_of_2(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE1); +} + +/* Return true if this mmu index is stage 2 of a 2-stage translation. */ +static inline bool regime_is_stage2(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, STAGE2); +} + +/* Return true if this mmu index implies AccessType_GCS. */ +static inline bool regime_is_gcs(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + return FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, GCS); +} + +/* Return the GCS MMUIdx for a given regime. 
*/ +static inline ARMMMUIdx regime_to_gcs(ARMMMUIdx idx) +{ + tcg_debug_assert(arm_mmuidx_is_valid(idx)); + uint32_t core = FIELD_EX32(arm_mmuidx_table[idx], MMUIDXINFO, TG); + tcg_debug_assert(core != 0); /* core 0 is E10_0, not a GCS index */ + return core | ARM_MMU_IDX_A; +} + +#endif /* TARGET_ARM_MMUIDX_INTERNAL_H */ diff --git a/target/arm/mmuidx.c b/target/arm/mmuidx.c new file mode 100644 index 0000000000000..a4663c8d87205 --- /dev/null +++ b/target/arm/mmuidx.c @@ -0,0 +1,66 @@ +/* + * QEMU Arm software mmu index definitions + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "mmuidx-internal.h" + + +#define EL(X) ((X << R_MMUIDXINFO_EL_SHIFT) | R_MMUIDXINFO_ELVALID_MASK | \ + ((X == 0) << R_MMUIDXINFO_USER_SHIFT)) +#define REL(X) ((X << R_MMUIDXINFO_REL_SHIFT) | R_MMUIDXINFO_RELVALID_MASK) +#define R2 R_MMUIDXINFO_2RANGES_MASK +#define PAN R_MMUIDXINFO_PAN_MASK +#define USER R_MMUIDXINFO_USER_MASK +#define S1 R_MMUIDXINFO_STAGE1_MASK +#define S2 R_MMUIDXINFO_STAGE2_MASK +#define GCS R_MMUIDXINFO_GCS_MASK +#define TG(X) \ + ((ARMMMUIdx_##X##_GCS & ARM_MMU_IDX_COREIDX_MASK) << R_MMUIDXINFO_TG_SHIFT) + +const uint32_t arm_mmuidx_table[ARM_MMU_IDX_M + 8] = { + /* + * A-profile. 
+ */ + [ARMMMUIdx_E10_0] = EL(0) | REL(1) | R2 | TG(E10_0), + [ARMMMUIdx_E10_0_GCS] = EL(0) | REL(1) | R2 | GCS, + [ARMMMUIdx_E10_1] = EL(1) | REL(1) | R2 | TG(E10_1), + [ARMMMUIdx_E10_1_PAN] = EL(1) | REL(1) | R2 | TG(E10_1) | PAN, + [ARMMMUIdx_E10_1_GCS] = EL(1) | REL(1) | R2 | GCS, + + [ARMMMUIdx_E20_0] = EL(0) | REL(2) | R2 | TG(E20_0), + [ARMMMUIdx_E20_0_GCS] = EL(0) | REL(2) | R2 | GCS, + [ARMMMUIdx_E20_2] = EL(2) | REL(2) | R2 | TG(E20_2), + [ARMMMUIdx_E20_2_PAN] = EL(2) | REL(2) | R2 | TG(E20_2) | PAN, + [ARMMMUIdx_E20_2_GCS] = EL(2) | REL(2) | R2 | GCS, + + [ARMMMUIdx_E2] = EL(2) | REL(2) | TG(E2), + [ARMMMUIdx_E2_GCS] = EL(2) | REL(2) | GCS, + + [ARMMMUIdx_E3] = EL(3) | REL(3) | TG(E3), + [ARMMMUIdx_E3_GCS] = EL(3) | REL(3) | GCS, + [ARMMMUIdx_E30_0] = EL(0) | REL(3), + [ARMMMUIdx_E30_3_PAN] = EL(3) | REL(3) | PAN, + + [ARMMMUIdx_Stage2_S] = REL(2) | S2, + [ARMMMUIdx_Stage2] = REL(2) | S2, + + [ARMMMUIdx_Stage1_E0] = REL(1) | R2 | S1 | USER | TG(Stage1_E0), + [ARMMMUIdx_Stage1_E0_GCS] = REL(1) | R2 | S1 | USER | GCS, + [ARMMMUIdx_Stage1_E1] = REL(1) | R2 | S1 | TG(Stage1_E1), + [ARMMMUIdx_Stage1_E1_PAN] = REL(1) | R2 | S1 | TG(Stage1_E1) | PAN, + [ARMMMUIdx_Stage1_E1_GCS] = REL(1) | R2 | S1 | GCS, + + /* + * M-profile. 
+ */ + [ARMMMUIdx_MUser] = EL(0) | REL(1), + [ARMMMUIdx_MPriv] = EL(1) | REL(1), + [ARMMMUIdx_MUserNegPri] = EL(0) | REL(1), + [ARMMMUIdx_MPrivNegPri] = EL(1) | REL(1), + [ARMMMUIdx_MSUser] = EL(0) | REL(1), + [ARMMMUIdx_MSPriv] = EL(1) | REL(1), + [ARMMMUIdx_MSUserNegPri] = EL(0) | REL(1), + [ARMMMUIdx_MSPrivNegPri] = EL(1) | REL(1), +}; diff --git a/target/arm/mmuidx.h b/target/arm/mmuidx.h new file mode 100644 index 0000000000000..8d8d27337e0d1 --- /dev/null +++ b/target/arm/mmuidx.h @@ -0,0 +1,241 @@ +/* + * QEMU Arm software mmu index definitions + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef TARGET_ARM_MMUIDX_H +#define TARGET_ARM_MMUIDX_H + +/* + * Arm has the following "translation regimes" (as the Arm ARM calls them): + * + * If EL3 is 64-bit: + * + NonSecure EL1 & 0 stage 1 + * + NonSecure EL1 & 0 stage 2 + * + NonSecure EL2 + * + NonSecure EL2 & 0 (ARMv8.1-VHE) + * + Secure EL1 & 0 stage 1 + * + Secure EL1 & 0 stage 2 (FEAT_SEL2) + * + Secure EL2 (FEAT_SEL2) + * + Secure EL2 & 0 (FEAT_SEL2) + * + Realm EL1 & 0 stage 1 (FEAT_RME) + * + Realm EL1 & 0 stage 2 (FEAT_RME) + * + Realm EL2 (FEAT_RME) + * + EL3 + * If EL3 is 32-bit: + * + NonSecure PL1 & 0 stage 1 + * + NonSecure PL1 & 0 stage 2 + * + NonSecure PL2 + * + Secure PL1 & 0 + * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.) + * + * For QEMU, an mmu_idx is not quite the same as a translation regime because: + * 1. we need to split the "EL1 & 0" and "EL2 & 0" regimes into two mmu_idxes, + * because they may differ in access permissions even if the VA->PA map is + * the same + * 2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2 + * translation, which means that we have one mmu_idx that deals with two + * concatenated translation regimes [this sort of combined s1+2 TLB is + * architecturally permitted] + * 3. we don't need to allocate an mmu_idx to translations that we won't be + * handling via the TLB. 
The only way to do a stage 1 translation without + * the immediate stage 2 translation is via the ATS or AT system insns, + * which can be slow-pathed and always do a page table walk. + * The only use of stage 2 translations is either as part of an s1+2 + * lookup or when loading the descriptors during a stage 1 page table walk, + * and in both those cases we don't use the TLB. + * 4. we can also safely fold together the "32 bit EL3" and "64 bit EL3" + * translation regimes, because they map reasonably well to each other + * and they can't both be active at the same time. + * 5. we want to be able to use the TLB for accesses done as part of a + * stage1 page table walk, rather than having to walk the stage2 page + * table over and over. + * 6. we need separate EL1/EL2 mmu_idx for handling the Privileged Access + * Never (PAN) bit within PSTATE. + * 7. we fold together most secure and non-secure regimes for A-profile, + * because there are no banked system registers for aarch64, so the + * process of switching between secure and non-secure is + * already heavyweight. + * 8. we cannot fold together Stage 2 Secure and Stage 2 NonSecure, + * because both are in use simultaneously for Secure EL2. + * 9. we need separate indexes for handling AccessType_GCS. + * + * This gives us the following list of cases: + * + * EL0 EL1&0 stage 1+2 (aka NS PL0 PL1&0 stage 1+2) + * EL0 EL1&0 stage 1+2 +GCS + * EL1 EL1&0 stage 1+2 (aka NS PL1 PL1&0 stage 1+2) + * EL1 EL1&0 stage 1+2 +PAN (aka NS PL1 P1&0 stage 1+2 +PAN) + * EL1 EL1&0 stage 1+2 +GCS + * EL0 EL2&0 + * EL0 EL2&0 +GCS + * EL2 EL2&0 + * EL2 EL2&0 +PAN + * EL2 EL2&0 +GCS + * EL2 (aka NS PL2) + * EL2 +GCS + * EL3 (aka AArch32 S PL1 PL1&0) + * EL3 +GCS + * AArch32 S PL0 PL1&0 (we call this EL30_0) + * AArch32 S PL1 PL1&0 +PAN (we call this EL30_3_PAN) + * Stage2 Secure + * Stage2 NonSecure + * plus one TLB per Physical address space: S, NS, Realm, Root + * + * for a total of 22 different mmu_idx. 
+ * + * R profile CPUs have an MPU, but can use the same set of MMU indexes + * as A profile. They only need to distinguish EL0 and EL1 (and + * EL2 for cores like the Cortex-R52). + * + * M profile CPUs are rather different as they do not have a true MMU. + * They have the following different MMU indexes: + * User + * Privileged + * User, execution priority negative (ie the MPU HFNMIENA bit may apply) + * Privileged, execution priority negative (ditto) + * If the CPU supports the v8M Security Extension then there are also: + * Secure User + * Secure Privileged + * Secure User, execution priority negative + * Secure Privileged, execution priority negative + * + * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code + * are not quite the same -- different CPU types (most notably M profile + * vs A/R profile) would like to use MMU indexes with different semantics, + * but since we don't ever need to use all of those in a single CPU we + * can avoid having to set NB_MMU_MODES to "total number of A profile MMU + * modes + total number of M profile MMU modes". The lower bits of + * ARMMMUIdx are the core TLB mmu index, and the higher bits are always + * the same for any particular CPU. + * Variables of type ARMMUIdx are always full values, and the core + * index values are in variables of type 'int'. + * + * Our enumeration includes at the end some entries which are not "true" + * mmu_idx values in that they don't have corresponding TLBs and are only + * valid for doing slow path page table walks. + * + * The constant names here are patterned after the general style of the names + * of the AT/ATS operations. + * The values used are carefully arranged to make mmu_idx => EL lookup easy. + * For M profile we arrange them to have a bit for priv, a bit for negpri + * and a bit for secure. 
+ */ +#define ARM_MMU_IDX_A 0x20 /* A profile */ +#define ARM_MMU_IDX_NOTLB 0x40 /* does not have a TLB */ +#define ARM_MMU_IDX_M 0x80 /* M profile */ + +/* Meanings of the bits for M profile mmu idx values */ +#define ARM_MMU_IDX_M_PRIV 0x1 +#define ARM_MMU_IDX_M_NEGPRI 0x2 +#define ARM_MMU_IDX_M_S 0x4 /* Secure */ + +#define ARM_MMU_IDX_TYPE_MASK \ + (ARM_MMU_IDX_A | ARM_MMU_IDX_M | ARM_MMU_IDX_NOTLB) +#define ARM_MMU_IDX_COREIDX_MASK 0x1f + +typedef enum ARMMMUIdx { + /* + * A-profile. + */ + + ARMMMUIdx_E10_0 = 0 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_0_GCS = 1 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_1 = 2 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_1_PAN = 3 | ARM_MMU_IDX_A, + ARMMMUIdx_E10_1_GCS = 4 | ARM_MMU_IDX_A, + + ARMMMUIdx_E20_0 = 5 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_0_GCS = 6 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2 = 7 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2_PAN = 8 | ARM_MMU_IDX_A, + ARMMMUIdx_E20_2_GCS = 9 | ARM_MMU_IDX_A, + + ARMMMUIdx_E2 = 10 | ARM_MMU_IDX_A, + ARMMMUIdx_E2_GCS = 11 | ARM_MMU_IDX_A, + + ARMMMUIdx_E3 = 12 | ARM_MMU_IDX_A, + ARMMMUIdx_E3_GCS = 13 | ARM_MMU_IDX_A, + ARMMMUIdx_E30_0 = 14 | ARM_MMU_IDX_A, + ARMMMUIdx_E30_3_PAN = 15 | ARM_MMU_IDX_A, + + /* + * Used for second stage of an S12 page table walk, or for descriptor + * loads during first stage of an S1 page table walk. Note that both + * are in use simultaneously for SecureEL2: the security state for + * the S2 ptw is selected by the NS bit from the S1 ptw. + */ + ARMMMUIdx_Stage2_S = 16 | ARM_MMU_IDX_A, + ARMMMUIdx_Stage2 = 17 | ARM_MMU_IDX_A, + + /* TLBs with 1-1 mapping to the physical address spaces. */ + ARMMMUIdx_Phys_S = 18 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_NS = 19 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_Root = 20 | ARM_MMU_IDX_A, + ARMMMUIdx_Phys_Realm = 21 | ARM_MMU_IDX_A, + + /* + * These are not allocated TLBs and are used only for AT system + * instructions or for the first stage of an S12 page table walk. 
+ */ + ARMMMUIdx_Stage1_E0 = 0 | ARM_MMU_IDX_NOTLB, + ARMMMUIdx_Stage1_E1 = 1 | ARM_MMU_IDX_NOTLB, + ARMMMUIdx_Stage1_E1_PAN = 2 | ARM_MMU_IDX_NOTLB, + ARMMMUIdx_Stage1_E0_GCS = 3 | ARM_MMU_IDX_NOTLB, + ARMMMUIdx_Stage1_E1_GCS = 4 | ARM_MMU_IDX_NOTLB, + + /* + * M-profile. + */ + ARMMMUIdx_MUser = ARM_MMU_IDX_M, + ARMMMUIdx_MPriv = ARM_MMU_IDX_M | ARM_MMU_IDX_M_PRIV, + ARMMMUIdx_MUserNegPri = ARMMMUIdx_MUser | ARM_MMU_IDX_M_NEGPRI, + ARMMMUIdx_MPrivNegPri = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_NEGPRI, + ARMMMUIdx_MSUser = ARMMMUIdx_MUser | ARM_MMU_IDX_M_S, + ARMMMUIdx_MSPriv = ARMMMUIdx_MPriv | ARM_MMU_IDX_M_S, + ARMMMUIdx_MSUserNegPri = ARMMMUIdx_MUserNegPri | ARM_MMU_IDX_M_S, + ARMMMUIdx_MSPrivNegPri = ARMMMUIdx_MPrivNegPri | ARM_MMU_IDX_M_S, +} ARMMMUIdx; + +/* + * Bit macros for the core-mmu-index values for each index, + * for use when calling tlb_flush_by_mmuidx() and friends. + */ +#define TO_CORE_BIT(NAME) \ + ARMMMUIdxBit_##NAME = 1 << (ARMMMUIdx_##NAME & ARM_MMU_IDX_COREIDX_MASK) + +typedef enum ARMMMUIdxBit { + TO_CORE_BIT(E10_0), + TO_CORE_BIT(E10_0_GCS), + TO_CORE_BIT(E10_1), + TO_CORE_BIT(E10_1_PAN), + TO_CORE_BIT(E10_1_GCS), + TO_CORE_BIT(E20_0), + TO_CORE_BIT(E20_0_GCS), + TO_CORE_BIT(E20_2), + TO_CORE_BIT(E20_2_PAN), + TO_CORE_BIT(E20_2_GCS), + TO_CORE_BIT(E2), + TO_CORE_BIT(E2_GCS), + TO_CORE_BIT(E3), + TO_CORE_BIT(E3_GCS), + TO_CORE_BIT(E30_0), + TO_CORE_BIT(E30_3_PAN), + TO_CORE_BIT(Stage2), + TO_CORE_BIT(Stage2_S), + + TO_CORE_BIT(MUser), + TO_CORE_BIT(MPriv), + TO_CORE_BIT(MUserNegPri), + TO_CORE_BIT(MPrivNegPri), + TO_CORE_BIT(MSUser), + TO_CORE_BIT(MSPriv), + TO_CORE_BIT(MSUserNegPri), + TO_CORE_BIT(MSPrivNegPri), +} ARMMMUIdxBit; + +#undef TO_CORE_BIT + +#define MMU_USER_IDX 0 + +#endif /* TARGET_ARM_MMUIDX_H */ diff --git a/target/arm/ptw.c b/target/arm/ptw.c index 561bf2678e5ac..d4386ede73e31 100644 --- a/target/arm/ptw.c +++ b/target/arm/ptw.c @@ -36,8 +36,6 @@ typedef struct S1Translate { /* * in_space: the security space for this walk. 
This plus * the in_mmu_idx specify the architectural translation regime. - * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit, - * this field is updated accordingly. * * Note that the security space for the in_ptw_idx may be different * from that for the in_mmu_idx. We do not need to explicitly track @@ -52,18 +50,37 @@ typedef struct S1Translate { * value being Stage2 vs Stage2_S distinguishes those. */ ARMSecuritySpace in_space; + /* + * Like in_space, except this may be "downgraded" to NonSecure + * by an NSTable bit. + */ + ARMSecuritySpace cur_space; /* * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug * accesses will not update the guest page table access flags * and will not change the state of the softmmu TLBs. */ bool in_debug; + /* + * in_at: is this AccessType_AT? + * This is also set for debug, because at heart that is also + * an address translation, and simplifies a test. + */ + bool in_at; /* * If this is stage 2 of a stage 1+2 page table walk, then this must * be true if stage 1 is an EL0 access; otherwise this is ignored. * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}. */ bool in_s1_is_el0; + /* + * The set of PAGE_* bits to be used in the permission check. + * This is normally directly related to the access_type, but + * may be suppressed for debug or AT insns. 
+ */ + uint8_t in_prot_check; + /* Cached EffectiveHCR_EL2_NVx() bit */ + bool in_nv1; bool out_rw; bool out_be; ARMSecuritySpace out_space; @@ -152,6 +169,10 @@ ARMMMUIdx stage_1_mmu_idx(ARMMMUIdx mmu_idx) return ARMMMUIdx_Stage1_E1; case ARMMMUIdx_E10_1_PAN: return ARMMMUIdx_Stage1_E1_PAN; + case ARMMMUIdx_E10_0_GCS: + return ARMMMUIdx_Stage1_E0_GCS; + case ARMMMUIdx_E10_1_GCS: + return ARMMMUIdx_Stage1_E1_GCS; default: return mmu_idx; } @@ -193,9 +214,9 @@ static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx) return ARMMMUIdx_Phys_Realm; case ARMSS_Secure: if (stage2idx == ARMMMUIdx_Stage2_S) { - s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW); + s2walk_secure = !(env->cp15.vstcr_el2 & R_VSTCR_SW_MASK); } else { - s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW); + s2walk_secure = !(env->cp15.vtcr_el2 & R_VTCR_NSW_MASK); } return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS; default: @@ -218,9 +239,9 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn) return env->cp15.vsttbr_el2; } if (ttbrn == 0) { - return env->cp15.ttbr0_el[regime_el(env, mmu_idx)]; + return env->cp15.ttbr0_el[regime_el(mmu_idx)]; } else { - return env->cp15.ttbr1_el[regime_el(env, mmu_idx)]; + return env->cp15.ttbr1_el[regime_el(mmu_idx)]; } } @@ -259,8 +280,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, return (hcr_el2 & (HCR_DC | HCR_VM)) == 0; case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_0_GCS: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_E10_1_GCS: /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */ hcr_el2 = arm_hcr_el2_eff_secstate(env, space); if (hcr_el2 & HCR_TGE) { @@ -269,8 +292,10 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, break; case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E0_GCS: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_Stage1_E1_GCS: /* HCR.DC means SCTLR_EL1.M behaves as 0 */ hcr_el2 = 
arm_hcr_el2_eff_secstate(env, space); if (hcr_el2 & HCR_DC) { @@ -279,10 +304,14 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, break; case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_0_GCS: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_E20_2_GCS: case ARMMMUIdx_E2: + case ARMMMUIdx_E2_GCS: case ARMMMUIdx_E3: + case ARMMMUIdx_E3_GCS: case ARMMMUIdx_E30_0: case ARMMMUIdx_E30_3_PAN: break; @@ -303,6 +332,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx, static bool granule_protection_check(CPUARMState *env, uint64_t paddress, ARMSecuritySpace pspace, + ARMSecuritySpace ss, ARMMMUFaultInfo *fi) { MemTxAttrs attrs = { @@ -371,18 +401,37 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress, l0gptsz = 30 + FIELD_EX64(gpccr, GPCCR, L0GPTSZ); /* - * GPC Priority 2: Secure, Realm or Root address exceeds PPS. + * GPC Priority 2: Access to Secure, NonSecure or Realm is prevented + * by one of the GPCCR_EL3 address space disable bits (R_TCWMD). + * All of these bits are checked vs aa64_rme_gpc2 in gpccr_write. + */ + { + static const uint8_t disable_masks[4] = { + [ARMSS_Secure] = R_GPCCR_SPAD_MASK, + [ARMSS_NonSecure] = R_GPCCR_NSPAD_MASK, + [ARMSS_Root] = 0, + [ARMSS_Realm] = R_GPCCR_RLPAD_MASK, + }; + + if (gpccr & disable_masks[pspace]) { + goto fault_fail; + } + } + + /* + * GPC Priority 3: Secure, Realm or Root address exceeds PPS. * R_CPDSB: A NonSecure physical address input exceeding PPS * does not experience any fault. + * R_PBPSH: Other address spaces have fault suppressed by APPSAA. */ if (paddress & ~pps_mask) { - if (pspace == ARMSS_NonSecure) { + if (pspace == ARMSS_NonSecure || FIELD_EX64(gpccr, GPCCR, APPSAA)) { return true; } - goto fault_size; + goto fault_fail; } - /* GPC Priority 3: the base address of GPTBR_EL3 exceeds PPS. */ + /* GPC Priority 4: the base address of GPTBR_EL3 exceeds PPS. 
*/ tableaddr = env->cp15.gptbr_el3 << 12; if (tableaddr & ~pps_mask) { goto fault_size; @@ -463,18 +512,30 @@ static bool granule_protection_check(CPUARMState *env, uint64_t paddress, break; case 0b1111: /* all access */ return true; - case 0b1000: - case 0b1001: - case 0b1010: - case 0b1011: + case 0b1000: /* secure */ + if (!cpu_isar_feature(aa64_sel2, cpu)) { + goto fault_walk; + } + /* fall through */ + case 0b1001: /* non-secure */ + case 0b1010: /* root */ + case 0b1011: /* realm */ if (pspace == (gpi & 3)) { return true; } break; + case 0b1101: /* non-secure only */ + /* aa64_rme_gpc2 was checked in gpccr_write */ + if (FIELD_EX64(gpccr, GPCCR, NSO)) { + return (pspace == ARMSS_NonSecure && + (ss == ARMSS_NonSecure || ss == ARMSS_Root)); + } + goto fault_walk; default: goto fault_walk; /* reserved */ } + fault_fail: fi->gpcf = GPCF_Fail; goto fault_common; fault_eabt: @@ -575,12 +636,14 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, * From gdbstub, do not use softmmu so that we don't modify the * state of the cpu at all, including softmmu tlb contents. 
*/ - ARMSecuritySpace s2_space = S2_security_space(ptw->in_space, s2_mmu_idx); + ARMSecuritySpace s2_space + = S2_security_space(ptw->cur_space, s2_mmu_idx); S1Translate s2ptw = { .in_mmu_idx = s2_mmu_idx, .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx), .in_space = s2_space, .in_debug = true, + .in_prot_check = PAGE_READ, }; GetPhysAddrResult s2 = { }; @@ -617,7 +680,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, } if (regime_is_stage2(s2_mmu_idx)) { - uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); + uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->cur_space); if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) { /* @@ -628,7 +691,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, fi->s2addr = addr; fi->stage2 = true; fi->s1ptw = true; - fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); + fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx); return false; } } @@ -644,7 +707,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw, fi->s2addr = addr; fi->stage2 = regime_is_stage2(s2_mmu_idx); fi->s1ptw = fi->stage2; - fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx); + fi->s1ns = fault_s1ns(ptw->cur_space, s2_mmu_idx); return false; } @@ -831,7 +894,7 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val, fi->s2addr = ptw->out_virt; fi->stage2 = true; fi->s1ptw = true; - fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx); + fi->s1ns = fault_s1ns(ptw->cur_space, ptw->in_ptw_idx); return 0; } @@ -949,7 +1012,7 @@ static int ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap, int domain_prot) { return ap_to_rw_prot_is_user(env, mmu_idx, ap, domain_prot, - regime_is_user(env, mmu_idx)); + regime_is_user(mmu_idx)); } /* @@ -975,7 +1038,7 @@ static int simple_ap_to_rw_prot_is_user(int ap, bool is_user) static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap) { - return simple_ap_to_rw_prot_is_user(ap, regime_is_user(env, mmu_idx)); + return 
simple_ap_to_rw_prot_is_user(ap, regime_is_user(mmu_idx)); } static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, @@ -1008,7 +1071,7 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, } type = (desc & 3); domain = (desc >> 5) & 0x0f; - if (regime_el(env, ptw->in_mmu_idx) == 1) { + if (regime_el(ptw->in_mmu_idx) == 1) { dacr = env->cp15.dacr_ns; } else { dacr = env->cp15.dacr_s; @@ -1061,11 +1124,10 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, ap = (desc >> (4 + ((address >> 9) & 6))) & 3; result->f.lg_page_size = 12; break; - case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */ + case 3: /* 1k page, or ARMv6 "extended small (4k) page" */ if (type == 1) { - /* ARMv6/XScale extended small page format */ - if (arm_feature(env, ARM_FEATURE_XSCALE) - || arm_feature(env, ARM_FEATURE_V6)) { + /* ARMv6 extended small page format */ + if (arm_feature(env, ARM_FEATURE_V6)) { phys_addr = (desc & 0xfffff000) | (address & 0xfff); result->f.lg_page_size = 12; } else { @@ -1089,7 +1151,7 @@ static bool get_phys_addr_v5(CPUARMState *env, S1Translate *ptw, } result->f.prot = ap_to_rw_prot(env, ptw->in_mmu_idx, ap, domain_prot); result->f.prot |= result->f.prot ? PAGE_EXEC : 0; - if (!(result->f.prot & (1 << access_type))) { + if (ptw->in_prot_check & ~result->f.prot) { /* Access permission fault. */ fi->type = ARMFault_Permission; goto do_fault; @@ -1148,7 +1210,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, /* Page or Section. 
*/ domain = (desc >> 5) & 0x0f; } - if (regime_el(env, mmu_idx) == 1) { + if (regime_el(mmu_idx) == 1) { dacr = env->cp15.dacr_ns; } else { dacr = env->cp15.dacr_s; @@ -1212,7 +1274,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, g_assert_not_reached(); } } - out_space = ptw->in_space; + out_space = ptw->cur_space; if (ns) { /* * The NS bit will (as required by the architecture) have no effect if @@ -1242,8 +1304,8 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, } result->f.prot = get_S1prot(env, mmu_idx, false, user_rw, prot_rw, - xn, pxn, result->f.attrs.space, out_space); - if (!(result->f.prot & (1 << access_type))) { + xn, pxn, ptw->in_space, out_space); + if (ptw->in_prot_check & ~result->f.prot) { /* Access permission fault. */ fi->type = ARMFault_Permission; goto do_fault; @@ -1266,7 +1328,7 @@ static bool get_phys_addr_v6(CPUARMState *env, S1Translate *ptw, * @xn: XN (execute-never) bits * @s1_is_el0: true if this is S2 of an S1+2 walk for EL0 */ -static int get_S2prot_noexecute(int s2ap) +static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) { int prot = 0; @@ -1276,12 +1338,6 @@ static int get_S2prot_noexecute(int s2ap) if (s2ap & 2) { prot |= PAGE_WRITE; } - return prot; -} - -static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) -{ - int prot = get_S2prot_noexecute(s2ap); if (cpu_isar_feature(any_tts2uxn, env_archcpu(env))) { switch (xn) { @@ -1313,6 +1369,44 @@ static int get_S2prot(CPUARMState *env, int s2ap, int xn, bool s1_is_el0) return prot; } +static int get_S2prot_indirect(CPUARMState *env, GetPhysAddrResult *result, + int pi_index, int po_index, bool s1_is_el0) +{ + /* Last index is (priv, unpriv, ttw) */ + static const uint8_t perm_table[16][3] = { + /* 0 */ { 0, 0, 0 }, /* no access */ + /* 1 */ { 0, 0, 0 }, /* reserved */ + /* 2 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, + /* 3 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, + /* 4 */ { 
PAGE_WRITE, PAGE_WRITE, 0 }, + /* 5 */ { 0, 0, 0 }, /* reserved */ + /* 6 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, + /* 7 */ { PAGE_READ, PAGE_READ, PAGE_READ | PAGE_WRITE }, + /* 8 */ { PAGE_READ, PAGE_READ, PAGE_READ }, + /* 9 */ { PAGE_READ, PAGE_READ | PAGE_EXEC, PAGE_READ }, + /* A */ { PAGE_READ | PAGE_EXEC, PAGE_READ, PAGE_READ }, + /* B */ { PAGE_READ | PAGE_EXEC, PAGE_READ | PAGE_EXEC, PAGE_READ }, + /* C */ { PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_WRITE }, + /* D */ { PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_READ | PAGE_WRITE }, + /* E */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_READ | PAGE_WRITE, + PAGE_READ | PAGE_WRITE }, + /* F */ { PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_READ | PAGE_WRITE | PAGE_EXEC, + PAGE_READ | PAGE_WRITE }, + }; + + uint64_t pir = (env->cp15.scr_el3 & SCR_PIEN ? env->cp15.s2pir_el2 : 0); + int s2pi = extract64(pir, pi_index * 4, 4); + + result->f.prot = perm_table[s2pi][2]; + return perm_table[s2pi][s1_is_el0]; +} + /* * Translate section/page access permissions to protection flags * @env: CPUARMState @@ -1330,7 +1424,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) { ARMCPU *cpu = env_archcpu(env); - bool is_user = regime_is_user(env, mmu_idx); + bool is_user = regime_is_user(mmu_idx); bool have_wxn; int wxn = 0; @@ -1347,10 +1441,10 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, * We make the IMPDEF choices that SCR_EL3.SIF and Realm EL2&0 * do not affect EPAN. 
*/ - if (user_rw && regime_is_pan(env, mmu_idx)) { + if (user_rw && regime_is_pan(mmu_idx)) { prot_rw = 0; } else if (cpu_isar_feature(aa64_pan3, cpu) && is_aa64 && - regime_is_pan(env, mmu_idx) && + regime_is_pan(mmu_idx) && (regime_sctlr(env, mmu_idx) & SCTLR_EPAN) && !xn) { prot_rw = 0; } @@ -1407,7 +1501,7 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, xn = pxn || (user_rw & PAGE_WRITE); } } else if (arm_feature(env, ARM_FEATURE_V7)) { - switch (regime_el(env, mmu_idx)) { + switch (regime_el(mmu_idx)) { case 1: case 3: if (is_user) { @@ -1434,11 +1528,115 @@ static int get_S1prot(CPUARMState *env, ARMMMUIdx mmu_idx, bool is_aa64, return prot_rw | PAGE_EXEC; } +/* Extra page permission bits, during get_S1prot_indirect only. */ +#define PAGE_GCS (1 << 3) +#define PAGE_WXN (1 << 4) +#define PAGE_OVERLAY (1 << 5) +QEMU_BUILD_BUG_ON(PAGE_RWX & (PAGE_GCS | PAGE_WXN | PAGE_OVERLAY)); + +static int get_S1prot_indirect(CPUARMState *env, S1Translate *ptw, + ARMMMUIdx mmu_idx, int pi_index, int po_index, + ARMSecuritySpace in_pa, ARMSecuritySpace out_pa) +{ + static const uint8_t perm_table[16] = { + /* 0 */ PAGE_OVERLAY, /* no access */ + /* 1 */ PAGE_OVERLAY | PAGE_READ, + /* 2 */ PAGE_OVERLAY | PAGE_EXEC, + /* 3 */ PAGE_OVERLAY | PAGE_READ | PAGE_EXEC, + /* 4 */ PAGE_OVERLAY, /* reserved */ + /* 5 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE, + /* 6 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_WXN, + /* 7 */ PAGE_OVERLAY | PAGE_READ | PAGE_WRITE | PAGE_EXEC, + /* 8 */ PAGE_READ, + /* 9 */ PAGE_READ | PAGE_GCS, + /* A */ PAGE_READ | PAGE_EXEC, + /* B */ 0, /* reserved */ + /* C */ PAGE_READ | PAGE_WRITE, + /* D */ 0, /* reserved */ + /* E */ PAGE_READ | PAGE_WRITE | PAGE_EXEC, + /* F */ 0, /* reserved */ + }; + + uint32_t el = regime_el(mmu_idx); + uint64_t pir = env->cp15.pir_el[el]; + uint64_t pire0 = 0; + int perm; + + if (el < 3) { + if (arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_PIEN)) { + pir = 0; + } 
else if (el == 2) { + pire0 = env->cp15.pire0_el2; + } else if (!ptw->in_nv1) { + pire0 = env->cp15.pir_el[0]; + } + } + perm = perm_table[extract64(pir, pi_index * 4, 4)]; + + if (regime_has_2_ranges(mmu_idx)) { + int p_perm = perm; + int u_perm = perm_table[extract64(pire0, pi_index * 4, 4)]; + + if ((p_perm & (PAGE_EXEC | PAGE_GCS)) && + (u_perm & (PAGE_WRITE | PAGE_GCS))) { + p_perm &= ~(PAGE_RWX | PAGE_GCS); + u_perm &= ~(PAGE_RWX | PAGE_GCS); + } + if ((u_perm & (PAGE_RWX | PAGE_GCS)) && regime_is_pan(mmu_idx)) { + p_perm &= ~(PAGE_READ | PAGE_WRITE); + } + perm = regime_is_user(mmu_idx) ? u_perm : p_perm; + } + + if (in_pa != out_pa) { + switch (in_pa) { + case ARMSS_Root: + /* + * R_ZWRVD: permission fault for insn fetched from non-Root, + * I_WWBFB: SIF has no effect in EL3. + */ + perm &= ~(PAGE_EXEC | PAGE_GCS); + break; + case ARMSS_Realm: + /* + * R_PKTDS: permission fault for insn fetched from non-Realm, + * for Realm EL2 or EL2&0. The corresponding fault for EL1&0 + * happens during any stage2 translation. + */ + if (el == 2) { + perm &= ~(PAGE_EXEC | PAGE_GCS); + } + break; + case ARMSS_Secure: + if (env->cp15.scr_el3 & SCR_SIF) { + perm &= ~(PAGE_EXEC | PAGE_GCS); + } + break; + default: + /* Input NonSecure must have output NonSecure. */ + g_assert_not_reached(); + } + } + + if (regime_is_gcs(mmu_idx)) { + /* + * Note that the one s1perms.gcs bit controls both read and write + * access via AccessType_GCS. See AArch64.S1CheckPermissions. + */ + perm = (perm & PAGE_GCS ? 
PAGE_READ | PAGE_WRITE : 0); + } else if (perm & PAGE_WXN) { + perm &= ~PAGE_EXEC; + } + + return perm & PAGE_RWX; +} + static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, ARMMMUIdx mmu_idx) { uint64_t tcr = regime_tcr(env, mmu_idx); - uint32_t el = regime_el(env, mmu_idx); + uint32_t el = regime_el(mmu_idx); int select, tsz; bool epd, hpd; @@ -1459,8 +1657,12 @@ static ARMVAParameters aa32_va_parameters(CPUARMState *env, uint32_t va, } tsz = sextract32(tcr, 0, 4) + 8; select = 0; - hpd = false; epd = false; + /* + * Stage2 does not have hierarchical permissions. + * Thus disabling them makes things easier during ptw. + */ + hpd = true; } else if (el == 2) { /* HTCR */ tsz = extract32(tcr, 0, 3); @@ -1625,12 +1827,6 @@ static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds, } } -static bool nv_nv1_enabled(CPUARMState *env, S1Translate *ptw) -{ - uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); - return (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1); -} - /** * get_phys_addr_lpae: perform one stage of page table walk, LPAE format * @@ -1665,8 +1861,8 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, int32_t stride; int addrsize, inputsize, outputsize; uint64_t tcr = regime_tcr(env, mmu_idx); - int ap, xn, pxn; - uint32_t el = regime_el(env, mmu_idx); + int ap, prot; + uint32_t el = regime_el(mmu_idx); uint64_t descaddrmask; bool aarch64 = arm_el_is_aa64(env, el); uint64_t descriptor, new_descriptor; @@ -1682,6 +1878,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, !arm_el_is_aa64(env, 1)); level = 0; + /* + * Cache NV1 before we adjust ptw->in_space for NSTable. + * Note that this is only relevant for EL1&0, and that + * computing it would assert for ARMSS_Root. 
+ */ + if (el == 1) { + uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space); + ptw->in_nv1 = (hcr & (HCR_NV | HCR_NV1)) == (HCR_NV | HCR_NV1); + } + /* * If TxSZ is programmed to a value larger than the maximum, * or smaller than the effective minimum, it is IMPLEMENTATION @@ -1845,7 +2051,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, * NonSecure. With RME, the EL3 translation regime does not change * from Root to NonSecure. */ - if (ptw->in_space == ARMSS_Secure + if (ptw->cur_space == ARMSS_Secure && !regime_is_stage2(mmu_idx) && extract32(tableattrs, 4, 1)) { /* @@ -1855,7 +2061,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS); QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2); ptw->in_ptw_idx += 1; - ptw->in_space = ARMSS_NonSecure; + ptw->cur_space = ARMSS_NonSecure; } if (!S1_ptw_translate(env, ptw, descaddr, fi)) { @@ -1922,7 +2128,12 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, descaddr &= ~(hwaddr)(page_size - 1); descaddr |= (address & (page_size - 1)); - if (likely(!ptw->in_debug)) { + /* + * For AccessType_AT, DB is not updated (AArch64.SetDirtyFlag), + * and it is IMPLEMENTATION DEFINED whether AF is updated + * (AArch64.SetAccessFlag; qemu chooses to not update). + */ + if (likely(!ptw->in_at)) { /* * Access flag. * If HA is enabled, prepare to update the descriptor below. @@ -1961,21 +2172,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, * except NSTable (which we have already handled). */ attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14)); - if (!regime_is_stage2(mmu_idx)) { - if (!param.hpd) { - attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ - /* - * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 - * means "force PL1 access only", which means forcing AP[1] to 0. 
- */ - attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */ - attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */ - } + if (!param.hpd) { + attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */ + /* + * The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1 + * means "force PL1 access only", which means forcing AP[1] to 0. + */ + attrs &= ~(extract64(tableattrs, 2, 1) << 6); /* !APT[0] => AP[1] */ + attrs |= extract32(tableattrs, 3, 1) << 7; /* APT[1] => AP[2] */ } ap = extract32(attrs, 6, 2); - out_space = ptw->in_space; + out_space = ptw->cur_space; if (regime_is_stage2(mmu_idx)) { + if (param.pie) { + int pi = extract64(attrs, 6, 1) + | (extract64(attrs, 51, 1) << 1) + | (extract64(attrs, 53, 2) << 2); + int po = extract64(attrs, 60, 3); + prot = get_S2prot_indirect(env, result, pi, po, ptw->in_s1_is_el0); + } else { + int xn = extract64(attrs, 53, 2); + prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); + /* Install TTW permissions in f.prot. */ + result->f.prot = prot & (PAGE_READ | PAGE_WRITE); + } /* * R_GYNXY: For stage2 in Realm security state, bit 55 is NS. * The bit remains ignored for other security states. 
@@ -1984,11 +2205,9 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, */ if (out_space == ARMSS_Realm && extract64(attrs, 55, 1)) { out_space = ARMSS_NonSecure; - result->f.prot = get_S2prot_noexecute(ap); - } else { - xn = extract64(attrs, 53, 2); - result->f.prot = get_S2prot(env, ap, xn, ptw->in_s1_is_el0); + prot &= ~PAGE_EXEC; } + result->s2prot = prot; result->cacheattrs.is_s2_format = true; result->cacheattrs.attrs = extract32(attrs, 2, 4); @@ -2002,7 +2221,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, int nse, ns = extract32(attrs, 5, 1); uint8_t attrindx; uint64_t mair; - int user_rw, prot_rw; switch (out_space) { case ARMSS_Root: @@ -2051,37 +2269,57 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, default: g_assert_not_reached(); } - xn = extract64(attrs, 54, 1); - pxn = extract64(attrs, 53, 1); - if (el == 1 && nv_nv1_enabled(env, ptw)) { + if (param.pie) { + int pi = extract64(attrs, 6, 1) + | (extract64(attrs, 51, 1) << 1) + | (extract64(attrs, 53, 2) << 2); + int po = extract64(attrs, 60, 3); /* - * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, the block/page - * descriptor bit 54 holds PXN, 53 is RES0, and the effective value - * of UXN is 0. Similarly for bits 59 and 60 in table descriptors - * (which we have already folded into bits 53 and 54 of attrs). - * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. - * Similarly, APTable[0] from the table descriptor is treated as 0; - * we already folded this into AP[1] and squashing that to 0 does - * the right thing. + * Note that we modified ptw->in_space earlier for NSTable, but + * result->f.attrs retains a copy of the original security space. 
*/ - pxn = xn; - xn = 0; - ap &= ~1; - } + prot = get_S1prot_indirect(env, ptw, mmu_idx, pi, po, + result->f.attrs.space, out_space); + } else if (regime_is_gcs(mmu_idx)) { + /* + * While one must use indirect permissions to successfully + * use GCS instructions, AArch64.S1DirectBasePermissions + * faithfully supplies s1perms.gcs = 0, Just In Case. + */ + prot = 0; + } else { + int xn = extract64(attrs, 54, 1); + int pxn = extract64(attrs, 53, 1); + int user_rw, prot_rw; - user_rw = simple_ap_to_rw_prot_is_user(ap, true); - prot_rw = simple_ap_to_rw_prot_is_user(ap, false); - /* - * Note that we modified ptw->in_space earlier for NSTable, but - * result->f.attrs retains a copy of the original security space. - */ - result->f.prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw, - xn, pxn, result->f.attrs.space, out_space); + if (el == 1 && ptw->in_nv1) { + /* + * With FEAT_NV, when HCR_EL2.{NV,NV1} == {1,1}, + * the block/page descriptor bit 54 holds PXN, + * 53 is RES0, and the effective value of UXN is 0. + * Similarly for bits 59 and 60 in table descriptors + * (which we have already folded into bits 53 and 54 of attrs). + * AP[1] (descriptor bit 6, our ap bit 0) is treated as 0. + * Similarly, APTable[0] from the table descriptor is treated + * as 0; we already folded this into AP[1] and squashing + * that to 0 does the right thing. 
+ */ + pxn = xn; + xn = 0; + ap &= ~1; + } + + user_rw = simple_ap_to_rw_prot_is_user(ap, true); + prot_rw = simple_ap_to_rw_prot_is_user(ap, false); + prot = get_S1prot(env, mmu_idx, aarch64, user_rw, prot_rw, + xn, pxn, ptw->in_space, out_space); + } + result->f.prot = prot; /* Index into MAIR registers for cache attributes */ attrindx = extract32(attrs, 2, 3); - mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; + mair = env->cp15.mair_el[regime_el(mmu_idx)]; assert(attrindx <= 7); result->cacheattrs.is_s2_format = false; result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8); @@ -2123,11 +2361,27 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, result->f.tlb_fill_flags = 0; } - if (!(result->f.prot & (1 << access_type))) { + if (ptw->in_prot_check & ~prot) { fi->type = ARMFault_Permission; goto do_fault; } + /* S1PIE and S2PIE both have a bit for software dirty page tracking. */ + if (access_type == MMU_DATA_STORE && param.pie) { + /* + * For S1PIE, bit 7 is nDirty and both HA and HD are checked. + * For S2PIE, bit 7 is Dirty and only HD is checked. + */ + bool bit7 = extract64(attrs, 7, 1); + if (regime_is_stage2(mmu_idx) + ? !bit7 && !param.hd + : bit7 && !(param.ha && param.hd)) { + fi->type = ARMFault_Permission; + fi->dirtybit = true; + goto do_fault; + } + } + /* If FEAT_HAFDBS has made changes, update the PTE. 
*/ if (new_descriptor != descriptor) { new_descriptor = arm_casq_ptw(env, descriptor, new_descriptor, ptw, fi); @@ -2175,7 +2429,7 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw, fi->level = level; fi->stage2 = regime_is_stage2(mmu_idx); } - fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx); + fi->s1ns = fault_s1ns(ptw->cur_space, mmu_idx); return true; } @@ -2190,7 +2444,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t mask; uint32_t base; ARMMMUIdx mmu_idx = ptw->in_mmu_idx; - bool is_user = regime_is_user(env, mmu_idx); + bool is_user = regime_is_user(mmu_idx); if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) { /* MPU disabled. */ @@ -2357,7 +2611,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, ARMCPU *cpu = env_archcpu(env); int n; ARMMMUIdx mmu_idx = ptw->in_mmu_idx; - bool is_user = regime_is_user(env, mmu_idx); + bool is_user = regime_is_user(mmu_idx); bool secure = arm_space_is_secure(ptw->in_space); result->f.phys_addr = address; @@ -2537,13 +2791,13 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, fi->type = ARMFault_Permission; fi->level = 1; - return !(result->f.prot & (1 << access_type)); + return (ptw->in_prot_check & ~result->f.prot) != 0; } static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx, uint32_t secure) { - if (regime_el(env, mmu_idx) == 2) { + if (regime_el(mmu_idx) == 2) { return env->pmsav8.hprbar; } else { return env->pmsav8.rbar[secure]; @@ -2553,7 +2807,7 @@ static uint32_t *regime_rbar(CPUARMState *env, ARMMMUIdx mmu_idx, static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx, uint32_t secure) { - if (regime_el(env, mmu_idx) == 2) { + if (regime_el(mmu_idx) == 2) { return env->pmsav8.hprlar; } else { return env->pmsav8.rlar[secure]; @@ -2561,8 +2815,9 @@ static uint32_t *regime_rlar(CPUARMState *env, ARMMMUIdx mmu_idx, } bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, - MMUAccessType access_type, ARMMMUIdx mmu_idx, - bool secure, 
GetPhysAddrResult *result, + MMUAccessType access_type, unsigned prot_check, + ARMMMUIdx mmu_idx, bool secure, + GetPhysAddrResult *result, ARMMMUFaultInfo *fi, uint32_t *mregion) { /* @@ -2576,7 +2831,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, * memory system to use a subpage. */ ARMCPU *cpu = env_archcpu(env); - bool is_user = regime_is_user(env, mmu_idx); + bool is_user = regime_is_user(mmu_idx); int n; int matchregion = -1; bool hit = false; @@ -2584,7 +2839,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1); int region_counter; - if (regime_el(env, mmu_idx) == 2) { + if (regime_el(mmu_idx) == 2) { region_counter = cpu->pmsav8r_hdregion; } else { region_counter = cpu->pmsav7_dregion; @@ -2710,7 +2965,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, xn = 1; } - if (regime_el(env, mmu_idx) == 2) { + if (regime_el(mmu_idx) == 2) { result->f.prot = simple_ap_to_rw_prot_is_user(ap, mmu_idx != ARMMMUIdx_E2); } else { @@ -2719,7 +2974,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, if (!arm_feature(env, ARM_FEATURE_M)) { uint8_t attrindx = extract32(matched_rlar, 1, 3); - uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)]; + uint64_t mair = env->cp15.mair_el[regime_el(mmu_idx)]; uint8_t sh = extract32(matched_rlar, 3, 2); if (regime_sctlr(env, mmu_idx) & SCTLR_WXN && @@ -2727,7 +2982,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, xn = 0x1; } - if ((regime_el(env, mmu_idx) == 1) && + if ((regime_el(mmu_idx) == 1) && regime_sctlr(env, mmu_idx) & SCTLR_UWXN && ap == 0x1) { pxn = 0x1; } @@ -2750,7 +3005,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address, if (arm_feature(env, ARM_FEATURE_M)) { fi->level = 1; } - return !(result->f.prot & (1 << access_type)); + return (prot_check & ~result->f.prot) != 0; } static bool v8m_is_sau_exempt(CPUARMState *env, @@ -2952,8 +3207,8 @@ static bool 
get_phys_addr_pmsav8(CPUARMState *env, } } - ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure, - result, fi, NULL); + ret = pmsav8_mpu_lookup(env, address, access_type, ptw->in_prot_check, + mmu_idx, secure, result, fi, NULL); if (sattrs.subpage) { result->f.lg_page_size = 0; } @@ -3212,7 +3467,7 @@ static bool get_phys_addr_disabled(CPUARMState *env, break; default: - r_el = regime_el(env, mmu_idx); + r_el = regime_el(mmu_idx); if (arm_el_is_aa64(env, r_el)) { int pamax = arm_pamax(env_archcpu(env)); uint64_t tcr = env->cp15.tcr_el[r_el]; @@ -3320,7 +3575,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, fi->s2addr = ipa; /* Combine the S1 and S2 perms. */ - result->f.prot &= s1_prot; + result->f.prot = s1_prot & result->s2prot; /* If S2 fails, return early. */ if (ret) { @@ -3372,9 +3627,9 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw, */ if (in_space == ARMSS_Secure) { result->f.attrs.secure = - !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)) + !(env->cp15.vstcr_el2 & (R_VSTCR_SA_MASK | R_VSTCR_SW_MASK)) && (ipa_secure - || !(env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))); + || !(env->cp15.vtcr_el2 & (R_VTCR_NSA_MASK | R_VTCR_NSW_MASK))); result->f.attrs.space = arm_secure_to_space(result->f.attrs.secure); } @@ -3395,6 +3650,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, * cannot upgrade a NonSecure translation regime's attributes * to Secure or Realm. */ + ptw->cur_space = ptw->in_space; result->f.attrs.space = ptw->in_space; result->f.attrs.secure = arm_space_is_secure(ptw->in_space); @@ -3456,7 +3712,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, break; } - result->f.attrs.user = regime_is_user(env, mmu_idx); + result->f.attrs.user = regime_is_user(mmu_idx); /* * Fast Context Switch Extension. This doesn't exist at all in v8. 
@@ -3464,7 +3720,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw, */ if (address < 0x02000000 && mmu_idx != ARMMMUIdx_Stage2 && !arm_feature(env, ARM_FEATURE_V8)) { - if (regime_el(env, mmu_idx) == 3) { + if (regime_el(mmu_idx) == 3) { address += env->cp15.fcseidr_s; } else { address += env->cp15.fcseidr_ns; @@ -3530,25 +3786,33 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw, return true; } if (!granule_protection_check(env, result->f.phys_addr, - result->f.attrs.space, fi)) { + result->f.attrs.space, ptw->in_space, fi)) { fi->type = ARMFault_GPCFOnOutput; return true; } return false; } -bool get_phys_addr_with_space_nogpc(CPUARMState *env, vaddr address, - MMUAccessType access_type, MemOp memop, - ARMMMUIdx mmu_idx, ARMSecuritySpace space, - GetPhysAddrResult *result, - ARMMMUFaultInfo *fi) +bool get_phys_addr_for_at(CPUARMState *env, vaddr address, + unsigned prot_check, ARMMMUIdx mmu_idx, + ARMSecuritySpace space, GetPhysAddrResult *result, + ARMMMUFaultInfo *fi) { S1Translate ptw = { .in_mmu_idx = mmu_idx, .in_space = space, + .in_at = true, + .in_prot_check = prot_check, }; - return get_phys_addr_nogpc(env, &ptw, address, access_type, - memop, result, fi); + /* + * I_MXTJT: Granule protection checks are not performed on the final + * address of a successful translation. This is a translation not a + * memory reference, so MMU_DATA_LOAD is arbitrary (the exact protection + * check is handled or bypassed by .in_prot_check) and "memop = MO_8" + * bypasses any alignment check. 
+ */ + return get_phys_addr_nogpc(env, &ptw, address, + MMU_DATA_LOAD, MO_8, result, fi); } static ARMSecuritySpace @@ -3558,15 +3822,22 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx) switch (mmu_idx) { case ARMMMUIdx_E10_0: + case ARMMMUIdx_E10_0_GCS: case ARMMMUIdx_E10_1: case ARMMMUIdx_E10_1_PAN: + case ARMMMUIdx_E10_1_GCS: case ARMMMUIdx_E20_0: + case ARMMMUIdx_E20_0_GCS: case ARMMMUIdx_E20_2: case ARMMMUIdx_E20_2_PAN: + case ARMMMUIdx_E20_2_GCS: case ARMMMUIdx_Stage1_E0: + case ARMMMUIdx_Stage1_E0_GCS: case ARMMMUIdx_Stage1_E1: case ARMMMUIdx_Stage1_E1_PAN: + case ARMMMUIdx_Stage1_E1_GCS: case ARMMMUIdx_E2: + case ARMMMUIdx_E2_GCS: ss = arm_security_space_below_el3(env); break; case ARMMMUIdx_Stage2: @@ -3595,6 +3866,7 @@ arm_mmu_idx_to_security_space(CPUARMState *env, ARMMMUIdx mmu_idx) ss = ARMSS_Secure; break; case ARMMMUIdx_E3: + case ARMMMUIdx_E3_GCS: case ARMMMUIdx_E30_0: case ARMMMUIdx_E30_3_PAN: if (arm_feature(env, ARM_FEATURE_AARCH64) && @@ -3624,6 +3896,7 @@ bool get_phys_addr(CPUARMState *env, vaddr address, S1Translate ptw = { .in_mmu_idx = mmu_idx, .in_space = arm_mmu_idx_to_security_space(env, mmu_idx), + .in_prot_check = 1 << access_type, }; return get_phys_addr_gpc(env, &ptw, address, access_type, @@ -3637,6 +3910,8 @@ static hwaddr arm_cpu_get_phys_page(CPUARMState *env, vaddr addr, .in_mmu_idx = mmu_idx, .in_space = arm_mmu_idx_to_security_space(env, mmu_idx), .in_debug = true, + .in_at = true, + .in_prot_check = 0, }; GetPhysAddrResult res = {}; ARMMMUFaultInfo fi = {}; diff --git a/target/arm/syndrome.h b/target/arm/syndrome.h index c48d3b85871d8..bff61f052cc65 100644 --- a/target/arm/syndrome.h +++ b/target/arm/syndrome.h @@ -63,6 +63,7 @@ enum arm_exception_class { EC_MOP = 0x27, EC_AA32_FPTRAP = 0x28, EC_AA64_FPTRAP = 0x2c, + EC_GCS = 0x2d, EC_SERROR = 0x2f, EC_BREAKPOINT = 0x30, EC_BREAKPOINT_SAME_EL = 0x31, @@ -83,6 +84,23 @@ typedef enum { SME_ET_InaccessibleZT0, } SMEExceptionType; +typedef enum { + 
GCS_ET_DataCheck, + GCS_ET_EXLOCK, + GCS_ET_GCSSTR_GCSSTTR, +} GCSExceptionType; + +typedef enum { + GCS_IT_RET_nPauth = 0, + GCS_IT_GCSPOPM = 1, + GCS_IT_RET_PauthA = 2, + GCS_IT_RET_PauthB = 3, + GCS_IT_GCSSS1 = 4, + GCS_IT_GCSSS2 = 5, + GCS_IT_GCSPOPCX = 8, + GCS_IT_GCSPOPX = 9, +} GCSInstructionType; + #define ARM_EL_EC_LENGTH 6 #define ARM_EL_EC_SHIFT 26 #define ARM_EL_IL_SHIFT 25 @@ -351,6 +369,23 @@ static inline uint32_t syn_pcalignment(void) return (EC_PCALIGNMENT << ARM_EL_EC_SHIFT) | ARM_EL_IL; } +static inline uint32_t syn_gcs_data_check(GCSInstructionType it, int rn) +{ + return ((EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL | + (GCS_ET_DataCheck << 20) | (rn << 5) | it); +} + +static inline uint32_t syn_gcs_exlock(void) +{ + return (EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL | (GCS_ET_EXLOCK << 20); +} + +static inline uint32_t syn_gcs_gcsstr(int ra, int rn) +{ + return ((EC_GCS << ARM_EL_EC_SHIFT) | ARM_EL_IL | + (GCS_ET_GCSSTR_GCSSTTR << 20) | (ra << 10) | (rn << 5)); +} + static inline uint32_t syn_serror(uint32_t extra) { return (EC_SERROR << ARM_EL_EC_SHIFT) | ARM_EL_IL | extra; diff --git a/target/arm/tcg-stubs.c b/target/arm/tcg-stubs.c index aac99b2672a29..aeeede806619c 100644 --- a/target/arm/tcg-stubs.c +++ b/target/arm/tcg-stubs.c @@ -16,7 +16,7 @@ void write_v7m_exception(CPUARMState *env, uint32_t new_exc) g_assert_not_reached(); } -void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, +void raise_exception_ra(CPUARMState *env, uint32_t excp, uint64_t syndrome, uint32_t target_el, uintptr_t ra) { g_assert_not_reached(); diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode index 8c798cde2b43d..01b1b3e38be06 100644 --- a/target/arm/tcg/a64.decode +++ b/target/arm/tcg/a64.decode @@ -156,6 +156,16 @@ MOVZ . 10 100101 .. ................ ..... @movw_32 MOVK . 11 100101 .. ................ ..... @movw_64 MOVK . 11 100101 .. ................ ..... @movw_32 +# Min/Max (immediate) + +@minmaxi_s sf:1 .. ........... 
imm:s8 rn:5 rd:5 &rri_sf +@minmaxi_u sf:1 .. ........... imm:8 rn:5 rd:5 &rri_sf + +SMAX_i . 00 1000111 0000 ........ ..... ..... @minmaxi_s +SMIN_i . 00 1000111 0010 ........ ..... ..... @minmaxi_s +UMAX_i . 00 1000111 0001 ........ ..... ..... @minmaxi_u +UMIN_i . 00 1000111 0011 ........ ..... ..... @minmaxi_u + # Bitfield &bitfield rd rn sf immr imms @@ -238,6 +248,7 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB AUTIA1716 1101 0101 0000 0011 0010 0001 100 11111 AUTIB1716 1101 0101 0000 0011 0010 0001 110 11111 ESB 1101 0101 0000 0011 0010 0010 000 11111 + GCSB 1101 0101 0000 0011 0010 0010 011 11111 PACIAZ 1101 0101 0000 0011 0010 0011 000 11111 PACIASP 1101 0101 0000 0011 0010 0011 001 11111 PACIBZ 1101 0101 0000 0011 0010 0011 010 11111 @@ -246,6 +257,7 @@ ERETA 1101011 0100 11111 00001 m:1 11111 11111 &reta # ERETAA, ERETAB AUTIASP 1101 0101 0000 0011 0010 0011 101 11111 AUTIBZ 1101 0101 0000 0011 0010 0011 110 11111 AUTIBSP 1101 0101 0000 0011 0010 0011 111 11111 + CHKFEAT 1101 0101 0000 0011 0010 0101 000 11111 ] # The canonical NOP has CRm == op2 == 0, but all of the space # that isn't specifically allocated to an instruction must NOP @@ -536,6 +548,13 @@ SWP .. 111 0 00 . . 1 ..... 1000 00 ..... ..... @atomic LDAPR sz:2 111 0 00 1 0 1 11111 1100 00 rn:5 rt:5 +# Atomic 128-bit memory operations +&atomic128 rn rt rt2 a r +@atomic128 ........ a:1 r:1 . rt2:5 ...... rn:5 rt:5 &atomic128 +LDCLRP 00011001 . . 1 ..... 000100 ..... ..... @atomic128 +LDSETP 00011001 . . 1 ..... 001100 ..... ..... @atomic128 +SWPP 00011001 . . 1 ..... 100000 ..... ..... @atomic128 + # Load/store register (pointer authentication) # LDRA immediate is 10 bits signed and scaled, but the bits aren't all contiguous @@ -553,6 +572,9 @@ LDAPR_i 10 011001 10 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext LDAPR_i 00 011001 11 0 ......... 00 ..... ..... @ldapr_stlr_i sign=1 ext=1 sz=0 LDAPR_i 01 011001 11 0 ......... 00 ..... ..... 
@ldapr_stlr_i sign=1 ext=1 sz=1 +# GCSSTR, GCSSTTR +GCSSTR 11011001 000 11111 000 unpriv:1 11 rn:5 rt:5 + # Load/store multiple structures # The 4-bit opcode in [15:12] encodes repeat count and structure elements &ldst_mult rm rn rt sz q p rpt selem @@ -698,6 +720,11 @@ GMI 1 00 11010110 ..... 000101 ..... ..... @rrr PACGA 1 00 11010110 ..... 001100 ..... ..... @rrr +SMAX . 00 11010110 ..... 011000 ..... ..... @rrr_sf +SMIN . 00 11010110 ..... 011010 ..... ..... @rrr_sf +UMAX . 00 11010110 ..... 011001 ..... ..... @rrr_sf +UMIN . 00 11010110 ..... 011011 ..... ..... @rrr_sf + # Data Processing (1-source) @rr . .......... ..... ...... rn:5 rd:5 &rr @@ -711,6 +738,10 @@ REV64 1 10 11010110 00000 000011 ..... ..... @rr CLZ . 10 11010110 00000 000100 ..... ..... @rr_sf CLS . 10 11010110 00000 000101 ..... ..... @rr_sf +CTZ . 10 11010110 00000 000110 ..... ..... @rr_sf +CNT . 10 11010110 00000 000111 ..... ..... @rr_sf +ABS . 10 11010110 00000 001000 ..... ..... @rr_sf + &pacaut rd rn z @pacaut . .. ........ ..... .. z:1 ... rn:5 rd:5 &pacaut diff --git a/target/arm/tcg/cpregs-at.c b/target/arm/tcg/cpregs-at.c index 398a61d39891d..0e8f229aa7f48 100644 --- a/target/arm/tcg/cpregs-at.c +++ b/target/arm/tcg/cpregs-at.c @@ -24,22 +24,15 @@ static int par_el1_shareability(GetPhysAddrResult *res) } static uint64_t do_ats_write(CPUARMState *env, uint64_t value, - MMUAccessType access_type, ARMMMUIdx mmu_idx, + unsigned prot_check, ARMMMUIdx mmu_idx, ARMSecuritySpace ss) { - bool ret; uint64_t par64; bool format64 = false; ARMMMUFaultInfo fi = {}; GetPhysAddrResult res = {}; - - /* - * I_MXTJT: Granule protection checks are not performed on the final - * address of a successful translation. This is a translation not a - * memory reference, so "memop = none = 0". 
- */ - ret = get_phys_addr_with_space_nogpc(env, value, access_type, 0, - mmu_idx, ss, &res, &fi); + bool ret = get_phys_addr_for_at(env, value, prot_check, + mmu_idx, ss, &res, &fi); /* * ATS operations only do S1 or S1+S2 translations, so we never @@ -198,7 +191,7 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value, static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + unsigned access_perm = ri->opc2 & 1 ? PAGE_WRITE : PAGE_READ; uint64_t par64; ARMMMUIdx mmu_idx; int el = arm_current_el(env); @@ -260,7 +253,7 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) g_assert_not_reached(); } - par64 = do_ats_write(env, value, access_type, mmu_idx, ss); + par64 = do_ats_write(env, value, access_perm, mmu_idx, ss); A32_BANKED_CURRENT_REG_SET(env, par, par64); } @@ -268,11 +261,11 @@ static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + unsigned access_perm = ri->opc2 & 1 ? PAGE_WRITE : PAGE_READ; uint64_t par64; /* There is no SecureEL2 for AArch32. */ - par64 = do_ats_write(env, value, access_type, ARMMMUIdx_E2, + par64 = do_ats_write(env, value, access_perm, ARMMMUIdx_E2, ARMSS_NonSecure); A32_BANKED_CURRENT_REG_SET(env, par, par64); @@ -316,7 +309,7 @@ static CPAccessResult at_s1e01_access(CPUARMState *env, const ARMCPRegInfo *ri, static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD; + unsigned access_perm = ri->opc2 & 1 ? 
PAGE_WRITE : PAGE_READ; ARMMMUIdx mmu_idx; uint64_t hcr_el2 = arm_hcr_el2_eff(env); bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); @@ -359,7 +352,7 @@ static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri, } ss = for_el3 ? arm_security_space(env) : arm_security_space_below_el3(env); - env->cp15.par_el[1] = do_ats_write(env, value, access_type, mmu_idx, ss); + env->cp15.par_el[1] = do_ats_write(env, value, access_perm, mmu_idx, ss); } static CPAccessResult ats_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -495,6 +488,47 @@ static const ARMCPRegInfo ats1cp_reginfo[] = { .writefn = ats_write }, }; +static void ats_s1e1a(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + bool regime_e20 = (hcr_el2 & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE); + ARMMMUIdx mmu_idx = regime_e20 ? ARMMMUIdx_E20_2 : ARMMMUIdx_Stage1_E1; + ARMSecuritySpace ss = arm_security_space_below_el3(env); + + env->cp15.par_el[1] = do_ats_write(env, value, 0, mmu_idx, ss); +} + +static void ats_s1e2a(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + uint64_t hcr_el2 = arm_hcr_el2_eff(env); + ARMMMUIdx mmu_idx = hcr_el2 & HCR_E2H ? 
ARMMMUIdx_E20_2 : ARMMMUIdx_E2; + ARMSecuritySpace ss = arm_security_space_below_el3(env); + + env->cp15.par_el[1] = do_ats_write(env, value, 0, mmu_idx, ss); +} + +static void ats_s1e3a(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) +{ + env->cp15.par_el[1] = do_ats_write(env, value, 0, ARMMMUIdx_E3, + arm_security_space(env)); +} + +static const ARMCPRegInfo ats1a_reginfo[] = { + { .name = "AT_S1E1A", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 7, .crm = 9, .opc2 = 2, + .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .fgt = FGT_ATS1E1A, + .accessfn = at_s1e01_access, .writefn = ats_s1e1a }, + { .name = "AT_S1E2A", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 7, .crm = 9, .opc2 = 2, + .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .accessfn = at_s1e2_access, .writefn = ats_s1e2a }, + { .name = "AT_S1E3A", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 7, .crm = 9, .opc2 = 2, + .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, + .writefn = ats_s1e3a }, +}; + void define_at_insn_regs(ARMCPU *cpu) { CPUARMState *env = &cpu->env; @@ -516,4 +550,7 @@ void define_at_insn_regs(ARMCPU *cpu) if (cpu_isar_feature(aa32_ats1e1, cpu)) { define_arm_cp_regs(cpu, ats1cp_reginfo); } + if (cpu_isar_feature(aa64_ats1a, cpu)) { + define_arm_cp_regs(cpu, ats1a_reginfo); + } } diff --git a/target/arm/tcg/cpu32.c b/target/arm/tcg/cpu32.c index a2a23eae0d7b3..f0761410ad0db 100644 --- a/target/arm/tcg/cpu32.c +++ b/target/arm/tcg/cpu32.c @@ -807,144 +807,6 @@ static void sa1110_initfn(Object *obj) cpu->reset_sctlr = 0x00000070; } -static void pxa250_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052100; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa255_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - 
cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052d00; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa260_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052903; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa261_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052d05; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa262_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - cpu->midr = 0x69052d06; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270a0_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054110; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270a1_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054111; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270b0_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - 
set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054112; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270b1_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054113; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270c0_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054114; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - -static void pxa270c5_initfn(Object *obj) -{ - ARMCPU *cpu = ARM_CPU(obj); - - cpu->dtb_compatible = "marvell,xscale"; - set_feature(&cpu->env, ARM_FEATURE_V5); - set_feature(&cpu->env, ARM_FEATURE_XSCALE); - set_feature(&cpu->env, ARM_FEATURE_IWMMXT); - cpu->midr = 0x69054117; - cpu->ctr = 0xd172172; - cpu->reset_sctlr = 0x00000078; -} - #ifndef TARGET_AARCH64 /* * -cpu max: a CPU with as many features enabled as our emulation supports. 
@@ -1032,31 +894,6 @@ static const ARMCPUInfo arm_tcg_cpus[] = { { .name = "ti925t", .initfn = ti925t_initfn }, { .name = "sa1100", .initfn = sa1100_initfn }, { .name = "sa1110", .initfn = sa1110_initfn }, - { .name = "pxa250", .initfn = pxa250_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa255", .initfn = pxa255_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa260", .initfn = pxa260_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa261", .initfn = pxa261_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa262", .initfn = pxa262_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - /* "pxa270" is an alias for "pxa270-a0" */ - { .name = "pxa270", .initfn = pxa270a0_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa270-a0", .initfn = pxa270a0_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa270-a1", .initfn = pxa270a1_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa270-b0", .initfn = pxa270b0_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa270-b1", .initfn = pxa270b1_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa270-c0", .initfn = pxa270c0_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, - { .name = "pxa270-c5", .initfn = pxa270c5_initfn, - .deprecation_note = "iwMMXt CPUs are no longer supported", }, #ifndef TARGET_AARCH64 { .name = "max", .initfn = arm_max_initfn }, #endif diff --git a/target/arm/tcg/cpu64.c b/target/arm/tcg/cpu64.c index 35cddbafa4c75..1bffe66e81c44 100644 --- a/target/arm/tcg/cpu64.c +++ b/target/arm/tcg/cpu64.c @@ -159,7 +159,8 @@ static void cpu_arm_set_rme(Object *obj, bool value, Error **errp) { ARMCPU *cpu = ARM_CPU(obj); - FIELD_DP64_IDREG(&cpu->isar, 
ID_AA64PFR0, RME, value); + /* Enable FEAT_RME_GPC2 */ + FIELD_DP64_IDREG(&cpu->isar, ID_AA64PFR0, RME, value ? 2 : 0); } static void cpu_max_set_l0gptsz(Object *obj, Visitor *v, const char *name, @@ -406,6 +407,79 @@ static void aarch64_a76_initfn(Object *obj) cpu->isar.reset_pmcr_el0 = 0x410b3000; } +static void aarch64_a78ae_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; + + cpu->dtb_compatible = "arm,cortex-a78ae"; + set_feature(&cpu->env, ARM_FEATURE_V8); + set_feature(&cpu->env, ARM_FEATURE_NEON); + set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); + set_feature(&cpu->env, ARM_FEATURE_AARCH64); + set_feature(&cpu->env, ARM_FEATURE_EL2); + set_feature(&cpu->env, ARM_FEATURE_EL3); + set_feature(&cpu->env, ARM_FEATURE_PMU); + + /* Ordered by 3.2.4 AArch64 registers by functional group */ + SET_IDREG(isar, CLIDR, 0x82000023); + cpu->ctr = 0x9444c004; + cpu->dcz_blocksize = 4; + SET_IDREG(isar, ID_AA64DFR0, 0x0000000110305408ull); + SET_IDREG(isar, ID_AA64ISAR0, 0x0010100010211120ull); + SET_IDREG(isar, ID_AA64ISAR1, 0x0000000001200031ull); + SET_IDREG(isar, ID_AA64MMFR0, 0x0000000000101125ull); + SET_IDREG(isar, ID_AA64MMFR1, 0x0000000010212122ull); + SET_IDREG(isar, ID_AA64MMFR2, 0x0000000100001011ull); + SET_IDREG(isar, ID_AA64PFR0, 0x1100000010111112ull); /* GIC filled in later */ + SET_IDREG(isar, ID_AA64PFR1, 0x0000000000000010ull); + SET_IDREG(isar, ID_AFR0, 0x00000000); + SET_IDREG(isar, ID_DFR0, 0x04010088); + SET_IDREG(isar, ID_ISAR0, 0x02101110); + SET_IDREG(isar, ID_ISAR1, 0x13112111); + SET_IDREG(isar, ID_ISAR2, 0x21232042); + SET_IDREG(isar, ID_ISAR3, 0x01112131); + SET_IDREG(isar, ID_ISAR4, 0x00010142); + SET_IDREG(isar, ID_ISAR5, 0x01011121); + SET_IDREG(isar, ID_ISAR6, 0x00000010); + SET_IDREG(isar, ID_MMFR0, 0x10201105); + SET_IDREG(isar, ID_MMFR1, 0x40000000); + SET_IDREG(isar, ID_MMFR2, 0x01260000); + SET_IDREG(isar, ID_MMFR3, 0x02122211); + SET_IDREG(isar, ID_MMFR4, 0x00021110); + SET_IDREG(isar, 
ID_PFR0, 0x10010131); + SET_IDREG(isar, ID_PFR1, 0x00010000); /* GIC filled in later */ + SET_IDREG(isar, ID_PFR2, 0x00000011); + cpu->midr = 0x410fd423; /* r0p3 */ + cpu->revidr = 0; + + /* From 3.2.33 CCSIDR_EL1 */ + /* 64KB L1 dcache */ + cpu->ccsidr[0] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 7); + /* 64KB L1 icache */ + cpu->ccsidr[1] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 4, 64, 64 * KiB, 2); + /* 512KB L2 cache */ + cpu->ccsidr[2] = make_ccsidr(CCSIDR_FORMAT_LEGACY, 8, 64, 512 * KiB, 7); + + /* From 3.2.118 SCTLR_EL3 */ + cpu->reset_sctlr = 0x30c50838; + + /* From 3.4.23 ICH_VTR_EL2 */ + cpu->gic_num_lrs = 4; + cpu->gic_vpribits = 5; + cpu->gic_vprebits = 5; + /* From 3.4.8 ICC_CTLR_EL3 */ + cpu->gic_pribits = 5; + + /* From 3.5.1 AdvSIMD AArch64 register summary */ + cpu->isar.mvfr0 = 0x10110222; + cpu->isar.mvfr1 = 0x13211111; + cpu->isar.mvfr2 = 0x00000043; + + /* From 5.5.1 AArch64 PMU register summary */ + cpu->isar.reset_pmcr_el0 = 0x41223000; +} + static void aarch64_a64fx_initfn(Object *obj) { ARMCPU *cpu = ARM_CPU(obj); @@ -1145,7 +1219,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* FEAT_SHA512 */ t = FIELD_DP64(t, ID_AA64ISAR0, CRC32, 1); /* FEAT_CRC32 */ - t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 2); /* FEAT_LSE */ + t = FIELD_DP64(t, ID_AA64ISAR0, ATOMIC, 3); /* FEAT_LSE, FEAT_LSE128 */ t = FIELD_DP64(t, ID_AA64ISAR0, RDM, 1); /* FEAT_RDM */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA3, 1); /* FEAT_SHA3 */ t = FIELD_DP64(t, ID_AA64ISAR0, SM3, 1); /* FEAT_SM3 */ @@ -1178,6 +1252,8 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR2, MOPS, 1); /* FEAT_MOPS */ t = FIELD_DP64(t, ID_AA64ISAR2, BC, 1); /* FEAT_HBC */ t = FIELD_DP64(t, ID_AA64ISAR2, WFXT, 2); /* FEAT_WFxT */ + t = FIELD_DP64(t, ID_AA64ISAR2, CSSC, 1); /* FEAT_CSSC */ + t = FIELD_DP64(t, ID_AA64ISAR2, ATS1A, 1); /* FEAT_ATS1A */ SET_IDREG(isar, ID_AA64ISAR2, t); t 
= GET_IDREG(isar, ID_AA64PFR0); @@ -1204,6 +1280,7 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64PFR1, SME, 2); /* FEAT_SME2 */ t = FIELD_DP64(t, ID_AA64PFR1, CSV2_FRAC, 0); /* FEAT_CSV2_3 */ t = FIELD_DP64(t, ID_AA64PFR1, NMI, 1); /* FEAT_NMI */ + t = FIELD_DP64(t, ID_AA64PFR1, GCS, 1); /* FEAT_GCS */ SET_IDREG(isar, ID_AA64PFR1, t); t = GET_IDREG(isar, ID_AA64MMFR0); @@ -1247,7 +1324,14 @@ void aarch64_max_tcg_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64MMFR2, E0PD, 1); /* FEAT_E0PD */ SET_IDREG(isar, ID_AA64MMFR2, t); - FIELD_DP64_IDREG(isar, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */ + t = GET_IDREG(isar, ID_AA64MMFR3); + t = FIELD_DP64(t, ID_AA64MMFR3, TCRX, 1); /* FEAT_TCR2 */ + t = FIELD_DP64(t, ID_AA64MMFR3, SCTLRX, 1); /* FEAT_SCTLR2 */ + t = FIELD_DP64(t, ID_AA64MMFR3, MEC, 1); /* FEAT_MEC */ + t = FIELD_DP64(t, ID_AA64MMFR3, SPEC_FPACC, 1); /* FEAT_FPACC_SPEC */ + t = FIELD_DP64(t, ID_AA64MMFR3, S1PIE, 1); /* FEAT_S1PIE */ + t = FIELD_DP64(t, ID_AA64MMFR3, S2PIE, 1); /* FEAT_S2PIE */ + SET_IDREG(isar, ID_AA64MMFR3, t); t = GET_IDREG(isar, ID_AA64ZFR0); t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 2); /* FEAT_SVE2p1 */ @@ -1315,6 +1399,11 @@ static const ARMCPUInfo aarch64_cpus[] = { { .name = "cortex-a55", .initfn = aarch64_a55_initfn }, { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, { .name = "cortex-a76", .initfn = aarch64_a76_initfn }, + /* + * The Cortex-A78AE differs slightly from the plain Cortex-A78. We don't + * currently model the latter. 
+ */ + { .name = "cortex-a78ae", .initfn = aarch64_a78ae_initfn }, { .name = "cortex-a710", .initfn = aarch64_a710_initfn }, { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, { .name = "neoverse-n1", .initfn = aarch64_neoverse_n1_initfn }, diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c index 71c6c44ee8ac2..ba1d775d818eb 100644 --- a/target/arm/tcg/helper-a64.c +++ b/target/arm/tcg/helper-a64.c @@ -576,6 +576,7 @@ uint32_t HELPER(advsimd_rinth)(uint32_t x, float_status *fp_status) return ret; } +#ifndef CONFIG_USER_ONLY static int el_from_spsr(uint32_t spsr) { /* Return the exception level that this SPSR is requesting a return to, @@ -614,32 +615,12 @@ static int el_from_spsr(uint32_t spsr) } } -static void cpsr_write_from_spsr_elx(CPUARMState *env, - uint32_t val) -{ - uint32_t mask; - - /* Save SPSR_ELx.SS into PSTATE. */ - env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS); - val &= ~PSTATE_SS; - - /* Move DIT to the correct location for CPSR */ - if (val & PSTATE_DIT) { - val &= ~PSTATE_DIT; - val |= CPSR_DIT; - } - - mask = aarch32_cpsr_valid_mask(env->features, \ - &env_archcpu(env)->isar); - cpsr_write(env, val, mask, CPSRWriteRaw); -} - void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) { ARMCPU *cpu = env_archcpu(env); int cur_el = arm_current_el(env); unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el); - uint32_t spsr = env->banked_spsr[spsr_idx]; + uint64_t spsr = env->banked_spsr[spsr_idx]; int new_el; bool return_to_aa64 = (spsr & PSTATE_nRW) == 0; @@ -694,6 +675,17 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) goto illegal_return; } + /* + * If GetCurrentEXLOCKEN, the exception return path must use GCSPOPCX, + * which will set PSTATE.EXLOCK. We need not explicitly check FEAT_GCS, + * because GCSCR_ELx cannot be set without it. 
+ */ + if (new_el == cur_el && + (env->cp15.gcscr_el[cur_el] & GCSCR_EXLOCKEN) && + !(env->pstate & PSTATE_EXLOCK)) { + goto illegal_return; + } + bql_lock(); arm_call_pre_el_change_hook(cpu); bql_unlock(); @@ -787,6 +779,7 @@ void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc) qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: " "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc); } +#endif /* !CONFIG_USER_ONLY */ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in) { diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h index 85023465b7686..b6008b5a3ac4d 100644 --- a/target/arm/tcg/helper-a64.h +++ b/target/arm/tcg/helper-a64.h @@ -80,7 +80,6 @@ DEF_HELPER_3(vfp_ah_maxh, f16, f16, f16, fpst) DEF_HELPER_3(vfp_ah_maxs, f32, f32, f32, fpst) DEF_HELPER_3(vfp_ah_maxd, f64, f64, f64, fpst) -DEF_HELPER_2(exception_return, void, env, i64) DEF_HELPER_FLAGS_2(dc_zva, TCG_CALL_NO_WG, void, env, i64) DEF_HELPER_FLAGS_3(pacia, TCG_CALL_NO_WG, i64, env, i64, i64) @@ -145,3 +144,7 @@ DEF_HELPER_FLAGS_5(gvec_fmulx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32 DEF_HELPER_FLAGS_5(gvec_fmulx_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmulx_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmulx_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_2(exception_return, void, env, i64) +#endif diff --git a/target/arm/tcg/helper-sme.h b/target/arm/tcg/helper-sme.h index 1fc756bec6e55..c551797c6fa6a 100644 --- a/target/arm/tcg/helper-sme.h +++ b/target/arm/tcg/helper-sme.h @@ -48,87 +48,87 @@ DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(sme2p1_movaz_zc_q, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) 
-DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) 
-DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) 
-DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) - -DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) -DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i32) +DEF_HELPER_FLAGS_5(sme_ld1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) 
+DEF_HELPER_FLAGS_5(sme_ld1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) 
+DEF_HELPER_FLAGS_5(sme_ld1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_ld1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_ld1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1b_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1b_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1b_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1b_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1h_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1h_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1s_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) 
+DEF_HELPER_FLAGS_5(sme_st1s_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1s_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1d_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1d_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) + +DEF_HELPER_FLAGS_5(sme_st1q_be_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_h, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_be_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_v, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_be_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_h_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_be_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) +DEF_HELPER_FLAGS_5(sme_st1q_le_v_mte, TCG_CALL_NO_WG, void, env, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_5(sme_addha_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) 
DEF_HELPER_FLAGS_5(sme_addva_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32) diff --git a/target/arm/tcg/helper-sve.h b/target/arm/tcg/helper-sve.h index c36090d13d1d2..c3541a8ca864a 100644 --- a/target/arm/tcg/helper-sve.h +++ b/target/arm/tcg/helper-sve.h @@ -1196,6 +1196,8 @@ DEF_HELPER_FLAGS_5(sve_fcmne0_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(sve_fcmne0_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fadd_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG, @@ -1203,6 +1205,8 @@ DEF_HELPER_FLAGS_6(sve_fadd_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fsub_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG, @@ -1210,6 +1214,8 @@ DEF_HELPER_FLAGS_6(sve_fsub_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmul_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmul_s, TCG_CALL_NO_RWG, @@ -1224,6 +1230,8 @@ DEF_HELPER_FLAGS_6(sve_fdiv_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fdiv_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmin_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG, @@ -1231,6 +1239,8 @@ DEF_HELPER_FLAGS_6(sve_fmin_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmax_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, 
ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG, @@ -1238,6 +1248,8 @@ DEF_HELPER_FLAGS_6(sve_fmax_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmin_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_ah_fmin_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_ah_fmin_s, TCG_CALL_NO_RWG, @@ -1245,6 +1257,8 @@ DEF_HELPER_FLAGS_6(sve_ah_fmin_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_ah_fmin_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_ah_fmax_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_ah_fmax_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_ah_fmax_s, TCG_CALL_NO_RWG, @@ -1252,6 +1266,8 @@ DEF_HELPER_FLAGS_6(sve_ah_fmax_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_ah_fmax_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fminnum_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG, @@ -1259,6 +1275,8 @@ DEF_HELPER_FLAGS_6(sve_fminnum_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fminnum_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_6(sve_fmaxnum_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_6(sve_fmaxnum_s, TCG_CALL_NO_RWG, @@ -1523,6 +1541,8 @@ DEF_HELPER_FLAGS_6(sve_fcadd_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_6(sve_fcadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) 
DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, @@ -1530,6 +1550,8 @@ DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_fmla_zpzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG, @@ -1537,6 +1559,8 @@ DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_fmls_zpzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG, @@ -1544,6 +1568,8 @@ DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_fnmla_zpzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, @@ -1551,6 +1577,8 @@ DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_fnmls_zpzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_s, TCG_CALL_NO_RWG, @@ -1558,6 +1586,8 @@ DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_ah_fmls_zpzzz_d, TCG_CALL_NO_RWG, 
void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_s, TCG_CALL_NO_RWG, @@ -1565,6 +1595,8 @@ DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_7(sve_ah_fnmla_zpzzz_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_7(sve_ah_fnmls_zpzzz_s, TCG_CALL_NO_RWG, @@ -1623,1015 +1655,1015 @@ DEF_HELPER_FLAGS_4(sve2_usubw_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_usubw_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) DEF_HELPER_FLAGS_4(sve2_usubw_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) -DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, 
i32) -DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1squ_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1squ_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - 
-DEF_HELPER_FLAGS_4(sve_ld1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld3qq_be_r_mte, 
TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1hds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1squ_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1sds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ld1squ_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldff1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) 
-DEF_HELPER_FLAGS_4(sve_ldff1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldff1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) +DEF_HELPER_FLAGS_4(sve_ld1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_be_r, TCG_CALL_NO_WG, 
void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_le_r_mte, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) 
+DEF_HELPER_FLAGS_4(sve_ld1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1hsu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1hds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1sdu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1sds_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ld1squ_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ld1dqu_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r, TCG_CALL_NO_WG, 
void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldff1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldff1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hh_le_r_mte, 
TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hsu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hh_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hsu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1hds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1ss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1ss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1sds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1dd_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldff1dd_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, 
void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_ldnf1bb_r_mte, TCG_CALL_NO_WG, void, env, 
ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_ldnf1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bss_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bds_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_ldnf1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bsu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bdu_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bhs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bss_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_ldnf1bds_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hh_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hh_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hsu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) 
DEF_HELPER_FLAGS_4(sve_ldnf1hss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1hds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1ss_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sdu_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sds_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1ss_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sdu_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1sds_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1dd_le_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) + void, env, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve_ldnf1dd_be_r_mte, TCG_CALL_NO_WG, - void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, 
tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, 
env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1sq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1sq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1dq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1dq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, 
tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1bh_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1bd_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1hs_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hs_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1hd_be_r_mte, 
TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1sd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1sd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) - -DEF_HELPER_FLAGS_4(sve_st1sq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1sq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1dq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) -DEF_HELPER_FLAGS_4(sve_st1dq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i32) + void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4bb_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_be_r, TCG_CALL_NO_WG, void, 
env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bh_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bs_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bd_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hs_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hs_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sd_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sd_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sq_be_r, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_le_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_be_r, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4bb_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4hh_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4ss_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) 
+DEF_HELPER_FLAGS_4(sve_st3dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st2dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4dd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st2qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st3qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st4qq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1bh_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bs_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1bd_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1hs_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hs_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1hd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sd_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sd_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) + +DEF_HELPER_FLAGS_4(sve_st1sq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1sq_be_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_le_r_mte, TCG_CALL_NO_WG, void, env, ptr, tl, i64) +DEF_HELPER_FLAGS_4(sve_st1dq_be_r_mte, 
TCG_CALL_NO_WG, void, env, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zss, TCG_CALL_NO_WG, - void, env, 
ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss, 
TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_ldbds_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldqq_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldqq_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbsu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, 
tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhsu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbss_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_ldhds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zss_mte, 
TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbdu_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_lddd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldbds_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldhds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldsds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldqq_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldqq_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, 
env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu, 
TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, 
ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zd, TCG_CALL_NO_WG, - void, 
env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbsu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhsu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + 
void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbss_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, 
ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbdu_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, 
i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsdu_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffdd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffbds_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffhds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_ldffsds_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zsu, 
TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zsu, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) 
DEF_HELPER_FLAGS_6(sve_stsd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zss, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stqq_le_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stqq_be_zd, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, 
ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbs_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sths_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stss_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zsu_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zss_mte, 
TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zss_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stbd_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_sthd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stsd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stdd_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stqq_le_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_6(sve_stqq_be_zd_mte, TCG_CALL_NO_WG, - void, env, ptr, ptr, ptr, tl, i32) + void, env, ptr, ptr, ptr, tl, i64) DEF_HELPER_FLAGS_4(sve2_sqdmull_zzz_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32) @@ -3119,18 
+3151,18 @@ DEF_HELPER_FLAGS_3(pmov_vp_h, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(pmov_vp_s, TCG_CALL_NO_RWG, void, ptr, ptr, i32) DEF_HELPER_FLAGS_3(pmov_vp_d, TCG_CALL_NO_RWG, void, ptr, ptr, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_ld1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) - -DEF_HELPER_FLAGS_5(sve2p1_st1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_st1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_st1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_st1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_st1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_st1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) -DEF_HELPER_FLAGS_5(sve2p1_st1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i32) +DEF_HELPER_FLAGS_5(sve2p1_ld1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_ld1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_ld1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_ld1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_ld1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_ld1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_ld1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) + 
+DEF_HELPER_FLAGS_5(sve2p1_st1bb_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_st1hh_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_st1hh_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_st1ss_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_st1ss_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_st1dd_le_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) +DEF_HELPER_FLAGS_5(sve2p1_st1dd_be_c, TCG_CALL_NO_WG, void, env, ptr, tl, i32, i64) diff --git a/target/arm/tcg/helper.h b/target/arm/tcg/helper.h index 0a006d95142ab..4636d1bc039c5 100644 --- a/target/arm/tcg/helper.h +++ b/target/arm/tcg/helper.h @@ -444,101 +444,6 @@ DEF_HELPER_3(neon_acgt_f32, i32, i32, i32, fpst) DEF_HELPER_3(neon_acge_f64, i64, i64, i64, fpst) DEF_HELPER_3(neon_acgt_f64, i64, i64, i64, fpst) -/* iwmmxt_helper.c */ -DEF_HELPER_2(iwmmxt_maddsq, i64, i64, i64) -DEF_HELPER_2(iwmmxt_madduq, i64, i64, i64) -DEF_HELPER_2(iwmmxt_sadb, i64, i64, i64) -DEF_HELPER_2(iwmmxt_sadw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mulslw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mulshw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_mululw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_muluhw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_macsw, i64, i64, i64) -DEF_HELPER_2(iwmmxt_macuw, i64, i64, i64) -DEF_HELPER_1(iwmmxt_setpsr_nz, i32, i64) - -#define DEF_IWMMXT_HELPER_SIZE_ENV(name) \ -DEF_HELPER_3(iwmmxt_##name##b, i64, env, i64, i64) \ -DEF_HELPER_3(iwmmxt_##name##w, i64, env, i64, i64) \ -DEF_HELPER_3(iwmmxt_##name##l, i64, env, i64, i64) \ - -DEF_IWMMXT_HELPER_SIZE_ENV(unpackl) -DEF_IWMMXT_HELPER_SIZE_ENV(unpackh) - -DEF_HELPER_2(iwmmxt_unpacklub, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackluw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklul, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhub, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhuw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhul, i64, env, i64) 
-DEF_HELPER_2(iwmmxt_unpacklsb, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpacklsl, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsb, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsw, i64, env, i64) -DEF_HELPER_2(iwmmxt_unpackhsl, i64, env, i64) - -DEF_IWMMXT_HELPER_SIZE_ENV(cmpeq) -DEF_IWMMXT_HELPER_SIZE_ENV(cmpgtu) -DEF_IWMMXT_HELPER_SIZE_ENV(cmpgts) - -DEF_IWMMXT_HELPER_SIZE_ENV(mins) -DEF_IWMMXT_HELPER_SIZE_ENV(minu) -DEF_IWMMXT_HELPER_SIZE_ENV(maxs) -DEF_IWMMXT_HELPER_SIZE_ENV(maxu) - -DEF_IWMMXT_HELPER_SIZE_ENV(subn) -DEF_IWMMXT_HELPER_SIZE_ENV(addn) -DEF_IWMMXT_HELPER_SIZE_ENV(subu) -DEF_IWMMXT_HELPER_SIZE_ENV(addu) -DEF_IWMMXT_HELPER_SIZE_ENV(subs) -DEF_IWMMXT_HELPER_SIZE_ENV(adds) - -DEF_HELPER_3(iwmmxt_avgb0, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgb1, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgw0, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_avgw1, i64, env, i64, i64) - -DEF_HELPER_3(iwmmxt_align, i64, i64, i64, i32) -DEF_HELPER_4(iwmmxt_insr, i64, i64, i32, i32, i32) - -DEF_HELPER_1(iwmmxt_bcstb, i64, i32) -DEF_HELPER_1(iwmmxt_bcstw, i64, i32) -DEF_HELPER_1(iwmmxt_bcstl, i64, i32) - -DEF_HELPER_1(iwmmxt_addcb, i64, i64) -DEF_HELPER_1(iwmmxt_addcw, i64, i64) -DEF_HELPER_1(iwmmxt_addcl, i64, i64) - -DEF_HELPER_1(iwmmxt_msbb, i32, i64) -DEF_HELPER_1(iwmmxt_msbw, i32, i64) -DEF_HELPER_1(iwmmxt_msbl, i32, i64) - -DEF_HELPER_3(iwmmxt_srlw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_srll, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_srlq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sllw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_slll, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sllq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sraw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sral, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_sraq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorw, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorl, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_rorq, i64, env, i64, i32) -DEF_HELPER_3(iwmmxt_shufh, i64, env, i64, i32) - 
-DEF_HELPER_3(iwmmxt_packuw, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packul, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packuq, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsw, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsl, i64, env, i64, i64) -DEF_HELPER_3(iwmmxt_packsq, i64, env, i64, i64) - -DEF_HELPER_3(iwmmxt_muladdsl, i64, i64, i32, i32) -DEF_HELPER_3(iwmmxt_muladdsw, i64, i64, i32, i32) -DEF_HELPER_3(iwmmxt_muladdswl, i64, i64, i32, i32) - DEF_HELPER_FLAGS_2(neon_unzip8, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_unzip16, TCG_CALL_NO_RWG, void, ptr, ptr) DEF_HELPER_FLAGS_2(neon_qunzip8, TCG_CALL_NO_RWG, void, ptr, ptr) @@ -728,16 +633,19 @@ DEF_HELPER_FLAGS_4(gvec_fclt0_h, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_fclt0_s, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_4(gvec_fclt0_d, TCG_CALL_NO_RWG, void, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fadd_b16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fadd_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fadd_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fadd_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_bfadd, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fsub_b16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fsub_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fsub_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fsub_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_bfsub, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_b16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmul_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmul_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmul_d, 
TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) @@ -820,6 +728,8 @@ DEF_HELPER_FLAGS_5(gvec_ftsmul_s, TCG_CALL_NO_RWG, DEF_HELPER_FLAGS_5(gvec_ftsmul_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) +DEF_HELPER_FLAGS_5(gvec_fmul_idx_b16, TCG_CALL_NO_RWG, + void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, fpst, i32) DEF_HELPER_FLAGS_5(gvec_fmul_idx_s, TCG_CALL_NO_RWG, diff --git a/target/arm/tcg/hflags.c b/target/arm/tcg/hflags.c index 59ab5263753ae..5c9b9bec3b2c6 100644 --- a/target/arm/tcg/hflags.c +++ b/target/arm/tcg/hflags.c @@ -258,6 +258,11 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, DP_TBFLAG_A64(flags, TBII, tbii); DP_TBFLAG_A64(flags, TBID, tbid); + /* E2H is used by both VHE and NV2. */ + if (hcr & HCR_E2H) { + DP_TBFLAG_A64(flags, E2H, 1); + } + if (cpu_isar_feature(aa64_sve, env_archcpu(env))) { int sve_el = sve_exception_el(env, el); @@ -390,9 +395,6 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, } if (hcr & HCR_NV2) { DP_TBFLAG_A64(flags, NV2, 1); - if (hcr & HCR_E2H) { - DP_TBFLAG_A64(flags, NV2_MEM_E20, 1); - } if (env->cp15.sctlr_el[2] & SCTLR_EE) { DP_TBFLAG_A64(flags, NV2_MEM_BE, 1); } @@ -449,6 +451,44 @@ static CPUARMTBFlags rebuild_hflags_a64(CPUARMState *env, int el, int fp_el, DP_TBFLAG_A64(flags, TCMA, aa64_va_parameter_tcma(tcr, mmu_idx)); } + if (cpu_isar_feature(aa64_gcs, env_archcpu(env))) { + /* C.f. GCSEnabled */ + if (env->cp15.gcscr_el[el] & GCSCR_PCRSEL) { + switch (el) { + default: + if (!el_is_in_host(env, el) + && !(arm_hcrx_el2_eff(env) & HCRX_GCSEN)) { + break; + } + /* fall through */ + case 2: + if (arm_feature(env, ARM_FEATURE_EL3) + && !(env->cp15.scr_el3 & SCR_GCSEN)) { + break; + } + /* fall through */ + case 3: + DP_TBFLAG_A64(flags, GCS_EN, 1); + break; + } + } + + /* C.f. 
GCSReturnValueCheckEnabled */ + if (env->cp15.gcscr_el[el] & GCSCR_RVCHKEN) { + DP_TBFLAG_A64(flags, GCS_RVCEN, 1); + } + + /* C.f. CheckGCSSTREnabled */ + if (!(env->cp15.gcscr_el[el] & GCSCR_STREN)) { + DP_TBFLAG_A64(flags, GCSSTR_EL, el ? el : 1); + } else if (el == 1 + && EX_TBFLAG_ANY(flags, FGT_ACTIVE) + && !FIELD_EX64(env->cp15.fgt_exec[FGTREG_HFGITR], + HFGITR_EL2, NGCSSTR_EL1)) { + DP_TBFLAG_A64(flags, GCSSTR_EL, 2); + } + } + if (env->vfp.fpcr & FPCR_AH) { DP_TBFLAG_A64(flags, AH, 1); } @@ -624,16 +664,9 @@ TCGTBCPUState arm_get_tb_cpu_state(CPUState *cs) DP_TBFLAG_M32(flags, MVE_NO_PRED, 1); } } else { - /* - * Note that XSCALE_CPAR shares bits with VECSTRIDE. - * Note that VECLEN+VECSTRIDE are RES0 for M-profile. - */ - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - DP_TBFLAG_A32(flags, XSCALE_CPAR, env->cp15.c15_cpar); - } else { - DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); - DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); - } + /* Note that VECLEN+VECSTRIDE are RES0 for M-profile. */ + DP_TBFLAG_A32(flags, VECLEN, env->vfp.vec_len); + DP_TBFLAG_A32(flags, VECSTRIDE, env->vfp.vec_stride); if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) { DP_TBFLAG_A32(flags, VFPEN, 1); } diff --git a/target/arm/tcg/iwmmxt_helper.c b/target/arm/tcg/iwmmxt_helper.c deleted file mode 100644 index ba054b6b4db23..0000000000000 --- a/target/arm/tcg/iwmmxt_helper.c +++ /dev/null @@ -1,672 +0,0 @@ -/* - * iwMMXt micro operations for XScale. - * - * Copyright (c) 2007 OpenedHand, Ltd. - * Written by Andrzej Zaborowski - * Copyright (c) 2008 CodeSourcery - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, see . - */ - -#include "qemu/osdep.h" - -#include "cpu.h" - -#define HELPER_H "tcg/helper.h" -#include "exec/helper-proto.h.inc" - -/* iwMMXt macros extracted from GNU gdb. */ - -/* Set the SIMD wCASF flags for 8, 16, 32 or 64-bit operations. */ -#define SIMD8_SET(v, n, b) ((v != 0) << ((((b) + 1) * 4) + (n))) -#define SIMD16_SET(v, n, h) ((v != 0) << ((((h) + 1) * 8) + (n))) -#define SIMD32_SET(v, n, w) ((v != 0) << ((((w) + 1) * 16) + (n))) -#define SIMD64_SET(v, n) ((v != 0) << (32 + (n))) -/* Flags to pass as "n" above. */ -#define SIMD_NBIT -1 -#define SIMD_ZBIT -2 -#define SIMD_CBIT -3 -#define SIMD_VBIT -4 -/* Various status bit macros. */ -#define NBIT8(x) ((x) & 0x80) -#define NBIT16(x) ((x) & 0x8000) -#define NBIT32(x) ((x) & 0x80000000) -#define NBIT64(x) ((x) & 0x8000000000000000ULL) -#define ZBIT8(x) (((x) & 0xff) == 0) -#define ZBIT16(x) (((x) & 0xffff) == 0) -#define ZBIT32(x) (((x) & 0xffffffff) == 0) -#define ZBIT64(x) (x == 0) -/* Sign extension macros. 
*/ -#define EXTEND8H(a) ((uint16_t) (int8_t) (a)) -#define EXTEND8(a) ((uint32_t) (int8_t) (a)) -#define EXTEND16(a) ((uint32_t) (int16_t) (a)) -#define EXTEND16S(a) ((int32_t) (int16_t) (a)) -#define EXTEND32(a) ((uint64_t) (int32_t) (a)) - -uint64_t HELPER(iwmmxt_maddsq)(uint64_t a, uint64_t b) -{ - a = (( - EXTEND16S((a >> 0) & 0xffff) * EXTEND16S((b >> 0) & 0xffff) + - EXTEND16S((a >> 16) & 0xffff) * EXTEND16S((b >> 16) & 0xffff) - ) & 0xffffffff) | ((uint64_t) ( - EXTEND16S((a >> 32) & 0xffff) * EXTEND16S((b >> 32) & 0xffff) + - EXTEND16S((a >> 48) & 0xffff) * EXTEND16S((b >> 48) & 0xffff) - ) << 32); - return a; -} - -uint64_t HELPER(iwmmxt_madduq)(uint64_t a, uint64_t b) -{ - a = (( - ((a >> 0) & 0xffff) * ((b >> 0) & 0xffff) + - ((a >> 16) & 0xffff) * ((b >> 16) & 0xffff) - ) & 0xffffffff) | (( - ((a >> 32) & 0xffff) * ((b >> 32) & 0xffff) + - ((a >> 48) & 0xffff) * ((b >> 48) & 0xffff) - ) << 32); - return a; -} - -uint64_t HELPER(iwmmxt_sadb)(uint64_t a, uint64_t b) -{ -#define abs(x) (((x) >= 0) ? 
x : -x) -#define SADB(SHR) abs((int) ((a >> SHR) & 0xff) - (int) ((b >> SHR) & 0xff)) - return - SADB(0) + SADB(8) + SADB(16) + SADB(24) + - SADB(32) + SADB(40) + SADB(48) + SADB(56); -#undef SADB -} - -uint64_t HELPER(iwmmxt_sadw)(uint64_t a, uint64_t b) -{ -#define SADW(SHR) \ - abs((int) ((a >> SHR) & 0xffff) - (int) ((b >> SHR) & 0xffff)) - return SADW(0) + SADW(16) + SADW(32) + SADW(48); -#undef SADW -} - -uint64_t HELPER(iwmmxt_mulslw)(uint64_t a, uint64_t b) -{ -#define MULS(SHR) ((uint64_t) ((( \ - EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ - ) >> 0) & 0xffff) << SHR) - return MULS(0) | MULS(16) | MULS(32) | MULS(48); -#undef MULS -} - -uint64_t HELPER(iwmmxt_mulshw)(uint64_t a, uint64_t b) -{ -#define MULS(SHR) ((uint64_t) ((( \ - EXTEND16S((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff) \ - ) >> 16) & 0xffff) << SHR) - return MULS(0) | MULS(16) | MULS(32) | MULS(48); -#undef MULS -} - -uint64_t HELPER(iwmmxt_mululw)(uint64_t a, uint64_t b) -{ -#define MULU(SHR) ((uint64_t) ((( \ - ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ - ) >> 0) & 0xffff) << SHR) - return MULU(0) | MULU(16) | MULU(32) | MULU(48); -#undef MULU -} - -uint64_t HELPER(iwmmxt_muluhw)(uint64_t a, uint64_t b) -{ -#define MULU(SHR) ((uint64_t) ((( \ - ((a >> SHR) & 0xffff) * ((b >> SHR) & 0xffff) \ - ) >> 16) & 0xffff) << SHR) - return MULU(0) | MULU(16) | MULU(32) | MULU(48); -#undef MULU -} - -uint64_t HELPER(iwmmxt_macsw)(uint64_t a, uint64_t b) -{ -#define MACS(SHR) ( \ - EXTEND16((a >> SHR) & 0xffff) * EXTEND16S((b >> SHR) & 0xffff)) - return (int64_t) (MACS(0) + MACS(16) + MACS(32) + MACS(48)); -#undef MACS -} - -uint64_t HELPER(iwmmxt_macuw)(uint64_t a, uint64_t b) -{ -#define MACU(SHR) ( \ - (uint32_t) ((a >> SHR) & 0xffff) * \ - (uint32_t) ((b >> SHR) & 0xffff)) - return MACU(0) + MACU(16) + MACU(32) + MACU(48); -#undef MACU -} - -#define NZBIT8(x, i) \ - SIMD8_SET(NBIT8((x) & 0xff), SIMD_NBIT, i) | \ - SIMD8_SET(ZBIT8((x) & 0xff), SIMD_ZBIT, i) 
-#define NZBIT16(x, i) \ - SIMD16_SET(NBIT16((x) & 0xffff), SIMD_NBIT, i) | \ - SIMD16_SET(ZBIT16((x) & 0xffff), SIMD_ZBIT, i) -#define NZBIT32(x, i) \ - SIMD32_SET(NBIT32((x) & 0xffffffff), SIMD_NBIT, i) | \ - SIMD32_SET(ZBIT32((x) & 0xffffffff), SIMD_ZBIT, i) -#define NZBIT64(x) \ - SIMD64_SET(NBIT64(x), SIMD_NBIT) | \ - SIMD64_SET(ZBIT64(x), SIMD_ZBIT) -#define IWMMXT_OP_UNPACK(S, SH0, SH1, SH2, SH3) \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, b)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xff) << 0) | (((b >> SH0) & 0xff) << 8) | \ - (((a >> SH1) & 0xff) << 16) | (((b >> SH1) & 0xff) << 24) | \ - (((a >> SH2) & 0xff) << 32) | (((b >> SH2) & 0xff) << 40) | \ - (((a >> SH3) & 0xff) << 48) | (((b >> SH3) & 0xff) << 56); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, w)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xffff) << 0) | \ - (((b >> SH0) & 0xffff) << 16) | \ - (((a >> SH2) & 0xffff) << 32) | \ - (((b >> SH2) & 0xffff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 16, 1) | \ - NZBIT8(a >> 32, 2) | NZBIT8(a >> 48, 3); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, l)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - (((a >> SH0) & 0xffffffff) << 0) | \ - (((b >> SH0) & 0xffffffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ub)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - (((x >> SH0) & 0xff) << 0) | \ - (((x >> SH1) & 0xff) << 16) | \ - (((x >> SH2) & 0xff) << 32) | \ - (((x >> SH3) & 0xff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - 
NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, uw)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - (((x >> SH0) & 0xffff) << 0) | \ - (((x >> SH2) & 0xffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, ul)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = (((x >> SH0) & 0xffffffff) << 0); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sb)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - ((uint64_t) EXTEND8H((x >> SH0) & 0xff) << 0) | \ - ((uint64_t) EXTEND8H((x >> SH1) & 0xff) << 16) | \ - ((uint64_t) EXTEND8H((x >> SH2) & 0xff) << 32) | \ - ((uint64_t) EXTEND8H((x >> SH3) & 0xff) << 48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | \ - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sw)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = \ - ((uint64_t) EXTEND16((x >> SH0) & 0xffff) << 0) | \ - ((uint64_t) EXTEND16((x >> SH2) & 0xffff) << 32); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); \ - return x; \ -} \ -uint64_t HELPER(glue(iwmmxt_unpack, glue(S, sl)))(CPUARMState *env, \ - uint64_t x) \ -{ \ - x = EXTEND32((x >> SH0) & 0xffffffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x >> 0); \ - return x; \ -} -IWMMXT_OP_UNPACK(l, 0, 8, 16, 24) -IWMMXT_OP_UNPACK(h, 32, 40, 48, 56) - -#define IWMMXT_OP_CMP(SUFF, Tb, Tw, Tl, O) \ -uint64_t HELPER(glue(iwmmxt_, glue(SUFF, b)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = \ - CMP(0, Tb, O, 0xff) | CMP(8, Tb, O, 0xff) | \ - CMP(16, Tb, O, 0xff) | CMP(24, Tb, O, 0xff) | \ - CMP(32, Tb, O, 0xff) | CMP(40, Tb, O, 0xff) | \ - CMP(48, Tb, O, 0xff) | CMP(56, 
Tb, O, 0xff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | \ - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | \ - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | \ - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_, glue(SUFF, w)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = CMP(0, Tw, O, 0xffff) | CMP(16, Tw, O, 0xffff) | \ - CMP(32, Tw, O, 0xffff) | CMP(48, Tw, O, 0xffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | \ - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); \ - return a; \ -} \ -uint64_t HELPER(glue(iwmmxt_, glue(SUFF, l)))(CPUARMState *env, \ - uint64_t a, uint64_t b) \ -{ \ - a = CMP(0, Tl, O, 0xffffffff) | \ - CMP(32, Tl, O, 0xffffffff); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); \ - return a; \ -} -#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ - (TYPE) ((b >> SHR) & MASK)) ? (uint64_t) MASK : 0) << SHR) -IWMMXT_OP_CMP(cmpeq, uint8_t, uint16_t, uint32_t, ==) -IWMMXT_OP_CMP(cmpgts, int8_t, int16_t, int32_t, >) -IWMMXT_OP_CMP(cmpgtu, uint8_t, uint16_t, uint32_t, >) -#undef CMP -#define CMP(SHR, TYPE, OPER, MASK) ((((TYPE) ((a >> SHR) & MASK) OPER \ - (TYPE) ((b >> SHR) & MASK)) ? 
a : b) & ((uint64_t) MASK << SHR)) -IWMMXT_OP_CMP(mins, int8_t, int16_t, int32_t, <) -IWMMXT_OP_CMP(minu, uint8_t, uint16_t, uint32_t, <) -IWMMXT_OP_CMP(maxs, int8_t, int16_t, int32_t, >) -IWMMXT_OP_CMP(maxu, uint8_t, uint16_t, uint32_t, >) -#undef CMP -#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ - OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) -IWMMXT_OP_CMP(subn, uint8_t, uint16_t, uint32_t, -) -IWMMXT_OP_CMP(addn, uint8_t, uint16_t, uint32_t, +) -#undef CMP -/* TODO Signed- and Unsigned-Saturation */ -#define CMP(SHR, TYPE, OPER, MASK) ((uint64_t) (((TYPE) ((a >> SHR) & MASK) \ - OPER (TYPE) ((b >> SHR) & MASK)) & MASK) << SHR) -IWMMXT_OP_CMP(subu, uint8_t, uint16_t, uint32_t, -) -IWMMXT_OP_CMP(addu, uint8_t, uint16_t, uint32_t, +) -IWMMXT_OP_CMP(subs, int8_t, int16_t, int32_t, -) -IWMMXT_OP_CMP(adds, int8_t, int16_t, int32_t, +) -#undef CMP -#undef IWMMXT_OP_CMP - -#define AVGB(SHR) ((( \ - ((a >> SHR) & 0xff) + ((b >> SHR) & 0xff) + round) >> 1) << SHR) -#define IWMMXT_OP_AVGB(r) \ -uint64_t HELPER(iwmmxt_avgb##r)(CPUARMState *env, uint64_t a, uint64_t b) \ -{ \ - const int round = r; \ - a = AVGB(0) | AVGB(8) | AVGB(16) | AVGB(24) | \ - AVGB(32) | AVGB(40) | AVGB(48) | AVGB(56); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - SIMD8_SET(ZBIT8((a >> 0) & 0xff), SIMD_ZBIT, 0) | \ - SIMD8_SET(ZBIT8((a >> 8) & 0xff), SIMD_ZBIT, 1) | \ - SIMD8_SET(ZBIT8((a >> 16) & 0xff), SIMD_ZBIT, 2) | \ - SIMD8_SET(ZBIT8((a >> 24) & 0xff), SIMD_ZBIT, 3) | \ - SIMD8_SET(ZBIT8((a >> 32) & 0xff), SIMD_ZBIT, 4) | \ - SIMD8_SET(ZBIT8((a >> 40) & 0xff), SIMD_ZBIT, 5) | \ - SIMD8_SET(ZBIT8((a >> 48) & 0xff), SIMD_ZBIT, 6) | \ - SIMD8_SET(ZBIT8((a >> 56) & 0xff), SIMD_ZBIT, 7); \ - return a; \ -} -IWMMXT_OP_AVGB(0) -IWMMXT_OP_AVGB(1) -#undef IWMMXT_OP_AVGB -#undef AVGB - -#define AVGW(SHR) ((( \ - ((a >> SHR) & 0xffff) + ((b >> SHR) & 0xffff) + round) >> 1) << SHR) -#define IWMMXT_OP_AVGW(r) \ -uint64_t HELPER(iwmmxt_avgw##r)(CPUARMState *env, uint64_t 
a, uint64_t b) \ -{ \ - const int round = r; \ - a = AVGW(0) | AVGW(16) | AVGW(32) | AVGW(48); \ - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = \ - SIMD16_SET(ZBIT16((a >> 0) & 0xffff), SIMD_ZBIT, 0) | \ - SIMD16_SET(ZBIT16((a >> 16) & 0xffff), SIMD_ZBIT, 1) | \ - SIMD16_SET(ZBIT16((a >> 32) & 0xffff), SIMD_ZBIT, 2) | \ - SIMD16_SET(ZBIT16((a >> 48) & 0xffff), SIMD_ZBIT, 3); \ - return a; \ -} -IWMMXT_OP_AVGW(0) -IWMMXT_OP_AVGW(1) -#undef IWMMXT_OP_AVGW -#undef AVGW - -uint64_t HELPER(iwmmxt_align)(uint64_t a, uint64_t b, uint32_t n) -{ - a >>= n << 3; - a |= b << (64 - (n << 3)); - return a; -} - -uint64_t HELPER(iwmmxt_insr)(uint64_t x, uint32_t a, uint32_t b, uint32_t n) -{ - x &= ~((uint64_t) b << n); - x |= (uint64_t) (a & b) << n; - return x; -} - -uint32_t HELPER(iwmmxt_setpsr_nz)(uint64_t x) -{ - return SIMD64_SET((x == 0), SIMD_ZBIT) | - SIMD64_SET((x & (1ULL << 63)), SIMD_NBIT); -} - -uint64_t HELPER(iwmmxt_bcstb)(uint32_t arg) -{ - arg &= 0xff; - return - ((uint64_t) arg << 0 ) | ((uint64_t) arg << 8 ) | - ((uint64_t) arg << 16) | ((uint64_t) arg << 24) | - ((uint64_t) arg << 32) | ((uint64_t) arg << 40) | - ((uint64_t) arg << 48) | ((uint64_t) arg << 56); -} - -uint64_t HELPER(iwmmxt_bcstw)(uint32_t arg) -{ - arg &= 0xffff; - return - ((uint64_t) arg << 0 ) | ((uint64_t) arg << 16) | - ((uint64_t) arg << 32) | ((uint64_t) arg << 48); -} - -uint64_t HELPER(iwmmxt_bcstl)(uint32_t arg) -{ - return arg | ((uint64_t) arg << 32); -} - -uint64_t HELPER(iwmmxt_addcb)(uint64_t x) -{ - return - ((x >> 0) & 0xff) + ((x >> 8) & 0xff) + - ((x >> 16) & 0xff) + ((x >> 24) & 0xff) + - ((x >> 32) & 0xff) + ((x >> 40) & 0xff) + - ((x >> 48) & 0xff) + ((x >> 56) & 0xff); -} - -uint64_t HELPER(iwmmxt_addcw)(uint64_t x) -{ - return - ((x >> 0) & 0xffff) + ((x >> 16) & 0xffff) + - ((x >> 32) & 0xffff) + ((x >> 48) & 0xffff); -} - -uint64_t HELPER(iwmmxt_addcl)(uint64_t x) -{ - return (x & 0xffffffff) + (x >> 32); -} - -uint32_t HELPER(iwmmxt_msbb)(uint64_t x) -{ - return - ((x >> 
7) & 0x01) | ((x >> 14) & 0x02) | - ((x >> 21) & 0x04) | ((x >> 28) & 0x08) | - ((x >> 35) & 0x10) | ((x >> 42) & 0x20) | - ((x >> 49) & 0x40) | ((x >> 56) & 0x80); -} - -uint32_t HELPER(iwmmxt_msbw)(uint64_t x) -{ - return - ((x >> 15) & 0x01) | ((x >> 30) & 0x02) | - ((x >> 45) & 0x04) | ((x >> 52) & 0x08); -} - -uint32_t HELPER(iwmmxt_msbl)(uint64_t x) -{ - return ((x >> 31) & 0x01) | ((x >> 62) & 0x02); -} - -/* FIXME: Split wCASF setting into a separate op to avoid env use. */ -uint64_t HELPER(iwmmxt_srlw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((x & (0xffffll << 0)) >> n) & (0xffffll << 0)) | - (((x & (0xffffll << 16)) >> n) & (0xffffll << 16)) | - (((x & (0xffffll << 32)) >> n) & (0xffffll << 32)) | - (((x & (0xffffll << 48)) >> n) & (0xffffll << 48)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); - return x; -} - -uint64_t HELPER(iwmmxt_srll)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((x & (0xffffffffll << 0)) >> n) | - ((x >> n) & (0xffffffffll << 32)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); - return x; -} - -uint64_t HELPER(iwmmxt_srlq)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x >>= n; - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); - return x; -} - -uint64_t HELPER(iwmmxt_sllw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((x & (0xffffll << 0)) << n) & (0xffffll << 0)) | - (((x & (0xffffll << 16)) << n) & (0xffffll << 16)) | - (((x & (0xffffll << 32)) << n) & (0xffffll << 32)) | - (((x & (0xffffll << 48)) << n) & (0xffffll << 48)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); - return x; -} - -uint64_t HELPER(iwmmxt_slll)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((x << n) & (0xffffffffll << 0)) | - ((x & (0xffffffffll << 32)) << n); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | 
NZBIT32(x >> 32, 1); - return x; -} - -uint64_t HELPER(iwmmxt_sllq)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x <<= n; - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); - return x; -} - -uint64_t HELPER(iwmmxt_sraw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((uint64_t) ((EXTEND16(x >> 0) >> n) & 0xffff) << 0) | - ((uint64_t) ((EXTEND16(x >> 16) >> n) & 0xffff) << 16) | - ((uint64_t) ((EXTEND16(x >> 32) >> n) & 0xffff) << 32) | - ((uint64_t) ((EXTEND16(x >> 48) >> n) & 0xffff) << 48); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); - return x; -} - -uint64_t HELPER(iwmmxt_sral)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((EXTEND32(x >> 0) >> n) & 0xffffffff) << 0) | - (((EXTEND32(x >> 32) >> n) & 0xffffffff) << 32); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); - return x; -} - -uint64_t HELPER(iwmmxt_sraq)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (int64_t) x >> n; - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); - return x; -} - -uint64_t HELPER(iwmmxt_rorw)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((((x & (0xffffll << 0)) >> n) | - ((x & (0xffffll << 0)) << (16 - n))) & (0xffffll << 0)) | - ((((x & (0xffffll << 16)) >> n) | - ((x & (0xffffll << 16)) << (16 - n))) & (0xffffll << 16)) | - ((((x & (0xffffll << 32)) >> n) | - ((x & (0xffffll << 32)) << (16 - n))) & (0xffffll << 32)) | - ((((x & (0xffffll << 48)) >> n) | - ((x & (0xffffll << 48)) << (16 - n))) & (0xffffll << 48)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); - return x; -} - -uint64_t HELPER(iwmmxt_rorl)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ((x & (0xffffffffll << 0)) >> n) | - ((x >> n) & (0xffffffffll << 32)) | - ((x << (32 - n)) & (0xffffffffll << 0)) | - ((x & (0xffffffffll << 32)) << (32 - n)); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - 
NZBIT32(x >> 0, 0) | NZBIT32(x >> 32, 1); - return x; -} - -uint64_t HELPER(iwmmxt_rorq)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = ror64(x, n); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = NZBIT64(x); - return x; -} - -uint64_t HELPER(iwmmxt_shufh)(CPUARMState *env, uint64_t x, uint32_t n) -{ - x = (((x >> ((n << 4) & 0x30)) & 0xffff) << 0) | - (((x >> ((n << 2) & 0x30)) & 0xffff) << 16) | - (((x >> ((n << 0) & 0x30)) & 0xffff) << 32) | - (((x >> ((n >> 2) & 0x30)) & 0xffff) << 48); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(x >> 0, 0) | NZBIT16(x >> 16, 1) | - NZBIT16(x >> 32, 2) | NZBIT16(x >> 48, 3); - return x; -} - -/* TODO: Unsigned-Saturation */ -uint64_t HELPER(iwmmxt_packuw)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | - (((a >> 32) & 0xff) << 16) | (((a >> 48) & 0xff) << 24) | - (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | - (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); - return a; -} - -uint64_t HELPER(iwmmxt_packul)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | - (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); - return a; -} - -uint64_t HELPER(iwmmxt_packuq)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); - return a; -} - -/* TODO: Signed-Saturation */ -uint64_t HELPER(iwmmxt_packsw)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xff) << 0) | (((a >> 16) & 0xff) << 8) | - (((a >> 32) & 0xff) << 16) | 
(((a >> 48) & 0xff) << 24) | - (((b >> 0) & 0xff) << 32) | (((b >> 16) & 0xff) << 40) | - (((b >> 32) & 0xff) << 48) | (((b >> 48) & 0xff) << 56); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT8(a >> 0, 0) | NZBIT8(a >> 8, 1) | - NZBIT8(a >> 16, 2) | NZBIT8(a >> 24, 3) | - NZBIT8(a >> 32, 4) | NZBIT8(a >> 40, 5) | - NZBIT8(a >> 48, 6) | NZBIT8(a >> 56, 7); - return a; -} - -uint64_t HELPER(iwmmxt_packsl)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (((a >> 0) & 0xffff) << 0) | (((a >> 32) & 0xffff) << 16) | - (((b >> 0) & 0xffff) << 32) | (((b >> 32) & 0xffff) << 48); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT16(a >> 0, 0) | NZBIT16(a >> 16, 1) | - NZBIT16(a >> 32, 2) | NZBIT16(a >> 48, 3); - return a; -} - -uint64_t HELPER(iwmmxt_packsq)(CPUARMState *env, uint64_t a, uint64_t b) -{ - a = (a & 0xffffffff) | ((b & 0xffffffff) << 32); - env->iwmmxt.cregs[ARM_IWMMXT_wCASF] = - NZBIT32(a >> 0, 0) | NZBIT32(a >> 32, 1); - return a; -} - -uint64_t HELPER(iwmmxt_muladdsl)(uint64_t c, uint32_t a, uint32_t b) -{ - return c + ((int32_t) EXTEND32(a) * (int32_t) EXTEND32(b)); -} - -uint64_t HELPER(iwmmxt_muladdsw)(uint64_t c, uint32_t a, uint32_t b) -{ - c += EXTEND32(EXTEND16S((a >> 0) & 0xffff) * - EXTEND16S((b >> 0) & 0xffff)); - c += EXTEND32(EXTEND16S((a >> 16) & 0xffff) * - EXTEND16S((b >> 16) & 0xffff)); - return c; -} - -uint64_t HELPER(iwmmxt_muladdswl)(uint64_t c, uint32_t a, uint32_t b) -{ - return c + (EXTEND32(EXTEND16S(a & 0xffff) * - EXTEND16S(b & 0xffff))); -} diff --git a/target/arm/tcg/m_helper.c b/target/arm/tcg/m_helper.c index 28307b56151b9..d856e3bc8e2ab 100644 --- a/target/arm/tcg/m_helper.c +++ b/target/arm/tcg/m_helper.c @@ -2829,8 +2829,8 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op) ARMMMUFaultInfo fi = {}; /* We can ignore the return value as prot is always set */ - pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, targetsec, - &res, &fi, &mregion); + pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, PAGE_READ, 
mmu_idx, + targetsec, &res, &fi, &mregion); if (mregion == -1) { mrvalid = false; mregion = 0; diff --git a/target/arm/tcg/meson.build b/target/arm/tcg/meson.build index 895facdc30b3d..1b115656c464f 100644 --- a/target/arm/tcg/meson.build +++ b/target/arm/tcg/meson.build @@ -66,7 +66,6 @@ arm_common_ss.add(files( arm_common_system_ss.add(files( 'cpregs-at.c', 'hflags.c', - 'iwmmxt_helper.c', 'neon_helper.c', 'tlb_helper.c', 'tlb-insns.c', @@ -74,7 +73,6 @@ arm_common_system_ss.add(files( )) arm_user_ss.add(files( 'hflags.c', - 'iwmmxt_helper.c', 'neon_helper.c', 'tlb_helper.c', 'vfp_helper.c', diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c index 0efc18a181ea4..bb48fe359b8c0 100644 --- a/target/arm/tcg/mte_helper.c +++ b/target/arm/tcg/mte_helper.c @@ -21,12 +21,13 @@ #include "qemu/log.h" #include "cpu.h" #include "internals.h" +#include "exec/target_page.h" #include "exec/page-protection.h" #ifdef CONFIG_USER_ONLY #include "user/cpu_loop.h" #include "user/page-protection.h" #else -#include "system/ram_addr.h" +#include "system/physmem.h" #endif #include "accel/tcg/cpu-ldst.h" #include "accel/tcg/probe.h" @@ -188,7 +189,7 @@ uint8_t *allocation_tag_mem_probe(CPUARMState *env, int ptr_mmu_idx, */ if (tag_access == MMU_DATA_STORE) { ram_addr_t tag_ra = memory_region_get_ram_addr(mr) + xlat; - cpu_physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION); + physical_memory_set_dirty_flag(tag_ra, DIRTY_MEMORY_MIGRATION); } return memory_region_get_ram_ptr(mr) + xlat; @@ -591,7 +592,7 @@ static void mte_async_check_fail(CPUARMState *env, uint64_t dirty_ptr, * which is rather sooner than "normal". But the alternative * is waiting until the next syscall. 
*/ - qemu_cpu_kick(env_cpu(env)); + cpu_exit(env_cpu(env)); #endif } @@ -604,7 +605,7 @@ void mte_check_fail(CPUARMState *env, uint32_t desc, int el, reg_el, tcf; uint64_t sctlr; - reg_el = regime_el(env, arm_mmu_idx); + reg_el = regime_el(arm_mmu_idx); sctlr = env->cp15.sctlr_el[reg_el]; switch (arm_mmu_idx) { diff --git a/target/arm/tcg/op_helper.c b/target/arm/tcg/op_helper.c index 575e566280ba0..4fbd219555df1 100644 --- a/target/arm/tcg/op_helper.c +++ b/target/arm/tcg/op_helper.c @@ -46,7 +46,7 @@ int exception_target_el(CPUARMState *env) } void raise_exception(CPUARMState *env, uint32_t excp, - uint32_t syndrome, uint32_t target_el) + uint64_t syndrome, uint32_t target_el) { CPUState *cs = env_cpu(env); @@ -70,7 +70,7 @@ void raise_exception(CPUARMState *env, uint32_t excp, cpu_loop_exit(cs); } -void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome, +void raise_exception_ra(CPUARMState *env, uint32_t excp, uint64_t syndrome, uint32_t target_el, uintptr_t ra) { CPUState *cs = env_cpu(env); @@ -768,12 +768,6 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, assert(ri != NULL); - if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14 - && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) { - res = CP_ACCESS_UNDEFINED; - goto fail; - } - if (ri->accessfn) { res = ri->accessfn(env, ri, isread); } @@ -887,6 +881,13 @@ const void *HELPER(access_check_cp_reg)(CPUARMState *env, uint32_t key, } syndrome = syn_uncategorized(); break; + case CP_ACCESS_EXLOCK: + /* + * CP_ACCESS_EXLOCK is always directed to the current EL, + * which is going to be the same as the usual target EL. 
+ */ + syndrome = syn_gcs_exlock(); + break; default: g_assert_not_reached(); } diff --git a/target/arm/tcg/sme_helper.c b/target/arm/tcg/sme_helper.c index bb8ed1ed0e2f3..075360d8b8abf 100644 --- a/target/arm/tcg/sme_helper.c +++ b/target/arm/tcg/sme_helper.c @@ -666,19 +666,16 @@ void sme_ld1(CPUARMState *env, void *za, uint64_t *vg, static inline QEMU_ALWAYS_INLINE void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, - target_ulong addr, uint32_t desc, uintptr_t ra, + target_ulong addr, uint64_t desc, uintptr_t ra, const int esz, bool vertical, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn, ClearFn *clr_fn, CopyFn *cpy_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. */ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -691,28 +688,28 @@ void sme_ld1_mte(CPUARMState *env, void *za, uint64_t *vg, #define DO_LD(L, END, ESZ) \ void HELPER(sme_ld1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ clear_horizontal, copy_horizontal); \ } \ void HELPER(sme_ld1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ clear_vertical_##L, copy_vertical_##L); \ } \ void HELPER(sme_ld1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ 
sve_ld1##L##L##END##_host, sve_ld1##L##L##END##_tlb, \ clear_horizontal, copy_horizontal); \ } \ void HELPER(sme_ld1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_ld1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ sme_ld1##L##END##_v_host, sme_ld1##L##END##_v_tlb, \ @@ -854,16 +851,13 @@ void sme_st1(CPUARMState *env, void *za, uint64_t *vg, static inline QEMU_ALWAYS_INLINE void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, - uint32_t desc, uintptr_t ra, int esz, bool vertical, + uint64_t desc, uintptr_t ra, int esz, bool vertical, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. 
*/ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -876,25 +870,25 @@ void sme_st1_mte(CPUARMState *env, void *za, uint64_t *vg, target_ulong addr, #define DO_ST(L, END, ESZ) \ void HELPER(sme_st1##L##END##_h)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, false, \ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ } \ void HELPER(sme_st1##L##END##_v)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1(env, za, vg, addr, desc, GETPC(), ESZ, 0, true, \ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ } \ void HELPER(sme_st1##L##END##_h_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, false, \ sve_st1##L##L##END##_host, sve_st1##L##L##END##_tlb); \ } \ void HELPER(sme_st1##L##END##_v_mte)(CPUARMState *env, void *za, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sme_st1_mte(env, za, vg, addr, desc, GETPC(), ESZ, true, \ sme_st1##L##END##_v_host, sme_st1##L##END##_v_tlb); \ diff --git a/target/arm/tcg/sve.decode b/target/arm/tcg/sve.decode index 2efd5f57e457e..ab63cfaa0f0c0 100644 --- a/target/arm/tcg/sve.decode +++ b/target/arm/tcg/sve.decode @@ -1052,9 +1052,11 @@ FCMLA_zzxz 01100100 11 1 index:1 rm:4 0001 rot:2 rn:5 rd:5 \ ### SVE FP Multiply-Add Indexed Group # SVE floating-point multiply-add (indexed) +FMLA_zzxz 01100100 0. 1 ..... 000010 ..... ..... @rrxr_3 esz=0 FMLA_zzxz 01100100 0. 1 ..... 000000 ..... ..... @rrxr_3 esz=1 FMLA_zzxz 01100100 10 1 ..... 000000 ..... ..... @rrxr_2 esz=2 FMLA_zzxz 01100100 11 1 ..... 000000 ..... ..... @rrxr_1 esz=3 +FMLS_zzxz 01100100 0. 1 ..... 000011 ..... ..... @rrxr_3 esz=0 FMLS_zzxz 01100100 0. 
1 ..... 000001 ..... ..... @rrxr_3 esz=1 FMLS_zzxz 01100100 10 1 ..... 000001 ..... ..... @rrxr_2 esz=2 FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 @@ -1062,6 +1064,7 @@ FMLS_zzxz 01100100 11 1 ..... 000001 ..... ..... @rrxr_1 esz=3 ### SVE FP Multiply Indexed Group # SVE floating-point multiply (indexed) +FMUL_zzx 01100100 0. 1 ..... 001010 ..... ..... @rrx_3 esz=0 FMUL_zzx 01100100 0. 1 ..... 001000 ..... ..... @rrx_3 esz=1 FMUL_zzx 01100100 10 1 ..... 001000 ..... ..... @rrx_2 esz=2 FMUL_zzx 01100100 11 1 ..... 001000 ..... ..... @rrx_1 esz=3 @@ -1340,9 +1343,9 @@ LD1_zprz 1100010 10 1. ..... 1.. ... ..... ..... \ LD1_zprz 1100010 11 1. ..... 11. ... ..... ..... \ @rprr_g_load_sc esz=3 msz=3 u=1 -# LD1Q -LD1_zprz 1100 0100 000 rm:5 101 pg:3 rn:5 rd:5 \ - &rprr_gather_load u=0 ff=0 xs=2 esz=4 msz=4 scale=0 +# LD1Q. Note that this is subtly different from LD1_zprz because +# it is vector + scalar, not scalar + vector. +LD1Q 1100 0100 000 rm:5 101 pg:3 rn:5 rd:5 # SVE 64-bit gather load (vector plus immediate) LD1_zpiz 1100010 .. 01 ..... 1.. ... ..... ..... \ @@ -1447,9 +1450,9 @@ ST1_zprz 1110010 .. 01 ..... 101 ... ..... ..... \ ST1_zprz 1110010 .. 00 ..... 101 ... ..... ..... \ @rprr_scatter_store xs=2 esz=3 scale=0 -# ST1Q -ST1_zprz 1110 0100 001 rm:5 001 pg:3 rn:5 rd:5 \ - &rprr_scatter_store xs=2 msz=4 esz=4 scale=0 +# ST1Q. Note that this is subtly different from ST1_zprz because +# it is vector + scalar, not scalar + vector. +ST1Q 1110 0100 001 rm:5 001 pg:3 rn:5 rd:5 # SVE 64-bit scatter store (vector plus immediate) ST1_zpiz 1110010 .. 10 ..... 101 ... ..... ..... 
\ diff --git a/target/arm/tcg/sve_helper.c b/target/arm/tcg/sve_helper.c index 43b872c7fd6f8..c442fcb540df2 100644 --- a/target/arm/tcg/sve_helper.c +++ b/target/arm/tcg/sve_helper.c @@ -4484,33 +4484,35 @@ static TYPE FUNC##_reduce(TYPE *data, float_status *status, uintptr_t n) \ } \ } \ uint64_t helper_sve_##NAME##v_##SUF(void *vn, void *vg, \ - float_status *s, uint32_t desc) \ + float_status *status, uint32_t desc) \ { \ uintptr_t i, oprsz = simd_oprsz(desc), maxsz = simd_data(desc); \ TYPE data[sizeof(ARMVectorReg) / sizeof(TYPE)]; \ + TYPE ident = IDENT; \ for (i = 0; i < oprsz; ) { \ uint16_t pg = *(uint16_t *)(vg + H1_2(i >> 3)); \ do { \ TYPE nn = *(TYPE *)(vn + H(i)); \ - *(TYPE *)((void *)data + i) = (pg & 1 ? nn : IDENT); \ + *(TYPE *)((void *)data + i) = (pg & 1 ? nn : ident); \ i += sizeof(TYPE), pg >>= sizeof(TYPE); \ } while (i & 15); \ } \ for (; i < maxsz; i += sizeof(TYPE)) { \ - *(TYPE *)((void *)data + i) = IDENT; \ + *(TYPE *)((void *)data + i) = ident; \ } \ - return FUNC##_reduce(data, s, maxsz / sizeof(TYPE)); \ + return FUNC##_reduce(data, status, maxsz / sizeof(TYPE)); \ } \ void helper_sve2p1_##NAME##qv_##SUF(void *vd, void *vn, void *vg, \ float_status *status, uint32_t desc) \ { \ unsigned oprsz = simd_oprsz(desc), segments = oprsz / 16; \ + TYPE ident = IDENT; \ for (unsigned e = 0; e < 16; e += sizeof(TYPE)) { \ TYPE data[ARM_MAX_VQ]; \ for (unsigned s = 0; s < segments; s++) { \ uint16_t pg = *(uint16_t *)(vg + H1_2(s * 2)); \ - TYPE nn = *(TYPE *)(vn + H(s * 16 + H(e))); \ - data[s] = (pg >> e) & 1 ? nn : IDENT; \ + TYPE nn = *(TYPE *)(vn + (s * 16 + H(e))); \ + data[s] = (pg >> e) & 1 ? 
nn : ident; \ } \ *(TYPE *)(vd + H(e)) = FUNC##_reduce(data, status, segments); \ } \ @@ -4521,14 +4523,17 @@ DO_REDUCE(fadd,h, float16, H1_2, float16_add, float16_zero) DO_REDUCE(fadd,s, float32, H1_4, float32_add, float32_zero) DO_REDUCE(fadd,d, float64, H1_8, float64_add, float64_zero) -/* Identity is floatN_default_nan, without the function call. */ -DO_REDUCE(fminnm,h, float16, H1_2, float16_minnum, 0x7E00) -DO_REDUCE(fminnm,s, float32, H1_4, float32_minnum, 0x7FC00000) -DO_REDUCE(fminnm,d, float64, H1_8, float64_minnum, 0x7FF8000000000000ULL) +/* + * We can't avoid the function call for the default NaN value, because + * it changes when FPCR.AH is set. + */ +DO_REDUCE(fminnm,h, float16, H1_2, float16_minnum, float16_default_nan(status)) +DO_REDUCE(fminnm,s, float32, H1_4, float32_minnum, float32_default_nan(status)) +DO_REDUCE(fminnm,d, float64, H1_8, float64_minnum, float64_default_nan(status)) -DO_REDUCE(fmaxnm,h, float16, H1_2, float16_maxnum, 0x7E00) -DO_REDUCE(fmaxnm,s, float32, H1_4, float32_maxnum, 0x7FC00000) -DO_REDUCE(fmaxnm,d, float64, H1_8, float64_maxnum, 0x7FF8000000000000ULL) +DO_REDUCE(fmaxnm,h, float16, H1_2, float16_maxnum, float16_default_nan(status)) +DO_REDUCE(fmaxnm,s, float32, H1_4, float32_maxnum, float32_default_nan(status)) +DO_REDUCE(fmaxnm,d, float64, H1_8, float64_maxnum, float64_default_nan(status)) DO_REDUCE(fmin,h, float16, H1_2, float16_min, float16_infinity) DO_REDUCE(fmin,s, float32, H1_4, float32_min, float32_infinity) @@ -4629,14 +4634,17 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, void *vg, \ } while (i != 0); \ } +DO_ZPZZ_FP(sve_fadd_b16, uint16_t, H1_2, bfloat16_add) DO_ZPZZ_FP(sve_fadd_h, uint16_t, H1_2, float16_add) DO_ZPZZ_FP(sve_fadd_s, uint32_t, H1_4, float32_add) DO_ZPZZ_FP(sve_fadd_d, uint64_t, H1_8, float64_add) +DO_ZPZZ_FP(sve_fsub_b16, uint16_t, H1_2, bfloat16_sub) DO_ZPZZ_FP(sve_fsub_h, uint16_t, H1_2, float16_sub) DO_ZPZZ_FP(sve_fsub_s, uint32_t, H1_4, float32_sub) DO_ZPZZ_FP(sve_fsub_d, uint64_t, 
H1_8, float64_sub) +DO_ZPZZ_FP(sve_fmul_b16, uint16_t, H1_2, bfloat16_mul) DO_ZPZZ_FP(sve_fmul_h, uint16_t, H1_2, float16_mul) DO_ZPZZ_FP(sve_fmul_s, uint32_t, H1_4, float32_mul) DO_ZPZZ_FP(sve_fmul_d, uint64_t, H1_8, float64_mul) @@ -4645,26 +4653,32 @@ DO_ZPZZ_FP(sve_fdiv_h, uint16_t, H1_2, float16_div) DO_ZPZZ_FP(sve_fdiv_s, uint32_t, H1_4, float32_div) DO_ZPZZ_FP(sve_fdiv_d, uint64_t, H1_8, float64_div) +DO_ZPZZ_FP(sve_fmin_b16, uint16_t, H1_2, bfloat16_min) DO_ZPZZ_FP(sve_fmin_h, uint16_t, H1_2, float16_min) DO_ZPZZ_FP(sve_fmin_s, uint32_t, H1_4, float32_min) DO_ZPZZ_FP(sve_fmin_d, uint64_t, H1_8, float64_min) +DO_ZPZZ_FP(sve_fmax_b16, uint16_t, H1_2, bfloat16_max) DO_ZPZZ_FP(sve_fmax_h, uint16_t, H1_2, float16_max) DO_ZPZZ_FP(sve_fmax_s, uint32_t, H1_4, float32_max) DO_ZPZZ_FP(sve_fmax_d, uint64_t, H1_8, float64_max) +DO_ZPZZ_FP(sve_ah_fmin_b16, uint16_t, H1_2, helper_sme2_ah_fmin_b16) DO_ZPZZ_FP(sve_ah_fmin_h, uint16_t, H1_2, helper_vfp_ah_minh) DO_ZPZZ_FP(sve_ah_fmin_s, uint32_t, H1_4, helper_vfp_ah_mins) DO_ZPZZ_FP(sve_ah_fmin_d, uint64_t, H1_8, helper_vfp_ah_mind) +DO_ZPZZ_FP(sve_ah_fmax_b16, uint16_t, H1_2, helper_sme2_ah_fmax_b16) DO_ZPZZ_FP(sve_ah_fmax_h, uint16_t, H1_2, helper_vfp_ah_maxh) DO_ZPZZ_FP(sve_ah_fmax_s, uint32_t, H1_4, helper_vfp_ah_maxs) DO_ZPZZ_FP(sve_ah_fmax_d, uint64_t, H1_8, helper_vfp_ah_maxd) +DO_ZPZZ_FP(sve_fminnum_b16, uint16_t, H1_2, bfloat16_minnum) DO_ZPZZ_FP(sve_fminnum_h, uint16_t, H1_2, float16_minnum) DO_ZPZZ_FP(sve_fminnum_s, uint32_t, H1_4, float32_minnum) DO_ZPZZ_FP(sve_fminnum_d, uint64_t, H1_8, float64_minnum) +DO_ZPZZ_FP(sve_fmaxnum_b16, uint16_t, H1_2, bfloat16_maxnum) DO_ZPZZ_FP(sve_fmaxnum_h, uint16_t, H1_2, float16_maxnum) DO_ZPZZ_FP(sve_fmaxnum_s, uint32_t, H1_4, float32_maxnum) DO_ZPZZ_FP(sve_fmaxnum_d, uint64_t, H1_8, float64_maxnum) @@ -5090,6 +5104,75 @@ DO_ZPZ_FP(flogb_d, float64, H1_8, do_float64_logb_as_int) #undef DO_ZPZ_FP +static void do_fmla_zpzzz_b16(void *vd, void *vn, void *vm, void *va, void *vg, + 
float_status *status, uint32_t desc, + uint16_t neg1, uint16_t neg3, int flags) +{ + intptr_t i = simd_oprsz(desc); + uint64_t *g = vg; + + do { + uint64_t pg = g[(i - 1) >> 6]; + do { + i -= 2; + if (likely((pg >> (i & 63)) & 1)) { + float16 e1, e2, e3, r; + + e1 = *(uint16_t *)(vn + H1_2(i)) ^ neg1; + e2 = *(uint16_t *)(vm + H1_2(i)); + e3 = *(uint16_t *)(va + H1_2(i)) ^ neg3; + r = bfloat16_muladd(e1, e2, e3, flags, status); + *(uint16_t *)(vd + H1_2(i)) = r; + } + } while (i & 63); + } while (i != 0); +} + +void HELPER(sve_fmla_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, 0); +} + +void HELPER(sve_fmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0x8000, 0, 0); +} + +void HELPER(sve_fnmla_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0x8000, 0x8000, 0); +} + +void HELPER(sve_fnmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0x8000, 0); +} + +void HELPER(sve_ah_fmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product); +} + +void HELPER(sve_ah_fnmla_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, + float_muladd_negate_product | float_muladd_negate_c); +} + +void HELPER(sve_ah_fnmls_zpzzz_b16)(void *vd, void *vn, void *vm, void *va, + void *vg, float_status *status, uint32_t desc) +{ + do_fmla_zpzzz_b16(vd, vn, vm, va, vg, status, desc, 0, 0, + 
float_muladd_negate_c); +} + static void do_fmla_zpzzz_h(void *vd, void *vn, void *vm, void *va, void *vg, float_status *status, uint32_t desc, uint16_t neg1, uint16_t neg3, int flags) @@ -6279,17 +6362,14 @@ void sve_ldN_r(CPUARMState *env, uint64_t *vg, const target_ulong addr, static inline QEMU_ALWAYS_INLINE void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, - uint32_t desc, const uintptr_t ra, + uint64_t desc, const uintptr_t ra, const int esz, const int msz, const int N, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. */ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -6301,13 +6381,13 @@ void sve_ldN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, #define DO_LD1_1(NAME, ESZ) \ void HELPER(sve_##NAME##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, 0, \ sve_##NAME##_host, sve_##NAME##_tlb); \ } \ void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, 1, \ sve_##NAME##_host, sve_##NAME##_tlb); \ @@ -6315,25 +6395,25 @@ void HELPER(sve_##NAME##_r_mte)(CPUARMState *env, void *vg, \ #define DO_LD1_2(NAME, ESZ, MSZ) \ void HELPER(sve_##NAME##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve_##NAME##_be_r)(CPUARMState *env, void *vg, \ - 
target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, 0, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ } \ void HELPER(sve_##NAME##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve_##NAME##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, 1, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ @@ -6359,21 +6439,21 @@ DO_LD1_2(ld1sds, MO_64, MO_32) DO_LD1_2(ld1dd, MO_64, MO_64) -DO_LD1_2(ld1squ, MO_32, MO_128) -DO_LD1_2(ld1dqu, MO_64, MO_128) +DO_LD1_2(ld1squ, MO_128, MO_32) +DO_LD1_2(ld1dqu, MO_128, MO_64) #undef DO_LD1_1 #undef DO_LD1_2 #define DO_LDN_1(N) \ void HELPER(sve_ld##N##bb_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, 0, \ sve_ld1bb_host, sve_ld1bb_tlb); \ } \ void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), MO_8, MO_8, N, \ sve_ld1bb_host, sve_ld1bb_tlb); \ @@ -6381,25 +6461,25 @@ void HELPER(sve_ld##N##bb_r_mte)(CPUARMState *env, void *vg, \ #define DO_LDN_2(N, SUFF, ESZ) \ void HELPER(sve_ld##N##SUFF##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, 0, \ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \ } \ void HELPER(sve_ld##N##SUFF##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r(env, vg, addr, desc, 
GETPC(), ESZ, ESZ, N, 0, \ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \ } \ void HELPER(sve_ld##N##SUFF##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \ sve_ld1##SUFF##_le_host, sve_ld1##SUFF##_le_tlb); \ } \ void HELPER(sve_ld##N##SUFF##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldN_r_mte(env, vg, addr, desc, GETPC(), ESZ, ESZ, N, \ sve_ld1##SUFF##_be_host, sve_ld1##SUFF##_be_tlb); \ @@ -6644,17 +6724,14 @@ void sve_ldnfff1_r(CPUARMState *env, void *vg, const target_ulong addr, static inline QEMU_ALWAYS_INLINE void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr, - uint32_t desc, const uintptr_t retaddr, + uint64_t desc, const uintptr_t retaddr, const int esz, const int msz, const SVEContFault fault, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. 
*/ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -6667,25 +6744,25 @@ void sve_ldnfff1_r_mte(CPUARMState *env, void *vg, target_ulong addr, #define DO_LDFF1_LDNF1_1(PART, ESZ) \ void HELPER(sve_ldff1##PART##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_FIRST, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldnf1##PART##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MO_8, FAULT_NO, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldff1##PART##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_FIRST, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ } \ void HELPER(sve_ldnf1##PART##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, FAULT_NO, \ sve_ld1##PART##_host, sve_ld1##PART##_tlb); \ @@ -6693,49 +6770,49 @@ void HELPER(sve_ldnf1##PART##_r_mte)(CPUARMState *env, void *vg, \ #define DO_LDFF1_LDNF1_2(PART, ESZ, MSZ) \ void HELPER(sve_ldff1##PART##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldnf1##PART##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void 
HELPER(sve_ldff1##PART##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldnf1##PART##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r(env, vg, addr, desc, GETPC(), 0, ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldff1##PART##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldnf1##PART##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_le_host, sve_ld1##PART##_le_tlb); \ } \ void HELPER(sve_ldff1##PART##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_FIRST, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ } \ void HELPER(sve_ldnf1##PART##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_ldnfff1_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, FAULT_NO, \ sve_ld1##PART##_be_host, sve_ld1##PART##_be_tlb); \ @@ -6902,17 +6979,14 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, static inline QEMU_ALWAYS_INLINE void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, - uint32_t desc, const uintptr_t ra, + uint64_t desc, const uintptr_t ra, const int esz, const int msz, const int N, sve_ldst1_host_fn *host_fn, 
sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; int bit55 = extract64(addr, 55, 1); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Perform gross MTE suppression early. */ if (!tbi_check(mtedesc, bit55) || tcma_check(mtedesc, bit55, allocation_tag_from_addr(addr))) { @@ -6924,13 +6998,13 @@ void sve_stN_r_mte(CPUARMState *env, uint64_t *vg, target_ulong addr, #define DO_STN_1(N, NAME, ESZ) \ void HELPER(sve_st##N##NAME##_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, 0, \ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \ } \ void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MO_8, N, \ sve_st1##NAME##_host, sve_st1##NAME##_tlb); \ @@ -6938,25 +7012,25 @@ void HELPER(sve_st##N##NAME##_r_mte)(CPUARMState *env, void *vg, \ #define DO_STN_2(N, NAME, ESZ, MSZ) \ void HELPER(sve_st##N##NAME##_le_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \ } \ void HELPER(sve_st##N##NAME##_be_r)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, 0, \ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \ } \ void HELPER(sve_st##N##NAME##_le_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \ sve_st1##NAME##_le_host, sve_st1##NAME##_le_tlb); \ } \ void 
HELPER(sve_st##N##NAME##_be_r_mte)(CPUARMState *env, void *vg, \ - target_ulong addr, uint32_t desc) \ + target_ulong addr, uint64_t desc) \ { \ sve_stN_r_mte(env, vg, addr, desc, GETPC(), ESZ, MSZ, N, \ sve_st1##NAME##_be_host, sve_st1##NAME##_be_tlb); \ @@ -7100,14 +7174,12 @@ void sve_ld1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, static inline QEMU_ALWAYS_INLINE void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, - target_ulong base, uint32_t desc, uintptr_t retaddr, + target_ulong base, uint64_t desc, uintptr_t retaddr, int esize, int msize, zreg_off_fn *off_fn, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; /* * ??? TODO: For the 32-bit offset extractions, base + ofs cannot @@ -7121,13 +7193,13 @@ void sve_ld1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, #define DO_LD1_ZPZ_S(MEM, OFS, MSZ) \ void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ @@ -7135,18 +7207,32 @@ void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ #define DO_LD1_ZPZ_D(MEM, OFS, MSZ) \ void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ 
{ \ sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } +#define DO_LD1_ZPZ_Q(MEM, OFS, MSZ) \ +void HELPER(sve_ld##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_ld1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 16, 1 << MSZ, \ + off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ +} \ +void HELPER(sve_ld##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_ld1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 16, 1 << MSZ, \ + off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ +} + DO_LD1_ZPZ_S(bsu, zsu, MO_8) DO_LD1_ZPZ_S(bsu, zss, MO_8) DO_LD1_ZPZ_D(bdu, zsu, MO_8) @@ -7211,8 +7297,8 @@ DO_LD1_ZPZ_D(dd_be, zsu, MO_64) DO_LD1_ZPZ_D(dd_be, zss, MO_64) DO_LD1_ZPZ_D(dd_be, zd, MO_64) -DO_LD1_ZPZ_D(qq_le, zd, MO_128) -DO_LD1_ZPZ_D(qq_be, zd, MO_128) +DO_LD1_ZPZ_Q(qq_le, zd, MO_128) +DO_LD1_ZPZ_Q(qq_be, zd, MO_128) #undef DO_LD1_ZPZ_S #undef DO_LD1_ZPZ_D @@ -7312,15 +7398,13 @@ void sve_ldff1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, static inline QEMU_ALWAYS_INLINE void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, - target_ulong base, uint32_t desc, uintptr_t retaddr, + target_ulong base, uint64_t desc, uintptr_t retaddr, const int esz, const int msz, zreg_off_fn *off_fn, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Remove mtedesc from the normal sve descriptor. 
*/ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; /* * ??? TODO: For the 32-bit offset extractions, base + ofs cannot @@ -7335,14 +7419,14 @@ void sve_ldff1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, #define DO_LDFF1_ZPZ_S(MEM, OFS, MSZ) \ void HELPER(sve_ldff##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_32, MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ldff##MEM##_##OFS##_mte) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_32, MSZ, \ off_##OFS##_s, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ @@ -7351,14 +7435,14 @@ void HELPER(sve_ldff##MEM##_##OFS##_mte) \ #define DO_LDFF1_ZPZ_D(MEM, OFS, MSZ) \ void HELPER(sve_ldff##MEM##_##OFS) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z(env, vd, vg, vm, base, desc, GETPC(), 0, MO_64, MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ } \ void HELPER(sve_ldff##MEM##_##OFS##_mte) \ (CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_ldff1_z_mte(env, vd, vg, vm, base, desc, GETPC(), MO_64, MSZ, \ off_##OFS##_d, sve_ld1##MEM##_host, sve_ld1##MEM##_tlb); \ @@ -7517,14 +7601,12 @@ void sve_st1_z(CPUARMState *env, void *vd, uint64_t *vg, void *vm, static inline QEMU_ALWAYS_INLINE void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, - target_ulong base, uint32_t desc, uintptr_t retaddr, + target_ulong base, uint64_t desc, uintptr_t retaddr, int esize, int msize, zreg_off_fn *off_fn, 
sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); - /* Remove mtedesc from the normal sve descriptor. */ - desc = extract32(desc, 0, SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); + uint32_t mtedesc = desc >> 32; /* * ??? TODO: For the 32-bit offset extractions, base + ofs cannot @@ -7538,13 +7620,13 @@ void sve_st1_z_mte(CPUARMState *env, void *vd, uint64_t *vg, void *vm, #define DO_ST1_ZPZ_S(MEM, OFS, MSZ) \ void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 4, 1 << MSZ, \ off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ } \ void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 4, 1 << MSZ, \ off_##OFS##_s, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ @@ -7552,18 +7634,32 @@ void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ #define DO_ST1_ZPZ_D(MEM, OFS, MSZ) \ void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 8, 1 << MSZ, \ off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ } \ void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ - void *vm, target_ulong base, uint32_t desc) \ + void *vm, target_ulong base, uint64_t desc) \ { \ sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 8, 1 << MSZ, \ off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ } +#define DO_ST1_ZPZ_Q(MEM, OFS, MSZ) \ +void HELPER(sve_st##MEM##_##OFS)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ 
\ + sve_st1_z(env, vd, vg, vm, base, desc, GETPC(), 0, 16, 1 << MSZ, \ + off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ +} \ +void HELPER(sve_st##MEM##_##OFS##_mte)(CPUARMState *env, void *vd, void *vg, \ + void *vm, target_ulong base, uint64_t desc) \ +{ \ + sve_st1_z_mte(env, vd, vg, vm, base, desc, GETPC(), 16, 1 << MSZ, \ + off_##OFS##_d, sve_st1##MEM##_host, sve_st1##MEM##_tlb); \ +} + DO_ST1_ZPZ_S(bs, zsu, MO_8) DO_ST1_ZPZ_S(hs_le, zsu, MO_16) DO_ST1_ZPZ_S(hs_be, zsu, MO_16) @@ -7600,8 +7696,8 @@ DO_ST1_ZPZ_D(sd_be, zd, MO_32) DO_ST1_ZPZ_D(dd_le, zd, MO_64) DO_ST1_ZPZ_D(dd_be, zd, MO_64) -DO_ST1_ZPZ_D(qq_le, zd, MO_128) -DO_ST1_ZPZ_D(qq_be, zd, MO_128) +DO_ST1_ZPZ_Q(qq_le, zd, MO_128) +DO_ST1_ZPZ_Q(qq_be, zd, MO_128) #undef DO_ST1_ZPZ_S #undef DO_ST1_ZPZ_D @@ -7770,14 +7866,15 @@ static void sve2p1_cont_ldst_mte_check(SVEContLdSt *info, CPUARMState *env, static inline QEMU_ALWAYS_INLINE void sve2p1_ld1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr, - uint32_t png, uint32_t desc, + uint32_t png, uint64_t desc64, const uintptr_t ra, const MemOp esz, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { + uint32_t mtedesc = desc64 >> 32; + uint32_t desc = desc64; const unsigned N = (desc >> SIMD_DATA_SHIFT) & 1 ? 
4 : 2; const unsigned rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4); - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); const intptr_t reg_max = simd_oprsz(desc); const unsigned esize = 1 << esz; intptr_t count_off, count_last; @@ -7912,7 +8009,7 @@ void sve2p1_ld1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr, } void HELPER(sve2p1_ld1bb_c)(CPUARMState *env, void *vd, target_ulong addr, - uint32_t png, uint32_t desc) + uint32_t png, uint64_t desc) { sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), MO_8, sve_ld1bb_host, sve_ld1bb_tlb); @@ -7921,14 +8018,14 @@ void HELPER(sve2p1_ld1bb_c)(CPUARMState *env, void *vd, target_ulong addr, #define DO_LD1_2(NAME, ESZ) \ void HELPER(sve2p1_##NAME##_le_c)(CPUARMState *env, void *vd, \ target_ulong addr, uint32_t png, \ - uint32_t desc) \ + uint64_t desc) \ { \ sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve2p1_##NAME##_be_c)(CPUARMState *env, void *vd, \ target_ulong addr, uint32_t png, \ - uint32_t desc) \ + uint64_t desc) \ { \ sve2p1_ld1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ @@ -7942,14 +8039,15 @@ DO_LD1_2(ld1dd, MO_64) static inline QEMU_ALWAYS_INLINE void sve2p1_st1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr, - uint32_t png, uint32_t desc, + uint32_t png, uint64_t desc64, const uintptr_t ra, const int esz, sve_ldst1_host_fn *host_fn, sve_ldst1_tlb_fn *tlb_fn) { + uint32_t mtedesc = desc64 >> 32; + uint32_t desc = desc64; const unsigned N = (desc >> SIMD_DATA_SHIFT) & 1 ? 
4 : 2; const unsigned rstride = 1 << ((desc >> (SIMD_DATA_SHIFT + 1)) % 4); - uint32_t mtedesc = desc >> (SIMD_DATA_SHIFT + SVE_MTEDESC_SHIFT); const intptr_t reg_max = simd_oprsz(desc); const unsigned esize = 1 << esz; intptr_t count_off, count_last; @@ -8071,7 +8169,7 @@ void sve2p1_st1_c(CPUARMState *env, ARMVectorReg *zd, const vaddr addr, } void HELPER(sve2p1_st1bb_c)(CPUARMState *env, void *vd, target_ulong addr, - uint32_t png, uint32_t desc) + uint32_t png, uint64_t desc) { sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), MO_8, sve_st1bb_host, sve_st1bb_tlb); @@ -8080,14 +8178,14 @@ void HELPER(sve2p1_st1bb_c)(CPUARMState *env, void *vd, target_ulong addr, #define DO_ST1_2(NAME, ESZ) \ void HELPER(sve2p1_##NAME##_le_c)(CPUARMState *env, void *vd, \ target_ulong addr, uint32_t png, \ - uint32_t desc) \ + uint64_t desc) \ { \ sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ sve_##NAME##_le_host, sve_##NAME##_le_tlb); \ } \ void HELPER(sve2p1_##NAME##_be_c)(CPUARMState *env, void *vd, \ target_ulong addr, uint32_t png, \ - uint32_t desc) \ + uint64_t desc) \ { \ sve2p1_st1_c(env, vd, addr, png, desc, GETPC(), ESZ, \ sve_##NAME##_be_host, sve_##NAME##_be_tlb); \ diff --git a/target/arm/tcg/tlb-insns.c b/target/arm/tcg/tlb-insns.c index 95c26c6d46384..1a0a332583641 100644 --- a/target/arm/tcg/tlb-insns.c +++ b/target/arm/tcg/tlb-insns.c @@ -149,7 +149,8 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = env_cpu(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E2); + tlb_flush_page_by_mmuidx(cs, pageaddr, + ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS); } static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -159,7 +160,8 @@ static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_E2); + 
ARMMMUIdxBit_E2 | + ARMMMUIdxBit_E2_GCS); } static void tlbiipas2_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -202,7 +204,7 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = env_cpu(env); - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2); + tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS); } static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -210,7 +212,8 @@ static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = env_cpu(env); - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2); + tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E2 | + ARMMMUIdxBit_E2_GCS); } /* @@ -228,12 +231,16 @@ static int vae1_tlbmask(CPUARMState *env) if ((hcr & (HCR_E2H | HCR_TGE)) == (HCR_E2H | HCR_TGE)) { mask = ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; + ARMMMUIdxBit_E20_2_GCS | + ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_0_GCS; } else { /* This is AArch64 only, so we don't need to touch the EL30_x TLBs */ mask = ARMMMUIdxBit_E10_1 | ARMMMUIdxBit_E10_1_PAN | - ARMMMUIdxBit_E10_0; + ARMMMUIdxBit_E10_1_GCS | + ARMMMUIdxBit_E10_0 | + ARMMMUIdxBit_E10_0_GCS; } return mask; } @@ -246,13 +253,20 @@ static int vae2_tlbmask(CPUARMState *env) if (hcr & HCR_E2H) { mask = ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E20_0; + ARMMMUIdxBit_E20_2_GCS | + ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_0_GCS; } else { - mask = ARMMMUIdxBit_E2; + mask = ARMMMUIdxBit_E2 | ARMMMUIdxBit_E2_GCS; } return mask; } +static int vae3_tlbmask(void) +{ + return ARMMMUIdxBit_E3 | ARMMMUIdxBit_E3_GCS; +} + /* Return 56 if TBI is enabled, 64 otherwise. 
*/ static int tlbbits_for_regime(CPUARMState *env, ARMMMUIdx mmu_idx, uint64_t addr) @@ -325,9 +339,12 @@ static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, static int e2_tlbmask(CPUARMState *env) { return (ARMMMUIdxBit_E20_0 | + ARMMMUIdxBit_E20_0_GCS | ARMMMUIdxBit_E20_2 | ARMMMUIdxBit_E20_2_PAN | - ARMMMUIdxBit_E2); + ARMMMUIdxBit_E20_2_GCS | + ARMMMUIdxBit_E2 | + ARMMMUIdxBit_E2_GCS); } static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -354,7 +371,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = env_archcpu(env); CPUState *cs = CPU(cpu); - tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_E3); + tlb_flush_by_mmuidx(cs, vae3_tlbmask()); } static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -380,7 +397,7 @@ static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = env_cpu(env); - tlb_flush_by_mmuidx_all_cpus_synced(cs, ARMMMUIdxBit_E3); + tlb_flush_by_mmuidx_all_cpus_synced(cs, vae3_tlbmask()); } static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -411,7 +428,7 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdxBit_E3); + tlb_flush_page_by_mmuidx(cs, pageaddr, vae3_tlbmask()); } static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -465,7 +482,7 @@ static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, int bits = tlbbits_for_regime(env, ARMMMUIdx_E3, pageaddr); tlb_flush_page_bits_by_mmuidx_all_cpus_synced(cs, pageaddr, - ARMMMUIdxBit_E3, bits); + vae3_tlbmask(), bits); } static int ipas2e1_tlbmask(CPUARMState *env, int64_t value) @@ -963,7 +980,7 @@ static void tlbi_aa64_rvae3_write(CPUARMState *env, * flush-last-level-only. 
*/ - do_rvae_write(env, value, ARMMMUIdxBit_E3, tlb_force_broadcast(env)); + do_rvae_write(env, value, vae3_tlbmask(), tlb_force_broadcast(env)); } static void tlbi_aa64_rvae3is_write(CPUARMState *env, @@ -977,7 +994,7 @@ static void tlbi_aa64_rvae3is_write(CPUARMState *env, * flush-last-level-only or inner/outer specific flushes. */ - do_rvae_write(env, value, ARMMMUIdxBit_E3, true); + do_rvae_write(env, value, vae3_tlbmask(), true); } static void tlbi_aa64_ripas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, diff --git a/target/arm/tcg/tlb_helper.c b/target/arm/tcg/tlb_helper.c index 23c72a99f5c53..f1983a5732e65 100644 --- a/target/arm/tcg/tlb_helper.c +++ b/target/arm/tcg/tlb_helper.c @@ -24,13 +24,13 @@ bool arm_s1_regime_using_lpae_format(CPUARMState *env, ARMMMUIdx mmu_idx) return regime_using_lpae_format(env, mmu_idx); } -static inline uint32_t merge_syn_data_abort(uint32_t template_syn, +static inline uint64_t merge_syn_data_abort(uint32_t template_syn, ARMMMUFaultInfo *fi, unsigned int target_el, bool same_el, bool is_write, - int fsc) + int fsc, bool gcs) { - uint32_t syn; + uint64_t syn; /* * ISV is only set for stage-2 data aborts routed to EL2 and @@ -75,6 +75,11 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn, /* Merge the runtime syndrome with the template syndrome. */ syn |= template_syn; } + + /* Form ISS2 at the top of the syndrome. 
*/ + syn |= (uint64_t)fi->dirtybit << 37; + syn |= (uint64_t)gcs << 40; + return syn; } @@ -176,7 +181,9 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, int target_el = exception_target_el(env); int current_el = arm_current_el(env); bool same_el; - uint32_t syn, exc, fsr, fsc; + uint32_t exc, fsr, fsc; + uint64_t syn; + /* * We know this must be a data or insn abort, and that * env->exception.syndrome contains the template syndrome set @@ -246,9 +253,10 @@ void arm_deliver_fault(ARMCPU *cpu, vaddr addr, syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc); exc = EXCP_PREFETCH_ABORT; } else { + bool gcs = regime_is_gcs(core_to_arm_mmu_idx(env, mmu_idx)); syn = merge_syn_data_abort(env->exception.syndrome, fi, target_el, same_el, access_type == MMU_DATA_STORE, - fsc); + fsc, gcs); if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) { fsr |= (1 << 11); diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c index dbf47595dbe2e..3292d7cbfd9b8 100644 --- a/target/arm/tcg/translate-a64.c +++ b/target/arm/tcg/translate-a64.c @@ -26,6 +26,7 @@ #include "cpregs.h" static TCGv_i64 cpu_X[32]; +static TCGv_i64 cpu_gcspr[4]; static TCGv_i64 cpu_pc; /* Load/store exclusive handling */ @@ -77,6 +78,10 @@ static int scale_by_log2_tag_granule(DisasContext *s, int x) /* initialize TCG globals. 
*/ void a64_translate_init(void) { + static const char gcspr_names[4][12] = { + "gcspr_el0", "gcspr_el1", "gcspr_el2", "gcspr_el3" + }; + int i; cpu_pc = tcg_global_mem_new_i64(tcg_env, @@ -90,10 +95,17 @@ void a64_translate_init(void) cpu_exclusive_high = tcg_global_mem_new_i64(tcg_env, offsetof(CPUARMState, exclusive_high), "exclusive_high"); + + for (i = 0; i < 4; i++) { + cpu_gcspr[i] = + tcg_global_mem_new_i64(tcg_env, + offsetof(CPUARMState, cp15.gcspr_el[i]), + gcspr_names[i]); + } } /* - * Return the core mmu_idx to use for A64 load/store insns which + * Return the full arm mmu_idx to use for A64 load/store insns which * have a "unprivileged load/store" variant. Those insns access * EL0 if executed from an EL which has control over EL0 (usually * EL1) but behave like normal loads and stores if executed from @@ -103,7 +115,7 @@ void a64_translate_init(void) * normal encoding (in which case we will return the same * thing as get_mem_index(). */ -static int get_a64_user_mem_index(DisasContext *s, bool unpriv) +static ARMMMUIdx full_a64_user_mem_index(DisasContext *s, bool unpriv) { /* * If AccType_UNPRIV is not used, the insn uses AccType_NORMAL, @@ -130,7 +142,19 @@ static int get_a64_user_mem_index(DisasContext *s, bool unpriv) g_assert_not_reached(); } } - return arm_to_core_mmu_idx(useridx); + return useridx; +} + +/* Return the core mmu_idx per above. */ +static int core_a64_user_mem_index(DisasContext *s, bool unpriv) +{ + return arm_to_core_mmu_idx(full_a64_user_mem_index(s, unpriv)); +} + +/* For a given translation regime, return the core mmu_idx for gcs access. 
*/ +static int core_gcs_mem_index(ARMMMUIdx armidx) +{ + return arm_to_core_mmu_idx(regime_to_gcs(armidx)); } static void set_btype_raw(int val) @@ -408,6 +432,39 @@ static MemOp check_ordered_align(DisasContext *s, int rn, int imm, return finalize_memop(s, mop); } +static void gen_add_gcs_record(DisasContext *s, TCGv_i64 value) +{ + TCGv_i64 addr = tcg_temp_new_i64(); + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + + tcg_gen_addi_i64(addr, gcspr, -8); + tcg_gen_qemu_st_i64(value, clean_data_tbi(s, addr), mmuidx, mop); + tcg_gen_mov_i64(gcspr, addr); +} + +static void gen_load_check_gcs_record(DisasContext *s, TCGv_i64 target, + GCSInstructionType it, int rt) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 rec_va = tcg_temp_new_i64(); + + tcg_gen_qemu_ld_i64(rec_va, clean_data_tbi(s, gcspr), mmuidx, mop); + + if (s->gcs_rvcen) { + TCGLabel *fail_label = + delay_exception(s, EXCP_UDEF, syn_gcs_data_check(it, rt)); + + tcg_gen_brcond_i64(TCG_COND_NE, rec_va, target, fail_label); + } + + gen_a64_set_pc(s, rec_va); + tcg_gen_addi_i64(gcspr, gcspr, 8); +} + typedef struct DisasCompare64 { TCGCond cond; TCGv_i64 value; @@ -471,7 +528,7 @@ static inline bool use_goto_tb(DisasContext *s, uint64_t dest) return translator_use_goto_tb(&s->base, dest); } -static void gen_goto_tb(DisasContext *s, int n, int64_t diff) +static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, int64_t diff) { if (use_goto_tb(s, s->pc_curr + diff)) { /* @@ -484,12 +541,12 @@ static void gen_goto_tb(DisasContext *s, int n, int64_t diff) */ if (tb_cflags(s->base.tb) & CF_PCREL) { gen_a64_update_pc(s, diff); - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); } else { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); gen_a64_update_pc(s, diff); } - tcg_gen_exit_tb(s->base.tb, n); + 
tcg_gen_exit_tb(s->base.tb, tb_slot_idx); s->base.is_jmp = DISAS_NORETURN; } else { gen_a64_update_pc(s, diff); @@ -1642,7 +1699,14 @@ static bool trans_B(DisasContext *s, arg_i *a) static bool trans_BL(DisasContext *s, arg_i *a) { - gen_pc_plus_diff(s, cpu_reg(s, 30), curr_insn_len(s)); + TCGv_i64 link = tcg_temp_new_i64(); + + gen_pc_plus_diff(s, link, 4); + if (s->gcs_en) { + gen_add_gcs_record(s, link); + } + tcg_gen_mov_i64(cpu_reg(s, 30), link); + reset_btype(s); gen_goto_tb(s, 0, a->imm); return true; @@ -1739,15 +1803,15 @@ static bool trans_BR(DisasContext *s, arg_r *a) static bool trans_BLR(DisasContext *s, arg_r *a) { - TCGv_i64 dst = cpu_reg(s, a->rn); - TCGv_i64 lr = cpu_reg(s, 30); - if (dst == lr) { - TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_mov_i64(tmp, dst); - dst = tmp; + TCGv_i64 link = tcg_temp_new_i64(); + + gen_pc_plus_diff(s, link, 4); + if (s->gcs_en) { + gen_add_gcs_record(s, link); } - gen_pc_plus_diff(s, lr, curr_insn_len(s)); - gen_a64_set_pc(s, dst); + gen_a64_set_pc(s, cpu_reg(s, a->rn)); + tcg_gen_mov_i64(cpu_reg(s, 30), link); + set_btype_for_blr(s); s->base.is_jmp = DISAS_JUMP; return true; @@ -1755,7 +1819,13 @@ static bool trans_BLR(DisasContext *s, arg_r *a) static bool trans_RET(DisasContext *s, arg_r *a) { - gen_a64_set_pc(s, cpu_reg(s, a->rn)); + TCGv_i64 target = cpu_reg(s, a->rn); + + if (s->gcs_en) { + gen_load_check_gcs_record(s, target, GCS_IT_RET_nPauth, a->rn); + } else { + gen_a64_set_pc(s, target); + } s->base.is_jmp = DISAS_JUMP; return true; } @@ -1799,21 +1869,21 @@ static bool trans_BRAZ(DisasContext *s, arg_braz *a) static bool trans_BLRAZ(DisasContext *s, arg_braz *a) { - TCGv_i64 dst, lr; + TCGv_i64 dst, link; if (!dc_isar_feature(aa64_pauth, s)) { return false; } - dst = auth_branch_target(s, cpu_reg(s, a->rn), tcg_constant_i64(0), !a->m); - lr = cpu_reg(s, 30); - if (dst == lr) { - TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_mov_i64(tmp, dst); - dst = tmp; + + link = tcg_temp_new_i64(); + 
gen_pc_plus_diff(s, link, 4); + if (s->gcs_en) { + gen_add_gcs_record(s, link); } - gen_pc_plus_diff(s, lr, curr_insn_len(s)); gen_a64_set_pc(s, dst); + tcg_gen_mov_i64(cpu_reg(s, 30), link); + set_btype_for_blr(s); s->base.is_jmp = DISAS_JUMP; return true; @@ -1828,7 +1898,12 @@ static bool trans_RETA(DisasContext *s, arg_reta *a) } dst = auth_branch_target(s, cpu_reg(s, 30), cpu_X[31], !a->m); - gen_a64_set_pc(s, dst); + if (s->gcs_en) { + GCSInstructionType it = a->m ? GCS_IT_RET_PauthB : GCS_IT_RET_PauthA; + gen_load_check_gcs_record(s, dst, it, 30); + } else { + gen_a64_set_pc(s, dst); + } s->base.is_jmp = DISAS_JUMP; return true; } @@ -1849,20 +1924,21 @@ static bool trans_BRA(DisasContext *s, arg_bra *a) static bool trans_BLRA(DisasContext *s, arg_bra *a) { - TCGv_i64 dst, lr; + TCGv_i64 dst, link; if (!dc_isar_feature(aa64_pauth, s)) { return false; } dst = auth_branch_target(s, cpu_reg(s, a->rn), cpu_reg_sp(s, a->rm), !a->m); - lr = cpu_reg(s, 30); - if (dst == lr) { - TCGv_i64 tmp = tcg_temp_new_i64(); - tcg_gen_mov_i64(tmp, dst); - dst = tmp; + + link = tcg_temp_new_i64(); + gen_pc_plus_diff(s, link, 4); + if (s->gcs_en) { + gen_add_gcs_record(s, link); } - gen_pc_plus_diff(s, lr, curr_insn_len(s)); gen_a64_set_pc(s, dst); + tcg_gen_mov_i64(cpu_reg(s, 30), link); + set_btype_for_blr(s); s->base.is_jmp = DISAS_JUMP; return true; @@ -1870,6 +1946,9 @@ static bool trans_BLRA(DisasContext *s, arg_bra *a) static bool trans_ERET(DisasContext *s, arg_ERET *a) { +#ifdef CONFIG_USER_ONLY + return false; +#else TCGv_i64 dst; if (s->current_el == 0) { @@ -1889,10 +1968,14 @@ static bool trans_ERET(DisasContext *s, arg_ERET *a) /* Must exit loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; return true; +#endif } static bool trans_ERETA(DisasContext *s, arg_reta *a) { +#ifdef CONFIG_USER_ONLY + return false; +#else TCGv_i64 dst; if (!dc_isar_feature(aa64_pauth, s)) { @@ -1918,6 +2001,7 @@ static bool trans_ERETA(DisasContext *s, arg_reta *a) /* Must exit 
loop to check un-masked IRQs */ s->base.is_jmp = DISAS_EXIT; return true; +#endif } static bool trans_NOP(DisasContext *s, arg_NOP *a) @@ -2060,6 +2144,14 @@ static bool trans_ESB(DisasContext *s, arg_ESB *a) return true; } +static bool trans_GCSB(DisasContext *s, arg_GCSB *a) +{ + if (dc_isar_feature(aa64_gcs, s)) { + tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); + } + return true; +} + static bool trans_PACIAZ(DisasContext *s, arg_PACIAZ *a) { if (s->pauth_active) { @@ -2124,6 +2216,20 @@ static bool trans_AUTIBSP(DisasContext *s, arg_AUTIBSP *a) return true; } +static bool trans_CHKFEAT(DisasContext *s, arg_CHKFEAT *a) +{ + uint64_t feat_en = 0; + + if (s->gcs_en) { + feat_en |= 1 << 0; + } + if (feat_en) { + TCGv_i64 x16 = cpu_reg(s, 16); + tcg_gen_andi_i64(x16, x16, ~feat_en); + } + return true; +} + static bool trans_CLREX(DisasContext *s, arg_CLREX *a) { tcg_gen_movi_i64(cpu_exclusive_addr, -1); @@ -2455,6 +2561,195 @@ static void gen_sysreg_undef(DisasContext *s, bool isread, gen_exception_insn(s, 0, EXCP_UDEF, syndrome); } +static void gen_gcspopm(DisasContext *s, int rt) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 value = tcg_temp_new_i64(); + TCGLabel *fail_label = + delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPM, rt)); + + /* The value at top-of-stack must have low 2 bits clear. */ + tcg_gen_qemu_ld_i64(value, clean_data_tbi(s, gcspr), mmuidx, mop); + tcg_gen_brcondi_i64(TCG_COND_TSTNE, value, 3, fail_label); + + /* Complete the pop and return the value. 
*/ + tcg_gen_addi_i64(gcspr, gcspr, 8); + tcg_gen_mov_i64(cpu_reg(s, rt), value); +} + +static void gen_gcspushx(DisasContext *s) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int spsr_idx = aarch64_banked_spsr_index(s->current_el); + int spsr_off = offsetof(CPUARMState, banked_spsr[spsr_idx]); + int elr_off = offsetof(CPUARMState, elr_el[s->current_el]); + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 addr = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); + + tcg_gen_addi_i64(addr, gcspr, -8); + tcg_gen_qemu_st_i64(cpu_reg(s, 30), addr, mmuidx, mop); + + tcg_gen_ld_i64(tmp, tcg_env, spsr_off); + tcg_gen_addi_i64(addr, addr, -8); + tcg_gen_qemu_st_i64(tmp, addr, mmuidx, mop); + + tcg_gen_ld_i64(tmp, tcg_env, elr_off); + tcg_gen_addi_i64(addr, addr, -8); + tcg_gen_qemu_st_i64(tmp, addr, mmuidx, mop); + + tcg_gen_addi_i64(addr, addr, -8); + tcg_gen_qemu_st_i64(tcg_constant_i64(0b1001), addr, mmuidx, mop); + + tcg_gen_mov_i64(gcspr, addr); + clear_pstate_bits(PSTATE_EXLOCK); +} + +static void gen_gcspopcx(DisasContext *s) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int spsr_idx = aarch64_banked_spsr_index(s->current_el); + int spsr_off = offsetof(CPUARMState, banked_spsr[spsr_idx]); + int elr_off = offsetof(CPUARMState, elr_el[s->current_el]); + int gcscr_off = offsetof(CPUARMState, cp15.gcscr_el[s->current_el]); + int pstate_off = offsetof(CPUARMState, pstate); + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 addr = tcg_temp_new_i64(); + TCGv_i64 tmp1 = tcg_temp_new_i64(); + TCGv_i64 tmp2 = tcg_temp_new_i64(); + TCGLabel *fail_label = + delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPCX, 31)); + + /* The value at top-of-stack must be an exception token. */ + tcg_gen_qemu_ld_i64(tmp1, gcspr, mmuidx, mop); + tcg_gen_brcondi_i64(TCG_COND_NE, tmp1, 0b1001, fail_label); + + /* Validate in turn, ELR ... 
*/ + tcg_gen_addi_i64(addr, gcspr, 8); + tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop); + tcg_gen_ld_i64(tmp2, tcg_env, elr_off); + tcg_gen_brcond_i64(TCG_COND_NE, tmp1, tmp2, fail_label); + + /* ... SPSR ... */ + tcg_gen_addi_i64(addr, addr, 8); + tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop); + tcg_gen_ld_i64(tmp2, tcg_env, spsr_off); + tcg_gen_brcond_i64(TCG_COND_NE, tmp1, tmp2, fail_label); + + /* ... and LR. */ + tcg_gen_addi_i64(addr, addr, 8); + tcg_gen_qemu_ld_i64(tmp1, addr, mmuidx, mop); + tcg_gen_brcond_i64(TCG_COND_NE, tmp1, cpu_reg(s, 30), fail_label); + + /* Writeback stack pointer after pop. */ + tcg_gen_addi_i64(gcspr, addr, 8); + + /* PSTATE.EXLOCK = GetCurrentEXLOCKEN(). */ + tcg_gen_ld_i64(tmp1, tcg_env, gcscr_off); + tcg_gen_ld_i64(tmp2, tcg_env, pstate_off); + tcg_gen_shri_i64(tmp1, tmp1, ctz64(GCSCR_EXLOCKEN)); + tcg_gen_deposit_i64(tmp2, tmp2, tmp1, ctz64(PSTATE_EXLOCK), 1); + tcg_gen_st_i64(tmp2, tcg_env, pstate_off); +} + +static void gen_gcspopx(DisasContext *s) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 addr = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGLabel *fail_label = + delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSPOPX, 31)); + + /* The value at top-of-stack must be an exception token. */ + tcg_gen_qemu_ld_i64(tmp, gcspr, mmuidx, mop); + tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 0b1001, fail_label); + + /* + * The other three values in the exception return record + * are ignored, but are loaded anyway to raise faults. 
+ */ + tcg_gen_addi_i64(addr, gcspr, 8); + tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop); + tcg_gen_addi_i64(addr, addr, 8); + tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop); + tcg_gen_addi_i64(addr, addr, 8); + tcg_gen_qemu_ld_i64(tmp, addr, mmuidx, mop); + tcg_gen_addi_i64(gcspr, addr, 8); +} + +static void gen_gcsss1(DisasContext *s, int rt) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 inptr = cpu_reg(s, rt); + TCGv_i64 cmp = tcg_temp_new_i64(); + TCGv_i64 new = tcg_temp_new_i64(); + TCGv_i64 old = tcg_temp_new_i64(); + TCGLabel *fail_label = + delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSSS1, rt)); + + /* Compute the valid cap entry that the new stack must have. */ + tcg_gen_deposit_i64(cmp, inptr, tcg_constant_i64(1), 0, 12); + /* Compute the in-progress cap entry for the old stack. */ + tcg_gen_deposit_i64(new, gcspr, tcg_constant_i64(5), 0, 3); + + /* Swap the valid cap the with the in-progress cap. */ + tcg_gen_atomic_cmpxchg_i64(old, inptr, cmp, new, mmuidx, mop); + tcg_gen_brcond_i64(TCG_COND_NE, old, cmp, fail_label); + + /* The new stack had a valid cap: change gcspr. */ + tcg_gen_andi_i64(gcspr, inptr, ~7); +} + +static void gen_gcsss2(DisasContext *s, int rt) +{ + TCGv_i64 gcspr = cpu_gcspr[s->current_el]; + int mmuidx = core_gcs_mem_index(s->mmu_idx); + MemOp mop = finalize_memop(s, MO_64 | MO_ALIGN); + TCGv_i64 outptr = tcg_temp_new_i64(); + TCGv_i64 tmp = tcg_temp_new_i64(); + TCGLabel *fail_label = + delay_exception(s, EXCP_UDEF, syn_gcs_data_check(GCS_IT_GCSSS2, rt)); + + /* Validate that the new stack has an in-progress cap. */ + tcg_gen_qemu_ld_i64(outptr, gcspr, mmuidx, mop); + tcg_gen_andi_i64(tmp, outptr, 7); + tcg_gen_brcondi_i64(TCG_COND_NE, tmp, 5, fail_label); + + /* Push a valid cap to the old stack. 
*/ + tcg_gen_andi_i64(outptr, outptr, ~7); + tcg_gen_addi_i64(outptr, outptr, -8); + tcg_gen_deposit_i64(tmp, outptr, tcg_constant_i64(1), 0, 12); + tcg_gen_qemu_st_i64(tmp, outptr, mmuidx, mop); + tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); + + /* Pop the in-progress cap from the new stack. */ + tcg_gen_addi_i64(gcspr, gcspr, 8); + + /* Return a pointer to the old stack cap. */ + tcg_gen_mov_i64(cpu_reg(s, rt), outptr); +} + +/* + * Look up @key, returning the cpreg, which must exist. + * Additionally, the new cpreg must also be accessible. + */ +static const ARMCPRegInfo * +redirect_cpreg(DisasContext *s, uint32_t key, bool isread) +{ + const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); + assert(ri); + assert(cp_access_ok(s->current_el, ri, isread)); + return ri; +} + /* MRS - move from system register * MSR (register) - move to system register * SYS @@ -2466,8 +2761,7 @@ static void handle_sys(DisasContext *s, bool isread, unsigned int op0, unsigned int op1, unsigned int op2, unsigned int crn, unsigned int crm, unsigned int rt) { - uint32_t key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, - crn, crm, op0, op1, op2); + uint32_t key = ENCODE_AA64_CP_REG(op0, op1, crn, crm, op2); const ARMCPRegInfo *ri = get_arm_cp_reginfo(s->cp_regs, key); bool need_exit_tb = false; bool nv_trap_to_el2 = false; @@ -2561,6 +2855,27 @@ static void handle_sys(DisasContext *s, bool isread, } } + if (ri->vhe_redir_to_el2 && s->current_el == 2 && s->e2h) { + /* + * This one of the FOO_EL1 registers which redirect to FOO_EL2 + * from EL2 when HCR_EL2.E2H is set. + */ + key = ri->vhe_redir_to_el2; + ri = redirect_cpreg(s, key, isread); + } else if (ri->vhe_redir_to_el01 && s->current_el >= 2) { + /* + * This is one of the FOO_EL12 or FOO_EL02 registers. + * With !E2H, they all UNDEF. + * With E2H, from EL2 or EL3, they redirect to FOO_EL1/FOO_EL0. 
+ */ + if (!s->e2h) { + gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt); + return; + } + key = ri->vhe_redir_to_el01; + ri = redirect_cpreg(s, key, isread); + } + if (ri->accessfn || (ri->fgt && s->fgt_active)) { /* Emit code to perform further access permissions checks at * runtime; this may result in an exception. @@ -2603,11 +2918,8 @@ static void handle_sys(DisasContext *s, bool isread, * We don't use the EL1 register's access function, and * fine-grained-traps on EL1 also do not apply here. */ - key = ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, - crn, crm, op0, 0, op2); - ri = get_arm_cp_reginfo(s->cp_regs, key); - assert(ri); - assert(cp_access_ok(s->current_el, ri, isread)); + key = ENCODE_AA64_CP_REG(op0, 0, crn, crm, op2); + ri = redirect_cpreg(s, key, isread); /* * We might not have done an update_pc earlier, so check we don't * need it. We could support this in future if necessary. @@ -2731,6 +3043,51 @@ static void handle_sys(DisasContext *s, bool isread, } } return; + case ARM_CP_GCSPUSHM: + if (s->gcs_en) { + gen_add_gcs_record(s, cpu_reg(s, rt)); + } + return; + case ARM_CP_GCSPOPM: + /* Note that X[rt] is unchanged if !GCSEnabled. */ + if (s->gcs_en) { + gen_gcspopm(s, rt); + } + return; + case ARM_CP_GCSPUSHX: + /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */ + if (rt != 31) { + unallocated_encoding(s); + } else if (s->gcs_en) { + gen_gcspushx(s); + } + return; + case ARM_CP_GCSPOPCX: + /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. */ + if (rt != 31) { + unallocated_encoding(s); + } else if (s->gcs_en) { + gen_gcspopcx(s); + } + return; + case ARM_CP_GCSPOPX: + /* Choose the CONSTRAINED UNPREDICTABLE for UNDEF. 
*/ + if (rt != 31) { + unallocated_encoding(s); + } else if (s->gcs_en) { + gen_gcspopx(s); + } + return; + case ARM_CP_GCSSS1: + if (s->gcs_en) { + gen_gcsss1(s, rt); + } + return; + case ARM_CP_GCSSS2: + if (s->gcs_en) { + gen_gcsss2(s, rt); + } + return; default: g_assert_not_reached(); } @@ -3237,7 +3594,7 @@ static bool trans_LDXP(DisasContext *s, arg_stxr *a) static bool trans_CASP(DisasContext *s, arg_CASP *a) { - if (!dc_isar_feature(aa64_atomics, s)) { + if (!dc_isar_feature(aa64_lse, s)) { return false; } if (((a->rt | a->rs) & 1) != 0) { @@ -3250,7 +3607,7 @@ static bool trans_CASP(DisasContext *s, arg_CASP *a) static bool trans_CAS(DisasContext *s, arg_CAS *a) { - if (!dc_isar_feature(aa64_atomics, s)) { + if (!dc_isar_feature(aa64_lse, s)) { return false; } gen_compare_and_swap(s, a->rs, a->rt, a->rn, a->sz); @@ -3525,7 +3882,7 @@ static void op_addr_ldst_imm_pre(DisasContext *s, arg_ldst_imm *a, if (!a->p) { tcg_gen_addi_i64(*dirty_addr, *dirty_addr, offset); } - memidx = get_a64_user_mem_index(s, a->unpriv); + memidx = core_a64_user_mem_index(s, a->unpriv); *clean_addr = gen_mte_check1_mmuidx(s, *dirty_addr, is_store, a->w || a->rn != 31, mop, a->unpriv, memidx); @@ -3546,7 +3903,7 @@ static bool trans_STR_i(DisasContext *s, arg_ldst_imm *a) { bool iss_sf, iss_valid = !a->w; TCGv_i64 clean_addr, dirty_addr, tcg_rt; - int memidx = get_a64_user_mem_index(s, a->unpriv); + int memidx = core_a64_user_mem_index(s, a->unpriv); MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, true, mop); @@ -3564,7 +3921,7 @@ static bool trans_LDR_i(DisasContext *s, arg_ldst_imm *a) { bool iss_sf, iss_valid = !a->w; TCGv_i64 clean_addr, dirty_addr, tcg_rt; - int memidx = get_a64_user_mem_index(s, a->unpriv); + int memidx = core_a64_user_mem_index(s, a->unpriv); MemOp mop = finalize_memop(s, a->sz + a->sign * MO_SIGN); op_addr_ldst_imm_pre(s, a, &clean_addr, &dirty_addr, a->imm, false, mop); @@ -3743,15 
+4100,64 @@ static bool do_atomic_ld(DisasContext *s, arg_atomic *a, AtomicThreeOpFn *fn, return true; } -TRANS_FEAT(LDADD, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false) -TRANS_FEAT(LDCLR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true) -TRANS_FEAT(LDEOR, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false) -TRANS_FEAT(LDSET, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false) -TRANS_FEAT(LDSMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false) -TRANS_FEAT(LDSMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false) -TRANS_FEAT(LDUMAX, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false) -TRANS_FEAT(LDUMIN, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false) -TRANS_FEAT(SWP, aa64_atomics, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false) +TRANS_FEAT(LDADD, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_add_i64, 0, false) +TRANS_FEAT(LDCLR, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_and_i64, 0, true) +TRANS_FEAT(LDEOR, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_xor_i64, 0, false) +TRANS_FEAT(LDSET, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_or_i64, 0, false) +TRANS_FEAT(LDSMAX, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_smax_i64, MO_SIGN, false) +TRANS_FEAT(LDSMIN, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_smin_i64, MO_SIGN, false) +TRANS_FEAT(LDUMAX, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_umax_i64, 0, false) +TRANS_FEAT(LDUMIN, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_fetch_umin_i64, 0, false) +TRANS_FEAT(SWP, aa64_lse, do_atomic_ld, a, tcg_gen_atomic_xchg_i64, 0, false) + +typedef void Atomic128ThreeOpFn(TCGv_i128, TCGv_i64, TCGv_i128, TCGArg, MemOp); + +static bool do_atomic128_ld(DisasContext *s, arg_atomic128 *a, + Atomic128ThreeOpFn *fn, bool invert) +{ + MemOp mop; + int rlo, rhi; + TCGv_i64 clean_addr, tlo, thi; + TCGv_i128 t16; + + if (a->rt == 
31 || a->rt2 == 31 || a->rt == a->rt2) { + return false; + } + if (a->rn == 31) { + gen_check_sp_alignment(s); + } + mop = check_atomic_align(s, a->rn, MO_128); + clean_addr = gen_mte_check1(s, cpu_reg_sp(s, a->rn), false, + a->rn != 31, mop); + + rlo = (s->be_data == MO_LE ? a->rt : a->rt2); + rhi = (s->be_data == MO_LE ? a->rt2 : a->rt); + + tlo = read_cpu_reg(s, rlo, true); + thi = read_cpu_reg(s, rhi, true); + if (invert) { + tcg_gen_not_i64(tlo, tlo); + tcg_gen_not_i64(thi, thi); + } + /* + * The tcg atomic primitives are all full barriers. Therefore we + * can ignore the Acquire and Release bits of this instruction. + */ + t16 = tcg_temp_new_i128(); + tcg_gen_concat_i64_i128(t16, tlo, thi); + + fn(t16, clean_addr, t16, get_mem_index(s), mop); + + tcg_gen_extr_i128_i64(cpu_reg(s, rlo), cpu_reg(s, rhi), t16); + return true; +} + +TRANS_FEAT(LDCLRP, aa64_lse128, do_atomic128_ld, + a, tcg_gen_atomic_fetch_and_i128, true) +TRANS_FEAT(LDSETP, aa64_lse128, do_atomic128_ld, + a, tcg_gen_atomic_fetch_or_i128, false) +TRANS_FEAT(SWPP, aa64_lse128, do_atomic128_ld, + a, tcg_gen_atomic_xchg_i128, false) static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a) { @@ -3759,7 +4165,7 @@ static bool trans_LDAPR(DisasContext *s, arg_LDAPR *a) TCGv_i64 clean_addr; MemOp mop; - if (!dc_isar_feature(aa64_atomics, s) || + if (!dc_isar_feature(aa64_lse, s) || !dc_isar_feature(aa64_rcpc_8_3, s)) { return false; } @@ -3882,6 +4288,42 @@ static bool trans_STLR_i(DisasContext *s, arg_ldapr_stlr_i *a) return true; } +static bool trans_GCSSTR(DisasContext *s, arg_GCSSTR *a) +{ + ARMMMUIdx armidx; + + if (!dc_isar_feature(aa64_gcs, s)) { + return false; + } + + /* + * The pseudocode for GCSSTTR is + * + * effective_el = AArch64.IsUnprivAccessPriv() ? 
PSTATE.EL : EL0; + * if (effective_el == PSTATE.EL) CheckGCSSTREnabled(); + * + * We have cached the result of IsUnprivAccessPriv in DisasContext, + * but since we need the result of full_a64_user_mem_index anyway, + * use the mmu_idx test as a proxy for the effective_el test. + */ + armidx = full_a64_user_mem_index(s, a->unpriv); + if (armidx == s->mmu_idx && s->gcsstr_el != 0) { + gen_exception_insn_el(s, 0, EXCP_UDEF, + syn_gcs_gcsstr(a->rn, a->rt), + s->gcsstr_el); + return true; + } + + if (a->rn == 31) { + gen_check_sp_alignment(s); + } + tcg_gen_qemu_st_i64(cpu_reg(s, a->rt), + clean_data_tbi(s, cpu_reg_sp(s, a->rn)), + core_gcs_mem_index(armidx), + finalize_memop(s, MO_64 | MO_ALIGN)); + return true; +} + static bool trans_LD_mult(DisasContext *s, arg_ldst_mult *a) { TCGv_i64 clean_addr, tcg_rn, tcg_ebytes; @@ -4413,7 +4855,7 @@ static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, return false; } - memidx = get_a64_user_mem_index(s, a->unpriv); + memidx = core_a64_user_mem_index(s, a->unpriv); /* * We pass option_a == true, matching our implementation; @@ -4467,8 +4909,8 @@ static bool do_CPY(DisasContext *s, arg_cpy *a, bool is_epilogue, CpyFn fn) return false; } - rmemidx = get_a64_user_mem_index(s, runpriv); - wmemidx = get_a64_user_mem_index(s, wunpriv); + rmemidx = core_a64_user_mem_index(s, runpriv); + wmemidx = core_a64_user_mem_index(s, wunpriv); /* * We pass option_a == true, matching our implementation; @@ -4552,6 +4994,50 @@ TRANS(SUB_i, gen_rri, a, 1, 1, tcg_gen_sub_i64) TRANS(ADDS_i, gen_rri, a, 0, 1, a->sf ? gen_add64_CC : gen_add32_CC) TRANS(SUBS_i, gen_rri, a, 0, 1, a->sf ? 
gen_sub64_CC : gen_sub32_CC) +/* + * Min/Max (immediate) + */ + +static void gen_wrap3_i32(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, NeonGenTwoOpFn fn) +{ + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + + tcg_gen_extrl_i64_i32(t1, n); + tcg_gen_extrl_i64_i32(t2, m); + fn(t1, t1, t2); + tcg_gen_extu_i32_i64(d, t1); +} + +static void gen_smax32_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) +{ + gen_wrap3_i32(d, n, m, tcg_gen_smax_i32); +} + +static void gen_smin32_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) +{ + gen_wrap3_i32(d, n, m, tcg_gen_smin_i32); +} + +static void gen_umax32_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) +{ + gen_wrap3_i32(d, n, m, tcg_gen_umax_i32); +} + +static void gen_umin32_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) +{ + gen_wrap3_i32(d, n, m, tcg_gen_umin_i32); +} + +TRANS_FEAT(SMAX_i, aa64_cssc, gen_rri, a, 0, 0, + a->sf ? tcg_gen_smax_i64 : gen_smax32_i64) +TRANS_FEAT(SMIN_i, aa64_cssc, gen_rri, a, 0, 0, + a->sf ? tcg_gen_smin_i64 : gen_smin32_i64) +TRANS_FEAT(UMAX_i, aa64_cssc, gen_rri, a, 0, 0, + a->sf ? tcg_gen_umax_i64 : gen_umax32_i64) +TRANS_FEAT(UMIN_i, aa64_cssc, gen_rri, a, 0, 0, + a->sf ? tcg_gen_umin_i64 : gen_umin32_i64) + /* * Add/subtract (immediate, with tags) */ @@ -8157,6 +8643,28 @@ static bool trans_PACGA(DisasContext *s, arg_rrr *a) return false; } +static bool gen_rrr(DisasContext *s, arg_rrr_sf *a, ArithTwoOp fn) +{ + TCGv_i64 tcg_rm = cpu_reg(s, a->rm); + TCGv_i64 tcg_rn = cpu_reg(s, a->rn); + TCGv_i64 tcg_rd = cpu_reg(s, a->rd); + + fn(tcg_rd, tcg_rn, tcg_rm); + if (!a->sf) { + tcg_gen_ext32u_i64(tcg_rd, tcg_rd); + } + return true; +} + +TRANS_FEAT(SMAX, aa64_cssc, gen_rrr, a, + a->sf ? tcg_gen_smax_i64 : gen_smax32_i64) +TRANS_FEAT(SMIN, aa64_cssc, gen_rrr, a, + a->sf ? tcg_gen_smin_i64 : gen_smin32_i64) +TRANS_FEAT(UMAX, aa64_cssc, gen_rrr, a, + a->sf ? tcg_gen_umax_i64 : gen_umax32_i64) +TRANS_FEAT(UMIN, aa64_cssc, gen_rrr, a, + a->sf ? 
tcg_gen_umin_i64 : gen_umin32_i64) + typedef void ArithOneOp(TCGv_i64, TCGv_i64); static bool gen_rr(DisasContext *s, int rd, int rn, ArithOneOp fn) @@ -8165,13 +8673,22 @@ static bool gen_rr(DisasContext *s, int rd, int rn, ArithOneOp fn) return true; } -static void gen_rbit32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +/* + * Perform 32-bit operation fn on the low half of n; + * the high half of the output is zeroed. + */ +static void gen_wrap2_i32(TCGv_i64 d, TCGv_i64 n, NeonGenOneOpFn fn) { - TCGv_i32 t32 = tcg_temp_new_i32(); + TCGv_i32 t = tcg_temp_new_i32(); - tcg_gen_extrl_i64_i32(t32, tcg_rn); - gen_helper_rbit(t32, t32); - tcg_gen_extu_i32_i64(tcg_rd, t32); + tcg_gen_extrl_i64_i32(t, n); + fn(t, t); + tcg_gen_extu_i32_i64(d, t); +} + +static void gen_rbit32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + gen_wrap2_i32(tcg_rd, tcg_rn, gen_helper_rbit); } static void gen_rev16_xx(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn, TCGv_i64 mask) @@ -8226,16 +8743,43 @@ static void gen_clz64(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) } static void gen_cls32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + gen_wrap2_i32(tcg_rd, tcg_rn, tcg_gen_clrsb_i32); +} + +TRANS(CLZ, gen_rr, a->rd, a->rn, a->sf ? gen_clz64 : gen_clz32) +TRANS(CLS, gen_rr, a->rd, a->rn, a->sf ? tcg_gen_clrsb_i64 : gen_cls32) + +static void gen_ctz32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) { TCGv_i32 t32 = tcg_temp_new_i32(); tcg_gen_extrl_i64_i32(t32, tcg_rn); - tcg_gen_clrsb_i32(t32, t32); + tcg_gen_ctzi_i32(t32, t32, 32); tcg_gen_extu_i32_i64(tcg_rd, t32); } -TRANS(CLZ, gen_rr, a->rd, a->rn, a->sf ? gen_clz64 : gen_clz32) -TRANS(CLS, gen_rr, a->rd, a->rn, a->sf ? 
tcg_gen_clrsb_i64 : gen_cls32) +static void gen_ctz64(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + tcg_gen_ctzi_i64(tcg_rd, tcg_rn, 64); +} + +static void gen_cnt32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + gen_wrap2_i32(tcg_rd, tcg_rn, tcg_gen_ctpop_i32); +} + +static void gen_abs32(TCGv_i64 tcg_rd, TCGv_i64 tcg_rn) +{ + gen_wrap2_i32(tcg_rd, tcg_rn, tcg_gen_abs_i32); +} + +TRANS_FEAT(CTZ, aa64_cssc, gen_rr, a->rd, a->rn, + a->sf ? gen_ctz64 : gen_ctz32) +TRANS_FEAT(CNT, aa64_cssc, gen_rr, a->rd, a->rn, + a->sf ? tcg_gen_ctpop_i64 : gen_cnt32) +TRANS_FEAT(ABS, aa64_cssc, gen_rr, a->rd, a->rn, + a->sf ? tcg_gen_abs_i64 : gen_abs32) static bool gen_pacaut(DisasContext *s, arg_pacaut *a, NeonGenTwo64OpEnvFn fn) { @@ -10155,13 +10699,17 @@ static void aarch64_tr_init_disas_context(DisasContextBase *dcbase, dc->pstate_za = EX_TBFLAG_A64(tb_flags, PSTATE_ZA); dc->sme_trap_nonstreaming = EX_TBFLAG_A64(tb_flags, SME_TRAP_NONSTREAMING); dc->naa = EX_TBFLAG_A64(tb_flags, NAA); + dc->e2h = EX_TBFLAG_A64(tb_flags, E2H); dc->nv = EX_TBFLAG_A64(tb_flags, NV); dc->nv1 = EX_TBFLAG_A64(tb_flags, NV1); dc->nv2 = EX_TBFLAG_A64(tb_flags, NV2); - dc->nv2_mem_e20 = EX_TBFLAG_A64(tb_flags, NV2_MEM_E20); + dc->nv2_mem_e20 = dc->nv2 && dc->e2h; dc->nv2_mem_be = EX_TBFLAG_A64(tb_flags, NV2_MEM_BE); dc->fpcr_ah = EX_TBFLAG_A64(tb_flags, AH); dc->fpcr_nep = EX_TBFLAG_A64(tb_flags, NEP); + dc->gcs_en = EX_TBFLAG_A64(tb_flags, GCS_EN); + dc->gcs_rvcen = EX_TBFLAG_A64(tb_flags, GCS_RVCEN); + dc->gcsstr_el = EX_TBFLAG_A64(tb_flags, GCSSTR_EL); dc->vec_len = 0; dc->vec_stride = 0; dc->cp_regs = arm_cpu->cp_regs; @@ -10388,6 +10936,8 @@ static void aarch64_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) break; } } + + emit_delayed_exceptions(dc); } const TranslatorOps aarch64_translator_ops = { diff --git a/target/arm/tcg/translate-a64.h b/target/arm/tcg/translate-a64.h index 993dde61a4de4..9c45f89305b4c 100644 --- a/target/arm/tcg/translate-a64.h +++ b/target/arm/tcg/translate-a64.h @@ -28,7 +28,7 @@ 
bool logic_imm_decode_wmask(uint64_t *result, unsigned int immn, bool sve_access_check(DisasContext *s); bool sme_enabled_check(DisasContext *s); bool sme_enabled_check_with_svcr(DisasContext *s, unsigned); -uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, +uint64_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, uint32_t msz, bool is_write, uint32_t data); /* This function corresponds to CheckStreamingSVEEnabled. */ diff --git a/target/arm/tcg/translate-sme.c b/target/arm/tcg/translate-sme.c index 65fc8bc9b2ff5..091c56da4f4ea 100644 --- a/target/arm/tcg/translate-sme.c +++ b/target/arm/tcg/translate-sme.c @@ -387,7 +387,7 @@ TRANS_FEAT(MOVT_ztr, aa64_sme2, do_movt, a, tcg_gen_st_i64) static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) { - typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i32); + typedef void GenLdSt1(TCGv_env, TCGv_ptr, TCGv_ptr, TCGv, TCGv_i64); /* * Indexed by [esz][be][v][mte][st], which is (except for load/store) @@ -415,7 +415,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) TCGv_ptr t_za, t_pg; TCGv_i64 addr; - uint32_t desc; + uint64_t desc; bool be = s->be_data == MO_BE; bool mte = s->mte_active[0]; @@ -440,7 +440,7 @@ static bool trans_LDST1(DisasContext *s, arg_LDST1 *a) desc = make_svemte_desc(s, streaming_vec_reg_size(s), 1, a->esz, a->st, 0); fns[a->esz][be][a->v][mte][a->st](tcg_env, t_za, t_pg, addr, - tcg_constant_i32(desc)); + tcg_constant_i64(desc)); return true; } diff --git a/target/arm/tcg/translate-sve.c b/target/arm/tcg/translate-sve.c index 7b575734fdea7..07b827fa8e898 100644 --- a/target/arm/tcg/translate-sve.c +++ b/target/arm/tcg/translate-sve.c @@ -31,9 +31,9 @@ typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr, typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32); -typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32); +typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, 
TCGv_i64, TCGv_i64); typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr, - TCGv_ptr, TCGv_i64, TCGv_i32); + TCGv_ptr, TCGv_i64, TCGv_i64); /* * Helpers for extracting complex instruction fields. @@ -190,6 +190,10 @@ static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn, arg_rrr_esz *a, int data) { + /* These insns use MO_8 to encode BFloat16 */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); } @@ -403,6 +407,10 @@ static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn, static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn, arg_rprr_esz *a) { + /* These insns use MO_8 to encode BFloat16. */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); } @@ -3875,31 +3883,38 @@ DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d) *** SVE Floating Point Multiply-Add Indexed Group */ +static bool do_fmla_zzxz(DisasContext *s, arg_rrxr_esz *a, + gen_helper_gvec_4_ptr *fn) +{ + /* These insns use MO_8 to encode BFloat16 */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } + return gen_gvec_fpst_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); +} + static gen_helper_gvec_4_ptr * const fmla_idx_fns[4] = { - NULL, gen_helper_gvec_fmla_idx_h, + gen_helper_gvec_bfmla_idx, gen_helper_gvec_fmla_idx_h, gen_helper_gvec_fmla_idx_s, gen_helper_gvec_fmla_idx_d }; -TRANS_FEAT(FMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, - fmla_idx_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->index, - a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64) +TRANS_FEAT(FMLA_zzxz, aa64_sve, do_fmla_zzxz, a, fmla_idx_fns[a->esz]) static gen_helper_gvec_4_ptr * const fmls_idx_fns[4][2] = { - { NULL, NULL }, + { gen_helper_gvec_bfmls_idx, gen_helper_gvec_ah_bfmls_idx }, { gen_helper_gvec_fmls_idx_h, gen_helper_gvec_ah_fmls_idx_h }, { gen_helper_gvec_fmls_idx_s, gen_helper_gvec_ah_fmls_idx_s }, { gen_helper_gvec_fmls_idx_d, gen_helper_gvec_ah_fmls_idx_d }, }; -TRANS_FEAT(FMLS_zzxz, aa64_sve, gen_gvec_fpst_zzzz, - fmls_idx_fns[a->esz][s->fpcr_ah], - a->rd, a->rn, a->rm, a->ra, a->index, - a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) +TRANS_FEAT(FMLS_zzxz, aa64_sve, do_fmla_zzxz, a, + fmls_idx_fns[a->esz][s->fpcr_ah]) /* *** SVE Floating Point Multiply Indexed Group */ static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = { - NULL, gen_helper_gvec_fmul_idx_h, + gen_helper_gvec_fmul_idx_b16, gen_helper_gvec_fmul_idx_h, gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d, }; TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz, @@ -4005,7 +4020,7 @@ static gen_helper_gvec_3_ptr * const fmaxqv_ah_fns[4] = { gen_helper_sve2p1_ah_fmaxqv_s, gen_helper_sve2p1_ah_fmaxqv_d, }; TRANS_FEAT(FMAXQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, - (s->fpcr_ah ? fmaxqv_fns : fmaxqv_ah_fns)[a->esz], a, 0, + (s->fpcr_ah ? fmaxqv_ah_fns : fmaxqv_fns)[a->esz], a, 0, a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) static gen_helper_gvec_3_ptr * const fminqv_fns[4] = { @@ -4017,7 +4032,7 @@ static gen_helper_gvec_3_ptr * const fminqv_ah_fns[4] = { gen_helper_sve2p1_ah_fminqv_s, gen_helper_sve2p1_ah_fminqv_d, }; TRANS_FEAT(FMINQV, aa64_sme2p1_or_sve2p1, gen_gvec_fpst_arg_zpz, - (s->fpcr_ah ? fminqv_fns : fminqv_ah_fns)[a->esz], a, 0, + (s->fpcr_ah ? fminqv_ah_fns : fminqv_fns)[a->esz], a, 0, a->esz == MO_16 ? 
FPST_A64_F16 : FPST_A64) /* @@ -4146,7 +4161,7 @@ static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a) #define DO_FP3(NAME, name) \ static gen_helper_gvec_3_ptr * const name##_fns[4] = { \ - NULL, gen_helper_gvec_##name##_h, \ + gen_helper_gvec_##name##_b16, gen_helper_gvec_##name##_h, \ gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \ }; \ TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0) @@ -4202,13 +4217,34 @@ TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz, s->fpcr_ah ? name##_ah_zpzz_fns[a->esz] : \ name##_zpzz_fns[a->esz], a) -DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd) -DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub) -DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul) -DO_ZPZZ_AH_FP(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin) -DO_ZPZZ_AH_FP(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax) -DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum) -DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) +/* Similar, but for insns where sz == 0 encodes bfloat16 */ +#define DO_ZPZZ_FP_B16(NAME, FEAT, name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + gen_helper_##name##_b16, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a) + +#define DO_ZPZZ_AH_FP_B16(NAME, FEAT, name, ah_name) \ + static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \ + gen_helper_##name##_b16, gen_helper_##name##_h, \ + gen_helper_##name##_s, gen_helper_##name##_d \ + }; \ + static gen_helper_gvec_4_ptr * const name##_ah_zpzz_fns[4] = { \ + gen_helper_##ah_name##_b16, gen_helper_##ah_name##_h, \ + gen_helper_##ah_name##_s, gen_helper_##ah_name##_d \ + }; \ + TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, \ + s->fpcr_ah ? 
name##_ah_zpzz_fns[a->esz] : \ + name##_zpzz_fns[a->esz], a) + +DO_ZPZZ_FP_B16(FADD_zpzz, aa64_sve, sve_fadd) +DO_ZPZZ_FP_B16(FSUB_zpzz, aa64_sve, sve_fsub) +DO_ZPZZ_FP_B16(FMUL_zpzz, aa64_sve, sve_fmul) +DO_ZPZZ_AH_FP_B16(FMIN_zpzz, aa64_sve, sve_fmin, sve_ah_fmin) +DO_ZPZZ_AH_FP_B16(FMAX_zpzz, aa64_sve, sve_fmax, sve_ah_fmax) +DO_ZPZZ_FP_B16(FMINNM_zpzz, aa64_sve, sve_fminnum) +DO_ZPZZ_FP_B16(FMAXNM_zpzz, aa64_sve, sve_fmaxnum) DO_ZPZZ_AH_FP(FABD, aa64_sve, sve_fabd, sve_ah_fabd) DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn) DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv) @@ -4339,19 +4375,28 @@ TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz], a->rd, a->rn, a->rm, a->pg, a->rot | (s->fpcr_ah << 1), a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) +static bool do_fmla_zpzzz(DisasContext *s, arg_rprrr_esz *a, + gen_helper_gvec_5_ptr *fn) +{ + /* These insns use MO_8 to encode BFloat16 */ + if (a->esz == MO_8 && !dc_isar_feature(aa64_sve_b16b16, s)) { + return false; + } + return gen_gvec_fpst_zzzzp(s, fn, a->rd, a->rn, a->rm, a->ra, a->pg, 0, + a->esz == MO_16 ? FPST_A64_F16 : FPST_A64); +} + #define DO_FMLA(NAME, name, ah_name) \ static gen_helper_gvec_5_ptr * const name##_fns[4] = { \ - NULL, gen_helper_sve_##name##_h, \ + gen_helper_sve_##name##_b16, gen_helper_sve_##name##_h, \ gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \ }; \ static gen_helper_gvec_5_ptr * const name##_ah_fns[4] = { \ - NULL, gen_helper_sve_##ah_name##_h, \ + gen_helper_sve_##ah_name##_b16, gen_helper_sve_##ah_name##_h, \ gen_helper_sve_##ah_name##_s, gen_helper_sve_##ah_name##_d \ }; \ - TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, \ - s->fpcr_ah ? name##_ah_fns[a->esz] : name##_fns[a->esz], \ - a->rd, a->rn, a->rm, a->ra, a->pg, 0, \ - a->esz == MO_16 ? FPST_A64_F16 : FPST_A64) + TRANS_FEAT(NAME, aa64_sve, do_fmla_zpzzz, a, \ + s->fpcr_ah ? 
name##_ah_fns[a->esz] : name##_fns[a->esz]) /* We don't need an ah_fmla_zpzzz because fmla doesn't negate anything */ DO_FMLA(FMLA_zpzzz, fmla_zpzzz, fmla_zpzzz) @@ -4838,17 +4883,16 @@ static const uint8_t dtype_esz[19] = { 4, 4, 4, }; -uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, +uint64_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, uint32_t msz, bool is_write, uint32_t data) { uint32_t sizem1; - uint32_t desc = 0; + uint64_t desc = 0; /* Assert all of the data fits, with or without MTE enabled. */ assert(nregs >= 1 && nregs <= 4); sizem1 = (nregs << msz) - 1; assert(sizem1 <= R_MTEDESC_SIZEM1_MASK >> R_MTEDESC_SIZEM1_SHIFT); - assert(data < 1u << SVE_MTEDESC_SHIFT); if (s->mte_active[0]) { desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s)); @@ -4856,9 +4900,9 @@ uint32_t make_svemte_desc(DisasContext *s, unsigned vsz, uint32_t nregs, desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma); desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write); desc = FIELD_DP32(desc, MTEDESC, SIZEM1, sizem1); - desc <<= SVE_MTEDESC_SHIFT; + desc <<= 32; } - return simd_desc(vsz, vsz, desc | data); + return simd_desc(vsz, vsz, data) | desc; } static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, @@ -4866,7 +4910,7 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, gen_helper_gvec_mem *fn) { TCGv_ptr t_pg; - uint32_t desc; + uint64_t desc; if (!s->mte_active[0]) { addr = clean_data_tbi(s, addr); @@ -4882,7 +4926,7 @@ static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr, t_pg = tcg_temp_new_ptr(); tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); - fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); + fn(tcg_env, t_pg, addr, tcg_constant_i64(desc)); } /* Indexed by [mte][be][dtype][nreg] */ @@ -5334,7 +5378,7 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) unsigned vsz = vec_full_reg_size(s); TCGv_ptr t_pg; int poff; - uint32_t desc; + 
uint64_t desc; /* Load the first quadword using the normal predicated load helpers. */ if (!s->mte_active[0]) { @@ -5365,7 +5409,7 @@ static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) gen_helper_gvec_mem *fn = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; desc = make_svemte_desc(s, 16, 1, dtype_msz(dtype), false, zt); - fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); + fn(tcg_env, t_pg, addr, tcg_constant_i64(desc)); /* Replicate that first quadword. */ if (vsz > 16) { @@ -5408,7 +5452,7 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) unsigned vsz_r32; TCGv_ptr t_pg; int poff, doff; - uint32_t desc; + uint64_t desc; if (vsz < 32) { /* @@ -5449,7 +5493,7 @@ static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype) gen_helper_gvec_mem *fn = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0]; desc = make_svemte_desc(s, 32, 1, dtype_msz(dtype), false, zt); - fn(tcg_env, t_pg, addr, tcg_constant_i32(desc)); + fn(tcg_env, t_pg, addr, tcg_constant_i64(desc)); /* * Replicate that first octaword. @@ -5783,14 +5827,14 @@ static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm, TCGv_ptr t_zm = tcg_temp_new_ptr(); TCGv_ptr t_pg = tcg_temp_new_ptr(); TCGv_ptr t_zt = tcg_temp_new_ptr(); - uint32_t desc; + uint64_t desc; tcg_gen_addi_ptr(t_pg, tcg_env, pred_full_reg_offset(s, pg)); tcg_gen_addi_ptr(t_zm, tcg_env, vec_full_reg_offset(s, zm)); tcg_gen_addi_ptr(t_zt, tcg_env, vec_full_reg_offset(s, zt)); desc = make_svemte_desc(s, vec_full_reg_size(s), 1, msz, is_write, scale); - fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc)); + fn(tcg_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i64(desc)); } /* Indexed by [mte][be][ff][xs][u][msz]. */ @@ -6135,9 +6179,7 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) bool be = s->be_data == MO_BE; bool mte = s->mte_active[0]; - if (a->esz < MO_128 - ? 
!dc_isar_feature(aa64_sve, s) - : !dc_isar_feature(aa64_sve2p1, s)) { + if (!dc_isar_feature(aa64_sve, s)) { return false; } s->is_nonstreaming = true; @@ -6152,10 +6194,6 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) case MO_64: fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz]; break; - case MO_128: - assert(!a->ff && a->u && a->xs == 2 && a->msz == MO_128); - fn = gather_load_fn128[mte][be]; - break; default: g_assert_not_reached(); } @@ -6166,6 +6204,32 @@ static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a) return true; } +static bool trans_LD1Q(DisasContext *s, arg_LD1Q *a) +{ + gen_helper_gvec_mem_scatter *fn = NULL; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + if (!sve_access_check(s)) { + return true; + } + + fn = gather_load_fn128[mte][be]; + assert(fn != NULL); + + /* + * Unlike LD1_zprz, a->rm is the scalar register and it can be XZR, not XSP. + * a->rn is the vector register. + */ + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + cpu_reg(s, a->rm), MO_128, false, fn); + return true; +} + static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a) { gen_helper_gvec_mem_scatter *fn = NULL; @@ -6342,9 +6406,7 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) if (a->esz < a->msz || (a->msz == 0 && a->scale)) { return false; } - if (a->esz < MO_128 - ? 
!dc_isar_feature(aa64_sve, s) - : !dc_isar_feature(aa64_sve2p1, s)) { + if (!dc_isar_feature(aa64_sve, s)) { return false; } s->is_nonstreaming = true; @@ -6358,10 +6420,6 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) case MO_64: fn = scatter_store_fn64[mte][be][a->xs][a->msz]; break; - case MO_128: - assert(a->xs == 2 && a->msz == MO_128); - fn = scatter_store_fn128[mte][be]; - break; default: g_assert_not_reached(); } @@ -6370,6 +6428,29 @@ static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a) return true; } +static bool trans_ST1Q(DisasContext *s, arg_ST1Q *a) +{ + gen_helper_gvec_mem_scatter *fn; + bool be = s->be_data == MO_BE; + bool mte = s->mte_active[0]; + + if (!dc_isar_feature(aa64_sve2p1, s)) { + return false; + } + s->is_nonstreaming = true; + if (!sve_access_check(s)) { + return true; + } + fn = scatter_store_fn128[mte][be]; + /* + * Unlike ST1_zprz, a->rm is the scalar register, and it + * can be XZR, not XSP. a->rn is the vector register. + */ + do_mem_zpz(s, a->rd, a->pg, a->rn, 0, + cpu_reg(s, a->rm), MO_128, true, fn); + return true; +} + static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a) { gen_helper_gvec_mem_scatter *fn = NULL; @@ -8034,7 +8115,7 @@ static bool gen_ldst_c(DisasContext *s, TCGv_i64 addr, int zd, int png, MemOp esz, bool is_write, int n, bool strided) { typedef void ldst_c_fn(TCGv_env, TCGv_ptr, TCGv_i64, - TCGv_i32, TCGv_i32); + TCGv_i32, TCGv_i64); static ldst_c_fn * const f_ldst[2][2][4] = { { { gen_helper_sve2p1_ld1bb_c, gen_helper_sve2p1_ld1hh_le_c, @@ -8055,9 +8136,10 @@ static bool gen_ldst_c(DisasContext *s, TCGv_i64 addr, int zd, int png, gen_helper_sve2p1_st1dd_be_c, } } }; - TCGv_i32 t_png, t_desc; + TCGv_i32 t_png; + TCGv_i64 t_desc; TCGv_ptr t_zd; - uint32_t desc, lg2_rstride = 0; + uint64_t desc, lg2_rstride = 0; bool be = s->be_data == MO_BE; assert(n == 2 || n == 4); @@ -8087,7 +8169,7 @@ static bool gen_ldst_c(DisasContext *s, TCGv_i64 addr, int zd, int png, desc = n == 2 ? 
0 : 1; desc = desc | (lg2_rstride << 1); desc = make_svemte_desc(s, vec_full_reg_size(s), 1, esz, is_write, desc); - t_desc = tcg_constant_i32(desc); + t_desc = tcg_constant_i64(desc); t_png = tcg_temp_new_i32(); tcg_gen_ld16u_i32(t_png, tcg_env, diff --git a/target/arm/tcg/translate.c b/target/arm/tcg/translate.c index f7d6d8ce196e2..5f64fed220275 100644 --- a/target/arm/tcg/translate.c +++ b/target/arm/tcg/translate.c @@ -44,8 +44,6 @@ #define ENABLE_ARCH_7 arm_dc_feature(s, ARM_FEATURE_V7) #define ENABLE_ARCH_8 arm_dc_feature(s, ARM_FEATURE_V8) -/* These are TCG temporaries used only by the legacy iwMMXt decoder */ -static TCGv_i64 cpu_V0, cpu_V1, cpu_M0; /* These are TCG globals which alias CPUARMState fields */ static TCGv_i32 cpu_R[16]; TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF; @@ -1090,6 +1088,57 @@ void gen_exception_insn(DisasContext *s, target_long pc_diff, s->base.is_jmp = DISAS_NORETURN; } +TCGLabel *delay_exception_el(DisasContext *s, int excp, + uint32_t syn, uint32_t target_el) +{ + /* Use tcg_malloc for automatic release on longjmp out of translation. */ + DisasDelayException *e = tcg_malloc(sizeof(DisasDelayException)); + + memset(e, 0, sizeof(*e)); + + /* Save enough of the current state to satisfy gen_exception_insn. */ + e->pc_curr = s->pc_curr; + e->pc_save = s->pc_save; + if (!s->aarch64) { + e->condexec_cond = s->condexec_cond; + e->condexec_mask = s->condexec_mask; + } + + e->excp = excp; + e->syn = syn; + e->target_el = target_el; + + e->next = s->delay_excp_list; + s->delay_excp_list = e; + + e->lab = gen_new_label(); + return e->lab; +} + +TCGLabel *delay_exception(DisasContext *s, int excp, uint32_t syn) +{ + return delay_exception_el(s, excp, syn, 0); +} + +void emit_delayed_exceptions(DisasContext *s) +{ + for (DisasDelayException *e = s->delay_excp_list; e ; e = e->next) { + gen_set_label(e->lab); + + /* Restore the insn state to satisfy gen_exception_insn. 
*/ + s->pc_curr = e->pc_curr; + s->pc_save = e->pc_save; + s->condexec_cond = e->condexec_cond; + s->condexec_mask = e->condexec_mask; + + if (e->target_el) { + gen_exception_insn_el(s, 0, e->excp, e->syn, e->target_el); + } else { + gen_exception_insn(s, 0, e->excp, e->syn); + } + } +} + static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn) { gen_set_condexec(s); @@ -1252,1263 +1301,6 @@ void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop) } } -#define ARM_CP_RW_BIT (1 << 20) - -static inline void iwmmxt_load_reg(TCGv_i64 var, int reg) -{ - tcg_gen_ld_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg])); -} - -static inline void iwmmxt_store_reg(TCGv_i64 var, int reg) -{ - tcg_gen_st_i64(var, tcg_env, offsetof(CPUARMState, iwmmxt.regs[reg])); -} - -static inline TCGv_i32 iwmmxt_load_creg(int reg) -{ - TCGv_i32 var = tcg_temp_new_i32(); - tcg_gen_ld_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); - return var; -} - -static inline void iwmmxt_store_creg(int reg, TCGv_i32 var) -{ - tcg_gen_st_i32(var, tcg_env, offsetof(CPUARMState, iwmmxt.cregs[reg])); -} - -static inline void gen_op_iwmmxt_movq_wRn_M0(int rn) -{ - iwmmxt_store_reg(cpu_M0, rn); -} - -static inline void gen_op_iwmmxt_movq_M0_wRn(int rn) -{ - iwmmxt_load_reg(cpu_M0, rn); -} - -static inline void gen_op_iwmmxt_orq_M0_wRn(int rn) -{ - iwmmxt_load_reg(cpu_V1, rn); - tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1); -} - -static inline void gen_op_iwmmxt_andq_M0_wRn(int rn) -{ - iwmmxt_load_reg(cpu_V1, rn); - tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1); -} - -static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn) -{ - iwmmxt_load_reg(cpu_V1, rn); - tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1); -} - -#define IWMMXT_OP(name) \ -static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \ -{ \ - iwmmxt_load_reg(cpu_V1, rn); \ - gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \ -} - -#define IWMMXT_OP_ENV(name) \ -static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \ -{ \ - 
iwmmxt_load_reg(cpu_V1, rn); \ - gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0, cpu_V1); \ -} - -#define IWMMXT_OP_ENV_SIZE(name) \ -IWMMXT_OP_ENV(name##b) \ -IWMMXT_OP_ENV(name##w) \ -IWMMXT_OP_ENV(name##l) - -#define IWMMXT_OP_ENV1(name) \ -static inline void gen_op_iwmmxt_##name##_M0(void) \ -{ \ - gen_helper_iwmmxt_##name(cpu_M0, tcg_env, cpu_M0); \ -} - -IWMMXT_OP(maddsq) -IWMMXT_OP(madduq) -IWMMXT_OP(sadb) -IWMMXT_OP(sadw) -IWMMXT_OP(mulslw) -IWMMXT_OP(mulshw) -IWMMXT_OP(mululw) -IWMMXT_OP(muluhw) -IWMMXT_OP(macsw) -IWMMXT_OP(macuw) - -IWMMXT_OP_ENV_SIZE(unpackl) -IWMMXT_OP_ENV_SIZE(unpackh) - -IWMMXT_OP_ENV1(unpacklub) -IWMMXT_OP_ENV1(unpackluw) -IWMMXT_OP_ENV1(unpacklul) -IWMMXT_OP_ENV1(unpackhub) -IWMMXT_OP_ENV1(unpackhuw) -IWMMXT_OP_ENV1(unpackhul) -IWMMXT_OP_ENV1(unpacklsb) -IWMMXT_OP_ENV1(unpacklsw) -IWMMXT_OP_ENV1(unpacklsl) -IWMMXT_OP_ENV1(unpackhsb) -IWMMXT_OP_ENV1(unpackhsw) -IWMMXT_OP_ENV1(unpackhsl) - -IWMMXT_OP_ENV_SIZE(cmpeq) -IWMMXT_OP_ENV_SIZE(cmpgtu) -IWMMXT_OP_ENV_SIZE(cmpgts) - -IWMMXT_OP_ENV_SIZE(mins) -IWMMXT_OP_ENV_SIZE(minu) -IWMMXT_OP_ENV_SIZE(maxs) -IWMMXT_OP_ENV_SIZE(maxu) - -IWMMXT_OP_ENV_SIZE(subn) -IWMMXT_OP_ENV_SIZE(addn) -IWMMXT_OP_ENV_SIZE(subu) -IWMMXT_OP_ENV_SIZE(addu) -IWMMXT_OP_ENV_SIZE(subs) -IWMMXT_OP_ENV_SIZE(adds) - -IWMMXT_OP_ENV(avgb0) -IWMMXT_OP_ENV(avgb1) -IWMMXT_OP_ENV(avgw0) -IWMMXT_OP_ENV(avgw1) - -IWMMXT_OP_ENV(packuw) -IWMMXT_OP_ENV(packul) -IWMMXT_OP_ENV(packuq) -IWMMXT_OP_ENV(packsw) -IWMMXT_OP_ENV(packsl) -IWMMXT_OP_ENV(packsq) - -static void gen_op_iwmmxt_set_mup(void) -{ - TCGv_i32 tmp; - tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]); - tcg_gen_ori_i32(tmp, tmp, 2); - store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); -} - -static void gen_op_iwmmxt_set_cup(void) -{ - TCGv_i32 tmp; - tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]); - tcg_gen_ori_i32(tmp, tmp, 1); - store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]); -} - -static void gen_op_iwmmxt_setpsr_nz(void) -{ - TCGv_i32 tmp = 
tcg_temp_new_i32(); - gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0); - store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]); -} - -static inline void gen_op_iwmmxt_addl_M0_wRn(int rn) -{ - iwmmxt_load_reg(cpu_V1, rn); - tcg_gen_ext32u_i64(cpu_V1, cpu_V1); - tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); -} - -static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn, - TCGv_i32 dest) -{ - int rd; - uint32_t offset; - TCGv_i32 tmp; - - rd = (insn >> 16) & 0xf; - tmp = load_reg(s, rd); - - offset = (insn & 0xff) << ((insn >> 7) & 2); - if (insn & (1 << 24)) { - /* Pre indexed */ - if (insn & (1 << 23)) - tcg_gen_addi_i32(tmp, tmp, offset); - else - tcg_gen_addi_i32(tmp, tmp, -offset); - tcg_gen_mov_i32(dest, tmp); - if (insn & (1 << 21)) { - store_reg(s, rd, tmp); - } - } else if (insn & (1 << 21)) { - /* Post indexed */ - tcg_gen_mov_i32(dest, tmp); - if (insn & (1 << 23)) - tcg_gen_addi_i32(tmp, tmp, offset); - else - tcg_gen_addi_i32(tmp, tmp, -offset); - store_reg(s, rd, tmp); - } else if (!(insn & (1 << 23))) - return 1; - return 0; -} - -static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest) -{ - int rd = (insn >> 0) & 0xf; - TCGv_i32 tmp; - - if (insn & (1 << 8)) { - if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) { - return 1; - } else { - tmp = iwmmxt_load_creg(rd); - } - } else { - tmp = tcg_temp_new_i32(); - iwmmxt_load_reg(cpu_V0, rd); - tcg_gen_extrl_i64_i32(tmp, cpu_V0); - } - tcg_gen_andi_i32(tmp, tmp, mask); - tcg_gen_mov_i32(dest, tmp); - return 0; -} - -/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred - (ie. an undefined instruction). 
*/ -static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn) -{ - int rd, wrd; - int rdhi, rdlo, rd0, rd1, i; - TCGv_i32 addr; - TCGv_i32 tmp, tmp2, tmp3; - - if ((insn & 0x0e000e00) == 0x0c000000) { - if ((insn & 0x0fe00ff0) == 0x0c400000) { - wrd = insn & 0xf; - rdlo = (insn >> 12) & 0xf; - rdhi = (insn >> 16) & 0xf; - if (insn & ARM_CP_RW_BIT) { /* TMRRC */ - iwmmxt_load_reg(cpu_V0, wrd); - tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0); - tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0); - } else { /* TMCRR */ - tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); - iwmmxt_store_reg(cpu_V0, wrd); - gen_op_iwmmxt_set_mup(); - } - return 0; - } - - wrd = (insn >> 12) & 0xf; - addr = tcg_temp_new_i32(); - if (gen_iwmmxt_address(s, insn, addr)) { - return 1; - } - if (insn & ARM_CP_RW_BIT) { - if ((insn >> 28) == 0xf) { /* WLDRW wCx */ - tmp = tcg_temp_new_i32(); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - iwmmxt_store_creg(wrd, tmp); - } else { - i = 1; - if (insn & (1 << 8)) { - if (insn & (1 << 22)) { /* WLDRD */ - gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s)); - i = 0; - } else { /* WLDRW wRd */ - tmp = tcg_temp_new_i32(); - gen_aa32_ld32u(s, tmp, addr, get_mem_index(s)); - } - } else { - tmp = tcg_temp_new_i32(); - if (insn & (1 << 22)) { /* WLDRH */ - gen_aa32_ld16u(s, tmp, addr, get_mem_index(s)); - } else { /* WLDRB */ - gen_aa32_ld8u(s, tmp, addr, get_mem_index(s)); - } - } - if (i) { - tcg_gen_extu_i32_i64(cpu_M0, tmp); - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - } - } else { - if ((insn >> 28) == 0xf) { /* WSTRW wCx */ - tmp = iwmmxt_load_creg(wrd); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - } else { - gen_op_iwmmxt_movq_M0_wRn(wrd); - tmp = tcg_temp_new_i32(); - if (insn & (1 << 8)) { - if (insn & (1 << 22)) { /* WSTRD */ - gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s)); - } else { /* WSTRW wRd */ - tcg_gen_extrl_i64_i32(tmp, cpu_M0); - gen_aa32_st32(s, tmp, addr, get_mem_index(s)); - } - } else { - if (insn & (1 << 22)) { /* WSTRH */ - 
tcg_gen_extrl_i64_i32(tmp, cpu_M0); - gen_aa32_st16(s, tmp, addr, get_mem_index(s)); - } else { /* WSTRB */ - tcg_gen_extrl_i64_i32(tmp, cpu_M0); - gen_aa32_st8(s, tmp, addr, get_mem_index(s)); - } - } - } - } - return 0; - } - - if ((insn & 0x0f000000) != 0x0e000000) - return 1; - - switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) { - case 0x000: /* WOR */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - gen_op_iwmmxt_orq_M0_wRn(rd1); - gen_op_iwmmxt_setpsr_nz(); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x011: /* TMCR */ - if (insn & 0xf) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - switch (wrd) { - case ARM_IWMMXT_wCID: - case ARM_IWMMXT_wCASF: - break; - case ARM_IWMMXT_wCon: - gen_op_iwmmxt_set_cup(); - /* Fall through. */ - case ARM_IWMMXT_wCSSF: - tmp = iwmmxt_load_creg(wrd); - tmp2 = load_reg(s, rd); - tcg_gen_andc_i32(tmp, tmp, tmp2); - iwmmxt_store_creg(wrd, tmp); - break; - case ARM_IWMMXT_wCGR0: - case ARM_IWMMXT_wCGR1: - case ARM_IWMMXT_wCGR2: - case ARM_IWMMXT_wCGR3: - gen_op_iwmmxt_set_cup(); - tmp = load_reg(s, rd); - iwmmxt_store_creg(wrd, tmp); - break; - default: - return 1; - } - break; - case 0x100: /* WXOR */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - gen_op_iwmmxt_xorq_M0_wRn(rd1); - gen_op_iwmmxt_setpsr_nz(); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x111: /* TMRC */ - if (insn & 0xf) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - tmp = iwmmxt_load_creg(wrd); - store_reg(s, rd, tmp); - break; - case 0x300: /* WANDN */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tcg_gen_neg_i64(cpu_M0, cpu_M0); - gen_op_iwmmxt_andq_M0_wRn(rd1); - 
gen_op_iwmmxt_setpsr_nz(); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x200: /* WAND */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - gen_op_iwmmxt_andq_M0_wRn(rd1); - gen_op_iwmmxt_setpsr_nz(); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x810: case 0xa10: /* WMADD */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 0) & 0xf; - rd1 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - if (insn & (1 << 21)) - gen_op_iwmmxt_maddsq_M0_wRn(rd1); - else - gen_op_iwmmxt_madduq_M0_wRn(rd1); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_op_iwmmxt_unpacklb_M0_wRn(rd1); - break; - case 1: - gen_op_iwmmxt_unpacklw_M0_wRn(rd1); - break; - case 2: - gen_op_iwmmxt_unpackll_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_op_iwmmxt_unpackhb_M0_wRn(rd1); - break; - case 1: - gen_op_iwmmxt_unpackhw_M0_wRn(rd1); - break; - case 2: - gen_op_iwmmxt_unpackhl_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - if (insn & (1 << 22)) - 
gen_op_iwmmxt_sadw_M0_wRn(rd1); - else - gen_op_iwmmxt_sadb_M0_wRn(rd1); - if (!(insn & (1 << 20))) - gen_op_iwmmxt_addl_M0_wRn(wrd); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - if (insn & (1 << 21)) { - if (insn & (1 << 20)) - gen_op_iwmmxt_mulshw_M0_wRn(rd1); - else - gen_op_iwmmxt_mulslw_M0_wRn(rd1); - } else { - if (insn & (1 << 20)) - gen_op_iwmmxt_muluhw_M0_wRn(rd1); - else - gen_op_iwmmxt_mululw_M0_wRn(rd1); - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - if (insn & (1 << 21)) - gen_op_iwmmxt_macsw_M0_wRn(rd1); - else - gen_op_iwmmxt_macuw_M0_wRn(rd1); - if (!(insn & (1 << 20))) { - iwmmxt_load_reg(cpu_V1, wrd); - tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1); - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - gen_op_iwmmxt_cmpeqb_M0_wRn(rd1); - break; - case 1: - gen_op_iwmmxt_cmpeqw_M0_wRn(rd1); - break; - case 2: - gen_op_iwmmxt_cmpeql_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - if (insn & (1 << 22)) { - if (insn & (1 << 20)) - gen_op_iwmmxt_avgw1_M0_wRn(rd1); - else - gen_op_iwmmxt_avgw0_M0_wRn(rd1); - } else { - if (insn & (1 << 20)) - 
gen_op_iwmmxt_avgb1_M0_wRn(rd1); - else - gen_op_iwmmxt_avgb0_M0_wRn(rd1); - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3)); - tcg_gen_andi_i32(tmp, tmp, 7); - iwmmxt_load_reg(cpu_V1, rd1); - gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */ - if (((insn >> 6) & 3) == 3) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - tmp = load_reg(s, rd); - gen_op_iwmmxt_movq_M0_wRn(wrd); - switch ((insn >> 6) & 3) { - case 0: - tmp2 = tcg_constant_i32(0xff); - tmp3 = tcg_constant_i32((insn & 7) << 3); - break; - case 1: - tmp2 = tcg_constant_i32(0xffff); - tmp3 = tcg_constant_i32((insn & 3) << 4); - break; - case 2: - tmp2 = tcg_constant_i32(0xffffffff); - tmp3 = tcg_constant_i32((insn & 1) << 5); - break; - default: - g_assert_not_reached(); - } - gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */ - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - if (rd == 15 || ((insn >> 22) & 3) == 3) - return 1; - gen_op_iwmmxt_movq_M0_wRn(wrd); - tmp = tcg_temp_new_i32(); - switch ((insn >> 22) & 3) { - case 0: - tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3); - tcg_gen_extrl_i64_i32(tmp, cpu_M0); - if (insn & 8) { - tcg_gen_ext8s_i32(tmp, tmp); - } else { - tcg_gen_andi_i32(tmp, tmp, 0xff); - } - break; - case 1: - tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4); - tcg_gen_extrl_i64_i32(tmp, cpu_M0); - if (insn & 8) { - tcg_gen_ext16s_i32(tmp, tmp); - } else { - tcg_gen_andi_i32(tmp, tmp, 
0xffff); - } - break; - case 2: - tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5); - tcg_gen_extrl_i64_i32(tmp, cpu_M0); - break; - } - store_reg(s, rd, tmp); - break; - case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */ - if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3) - return 1; - tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); - switch ((insn >> 22) & 3) { - case 0: - tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0); - break; - case 1: - tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4); - break; - case 2: - tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12); - break; - } - tcg_gen_shli_i32(tmp, tmp, 28); - gen_set_nzcv(tmp); - break; - case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */ - if (((insn >> 6) & 3) == 3) - return 1; - rd = (insn >> 12) & 0xf; - wrd = (insn >> 16) & 0xf; - tmp = load_reg(s, rd); - switch ((insn >> 6) & 3) { - case 0: - gen_helper_iwmmxt_bcstb(cpu_M0, tmp); - break; - case 1: - gen_helper_iwmmxt_bcstw(cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_bcstl(cpu_M0, tmp); - break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */ - if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) - return 1; - tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); - tmp2 = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp2, tmp); - switch ((insn >> 22) & 3) { - case 0: - for (i = 0; i < 7; i ++) { - tcg_gen_shli_i32(tmp2, tmp2, 4); - tcg_gen_and_i32(tmp, tmp, tmp2); - } - break; - case 1: - for (i = 0; i < 3; i ++) { - tcg_gen_shli_i32(tmp2, tmp2, 8); - tcg_gen_and_i32(tmp, tmp, tmp2); - } - break; - case 2: - tcg_gen_shli_i32(tmp2, tmp2, 16); - tcg_gen_and_i32(tmp, tmp, tmp2); - break; - } - gen_set_nzcv(tmp); - break; - case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */ - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - 
gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0); - break; - case 1: - gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0); - break; - case 2: - gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */ - if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3) - return 1; - tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF); - tmp2 = tcg_temp_new_i32(); - tcg_gen_mov_i32(tmp2, tmp); - switch ((insn >> 22) & 3) { - case 0: - for (i = 0; i < 7; i ++) { - tcg_gen_shli_i32(tmp2, tmp2, 4); - tcg_gen_or_i32(tmp, tmp, tmp2); - } - break; - case 1: - for (i = 0; i < 3; i ++) { - tcg_gen_shli_i32(tmp2, tmp2, 8); - tcg_gen_or_i32(tmp, tmp, tmp2); - } - break; - case 2: - tcg_gen_shli_i32(tmp2, tmp2, 16); - tcg_gen_or_i32(tmp, tmp, tmp2); - break; - } - gen_set_nzcv(tmp); - break; - case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */ - rd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3) - return 1; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_temp_new_i32(); - switch ((insn >> 22) & 3) { - case 0: - gen_helper_iwmmxt_msbb(tmp, cpu_M0); - break; - case 1: - gen_helper_iwmmxt_msbw(tmp, cpu_M0); - break; - case 2: - gen_helper_iwmmxt_msbl(tmp, cpu_M0); - break; - } - store_reg(s, rd, tmp); - break; - case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */ - case 0x906: case 0xb06: case 0xd06: case 0xf06: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1); - else - gen_op_iwmmxt_cmpgtub_M0_wRn(rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1); - else - gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1); - else - 
gen_op_iwmmxt_cmpgtul_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */ - case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpacklsb_M0(); - else - gen_op_iwmmxt_unpacklub_M0(); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpacklsw_M0(); - else - gen_op_iwmmxt_unpackluw_M0(); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpacklsl_M0(); - else - gen_op_iwmmxt_unpacklul_M0(); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */ - case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpackhsb_M0(); - else - gen_op_iwmmxt_unpackhub_M0(); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpackhsw_M0(); - else - gen_op_iwmmxt_unpackhuw_M0(); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_unpackhsl_M0(); - else - gen_op_iwmmxt_unpackhul_M0(); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */ - case 0x214: case 0x614: case 0xa14: case 0xe14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_temp_new_i32(); - if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - return 1; - } - switch ((insn >> 22) & 3) { - case 1: - gen_helper_iwmmxt_srlw(cpu_M0, tcg_env, cpu_M0, tmp); - 
break; - case 2: - gen_helper_iwmmxt_srll(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 3: - gen_helper_iwmmxt_srlq(cpu_M0, tcg_env, cpu_M0, tmp); - break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */ - case 0x014: case 0x414: case 0x814: case 0xc14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_temp_new_i32(); - if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - return 1; - } - switch ((insn >> 22) & 3) { - case 1: - gen_helper_iwmmxt_sraw(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_sral(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 3: - gen_helper_iwmmxt_sraq(cpu_M0, tcg_env, cpu_M0, tmp); - break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */ - case 0x114: case 0x514: case 0x914: case 0xd14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_temp_new_i32(); - if (gen_iwmmxt_shift(insn, 0xff, tmp)) { - return 1; - } - switch ((insn >> 22) & 3) { - case 1: - gen_helper_iwmmxt_sllw(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 2: - gen_helper_iwmmxt_slll(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 3: - gen_helper_iwmmxt_sllq(cpu_M0, tcg_env, cpu_M0, tmp); - break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */ - case 0x314: case 0x714: case 0xb14: case 0xf14: - if (((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_temp_new_i32(); - switch ((insn >> 22) & 3) { - case 1: - if (gen_iwmmxt_shift(insn, 0xf, tmp)) { - return 1; - 
} - gen_helper_iwmmxt_rorw(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 2: - if (gen_iwmmxt_shift(insn, 0x1f, tmp)) { - return 1; - } - gen_helper_iwmmxt_rorl(cpu_M0, tcg_env, cpu_M0, tmp); - break; - case 3: - if (gen_iwmmxt_shift(insn, 0x3f, tmp)) { - return 1; - } - gen_helper_iwmmxt_rorq(cpu_M0, tcg_env, cpu_M0, tmp); - break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */ - case 0x916: case 0xb16: case 0xd16: case 0xf16: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_minsb_M0_wRn(rd1); - else - gen_op_iwmmxt_minub_M0_wRn(rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_minsw_M0_wRn(rd1); - else - gen_op_iwmmxt_minuw_M0_wRn(rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_minsl_M0_wRn(rd1); - else - gen_op_iwmmxt_minul_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */ - case 0x816: case 0xa16: case 0xc16: case 0xe16: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 0: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsb_M0_wRn(rd1); - else - gen_op_iwmmxt_maxub_M0_wRn(rd1); - break; - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsw_M0_wRn(rd1); - else - gen_op_iwmmxt_maxuw_M0_wRn(rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_maxsl_M0_wRn(rd1); - else - gen_op_iwmmxt_maxul_M0_wRn(rd1); - break; - case 3: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */ - case 0x402: case 0x502: case 0x602: case 0x702: - wrd = (insn >> 12) 
& 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - iwmmxt_load_reg(cpu_V1, rd1); - gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, - tcg_constant_i32((insn >> 20) & 3)); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */ - case 0x41a: case 0x51a: case 0x61a: case 0x71a: - case 0x81a: case 0x91a: case 0xa1a: case 0xb1a: - case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 20) & 0xf) { - case 0x0: - gen_op_iwmmxt_subnb_M0_wRn(rd1); - break; - case 0x1: - gen_op_iwmmxt_subub_M0_wRn(rd1); - break; - case 0x3: - gen_op_iwmmxt_subsb_M0_wRn(rd1); - break; - case 0x4: - gen_op_iwmmxt_subnw_M0_wRn(rd1); - break; - case 0x5: - gen_op_iwmmxt_subuw_M0_wRn(rd1); - break; - case 0x7: - gen_op_iwmmxt_subsw_M0_wRn(rd1); - break; - case 0x8: - gen_op_iwmmxt_subnl_M0_wRn(rd1); - break; - case 0x9: - gen_op_iwmmxt_subul_M0_wRn(rd1); - break; - case 0xb: - gen_op_iwmmxt_subsl_M0_wRn(rd1); - break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */ - case 0x41e: case 0x51e: case 0x61e: case 0x71e: - case 0x81e: case 0x91e: case 0xa1e: case 0xb1e: - case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - tmp = tcg_constant_i32(((insn >> 16) & 0xf0) | (insn & 0x0f)); - gen_helper_iwmmxt_shufh(cpu_M0, tcg_env, cpu_M0, tmp); - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */ - case 0x418: case 0x518: case 0x618: case 0x718: - case 0x818: case 0x918: case 0xa18: case 0xb18: - case 0xc18: case 0xd18: case 0xe18: 
case 0xf18: - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 20) & 0xf) { - case 0x0: - gen_op_iwmmxt_addnb_M0_wRn(rd1); - break; - case 0x1: - gen_op_iwmmxt_addub_M0_wRn(rd1); - break; - case 0x3: - gen_op_iwmmxt_addsb_M0_wRn(rd1); - break; - case 0x4: - gen_op_iwmmxt_addnw_M0_wRn(rd1); - break; - case 0x5: - gen_op_iwmmxt_adduw_M0_wRn(rd1); - break; - case 0x7: - gen_op_iwmmxt_addsw_M0_wRn(rd1); - break; - case 0x8: - gen_op_iwmmxt_addnl_M0_wRn(rd1); - break; - case 0x9: - gen_op_iwmmxt_addul_M0_wRn(rd1); - break; - case 0xb: - gen_op_iwmmxt_addsl_M0_wRn(rd1); - break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */ - case 0x408: case 0x508: case 0x608: case 0x708: - case 0x808: case 0x908: case 0xa08: case 0xb08: - case 0xc08: case 0xd08: case 0xe08: case 0xf08: - if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0) - return 1; - wrd = (insn >> 12) & 0xf; - rd0 = (insn >> 16) & 0xf; - rd1 = (insn >> 0) & 0xf; - gen_op_iwmmxt_movq_M0_wRn(rd0); - switch ((insn >> 22) & 3) { - case 1: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsw_M0_wRn(rd1); - else - gen_op_iwmmxt_packuw_M0_wRn(rd1); - break; - case 2: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsl_M0_wRn(rd1); - else - gen_op_iwmmxt_packul_M0_wRn(rd1); - break; - case 3: - if (insn & (1 << 21)) - gen_op_iwmmxt_packsq_M0_wRn(rd1); - else - gen_op_iwmmxt_packuq_M0_wRn(rd1); - break; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - gen_op_iwmmxt_set_cup(); - break; - case 0x201: case 0x203: case 0x205: case 0x207: - case 0x209: case 0x20b: case 0x20d: case 0x20f: - case 0x211: case 0x213: case 0x215: case 0x217: - case 0x219: case 0x21b: case 0x21d: case 0x21f: - wrd = (insn >> 5) & 0xf; - rd0 = (insn >> 12) & 0xf; - rd1 = (insn >> 0) & 0xf; - if (rd0 == 0xf || rd1 == 0xf) - 
return 1; - gen_op_iwmmxt_movq_M0_wRn(wrd); - tmp = load_reg(s, rd0); - tmp2 = load_reg(s, rd1); - switch ((insn >> 16) & 0xf) { - case 0x0: /* TMIA */ - gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); - break; - case 0x8: /* TMIAPH */ - gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); - break; - case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */ - if (insn & (1 << 16)) - tcg_gen_shri_i32(tmp, tmp, 16); - if (insn & (1 << 17)) - tcg_gen_shri_i32(tmp2, tmp2, 16); - gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); - break; - default: - return 1; - } - gen_op_iwmmxt_movq_wRn_M0(wrd); - gen_op_iwmmxt_set_mup(); - break; - default: - return 1; - } - - return 0; -} - -/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred - (ie. an undefined instruction). */ -static int disas_dsp_insn(DisasContext *s, uint32_t insn) -{ - int acc, rd0, rd1, rdhi, rdlo; - TCGv_i32 tmp, tmp2; - - if ((insn & 0x0ff00f10) == 0x0e200010) { - /* Multiply with Internal Accumulate Format */ - rd0 = (insn >> 12) & 0xf; - rd1 = insn & 0xf; - acc = (insn >> 5) & 7; - - if (acc != 0) - return 1; - - tmp = load_reg(s, rd0); - tmp2 = load_reg(s, rd1); - switch ((insn >> 16) & 0xf) { - case 0x0: /* MIA */ - gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2); - break; - case 0x8: /* MIAPH */ - gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2); - break; - case 0xc: /* MIABB */ - case 0xd: /* MIABT */ - case 0xe: /* MIATB */ - case 0xf: /* MIATT */ - if (insn & (1 << 16)) - tcg_gen_shri_i32(tmp, tmp, 16); - if (insn & (1 << 17)) - tcg_gen_shri_i32(tmp2, tmp2, 16); - gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2); - break; - default: - return 1; - } - - gen_op_iwmmxt_movq_wRn_M0(acc); - return 0; - } - - if ((insn & 0x0fe00ff8) == 0x0c400000) { - /* Internal Accumulator Access Format */ - rdhi = (insn >> 16) & 0xf; - rdlo = (insn >> 12) & 0xf; - acc = insn & 7; - - if (acc != 0) - return 1; - - if (insn & ARM_CP_RW_BIT) { /* MRA */ - 
iwmmxt_load_reg(cpu_V0, acc); - tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0); - tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0); - tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1); - } else { /* MAR */ - tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]); - iwmmxt_store_reg(cpu_V0, acc); - } - return 0; - } - - return 1; -} - static void gen_goto_ptr(void) { tcg_gen_lookup_and_goto_ptr(); @@ -2518,7 +1310,7 @@ static void gen_goto_ptr(void) * cpu_loop_exec. Any live exit_requests will be processed as we * enter the next TB. */ -static void gen_goto_tb(DisasContext *s, int n, target_long diff) +static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, target_long diff) { if (translator_use_goto_tb(&s->base, s->pc_curr + diff)) { /* @@ -2531,12 +1323,12 @@ static void gen_goto_tb(DisasContext *s, int n, target_long diff) */ if (tb_cflags(s->base.tb) & CF_PCREL) { gen_update_pc(s, diff); - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); } else { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); gen_update_pc(s, diff); } - tcg_gen_exit_tb(s->base.tb, n); + tcg_gen_exit_tb(s->base.tb, tb_slot_idx); } else { gen_update_pc(s, diff); gen_goto_ptr(); @@ -2982,21 +1774,11 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, if (maskbit != 4 && maskbit != 14) { /* T4 and T14 are RES0 so never cause traps */ - TCGv_i32 t; - DisasLabel over = gen_disas_label(s); - - t = load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2)); - tcg_gen_andi_i32(t, t, 1u << maskbit); - tcg_gen_brcondi_i32(TCG_COND_EQ, t, 0, over.label); + TCGLabel *fail = delay_exception_el(s, EXCP_UDEF, syndrome, 2); + TCGv_i32 t = + load_cpu_offset(offsetoflow32(CPUARMState, cp15.hstr_el2)); - gen_exception_insn_el(s, 0, EXCP_UDEF, syndrome, 2); - /* - * gen_exception_insn() will set is_jmp to DISAS_NORETURN, - * but since we're conditionally branching over it, we want - * to assume continue-to-next-instruction. 
- */ - s->base.is_jmp = DISAS_NEXT; - set_disas_label(s, over); + tcg_gen_brcondi_i32(TCG_COND_TSTNE, t, 1u << maskbit, fail); } } @@ -3048,13 +1830,10 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, } if ((s->hstr_active && s->current_el == 0) || ri->accessfn || - (ri->fgt && s->fgt_active) || - (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) { + (ri->fgt && s->fgt_active)) { /* * Emit code to perform further access permissions checks at * runtime; this may result in an exception. - * Note that on XScale all cp0..c13 registers do an access check - * call in order to handle c15_cpar. */ gen_set_condexec(s); gen_update_pc(s, 0); @@ -3192,24 +1971,6 @@ static void do_coproc_insn(DisasContext *s, int cpnum, int is64, } } -/* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */ -static void disas_xscale_insn(DisasContext *s, uint32_t insn) -{ - int cpnum = (insn >> 8) & 0xf; - - if (extract32(s->c15_cpar, cpnum, 1) == 0) { - unallocated_encoding(s); - } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { - if (disas_iwmmxt_insn(s, insn)) { - unallocated_encoding(s); - } - } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) { - if (disas_dsp_insn(s, insn)) { - unallocated_encoding(s); - } - } -} - /* Store a 64-bit value to a register pair. Clobbers val. */ static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val) { @@ -3569,14 +2330,7 @@ static bool valid_cp(DisasContext *s, int cp) * only cp14 and cp15 are valid, and other values aren't considered * to be in the coprocessor-instruction space at all. v8M still * permits coprocessors 0..7. - * For XScale, we must not decode the XScale cp0, cp1 space as - * a standard coprocessor insn, because we want to fall through to - * the legacy disas_xscale_insn() decoder after decodetree is done. 
*/ - if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) { - return false; - } - if (arm_dc_feature(s, ARM_FEATURE_V8) && !arm_dc_feature(s, ARM_FEATURE_M)) { return cp >= 14; @@ -6844,11 +5598,10 @@ static bool trans_LE(DisasContext *s, arg_LE *a) if (!a->tp && dc_isar_feature(aa32_mve, s) && fpu_active) { /* Need to do a runtime check for LTPSIZE != 4 */ - DisasLabel skipexc = gen_disas_label(s); + TCGLabel *fail = delay_exception(s, EXCP_INVSTATE, syn_uncategorized()); + tmp = load_cpu_field(v7m.ltpsize); - tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 4, skipexc.label); - gen_exception_insn(s, 0, EXCP_INVSTATE, syn_uncategorized()); - set_disas_label(s, skipexc); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 4, fail); } if (a->f) { @@ -7343,18 +6096,6 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) disas_neon_shared(s, insn)) { return; } - /* fall back to legacy decoder */ - - if ((insn & 0x0e000f00) == 0x0c000100) { - if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) { - /* iWMMXt register transfer. */ - if (extract32(s->c15_cpar, 1, 1)) { - if (!disas_iwmmxt_insn(s, insn)) { - return; - } - } - } - } goto illegal_op; } if (cond != 0xe) { @@ -7368,16 +6109,7 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn) disas_vfp(s, insn)) { return; } - /* fall back to legacy decoder */ - /* TODO: convert xscale/iwmmxt decoder to decodetree ?? 
*/ - if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) { - if (((insn & 0x0c000e00) == 0x0c000000) - && ((insn & 0x03000000) != 0x03000000)) { - /* Coprocessor insn, coprocessor 0 or 1 */ - disas_xscale_insn(s, insn); - return; - } - } + /* We didn't match anything in the decoder: UNDEF */ illegal_op: unallocated_encoding(s); @@ -7606,12 +6338,8 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) dc->hstr_active = EX_TBFLAG_A32(tb_flags, HSTR_ACTIVE); dc->ns = EX_TBFLAG_A32(tb_flags, NS); dc->vfp_enabled = EX_TBFLAG_A32(tb_flags, VFPEN); - if (arm_feature(env, ARM_FEATURE_XSCALE)) { - dc->c15_cpar = EX_TBFLAG_A32(tb_flags, XSCALE_CPAR); - } else { - dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); - dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); - } + dc->vec_len = EX_TBFLAG_A32(tb_flags, VECLEN); + dc->vec_stride = EX_TBFLAG_A32(tb_flags, VECSTRIDE); dc->sme_trap_nonstreaming = EX_TBFLAG_A32(tb_flags, SME_TRAP_NONSTREAMING); } @@ -7651,10 +6379,6 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4; dc->base.max_insns = MIN(dc->base.max_insns, bound); } - - cpu_V0 = tcg_temp_new_i64(); - cpu_V1 = tcg_temp_new_i64(); - cpu_M0 = tcg_temp_new_i64(); } static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) @@ -8107,6 +6831,8 @@ static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) gen_goto_tb(dc, 1, curr_insn_len(dc)); } } + + emit_delayed_exceptions(dc); } static const TranslatorOps arm_translator_ops = { diff --git a/target/arm/tcg/translate.h b/target/arm/tcg/translate.h index f974996f3f857..9a85ea74dbb6a 100644 --- a/target/arm/tcg/translate.h +++ b/target/arm/tcg/translate.h @@ -21,9 +21,25 @@ typedef struct DisasLabel { target_ulong pc_save; } DisasLabel; +/* + * Emit an exception call out of line. 
+ */ +typedef struct DisasDelayException { + struct DisasDelayException *next; + TCGLabel *lab; + target_long pc_curr; + target_long pc_save; + int condexec_mask; + int condexec_cond; + uint32_t excp; + uint32_t syn; + uint32_t target_el; +} DisasDelayException; + typedef struct DisasContext { DisasContextBase base; const ARMISARegisters *isar; + DisasDelayException *delay_excp_list; /* The address of the current instruction being translated. */ target_ulong pc_curr; @@ -150,6 +166,8 @@ typedef struct DisasContext { bool trap_eret; /* True if FEAT_LSE2 SCTLR_ELx.nAA is set */ bool naa; + /* True if HCR_EL2.E2H is set */ + bool e2h; /* True if FEAT_NV HCR_EL2.NV is enabled */ bool nv; /* True if NV enabled and HCR_EL2.NV1 is set */ @@ -164,6 +182,12 @@ typedef struct DisasContext { bool fpcr_ah; /* True if FPCR.NEP is 1 (FEAT_AFP scalar upper-element result handling) */ bool fpcr_nep; + /* True if GCSEnabled. */ + bool gcs_en; + /* True if GCSReturnValueCheckEnabled. */ + bool gcs_rvcen; + /* GCSSTR exception EL or 0 if enabled */ + uint8_t gcsstr_el; /* * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI. * < 0, set by the current instruction. @@ -175,8 +199,6 @@ typedef struct DisasContext { uint8_t gm_blocksize; /* True if the current insn_start has been updated. 
*/ bool insn_start_updated; - /* Bottom two bits of XScale c15_cpar coprocessor access control reg */ - int c15_cpar; /* Offset from VNCR_EL2 when FEAT_NV2 redirects this reg to memory */ uint32_t nv2_redirect_offset; } DisasContext; @@ -359,6 +381,10 @@ void gen_exception_insn_el(DisasContext *s, target_long pc_diff, int excp, uint32_t syn, uint32_t target_el); void gen_exception_insn(DisasContext *s, target_long pc_diff, int excp, uint32_t syn); +TCGLabel *delay_exception_el(DisasContext *s, int excp, + uint32_t syn, uint32_t target_el); +TCGLabel *delay_exception(DisasContext *s, int excp, uint32_t syn); +void emit_delayed_exceptions(DisasContext *s); /* Return state of Alternate Half-precision flag, caller frees result */ static inline TCGv_i32 get_ahp_flag(void) @@ -372,27 +398,27 @@ static inline TCGv_i32 get_ahp_flag(void) } /* Set bits within PSTATE. */ -static inline void set_pstate_bits(uint32_t bits) +static inline void set_pstate_bits(uint64_t bits) { - TCGv_i32 p = tcg_temp_new_i32(); + TCGv_i64 p = tcg_temp_new_i64(); tcg_debug_assert(!(bits & CACHED_PSTATE_BITS)); - tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate)); - tcg_gen_ori_i32(p, p, bits); - tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate)); + tcg_gen_ld_i64(p, tcg_env, offsetof(CPUARMState, pstate)); + tcg_gen_ori_i64(p, p, bits); + tcg_gen_st_i64(p, tcg_env, offsetof(CPUARMState, pstate)); } /* Clear bits within PSTATE. 
*/ -static inline void clear_pstate_bits(uint32_t bits) +static inline void clear_pstate_bits(uint64_t bits) { - TCGv_i32 p = tcg_temp_new_i32(); + TCGv_i64 p = tcg_temp_new_i64(); tcg_debug_assert(!(bits & CACHED_PSTATE_BITS)); - tcg_gen_ld_i32(p, tcg_env, offsetof(CPUARMState, pstate)); - tcg_gen_andi_i32(p, p, ~bits); - tcg_gen_st_i32(p, tcg_env, offsetof(CPUARMState, pstate)); + tcg_gen_ld_i64(p, tcg_env, offsetof(CPUARMState, pstate)); + tcg_gen_andi_i64(p, p, ~bits); + tcg_gen_st_i64(p, tcg_env, offsetof(CPUARMState, pstate)); } /* If the singlestep state is Active-not-pending, advance to Active-pending. */ diff --git a/target/arm/tcg/vec_helper.c b/target/arm/tcg/vec_helper.c index bae6165b505c7..33a136b90a61e 100644 --- a/target/arm/tcg/vec_helper.c +++ b/target/arm/tcg/vec_helper.c @@ -1467,16 +1467,19 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, \ clear_tail(d, oprsz, simd_maxsz(desc)); \ } +DO_3OP(gvec_fadd_b16, bfloat16_add, float16) DO_3OP(gvec_fadd_h, float16_add, float16) DO_3OP(gvec_fadd_s, float32_add, float32) DO_3OP(gvec_fadd_d, float64_add, float64) DO_3OP(gvec_bfadd, bfloat16_add, bfloat16) +DO_3OP(gvec_fsub_b16, bfloat16_sub, float16) DO_3OP(gvec_fsub_h, float16_sub, float16) DO_3OP(gvec_fsub_s, float32_sub, float32) DO_3OP(gvec_fsub_d, float64_sub, float64) DO_3OP(gvec_bfsub, bfloat16_sub, bfloat16) +DO_3OP(gvec_fmul_b16, bfloat16_mul, float16) DO_3OP(gvec_fmul_h, float16_mul, float16) DO_3OP(gvec_fmul_s, float32_mul, float32) DO_3OP(gvec_fmul_d, float64_mul, float64) @@ -1782,6 +1785,7 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, \ #define nop(N, M, S) (M) +DO_FMUL_IDX(gvec_fmul_idx_b16, nop, bfloat16_mul, float16, H2) DO_FMUL_IDX(gvec_fmul_idx_h, nop, float16_mul, float16, H2) DO_FMUL_IDX(gvec_fmul_idx_s, nop, float32_mul, float32, H4) DO_FMUL_IDX(gvec_fmul_idx_d, nop, float64_mul, float64, H8) diff --git a/target/arm/trace-events b/target/arm/trace-events index 4438dce7becc2..72a2c7d0969d4 100644 --- a/target/arm/trace-events 
+++ b/target/arm/trace-events @@ -13,3 +13,13 @@ arm_gt_update_irq(int timer, int irqstate) "gt_update_irq: timer %d irqstate %d" # kvm.c kvm_arm_fixup_msi_route(uint64_t iova, uint64_t gpa) "MSI iova = 0x%"PRIx64" is translated into 0x%"PRIx64 + +# cpu.c +arm_cpu_reset(uint64_t mp_aff) "cpu %" PRIu64 +arm_emulate_firmware_reset(uint64_t mp_aff, unsigned target_el) "cpu %" PRIu64 " @EL%u" + +# arm-powerctl.c +arm_powerctl_set_cpu_on(uint64_t mp_aff, unsigned target_el, const char *mode, uint64_t entry, uint64_t context_id) "cpu %" PRIu64 " (EL %u, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 +arm_powerctl_set_cpu_on_and_reset(uint64_t mp_aff) "cpu %" PRIu64 +arm_powerctl_set_cpu_off(uint64_t mp_aff) "cpu %" PRIu64 +arm_powerctl_reset_cpu(uint64_t mp_aff) "cpu %" PRIu64 diff --git a/target/avr/cpu.c b/target/avr/cpu.c index 6995de6a12b7e..a6df71d020574 100644 --- a/target/avr/cpu.c +++ b/target/avr/cpu.c @@ -45,7 +45,7 @@ static vaddr avr_cpu_get_pc(CPUState *cs) static bool avr_cpu_has_work(CPUState *cs) { - return (cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET)) + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_RESET) && cpu_interrupts_enabled(cpu_env(cs)); } diff --git a/target/avr/helper.c b/target/avr/helper.c index b9cd6d5ef278b..4b29ab35263e1 100644 --- a/target/avr/helper.c +++ b/target/avr/helper.c @@ -47,7 +47,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) cs->exception_index = EXCP_RESET; avr_cpu_do_interrupt(cs); - cs->interrupt_request &= ~CPU_INTERRUPT_RESET; + cpu_reset_interrupt(cs, CPU_INTERRUPT_RESET); return true; } } @@ -59,7 +59,7 @@ bool avr_cpu_exec_interrupt(CPUState *cs, int interrupt_request) env->intsrc &= env->intsrc - 1; /* clear the interrupt */ if (!env->intsrc) { - cs->interrupt_request &= ~CPU_INTERRUPT_HARD; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } return true; } diff --git a/target/avr/translate.c b/target/avr/translate.c index 804b0b21dbd56..ef6f655a458b7 100644 --- 
a/target/avr/translate.c +++ b/target/avr/translate.c @@ -981,14 +981,15 @@ static void gen_pop_ret(DisasContext *ctx, TCGv ret) } } -static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, + target_ulong dest) { const TranslationBlock *tb = ctx->base.tb; if (translator_use_goto_tb(&ctx->base, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_i32(cpu_pc, dest); - tcg_gen_exit_tb(tb, n); + tcg_gen_exit_tb(tb, tb_slot_idx); } else { tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_lookup_and_goto_ptr(); diff --git a/target/hexagon/decode.c b/target/hexagon/decode.c index 23deba2426f84..b5ece604505b6 100644 --- a/target/hexagon/decode.c +++ b/target/hexagon/decode.c @@ -236,9 +236,9 @@ static void decode_set_insn_attr_fields(Packet *pkt) if (GET_ATTRIB(opcode, A_SCALAR_STORE) && !GET_ATTRIB(opcode, A_MEMSIZE_0B)) { if (pkt->insn[i].slot == 0) { - pkt->pkt_has_store_s0 = true; + pkt->pkt_has_scalar_store_s0 = true; } else { - pkt->pkt_has_store_s1 = true; + pkt->pkt_has_scalar_store_s1 = true; } } } diff --git a/target/hexagon/gen_helper_funcs.py b/target/hexagon/gen_helper_funcs.py index c1f806ac4b25b..a9c0e27a801ff 100755 --- a/target/hexagon/gen_helper_funcs.py +++ b/target/hexagon/gen_helper_funcs.py @@ -69,7 +69,7 @@ def gen_helper_function(f, tag, tagregs, tagimms): if hex_common.need_slot(tag): if "A_LOAD" in hex_common.attribdict[tag]: f.write(hex_common.code_fmt(f"""\ - bool pkt_has_store_s1 = slotval & 0x1; + bool pkt_has_scalar_store_s1 = slotval & 0x1; """)) f.write(hex_common.code_fmt(f"""\ uint32_t slot = slotval >> 1; diff --git a/target/hexagon/genptr.c b/target/hexagon/genptr.c index 08fc5413de79b..cecaece4ae5f7 100644 --- a/target/hexagon/genptr.c +++ b/target/hexagon/genptr.c @@ -395,7 +395,8 @@ static inline void gen_store_conditional8(DisasContext *ctx, #ifndef CONFIG_HEXAGON_IDEF_PARSER static TCGv gen_slotval(DisasContext *ctx) { - int slotval = 
(ctx->pkt->pkt_has_store_s1 & 1) | (ctx->insn->slot << 1); + int slotval = + (ctx->pkt->pkt_has_scalar_store_s1 & 1) | (ctx->insn->slot << 1); return tcg_constant_tl(slotval); } #endif diff --git a/target/hexagon/hex_common.py b/target/hexagon/hex_common.py index 758e5fd12dfed..6803908718179 100755 --- a/target/hexagon/hex_common.py +++ b/target/hexagon/hex_common.py @@ -350,6 +350,7 @@ def helper_arg(self): f"{self.helper_arg_type()} {self.helper_arg_name()}" ) + # # Every register is either Single or Pair or Hvx # @@ -1070,11 +1071,22 @@ def init_registers(): for reg in new_regs: new_registers[f"{reg.regtype}{reg.regid}"] = reg -def get_register(tag, regtype, regid): - if f"{regtype}{regid}V" in semdict[tag]: - return registers[f"{regtype}{regid}"] - else: - return new_registers[f"{regtype}{regid}"] +def is_new_reg(tag, regid): + if regid[0] in "NO": + return True + return regid[0] == "P" and \ + f"{regid}N" in semdict[tag] and \ + f"{regid}V" not in semdict[tag] + +def get_register(tag, regtype, regid, subtype=""): + regid = f"{regtype}{regid}" + is_new = is_new_reg(tag, regid) + try: + reg = new_registers[regid] if is_new else registers[regid] + except KeyError: + raise Exception(f"Unknown {'new ' if is_new else ''}register {regid}" +\ + f"from '{tag}' with syntax '{semdict[tag]}'") from None + return reg def helper_ret_type(tag, regs): ## If there is a scalar result, it is the return type diff --git a/target/hexagon/idef-parser/README.rst b/target/hexagon/idef-parser/README.rst index 7199177ee33e6..235e3debee3c3 100644 --- a/target/hexagon/idef-parser/README.rst +++ b/target/hexagon/idef-parser/README.rst @@ -637,7 +637,7 @@ tinycode for the Hexagon ``add`` instruction :: ---- 00021094 - mov_i32 pkt_has_store_s1,$0x0 + mov_i32 pkt_has_scalar_store_s1,$0x0 add_i32 tmp0,r2,r2 mov_i32 loc2,tmp0 mov_i32 new_r1,loc2 diff --git a/target/hexagon/idef-parser/parser-helpers.c b/target/hexagon/idef-parser/parser-helpers.c index 542af8d0a65b3..1dc52b4e02990 100644 --- 
a/target/hexagon/idef-parser/parser-helpers.c +++ b/target/hexagon/idef-parser/parser-helpers.c @@ -1725,7 +1725,7 @@ void gen_cancel(Context *c, YYLTYPE *locp) void gen_load_cancel(Context *c, YYLTYPE *locp) { - OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n"); + OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {\n"); OUT(c, locp, "ctx->s1_store_processed = false;\n"); OUT(c, locp, "process_store(ctx, 1);\n"); OUT(c, locp, "}\n"); @@ -1750,7 +1750,7 @@ void gen_load(Context *c, YYLTYPE *locp, HexValue *width, /* Lookup the effective address EA */ find_variable(c, locp, ea, ea); - OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_store_s1) {\n"); + OUT(c, locp, "if (insn->slot == 0 && pkt->pkt_has_scalar_store_s1) {\n"); OUT(c, locp, "probe_noshuf_load(", ea, ", ", width, ", ctx->mem_idx);\n"); OUT(c, locp, "process_store(ctx, 1);\n"); OUT(c, locp, "}\n"); diff --git a/target/hexagon/idef-parser/prepare b/target/hexagon/idef-parser/prepare deleted file mode 100755 index cb3622d4f8d9e..0000000000000 --- a/target/hexagon/idef-parser/prepare +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env bash - -# -# Copyright(c) 2019-2021 rev.ng Labs Srl. All Rights Reserved. -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, see . 
-# - -set -e -set -o pipefail - -# Run the preprocessor and drop comments -cpp "$@" diff --git a/target/hexagon/insn.h b/target/hexagon/insn.h index 24dcf7fe9f385..5d59430da9e12 100644 --- a/target/hexagon/insn.h +++ b/target/hexagon/insn.h @@ -66,8 +66,8 @@ struct Packet { bool pkt_has_dczeroa; - bool pkt_has_store_s0; - bool pkt_has_store_s1; + bool pkt_has_scalar_store_s0; + bool pkt_has_scalar_store_s1; bool pkt_has_hvx; Insn *vhist_insn; diff --git a/target/hexagon/macros.h b/target/hexagon/macros.h index 9ba9be408db4c..088e5961ab7af 100644 --- a/target/hexagon/macros.h +++ b/target/hexagon/macros.h @@ -83,7 +83,7 @@ */ #define CHECK_NOSHUF(VA, SIZE) \ do { \ - if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \ + if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \ probe_noshuf_load(VA, SIZE, ctx->mem_idx); \ process_store(ctx, 1); \ } \ @@ -94,11 +94,11 @@ TCGLabel *noshuf_label = gen_new_label(); \ tcg_gen_brcondi_tl(TCG_COND_EQ, PRED, 0, noshuf_label); \ GET_EA; \ - if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \ + if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \ probe_noshuf_load(EA, SIZE, ctx->mem_idx); \ } \ gen_set_label(noshuf_label); \ - if (insn->slot == 0 && ctx->pkt->pkt_has_store_s1) { \ + if (insn->slot == 0 && ctx->pkt->pkt_has_scalar_store_s1) { \ process_store(ctx, 1); \ } \ } while (0) @@ -525,7 +525,7 @@ static inline TCGv gen_read_ireg(TCGv result, TCGv val, int shift) #define fLOAD(NUM, SIZE, SIGN, EA, DST) \ do { \ - check_noshuf(env, pkt_has_store_s1, slot, EA, SIZE, GETPC()); \ + check_noshuf(env, pkt_has_scalar_store_s1, slot, EA, SIZE, GETPC()); \ DST = (size##SIZE##SIGN##_t)MEM_LOAD##SIZE(env, EA, GETPC()); \ } while (0) #endif diff --git a/target/hexagon/meson.build b/target/hexagon/meson.build index bb4ebaae816ea..d26787a9b948e 100644 --- a/target/hexagon/meson.build +++ b/target/hexagon/meson.build @@ -280,12 +280,13 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs command: 
[python, files('gen_idef_parser_funcs.py'), semantics_generated, '@OUTPUT@'], ) + compiler = meson.get_compiler('c').get_id() preprocessed_idef_parser_input_generated = custom_target( 'idef_parser_input.preprocessed.h.inc', output: 'idef_parser_input.preprocessed.h.inc', input: idef_parser_input_generated, depend_files: [idef_parser_dir / 'macros.h.inc'], - command: [idef_parser_dir / 'prepare', '@INPUT@', '-I' + idef_parser_dir, '-o', '@OUTPUT@'], + command: [compiler, '-x', 'c', '-E', '-I', idef_parser_dir, '-o', '@OUTPUT@', '@INPUT@'], ) flex = generator( @@ -323,7 +324,7 @@ if idef_parser_enabled and 'hexagon-linux-user' in target_dirs ) indent = find_program('indent', required: false) - if indent.found() + if indent.found() and host_os == 'linux' idef_generated_tcg_c = custom_target( 'indent', input: idef_generated_tcg[0], diff --git a/target/hexagon/op_helper.c b/target/hexagon/op_helper.c index 444799d3adec8..e2e80ca7efac3 100644 --- a/target/hexagon/op_helper.c +++ b/target/hexagon/op_helper.c @@ -463,11 +463,11 @@ void HELPER(probe_pkt_scalar_hvx_stores)(CPUHexagonState *env, int mask) * If the load is in slot 0 and there is a store in slot1 (that * wasn't cancelled), we have to do the store first. 
*/ -static void check_noshuf(CPUHexagonState *env, bool pkt_has_store_s1, +static void check_noshuf(CPUHexagonState *env, bool pkt_has_scalar_store_s1, uint32_t slot, target_ulong vaddr, int size, uintptr_t ra) { - if (slot == 0 && pkt_has_store_s1 && + if (slot == 0 && pkt_has_scalar_store_s1 && ((env->slot_cancelled & (1 << 1)) == 0)) { probe_read(env, vaddr, size, MMU_USER_IDX, ra); commit_store(env, 1, ra); diff --git a/target/hexagon/translate.c b/target/hexagon/translate.c index 02fd40c160feb..8fce219c0de48 100644 --- a/target/hexagon/translate.c +++ b/target/hexagon/translate.c @@ -133,15 +133,15 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest) return translator_use_goto_tb(&ctx->base, dest); } -static void gen_goto_tb(DisasContext *ctx, int idx, target_ulong dest, bool - move_to_pc) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, + target_ulong dest, bool move_to_pc) { if (use_goto_tb(ctx, dest)) { - tcg_gen_goto_tb(idx); + tcg_gen_goto_tb(tb_slot_idx); if (move_to_pc) { tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest); } - tcg_gen_exit_tb(ctx->base.tb, idx); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { if (move_to_pc) { tcg_gen_movi_tl(hex_gpr[HEX_REG_PC], dest); @@ -693,11 +693,11 @@ static void process_store_log(DisasContext *ctx) * the memory accesses overlap. */ Packet *pkt = ctx->pkt; - if (pkt->pkt_has_store_s1) { + if (pkt->pkt_has_scalar_store_s1) { g_assert(!pkt->pkt_has_dczeroa); process_store(ctx, 1); } - if (pkt->pkt_has_store_s0) { + if (pkt->pkt_has_scalar_store_s0) { g_assert(!pkt->pkt_has_dczeroa); process_store(ctx, 0); } @@ -822,8 +822,9 @@ static void gen_commit_packet(DisasContext *ctx) * involved in committing the packet. 
*/ Packet *pkt = ctx->pkt; - bool has_store_s0 = pkt->pkt_has_store_s0; - bool has_store_s1 = (pkt->pkt_has_store_s1 && !ctx->s1_store_processed); + bool has_store_s0 = pkt->pkt_has_scalar_store_s0; + bool has_store_s1 = + (pkt->pkt_has_scalar_store_s1 && !ctx->s1_store_processed); bool has_hvx_store = pkt_has_hvx_store(pkt); if (pkt->pkt_has_dczeroa) { /* diff --git a/target/hppa/cpu.c b/target/hppa/cpu.c index 24777727e6203..0ca79ee5e23a5 100644 --- a/target/hppa/cpu.c +++ b/target/hppa/cpu.c @@ -135,7 +135,7 @@ static void hppa_restore_state_to_opc(CPUState *cs, #ifndef CONFIG_USER_ONLY static bool hppa_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/hppa/cpu.h b/target/hppa/cpu.h index 11d59d11ca8a9..c652ef945ac4f 100644 --- a/target/hppa/cpu.h +++ b/target/hppa/cpu.h @@ -29,21 +29,21 @@ #include "qemu/interval-tree.h" #include "hw/registerfields.h" -#define MMU_ABS_W_IDX 6 -#define MMU_ABS_IDX 7 -#define MMU_KERNEL_IDX 8 -#define MMU_KERNEL_P_IDX 9 -#define MMU_PL1_IDX 10 -#define MMU_PL1_P_IDX 11 -#define MMU_PL2_IDX 12 -#define MMU_PL2_P_IDX 13 -#define MMU_USER_IDX 14 -#define MMU_USER_P_IDX 15 - -#define MMU_IDX_MMU_DISABLED(MIDX) ((MIDX) < MMU_KERNEL_IDX) -#define MMU_IDX_TO_PRIV(MIDX) (((MIDX) - MMU_KERNEL_IDX) / 2) -#define MMU_IDX_TO_P(MIDX) (((MIDX) - MMU_KERNEL_IDX) & 1) -#define PRIV_P_TO_MMU_IDX(PRIV, P) ((PRIV) * 2 + !!(P) + MMU_KERNEL_IDX) +#define MMU_KERNEL_IDX 0 +#define MMU_KERNEL_P_IDX 1 +#define MMU_PL1_IDX 2 +#define MMU_PL1_P_IDX 3 +#define MMU_PL2_IDX 4 +#define MMU_PL2_P_IDX 5 +#define MMU_USER_IDX 6 +#define MMU_USER_P_IDX 7 +#define MMU_ABS_IDX 8 +#define MMU_ABS_W_IDX 9 + +#define MMU_IDX_MMU_DISABLED(MIDX) ((MIDX) >= MMU_ABS_IDX) +#define MMU_IDX_TO_PRIV(MIDX) ((MIDX) / 2) +#define MMU_IDX_TO_P(MIDX) ((MIDX) & 1) +#define PRIV_P_TO_MMU_IDX(PRIV, P) 
((PRIV) * 2 + !!(P)) #define PRIV_KERNEL 0 #define PRIV_USER 3 @@ -187,7 +187,7 @@ typedef struct HPPATLBEntry { struct HPPATLBEntry *unused_next; }; - target_ulong pa; + hwaddr pa; unsigned entry_valid : 1; @@ -320,8 +320,8 @@ void hppa_translate_code(CPUState *cs, TranslationBlock *tb, #define CPU_RESOLVING_TYPE TYPE_HPPA_CPU -static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask, - uint64_t spc, target_ulong off) +static inline vaddr hppa_form_gva_mask(uint64_t gva_offset_mask, + uint64_t spc, target_ulong off) { #ifdef CONFIG_USER_ONLY return off & gva_offset_mask; @@ -330,8 +330,8 @@ static inline target_ulong hppa_form_gva_mask(uint64_t gva_offset_mask, #endif } -static inline target_ulong hppa_form_gva(CPUHPPAState *env, uint64_t spc, - target_ulong off) +static inline vaddr hppa_form_gva(CPUHPPAState *env, uint64_t spc, + target_ulong off) { return hppa_form_gva_mask(env->gva_offset_mask, spc, off); } diff --git a/target/hppa/helper.c b/target/hppa/helper.c index d7f8495d9825b..edcd2bf27c83a 100644 --- a/target/hppa/helper.c +++ b/target/hppa/helper.c @@ -148,8 +148,8 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) m = UINT32_MAX; } - qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n" - "IA_B %08" PRIx64 ":%0*" PRIx64 " (" TARGET_FMT_lx ")\n", + qemu_fprintf(f, "IA_F %08" PRIx64 ":%0*" PRIx64 " (0x%" VADDR_PRIx ")\n" + "IA_B %08" PRIx64 ":%0*" PRIx64 " (0x%" VADDR_PRIx ")\n", env->iasq_f >> 32, w, m & env->iaoq_f, hppa_form_gva_mask(env->gva_offset_mask, env->iasq_f, env->iaoq_f), diff --git a/target/hppa/insns.decode b/target/hppa/insns.decode index 4eaac750ea8c0..13c6a55bf2af0 100644 --- a/target/hppa/insns.decode +++ b/target/hppa/insns.decode @@ -365,10 +365,10 @@ fstd 011100 ..... ..... .. ............1. @ldstim11 &mpyadd rm1 rm2 ta ra tm @mpyadd ...... rm1:5 rm2:5 ta:5 ra:5 . tm:5 &mpyadd -fmpyadd_f 000110 ..... ..... ..... ..... 0 ..... @mpyadd -fmpyadd_d 000110 ..... ..... ..... ..... 1 ..... 
@mpyadd -fmpysub_f 100110 ..... ..... ..... ..... 0 ..... @mpyadd -fmpysub_d 100110 ..... ..... ..... ..... 1 ..... @mpyadd +fmpyadd_f 000110 ..... ..... ..... ..... 1 ..... @mpyadd +fmpyadd_d 000110 ..... ..... ..... ..... 0 ..... @mpyadd +fmpysub_f 100110 ..... ..... ..... ..... 1 ..... @mpyadd +fmpysub_d 100110 ..... ..... ..... ..... 0 ..... @mpyadd #### # Conditional Branches diff --git a/target/hppa/mem_helper.c b/target/hppa/mem_helper.c index 9bdd0a6f23d6b..cce82e6599989 100644 --- a/target/hppa/mem_helper.c +++ b/target/hppa/mem_helper.c @@ -803,7 +803,7 @@ void HELPER(diag_btlb)(CPUHPPAState *env) uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f) { - uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f); + vaddr gva = hppa_form_gva(env, env->iasq_f, iaoq_f); HPPATLBEntry *ent = hppa_find_tlb(env, gva); if (ent == NULL) { diff --git a/target/hppa/trace-events b/target/hppa/trace-events index a10ba73d5d45c..01761a4559b2a 100644 --- a/target/hppa/trace-events +++ b/target/hppa/trace-events @@ -1,13 +1,13 @@ # See docs/devel/tracing.rst for syntax documentation. 
# mem_helper.c -disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx" -disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%lx" +disable hppa_tlb_flush_ent(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64 +disable hppa_tlb_find_entry(void *env, void *ent, int valid, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p valid=%d va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64 disable hppa_tlb_find_entry_not_found(void *env, uint64_t addr) "env=%p addr=%08lx" disable hppa_tlb_get_physical_address(void *env, int ret, int prot, uint64_t addr, uint64_t phys) "env=%p ret=%d prot=%d addr=0x%lx phys=0x%lx" disable hppa_tlb_fill_excp(void *env, uint64_t addr, int size, int type, int mmu_idx) "env=%p addr=0x%lx size=%d type=%d mmu_idx=%d" disable hppa_tlb_fill_success(void *env, uint64_t addr, uint64_t phys, int size, int type, int mmu_idx) "env=%p addr=0x%lx phys=0x%lx size=%d type=%d mmu_idx=%d" -disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%lx" +disable hppa_tlb_itlba(void *env, void *ent, uint64_t va_b, uint64_t va_e, uint64_t pa) "env=%p ent=%p va_b=0x%lx va_e=0x%lx pa=0x%" PRIx64 disable hppa_tlb_itlbp(void *env, void *ent, int access_id, int u, int pl2, int pl1, int type, int b, int d, int t) "env=%p ent=%p access_id=%x u=%d pl2=%d pl1=%d type=%d b=%d d=%d t=%d" disable hppa_tlb_ptlb(void *env) "env=%p" disable hppa_tlb_ptlb_local(void *env) "env=%p" diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 7a81cfcb88794..853cba2ba4f15 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -104,6 +104,12 @@ typedef struct DisasContext { #define MMU_DISABLED(C) MMU_IDX_MMU_DISABLED((C)->mmu_idx) #endif +static 
inline MemOp mo_endian(DisasContext *ctx) +{ + /* The PSW_E bit sets the (little) endianness, but we don't implement it. */ + return MO_BE; +} + /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */ static int expand_sm_imm(DisasContext *ctx, int val) { @@ -1599,6 +1605,7 @@ static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb, /* Caller uses nullify_over/nullify_end. */ assert(ctx->null_cond.c == TCG_COND_NEVER); + mop |= mo_endian(ctx); form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, MMU_DISABLED(ctx)); tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); @@ -1617,6 +1624,7 @@ static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb, /* Caller uses nullify_over/nullify_end. */ assert(ctx->null_cond.c == TCG_COND_NEVER); + mop |= mo_endian(ctx); form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, MMU_DISABLED(ctx)); tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); @@ -1635,6 +1643,7 @@ static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb, /* Caller uses nullify_over/nullify_end. */ assert(ctx->null_cond.c == TCG_COND_NEVER); + mop |= mo_endian(ctx); form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, MMU_DISABLED(ctx)); tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); @@ -1653,6 +1662,7 @@ static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb, /* Caller uses nullify_over/nullify_end. 
*/ assert(ctx->null_cond.c == TCG_COND_NEVER); + mop |= mo_endian(ctx); form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify, MMU_DISABLED(ctx)); tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx)); @@ -1691,7 +1701,7 @@ static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb, nullify_over(ctx); tmp = tcg_temp_new_i32(); - do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); + do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL); save_frw_i32(rt, tmp); if (rt == 0) { @@ -1716,7 +1726,7 @@ static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb, nullify_over(ctx); tmp = tcg_temp_new_i64(); - do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); + do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ); save_frd(rt, tmp); if (rt == 0) { @@ -1750,7 +1760,7 @@ static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb, nullify_over(ctx); tmp = load_frw_i32(rt); - do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL); + do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UL); return nullify_end(ctx); } @@ -1770,7 +1780,7 @@ static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb, nullify_over(ctx); tmp = load_frd(rt); - do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ); + do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_UQ); return nullify_end(ctx); } @@ -3302,7 +3312,7 @@ static bool trans_ld(DisasContext *ctx, arg_ldst *a) return gen_illegal(ctx); } return do_load(ctx, a->t, a->b, a->x, a->scale ? 
a->size : 0, - a->disp, a->sp, a->m, a->size | MO_TE); + a->disp, a->sp, a->m, a->size); } static bool trans_st(DisasContext *ctx, arg_ldst *a) @@ -3311,12 +3321,12 @@ static bool trans_st(DisasContext *ctx, arg_ldst *a) if (!ctx->is_pa20 && a->size > MO_32) { return gen_illegal(ctx); } - return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE); + return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size); } static bool trans_ldc(DisasContext *ctx, arg_ldst *a) { - MemOp mop = MO_TE | MO_ALIGN | a->size; + MemOp mop = mo_endian(ctx) | MO_ALIGN | a->size; TCGv_i64 dest, ofs; TCGv_i64 addr; diff --git a/target/i386/arch_memory_mapping.c b/target/i386/arch_memory_mapping.c index a2398c2173226..560f4689abc26 100644 --- a/target/i386/arch_memory_mapping.c +++ b/target/i386/arch_memory_mapping.c @@ -35,7 +35,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as, } start_paddr = (pte & ~0xfff) & ~(0x1ULL << 63); - if (cpu_physical_memory_is_io(start_paddr)) { + if (address_space_is_io(as, start_paddr)) { /* I/O region */ continue; } @@ -65,7 +65,7 @@ static void walk_pte2(MemoryMappingList *list, AddressSpace *as, } start_paddr = pte & ~0xfff; - if (cpu_physical_memory_is_io(start_paddr)) { + if (address_space_is_io(as, start_paddr)) { /* I/O region */ continue; } @@ -100,7 +100,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as, if (pde & PG_PSE_MASK) { /* 2 MB page */ start_paddr = (pde & ~0x1fffff) & ~(0x1ULL << 63); - if (cpu_physical_memory_is_io(start_paddr)) { + if (address_space_is_io(as, start_paddr)) { /* I/O region */ continue; } @@ -142,7 +142,7 @@ static void walk_pde2(MemoryMappingList *list, AddressSpace *as, */ high_paddr = ((hwaddr)(pde & 0x1fe000) << 19); start_paddr = (pde & ~0x3fffff) | high_paddr; - if (cpu_physical_memory_is_io(start_paddr)) { + if (address_space_is_io(as, start_paddr)) { /* I/O region */ continue; } @@ -203,7 +203,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, 
if (pdpe & PG_PSE_MASK) { /* 1 GB page */ start_paddr = (pdpe & ~0x3fffffff) & ~(0x1ULL << 63); - if (cpu_physical_memory_is_io(start_paddr)) { + if (address_space_is_io(as, start_paddr)) { /* I/O region */ continue; } diff --git a/target/i386/cpu-apic.c b/target/i386/cpu-apic.c index 242a05fdbe9c1..564c1288e47d0 100644 --- a/target/i386/cpu-apic.c +++ b/target/i386/cpu-apic.c @@ -41,34 +41,31 @@ APICCommonClass *apic_get_class(Error **errp) void x86_cpu_apic_create(X86CPU *cpu, Error **errp) { - APICCommonState *apic; APICCommonClass *apic_class = apic_get_class(errp); if (!apic_class) { return; } - cpu->apic_state = DEVICE(object_new_with_class(OBJECT_CLASS(apic_class))); + cpu->apic_state = APIC_COMMON(object_new_with_class(OBJECT_CLASS(apic_class))); object_property_add_child(OBJECT(cpu), "lapic", OBJECT(cpu->apic_state)); object_unref(OBJECT(cpu->apic_state)); /* TODO: convert to link<> */ - apic = APIC_COMMON(cpu->apic_state); - apic->cpu = cpu; - apic->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; + cpu->apic_state->cpu = cpu; + cpu->apic_state->apicbase = APIC_DEFAULT_ADDRESS | MSR_IA32_APICBASE_ENABLE; /* * apic_common_set_id needs to check if the CPU has x2APIC - * feature in case APIC ID >= 255, so we need to set apic->cpu + * feature in case APIC ID >= 255, so we need to set cpu->apic_state->cpu * before setting APIC ID */ - qdev_prop_set_uint32(cpu->apic_state, "id", cpu->apic_id); + qdev_prop_set_uint32(DEVICE(cpu->apic_state), "id", cpu->apic_id); } void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) { - APICCommonState *apic; static bool apic_mmio_map_once; if (cpu->apic_state == NULL) { @@ -77,12 +74,11 @@ void x86_cpu_apic_realize(X86CPU *cpu, Error **errp) qdev_realize(DEVICE(cpu->apic_state), NULL, errp); /* Map APIC MMIO area */ - apic = APIC_COMMON(cpu->apic_state); if (!apic_mmio_map_once) { memory_region_add_subregion_overlap(get_system_memory(), - apic->apicbase & + cpu->apic_state->apicbase & MSR_IA32_APICBASE_BASE, - 
&apic->io_memory, + &cpu->apic_state->io_memory, 0x1000); apic_mmio_map_once = true; } diff --git a/target/i386/cpu-dump.c b/target/i386/cpu-dump.c index a72ed93bd2f34..67bf31e0caafb 100644 --- a/target/i386/cpu-dump.c +++ b/target/i386/cpu-dump.c @@ -291,7 +291,7 @@ static void dump_apic_interrupt(const char *name, uint32_t *ireg_tab, void x86_cpu_dump_local_apic_state(CPUState *cs, int flags) { X86CPU *cpu = X86_CPU(cs); - APICCommonState *s = APIC_COMMON(cpu->apic_state); + APICCommonState *s = cpu->apic_state; if (!s) { qemu_printf("local apic state not available\n"); return; diff --git a/target/i386/cpu.c b/target/i386/cpu.c index 251d5760a0bd1..0a66e1fec939e 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -1397,7 +1397,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { "no-nested-data-bp", "fs-gs-base-ns", "lfence-always-serializing", NULL, - NULL, NULL, "null-sel-clr-base", NULL, + NULL, "verw-clear", "null-sel-clr-base", NULL, "auto-ibrs", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1415,6 +1415,22 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .tcg_features = 0, .unmigratable_flags = 0, }, + [FEAT_8000_0021_ECX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, "tsa-sq-no", "tsa-l1-no", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x80000021, .reg = R_ECX, }, + .tcg_features = 0, + .unmigratable_flags = 0, + }, [FEAT_8000_0022_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -7539,6 +7555,20 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w) #endif break; + case FEAT_7_0_EDX: + /* + * Windows does not like ARCH_CAPABILITIES on AMD machines at all. + * Do not show the fake ARCH_CAPABILITIES MSR that KVM sets up, + * except if needed for migration. 
+ * + * When arch_cap_always_on is removed, this tweak can move to + * kvm_arch_get_supported_cpuid. + */ + if (cpu && IS_AMD_CPU(&cpu->env) && !cpu->arch_cap_always_on) { + unavail = CPUID_7_0_EDX_ARCH_CAPABILITIES; + } + break; + default: break; } @@ -7885,8 +7915,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, * count, but Intel needs maximum number of addressable IDs for * logical processors per package. */ - if (cpu->vendor_cpuid_only_v2 && - (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) { + if ((IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))) { num = 1 << apicid_pkg_offset(topo_info); } else { num = threads_per_pkg; @@ -7895,6 +7924,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, /* Fixup overflow: max value for bits 23-16 is 255. */ *ebx |= MIN(num, 255) << 16; } + if (cpu->pdcm_on_even_without_pmu) { + if (!cpu->enable_pmu) { + *ecx &= ~CPUID_EXT_PDCM; + } + } break; case 2: { /* cache info: needed for Pentium Pro compatibility */ const CPUCaches *caches; @@ -8508,6 +8542,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *eax = *ebx = *ecx = *edx = 0; *eax = env->features[FEAT_8000_0021_EAX]; *ebx = env->features[FEAT_8000_0021_EBX]; + *ecx = env->features[FEAT_8000_0021_ECX]; break; case 0x80000022: *eax = *ebx = *ecx = *edx = 0; @@ -8614,7 +8649,11 @@ static void x86_cpu_reset_hold(Object *obj, ResetType type) env->idt.limit = 0xffff; env->gdt.limit = 0xffff; +#if defined(CONFIG_USER_ONLY) + env->ldt.limit = 0; +#else env->ldt.limit = 0xffff; +#endif env->ldt.flags = DESC_P_MASK | (2 << DESC_TYPE_SHIFT); env->tr.limit = 0xffff; env->tr.flags = DESC_P_MASK | (11 << DESC_TYPE_SHIFT); @@ -8750,7 +8789,7 @@ void x86_cpu_after_reset(X86CPU *cpu) } if (cpu->apic_state) { - device_cold_reset(cpu->apic_state); + device_cold_reset(DEVICE(cpu->apic_state)); } #endif } @@ -8945,12 +8984,11 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) } } - /* PDCM is fixed1 bit for TDX */ - if 
(!cpu->enable_pmu && !is_tdx_vm()) { - mark_unavailable_features(cpu, FEAT_1_ECX, - env->user_features[FEAT_1_ECX] & CPUID_EXT_PDCM, - "This feature is not available due to PMU being disabled"); - env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM; + if (!cpu->pdcm_on_even_without_pmu) { + /* PDCM is fixed1 bit for TDX */ + if (!cpu->enable_pmu && !is_tdx_vm()) { + env->features[FEAT_1_ECX] &= ~CPUID_EXT_PDCM; + } } for (i = 0; i < ARRAY_SIZE(feature_dependencies); i++) { @@ -10008,6 +10046,11 @@ static const Property x86_cpu_properties[] = { true), DEFINE_PROP_BOOL("x-l1-cache-per-thread", X86CPU, l1_cache_per_core, true), DEFINE_PROP_BOOL("x-force-cpuid-0x1f", X86CPU, force_cpuid_0x1f, false), + + DEFINE_PROP_BOOL("x-arch-cap-always-on", X86CPU, + arch_cap_always_on, false), + DEFINE_PROP_BOOL("x-pdcm-on-even-without-pmu", X86CPU, + pdcm_on_even_without_pmu, false), }; #ifndef CONFIG_USER_ONLY diff --git a/target/i386/cpu.h b/target/i386/cpu.h index f977fc49a774e..d0da9bfe58ce1 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -27,6 +27,7 @@ #include "exec/cpu-defs.h" #include "exec/cpu-interrupt.h" #include "exec/memop.h" +#include "hw/i386/apic.h" #include "hw/i386/topology.h" #include "qapi/qapi-types-common.h" #include "qemu/cpu-float.h" @@ -435,9 +436,11 @@ typedef enum X86Seg { #define MSR_SMI_COUNT 0x34 #define MSR_CORE_THREAD_COUNT 0x35 #define MSR_MTRRcap 0xfe +#define MSR_MTRR_MEM_TYPE_WB 0x06 #define MSR_MTRRcap_VCNT 8 #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) +#define MSR_MTRR_ENABLE (1 << 11) #define MSR_IA32_SYSENTER_CS 0x174 #define MSR_IA32_SYSENTER_ESP 0x175 @@ -641,6 +644,7 @@ typedef enum FeatureWord { FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */ FEAT_8000_0021_EBX, /* CPUID[8000_0021].EBX */ + FEAT_8000_0021_ECX, /* CPUID[8000_0021].ECX */ FEAT_8000_0022_EAX, /* CPUID[8000_0022].EAX */ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ FEAT_KVM, /* 
CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ @@ -1101,6 +1105,8 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); #define CPUID_8000_0021_EAX_FS_GS_BASE_NS (1U << 1) /* LFENCE is always serializing */ #define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2) +/* Memory form of VERW mitigates TSA */ +#define CPUID_8000_0021_EAX_VERW_CLEAR (1U << 5) /* Null Selector Clears Base */ #define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) /* Automatic IBRS */ @@ -1124,6 +1130,11 @@ uint64_t x86_cpu_get_supported_feature_word(X86CPU *cpu, FeatureWord w); */ #define CPUID_8000_0021_EBX_RAPSIZE (8U << 16) +/* CPU is not vulnerable TSA SA-SQ attack */ +#define CPUID_8000_0021_ECX_TSA_SQ_NO (1U << 1) +/* CPU is not vulnerable TSA SA-L1 attack */ +#define CPUID_8000_0021_ECX_TSA_L1_NO (1U << 2) + /* Performance Monitoring Version 2 */ #define CPUID_8000_0022_EAX_PERFMON_V2 (1U << 0) @@ -2126,7 +2137,7 @@ typedef struct CPUArchState { QEMUTimer *xen_periodic_timer; QemuMutex xen_timers_lock; #endif -#if defined(CONFIG_HVF) +#if defined(CONFIG_HVF) || defined(CONFIG_MSHV) void *emu_mmio_buf; #endif @@ -2314,6 +2325,18 @@ struct ArchCPU { /* Forcefully disable KVM PV features not exposed in guest CPUIDs */ bool kvm_pv_enforce_cpuid; + /* + * Expose arch-capabilities unconditionally even on AMD models, for backwards + * compatibility with QEMU <10.1. + */ + bool arch_cap_always_on; + + /* + * Backwards compatibility with QEMU <10.1. The PDCM feature is now disabled when + * PMU is not available, but prior to 10.1 it was enabled even if PMU is off. 
+ */ + bool pdcm_on_even_without_pmu; + /* Number of physical address bits supported */ uint32_t phys_bits; @@ -2327,7 +2350,7 @@ struct ArchCPU { /* in order to simplify APIC support, we leave this pointer to the user */ - struct DeviceState *apic_state; + APICCommonState *apic_state; struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; Notifier machine_done; @@ -2574,6 +2597,11 @@ static inline bool x86_has_cpuid_0x1f(X86CPU *cpu) void x86_cpu_set_a20(X86CPU *cpu, int a20_state); void cpu_sync_avx_hflag(CPUX86State *env); +typedef enum X86ASIdx { + X86ASIdx_MEM = 0, + X86ASIdx_SMM = 1, +} X86ASIdx; + #ifndef CONFIG_USER_ONLY static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) { @@ -2803,7 +2831,7 @@ bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type); /* apic.c */ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); -void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, +void apic_handle_tpr_access_report(APICCommonState *s, target_ulong ip, TPRAccess access); /* Special values for X86CPUVersion: */ diff --git a/target/i386/emulate/meson.build b/target/i386/emulate/meson.build index 4edd4f462fc74..b6dafb6a5be68 100644 --- a/target/i386/emulate/meson.build +++ b/target/i386/emulate/meson.build @@ -1,5 +1,8 @@ -i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: files( +emulator_files = files( 'x86_decode.c', 'x86_emu.c', 'x86_flags.c', -)) +) + +i386_system_ss.add(when: [hvf, 'CONFIG_HVF'], if_true: emulator_files) +i386_system_ss.add(when: 'CONFIG_MSHV', if_true: emulator_files) diff --git a/target/i386/emulate/x86_decode.c b/target/i386/emulate/x86_decode.c index 2eca39802e36d..97bd6f1a3be94 100644 --- a/target/i386/emulate/x86_decode.c +++ b/target/i386/emulate/x86_decode.c @@ -71,10 +71,16 @@ static inline uint64_t decode_bytes(CPUX86State *env, struct x86_decode *decode, VM_PANIC_EX("%s invalid size %d\n", __func__, size); break; } - target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len; - 
emul_ops->read_mem(env_cpu(env), &val, va, size); + + /* copy the bytes from the instruction stream, if available */ + if (decode->stream && decode->len + size <= decode->stream->len) { + memcpy(&val, decode->stream->bytes + decode->len, size); + } else { + target_ulong va = linear_rip(env_cpu(env), env->eip) + decode->len; + emul_ops->fetch_instruction(env_cpu(env), &val, va, size); + } decode->len += size; - + return val; } @@ -2076,9 +2082,10 @@ static void decode_opcodes(CPUX86State *env, struct x86_decode *decode) } } -uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode) +static uint32_t decode_opcode(CPUX86State *env, struct x86_decode *decode) { memset(decode, 0, sizeof(*decode)); + decode_prefix(env, decode); set_addressing_size(env, decode); set_operand_size(env, decode); @@ -2088,6 +2095,18 @@ uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode) return decode->len; } +uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode) +{ + return decode_opcode(env, decode); +} + +uint32_t decode_instruction_stream(CPUX86State *env, struct x86_decode *decode, + struct x86_insn_stream *stream) +{ + decode->stream = stream; + return decode_opcode(env, decode); +} + void init_decoder(void) { int i; diff --git a/target/i386/emulate/x86_decode.h b/target/i386/emulate/x86_decode.h index 927645af1a30c..1cadf3694f02b 100644 --- a/target/i386/emulate/x86_decode.h +++ b/target/i386/emulate/x86_decode.h @@ -272,6 +272,11 @@ typedef struct x86_decode_op { }; } x86_decode_op; +typedef struct x86_insn_stream { + const uint8_t *bytes; + size_t len; +} x86_insn_stream; + typedef struct x86_decode { int len; uint8_t opcode[4]; @@ -298,11 +303,15 @@ typedef struct x86_decode { struct x86_modrm modrm; struct x86_decode_op op[4]; bool is_fpu; + + x86_insn_stream *stream; } x86_decode; uint64_t sign(uint64_t val, int size); uint32_t decode_instruction(CPUX86State *env, struct x86_decode *decode); +uint32_t 
decode_instruction_stream(CPUX86State *env, struct x86_decode *decode, + struct x86_insn_stream *stream); void *get_reg_ref(CPUX86State *env, int reg, int rex_present, int is_extended, int size); diff --git a/target/i386/emulate/x86_emu.c b/target/i386/emulate/x86_emu.c index db7a7f7437da7..4409f7bc134bf 100644 --- a/target/i386/emulate/x86_emu.c +++ b/target/i386/emulate/x86_emu.c @@ -1246,7 +1246,8 @@ static void init_cmd_handler(void) bool exec_instruction(CPUX86State *env, struct x86_decode *ins) { if (!_cmd_handler[ins->cmd].handler) { - printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x) \n", env->eip, + printf("Unimplemented handler (" TARGET_FMT_lx ") for %d (%x %x)\n", + env->eip, ins->cmd, ins->opcode[0], ins->opcode_len > 1 ? ins->opcode[1] : 0); env->eip += ins->len; diff --git a/target/i386/emulate/x86_emu.h b/target/i386/emulate/x86_emu.h index a1a961284b2d4..05686b162f640 100644 --- a/target/i386/emulate/x86_emu.h +++ b/target/i386/emulate/x86_emu.h @@ -24,6 +24,8 @@ #include "cpu.h" struct x86_emul_ops { + void (*fetch_instruction)(CPUState *cpu, void *data, target_ulong addr, + int bytes); void (*read_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes); void (*write_mem)(CPUState *cpu, void *data, target_ulong addr, int bytes); void (*read_segment_descriptor)(CPUState *cpu, struct x86_segment_descriptor *desc, diff --git a/target/i386/helper.c b/target/i386/helper.c index e0aaed3c4c4e0..72b2e195a31ed 100644 --- a/target/i386/helper.c +++ b/target/i386/helper.c @@ -110,6 +110,7 @@ int cpu_x86_support_mca_broadcast(CPUX86State *env) /* x86 mmu */ /* XXX: add PGE support */ +#ifndef CONFIG_USER_ONLY void x86_cpu_set_a20(X86CPU *cpu, int a20_state) { CPUX86State *env = &cpu->env; @@ -129,6 +130,7 @@ void x86_cpu_set_a20(X86CPU *cpu, int a20_state) env->a20_mask = ~(1 << 20) | (a20_state << 20); } } +#endif void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0) { @@ -619,6 +621,10 @@ void do_cpu_init(X86CPU *cpu) void 
do_cpu_sipi(X86CPU *cpu) { + CPUX86State *env = &cpu->env; + if (env->hflags & HF_SMM_MASK) { + return; + } apic_sipi(cpu->apic_state); } diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c index 818b50419f40d..33f723a76a771 100644 --- a/target/i386/hvf/hvf.c +++ b/target/i386/hvf/hvf.c @@ -527,7 +527,7 @@ void hvf_simulate_rdmsr(CPUState *cs) int ret; int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; - ret = apic_msr_read(index, &val); + ret = apic_msr_read(cpu->apic_state, index, &val); if (ret < 0) { x86_emul_raise_exception(env, EXCP0D_GPF, 0); } @@ -638,7 +638,7 @@ void hvf_simulate_wrmsr(CPUState *cs) int ret; int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; - ret = apic_msr_write(index, data); + ret = apic_msr_write(cpu->apic_state, index, data); if (ret < 0) { x86_emul_raise_exception(env, EXCP0D_GPF, 0); } @@ -773,9 +773,9 @@ int hvf_vcpu_exec(CPUState *cpu) switch (exit_reason) { case EXIT_REASON_HLT: { macvm_set_rip(cpu, rip + ins_len); - if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) - && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) && + && !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI) && !(idtvec_info & VMCS_IDT_VEC_VALID)) { cpu->halted = 1; ret = EXCP_HLT; diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c index 17fce1d3cdd2c..a502437c3031e 100644 --- a/target/i386/hvf/x86hvf.c +++ b/target/i386/hvf/x86hvf.c @@ -395,9 +395,9 @@ bool hvf_inject_interrupts(CPUState *cs) }; } - if (cs->interrupt_request & CPU_INTERRUPT_NMI) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) { if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { - cs->interrupt_request &= ~CPU_INTERRUPT_NMI; + cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI); info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, info); } else { @@ -406,20 +406,19 @@ bool hvf_inject_interrupts(CPUState *cs) } if (!(env->hflags & 
HF_INHIBIT_IRQ_MASK) && - (cs->interrupt_request & CPU_INTERRUPT_HARD) && + cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) { int line = cpu_get_pic_interrupt(env); - cs->interrupt_request &= ~CPU_INTERRUPT_HARD; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); if (line >= 0) { wvmcs(cs->accel->fd, VMCS_ENTRY_INTR_INFO, line | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); } } - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) { vmx_set_int_window_exiting(cs); } - return (cs->interrupt_request - & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)); + return cpu_test_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR); } int hvf_process_events(CPUState *cs) @@ -432,26 +431,26 @@ int hvf_process_events(CPUState *cs) env->eflags = rreg(cs->accel->fd, HV_X86_RFLAGS); } - if (cs->interrupt_request & CPU_INTERRUPT_INIT) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT)) { cpu_synchronize_state(cs); do_cpu_init(cpu); } - if (cs->interrupt_request & CPU_INTERRUPT_POLL) { - cs->interrupt_request &= ~CPU_INTERRUPT_POLL; + if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) { + cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL); apic_poll_irq(cpu->apic_state); } - if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && + if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || - (cs->interrupt_request & CPU_INTERRUPT_NMI)) { + cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) { cs->halted = 0; } - if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) { cpu_synchronize_state(cs); do_cpu_sipi(cpu); } - if (cs->interrupt_request & CPU_INTERRUPT_TPR) { - cs->interrupt_request &= ~CPU_INTERRUPT_TPR; + if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) { + cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR); cpu_synchronize_state(cs); apic_handle_tpr_access_report(cpu->apic_state, env->eip, env->tpr_access_type); diff --git a/target/i386/kvm/hyperv.c 
b/target/i386/kvm/hyperv.c index 9865120cc434d..f7a81bd270036 100644 --- a/target/i386/kvm/hyperv.c +++ b/target/i386/kvm/hyperv.c @@ -81,7 +81,6 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) * necessary because memory hierarchy is being changed */ async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL); - cpu_exit(CPU(cpu)); return EXCP_INTERRUPT; case KVM_EXIT_HYPERV_HCALL: { diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index 89a7953659452..9c25b55839557 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -13,6 +13,7 @@ #include "qapi/error.h" #include "system/system.h" #include "hw/boards.h" +#include "hw/i386/x86.h" #include "kvm_i386.h" #include "accel/accel-cpu-target.h" @@ -91,6 +92,15 @@ static bool kvm_cpu_realizefn(CPUState *cs, Error **errp) kvm_set_guest_phys_bits(cs); } + /* + * When SMM is enabled, there is 2 address spaces. Otherwise only 1. + * + * Only initialize address space 0 here, the second one for SMM is + * initialized at register_smram_listener() after machine init done. + */ + cs->num_ases = x86_machine_is_smm_enabled(X86_MACHINE(current_machine)) ? 2 : 1; + cpu_address_space_init(cs, X86ASIdx_MEM, "cpu-memory", cs->memory); + return true; } diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 369626f8c8d78..f7a6ef650af73 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -503,12 +503,8 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts. * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is * returned by KVM_GET_MSR_INDEX_LIST. - * - * But also, because Windows does not like ARCH_CAPABILITIES on AMD - * mcahines at all, do not show the fake ARCH_CAPABILITIES MSR that - * KVM sets up. 
*/ - if (!has_msr_arch_capabs || !(edx & CPUID_7_0_EDX_ARCH_CAPABILITIES)) { + if (!has_msr_arch_capabs) { ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; } } else if (function == 7 && index == 1 && reg == R_EAX) { @@ -657,6 +653,23 @@ uint64_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index) must_be_one = (uint32_t)value; can_be_one = (uint32_t)(value >> 32); return can_be_one & ~must_be_one; + case MSR_IA32_ARCH_CAPABILITIES: + /* + * Special handling for fb-clear bit in ARCH_CAPABILITIES MSR. + * KVM will only report the bit if it is enabled in the host, + * but, for live migration capability purposes, we want to + * expose the bit to the guest even if it is disabled in the + * host, as long as the host itself is not vulnerable to + * the issue that the fb-clear bit is meant to mitigate. + */ + if ((value & MSR_ARCH_CAP_MDS_NO) && + (value & MSR_ARCH_CAP_TAA_NO) && + (value & MSR_ARCH_CAP_SBDR_SSDP_NO) && + (value & MSR_ARCH_CAP_FBSDP_NO) && + (value & MSR_ARCH_CAP_PSDP_NO)) { + value |= MSR_ARCH_CAP_FB_CLEAR; + } + return value; default: return value; @@ -2704,6 +2717,7 @@ static MemoryRegion smram_as_mem; static void register_smram_listener(Notifier *n, void *unused) { + CPUState *cpu; MemoryRegion *smram = (MemoryRegion *) object_resolve_path("/machine/smram", NULL); @@ -2727,7 +2741,11 @@ static void register_smram_listener(Notifier *n, void *unused) address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM"); kvm_memory_listener_register(kvm_state, &smram_listener, - &smram_address_space, 1, "kvm-smram"); + &smram_address_space, X86ASIdx_SMM, "kvm-smram"); + + CPU_FOREACH(cpu) { + cpu_address_space_init(cpu, X86ASIdx_SMM, "cpu-smm", &smram_as_root); + } } static void *kvm_msr_energy_thread(void *data) @@ -3310,8 +3328,7 @@ int kvm_arch_init(MachineState *ms, KVMState *s) return ret; } - if (kvm_check_extension(s, KVM_CAP_X86_SMM) && - object_dynamic_cast(OBJECT(ms), TYPE_X86_MACHINE) && + if (object_dynamic_cast(OBJECT(ms), 
TYPE_X86_MACHINE) && x86_machine_is_smm_enabled(X86_MACHINE(ms))) { smram_machine_done.notify = register_smram_listener; qemu_add_machine_init_done_notifier(&smram_machine_done); @@ -3907,7 +3924,7 @@ static void kvm_init_msrs(X86CPU *cpu) assert(kvm_buf_set_msrs(cpu) == 0); } -static int kvm_put_msrs(X86CPU *cpu, int level) +static int kvm_put_msrs(X86CPU *cpu, KvmPutState level) { CPUX86State *env = &cpu->env; int i; @@ -5012,7 +5029,7 @@ static int kvm_get_mp_state(X86CPU *cpu) static int kvm_get_apic(X86CPU *cpu) { - DeviceState *apic = cpu->apic_state; + APICCommonState *apic = cpu->apic_state; struct kvm_lapic_state kapic; int ret; @@ -5027,7 +5044,7 @@ static int kvm_get_apic(X86CPU *cpu) return 0; } -static int kvm_put_vcpu_events(X86CPU *cpu, int level) +static int kvm_put_vcpu_events(X86CPU *cpu, KvmPutState level) { CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; @@ -5066,7 +5083,7 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level) */ events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; - cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); + cpu_reset_interrupt(cs, CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); } else { /* Keep these in cs->interrupt_request. 
*/ events.smi.pending = 0; @@ -5270,7 +5287,7 @@ static int kvm_get_nested_state(X86CPU *cpu) return ret; } -int kvm_arch_put_registers(CPUState *cpu, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cpu, KvmPutState level, Error **errp) { X86CPU *x86_cpu = X86_CPU(cpu); int ret; @@ -5453,10 +5470,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) int ret; /* Inject NMI */ - if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { - if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { bql_lock(); - cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; + cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI); bql_unlock(); DPRINTF("injected NMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_NMI); @@ -5465,9 +5482,9 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) strerror(-ret)); } } - if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) { bql_lock(); - cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; + cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI); bql_unlock(); DPRINTF("injected SMI\n"); ret = kvm_vcpu_ioctl(cpu, KVM_SMI); @@ -5478,32 +5495,31 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) } } - if (!kvm_pic_in_kernel()) { - bql_lock(); - } /* Force the VCPU out of its inner loop to process any INIT requests * or (for userspace APIC, but it is cheap to combine the checks here) * pending TPR access reports. 
*/ - if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { - if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { - cpu->exit_request = 1; + qatomic_set(&cpu->exit_request, true); } - if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { - cpu->exit_request = 1; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) { + qatomic_set(&cpu->exit_request, true); } } if (!kvm_pic_in_kernel()) { /* Try to inject an interrupt if the guest can accept it */ if (run->ready_for_interrupt_injection && - (cpu->interrupt_request & CPU_INTERRUPT_HARD) && + cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) { int irq; - cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; + bql_lock(); + + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); irq = cpu_get_pic_interrupt(env); if (irq >= 0) { struct kvm_interrupt intr; @@ -5517,13 +5533,14 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) strerror(-ret)); } } + bql_unlock(); } /* If we have an interrupt but the guest is not ready to receive an * interrupt, request an interrupt window exit. This will * cause a return to userspace as soon as the guest is ready to * receive interrupts. */ - if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) { run->request_interrupt_window = 1; } else { run->request_interrupt_window = 0; @@ -5531,8 +5548,6 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) DPRINTF("setting tpr\n"); run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); - - bql_unlock(); } } @@ -5595,18 +5610,18 @@ int kvm_arch_process_async_events(CPUState *cs) X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - if (cs->interrupt_request & CPU_INTERRUPT_MCE) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_MCE)) { /* We must not raise CPU_INTERRUPT_MCE if it's not supported. 
*/ assert(env->mcg_cap); - cs->interrupt_request &= ~CPU_INTERRUPT_MCE; + cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE); kvm_cpu_synchronize_state(cs); if (env->exception_nr == EXCP08_DBLE) { /* this means triple fault */ qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); - cs->exit_request = 1; + qatomic_set(&cs->exit_request, true); return 0; } kvm_queue_exception(env, EXCP12_MCHK, 0, 0); @@ -5618,7 +5633,7 @@ int kvm_arch_process_async_events(CPUState *cs) } } - if ((cs->interrupt_request & CPU_INTERRUPT_INIT) && + if (cpu_test_interrupt(cs, CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { kvm_cpu_synchronize_state(cs); do_cpu_init(cpu); @@ -5628,21 +5643,21 @@ int kvm_arch_process_async_events(CPUState *cs) return 0; } - if (cs->interrupt_request & CPU_INTERRUPT_POLL) { - cs->interrupt_request &= ~CPU_INTERRUPT_POLL; + if (cpu_test_interrupt(cs, CPU_INTERRUPT_POLL)) { + cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL); apic_poll_irq(cpu->apic_state); } - if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && + if ((cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || - (cs->interrupt_request & CPU_INTERRUPT_NMI)) { + cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) { cs->halted = 0; } - if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_SIPI)) { kvm_cpu_synchronize_state(cs); do_cpu_sipi(cpu); } - if (cs->interrupt_request & CPU_INTERRUPT_TPR) { - cs->interrupt_request &= ~CPU_INTERRUPT_TPR; + if (cpu_test_interrupt(cs, CPU_INTERRUPT_TPR)) { + cpu_reset_interrupt(cs, CPU_INTERRUPT_TPR); kvm_cpu_synchronize_state(cs); apic_handle_tpr_access_report(cpu->apic_state, env->eip, env->tpr_access_type); @@ -5656,9 +5671,9 @@ static int kvm_handle_halt(X86CPU *cpu) CPUState *cs = CPU(cpu); CPUX86State *env = &cpu->env; - if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) && + if (!(cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) && - !(cs->interrupt_request & CPU_INTERRUPT_NMI)) { + 
!cpu_test_interrupt(cs, CPU_INTERRUPT_NMI)) { cs->halted = 1; return EXCP_HLT; } diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 5f83e8850a2b7..5c908fdd6a584 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -56,7 +56,7 @@ bool kvm_has_adjust_clock_stable(void); bool kvm_has_exception_payload(void); void kvm_synchronize_all_tsc(void); -void kvm_get_apic_state(DeviceState *d, struct kvm_lapic_state *kapic); +void kvm_get_apic_state(APICCommonState *s, struct kvm_lapic_state *kapic); void kvm_put_apicbase(X86CPU *cpu, uint64_t value); bool kvm_has_x2apic_api(void); diff --git a/target/i386/kvm/vmsr_energy.c b/target/i386/kvm/vmsr_energy.c index 58ce3df53a3e3..890322ae37226 100644 --- a/target/i386/kvm/vmsr_energy.c +++ b/target/i386/kvm/vmsr_energy.c @@ -57,13 +57,9 @@ QIOChannelSocket *vmsr_open_socket(const char *path) }; QIOChannelSocket *sioc = qio_channel_socket_new(); - Error *local_err = NULL; qio_channel_set_name(QIO_CHANNEL(sioc), "vmsr-helper"); - qio_channel_socket_connect_sync(sioc, - &saddr, - &local_err); - if (local_err) { + if (qio_channel_socket_connect_sync(sioc, &saddr, NULL) < 0) { /* Close socket. 
*/ qio_channel_close(QIO_CHANNEL(sioc), NULL); object_unref(OBJECT(sioc)); diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c index 284c5ef6f68ca..52de019834363 100644 --- a/target/i386/kvm/xen-emu.c +++ b/target/i386/kvm/xen-emu.c @@ -21,6 +21,7 @@ #include "system/address-spaces.h" #include "xen-emu.h" #include "trace.h" +#include "system/memory.h" #include "system/runstate.h" #include "hw/pci/msi.h" @@ -75,6 +76,7 @@ static bool kvm_gva_to_gpa(CPUState *cs, uint64_t gva, uint64_t *gpa, static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz, bool is_write) { + AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED); uint8_t *buf = (uint8_t *)_buf; uint64_t gpa; size_t len; @@ -87,7 +89,7 @@ static int kvm_gva_rw(CPUState *cs, uint64_t gva, void *_buf, size_t sz, len = sz; } - cpu_physical_memory_rw(gpa, buf, len, is_write); + address_space_rw(as, gpa, MEMTXATTRS_UNSPECIFIED, buf, len, is_write); buf += len; sz -= len; diff --git a/target/i386/machine.c b/target/i386/machine.c index dd2dac1d443c8..45b7cea80aa7f 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -462,6 +462,24 @@ static const VMStateDescription vmstate_exception_info = { } }; +static bool cpu_errcode_needed(void *opaque) +{ + X86CPU *cpu = opaque; + + return cpu->env.has_error_code != 0; +} + +static const VMStateDescription vmstate_error_code = { + .name = "cpu/error_code", + .version_id = 1, + .minimum_version_id = 1, + .needed = cpu_errcode_needed, + .fields = (const VMStateField[]) { + VMSTATE_INT32(env.error_code, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + /* Poll control MSR enabled by default */ static bool poll_control_msr_needed(void *opaque) { @@ -1746,6 +1764,7 @@ const VMStateDescription vmstate_x86_cpu = { }, .subsections = (const VMStateDescription * const []) { &vmstate_exception_info, + &vmstate_error_code, &vmstate_async_pf_msr, &vmstate_async_pf_int_msr, &vmstate_pv_eoi_msr, diff --git a/target/i386/meson.build 
b/target/i386/meson.build index 092af34e2d859..89ba4912aaeb1 100644 --- a/target/i386/meson.build +++ b/target/i386/meson.build @@ -13,6 +13,7 @@ i386_ss.add(when: 'CONFIG_KVM', if_true: files('host-cpu.c')) i386_ss.add(when: 'CONFIG_HVF', if_true: files('host-cpu.c')) i386_ss.add(when: 'CONFIG_WHPX', if_true: files('host-cpu.c')) i386_ss.add(when: 'CONFIG_NVMM', if_true: files('host-cpu.c')) +i386_ss.add(when: 'CONFIG_MSHV', if_true: files('host-cpu.c')) i386_system_ss = ss.source_set() i386_system_ss.add(files( @@ -34,6 +35,7 @@ subdir('nvmm') subdir('hvf') subdir('tcg') subdir('emulate') +subdir('mshv') target_arch += {'i386': i386_ss} target_system_arch += {'i386': i386_system_ss} diff --git a/target/i386/monitor.c b/target/i386/monitor.c index 3c9b6ca62f2a2..d2bb873d49473 100644 --- a/target/i386/monitor.c +++ b/target/i386/monitor.c @@ -30,6 +30,7 @@ #include "qobject/qdict.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc.h" +#include "system/memory.h" /* Perform linear address sign extension */ static hwaddr addr_canonical(CPUArchState *env, hwaddr addr) @@ -68,23 +69,23 @@ static void print_pte(Monitor *mon, CPUArchState *env, hwaddr addr, pte & PG_RW_MASK ? 
'W' : '-'); } -static void tlb_info_32(Monitor *mon, CPUArchState *env) +static void tlb_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; unsigned int l1, l2; uint32_t pgd, pde, pte; pgd = env->cr[3] & ~0xfff; for(l1 = 0; l1 < 1024; l1++) { - cpu_physical_memory_read(pgd + l1 * 4, &pde, 4); - pde = le32_to_cpu(pde); + pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL); if (pde & PG_PRESENT_MASK) { if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { /* 4M pages */ print_pte(mon, env, (l1 << 22), pde, ~((1 << 21) - 1)); } else { for(l2 = 0; l2 < 1024; l2++) { - cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4); - pte = le32_to_cpu(pte); + pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4, + attrs, NULL); if (pte & PG_PRESENT_MASK) { print_pte(mon, env, (l1 << 22) + (l2 << 12), pte & ~PG_PSE_MASK, @@ -96,21 +97,20 @@ static void tlb_info_32(Monitor *mon, CPUArchState *env) } } -static void tlb_info_pae32(Monitor *mon, CPUArchState *env) +static void tlb_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; unsigned int l1, l2, l3; uint64_t pdpe, pde, pte; uint64_t pdp_addr, pd_addr, pt_addr; pdp_addr = env->cr[3] & ~0x1f; for (l1 = 0; l1 < 4; l1++) { - cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8); - pdpe = le64_to_cpu(pdpe); + pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL); if (pdpe & PG_PRESENT_MASK) { pd_addr = pdpe & 0x3fffffffff000ULL; for (l2 = 0; l2 < 512; l2++) { - cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8); - pde = le64_to_cpu(pde); + pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL); if (pde & PG_PRESENT_MASK) { if (pde & PG_PSE_MASK) { /* 2M pages with PAE, CR4.PSE is ignored */ @@ -119,8 +119,8 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env) } else { pt_addr = pde & 0x3fffffffff000ULL; for (l3 = 0; l3 < 512; l3++) { - 
cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8); - pte = le64_to_cpu(pte); + pte = address_space_ldq_le(as, pt_addr + l3 * 8, + attrs, NULL); if (pte & PG_PRESENT_MASK) { print_pte(mon, env, (l1 << 30) + (l2 << 21) + (l3 << 12), @@ -136,24 +136,23 @@ static void tlb_info_pae32(Monitor *mon, CPUArchState *env) } #ifdef TARGET_X86_64 -static void tlb_info_la48(Monitor *mon, CPUArchState *env, +static void tlb_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as, uint64_t l0, uint64_t pml4_addr) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; uint64_t l1, l2, l3, l4; uint64_t pml4e, pdpe, pde, pte; uint64_t pdp_addr, pd_addr, pt_addr; for (l1 = 0; l1 < 512; l1++) { - cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8); - pml4e = le64_to_cpu(pml4e); + pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL); if (!(pml4e & PG_PRESENT_MASK)) { continue; } pdp_addr = pml4e & 0x3fffffffff000ULL; for (l2 = 0; l2 < 512; l2++) { - cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8); - pdpe = le64_to_cpu(pdpe); + pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL); if (!(pdpe & PG_PRESENT_MASK)) { continue; } @@ -167,8 +166,7 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env, pd_addr = pdpe & 0x3fffffffff000ULL; for (l3 = 0; l3 < 512; l3++) { - cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8); - pde = le64_to_cpu(pde); + pde = address_space_ldq_le(as, pd_addr + l3 * 8, attrs, NULL); if (!(pde & PG_PRESENT_MASK)) { continue; } @@ -182,10 +180,8 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env, pt_addr = pde & 0x3fffffffff000ULL; for (l4 = 0; l4 < 512; l4++) { - cpu_physical_memory_read(pt_addr - + l4 * 8, - &pte, 8); - pte = le64_to_cpu(pte); + pte = address_space_ldq_le(as, pt_addr + l4 * 8, + attrs, NULL); if (pte & PG_PRESENT_MASK) { print_pte(mon, env, (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21) + (l4 << 12), @@ -197,18 +193,18 @@ static void tlb_info_la48(Monitor *mon, CPUArchState *env, } } -static 
void tlb_info_la57(Monitor *mon, CPUArchState *env) +static void tlb_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; uint64_t l0; uint64_t pml5e; uint64_t pml5_addr; pml5_addr = env->cr[3] & 0x3fffffffff000ULL; for (l0 = 0; l0 < 512; l0++) { - cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8); - pml5e = le64_to_cpu(pml5e); + pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL); if (pml5e & PG_PRESENT_MASK) { - tlb_info_la48(mon, env, l0, pml5e & 0x3fffffffff000ULL); + tlb_info_la48(mon, env, as, l0, pml5e & 0x3fffffffff000ULL); } } } @@ -217,6 +213,7 @@ static void tlb_info_la57(Monitor *mon, CPUArchState *env) void hmp_info_tlb(Monitor *mon, const QDict *qdict) { CPUArchState *env; + AddressSpace *as; env = mon_get_cpu_env(mon); if (!env) { @@ -228,21 +225,22 @@ void hmp_info_tlb(Monitor *mon, const QDict *qdict) monitor_printf(mon, "PG disabled\n"); return; } + as = cpu_get_address_space(env_cpu(env), X86ASIdx_MEM); if (env->cr[4] & CR4_PAE_MASK) { #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { if (env->cr[4] & CR4_LA57_MASK) { - tlb_info_la57(mon, env); + tlb_info_la57(mon, env, as); } else { - tlb_info_la48(mon, env, 0, env->cr[3] & 0x3fffffffff000ULL); + tlb_info_la48(mon, env, as, 0, env->cr[3] & 0x3fffffffff000ULL); } } else #endif { - tlb_info_pae32(mon, env); + tlb_info_pae32(mon, env, as); } } else { - tlb_info_32(mon, env); + tlb_info_32(mon, env, as); } } @@ -271,8 +269,9 @@ static void mem_print(Monitor *mon, CPUArchState *env, } } -static void mem_info_32(Monitor *mon, CPUArchState *env) +static void mem_info_32(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; unsigned int l1, l2; int prot, last_prot; uint32_t pgd, pde, pte; @@ -282,8 +281,7 @@ static void mem_info_32(Monitor *mon, CPUArchState *env) last_prot = 0; start = -1; for(l1 = 0; l1 < 1024; l1++) { - cpu_physical_memory_read(pgd + l1 * 4, &pde, 4); 
- pde = le32_to_cpu(pde); + pde = address_space_ldl_le(as, pgd + l1 * 4, attrs, NULL); end = l1 << 22; if (pde & PG_PRESENT_MASK) { if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) { @@ -291,8 +289,8 @@ static void mem_info_32(Monitor *mon, CPUArchState *env) mem_print(mon, env, &start, &last_prot, end, prot); } else { for(l2 = 0; l2 < 1024; l2++) { - cpu_physical_memory_read((pde & ~0xfff) + l2 * 4, &pte, 4); - pte = le32_to_cpu(pte); + pte = address_space_ldl_le(as, (pde & ~0xfff) + l2 * 4, + attrs, NULL); end = (l1 << 22) + (l2 << 12); if (pte & PG_PRESENT_MASK) { prot = pte & pde & @@ -312,8 +310,9 @@ static void mem_info_32(Monitor *mon, CPUArchState *env) mem_print(mon, env, &start, &last_prot, (hwaddr)1 << 32, 0); } -static void mem_info_pae32(Monitor *mon, CPUArchState *env) +static void mem_info_pae32(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; unsigned int l1, l2, l3; int prot, last_prot; uint64_t pdpe, pde, pte; @@ -324,14 +323,12 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env) last_prot = 0; start = -1; for (l1 = 0; l1 < 4; l1++) { - cpu_physical_memory_read(pdp_addr + l1 * 8, &pdpe, 8); - pdpe = le64_to_cpu(pdpe); + pdpe = address_space_ldq_le(as, pdp_addr + l1 * 8, attrs, NULL); end = l1 << 30; if (pdpe & PG_PRESENT_MASK) { pd_addr = pdpe & 0x3fffffffff000ULL; for (l2 = 0; l2 < 512; l2++) { - cpu_physical_memory_read(pd_addr + l2 * 8, &pde, 8); - pde = le64_to_cpu(pde); + pde = address_space_ldq_le(as, pd_addr + l2 * 8, attrs, NULL); end = (l1 << 30) + (l2 << 21); if (pde & PG_PRESENT_MASK) { if (pde & PG_PSE_MASK) { @@ -341,8 +338,8 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env) } else { pt_addr = pde & 0x3fffffffff000ULL; for (l3 = 0; l3 < 512; l3++) { - cpu_physical_memory_read(pt_addr + l3 * 8, &pte, 8); - pte = le64_to_cpu(pte); + pte = address_space_ldq_le(as, pt_addr + l3 * 8, + attrs, NULL); end = (l1 << 30) + (l2 << 21) + (l3 << 12); if (pte & 
PG_PRESENT_MASK) { prot = pte & pde & (PG_USER_MASK | PG_RW_MASK | @@ -369,8 +366,9 @@ static void mem_info_pae32(Monitor *mon, CPUArchState *env) #ifdef TARGET_X86_64 -static void mem_info_la48(Monitor *mon, CPUArchState *env) +static void mem_info_la48(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int prot, last_prot; uint64_t l1, l2, l3, l4; uint64_t pml4e, pdpe, pde, pte; @@ -380,14 +378,12 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env) last_prot = 0; start = -1; for (l1 = 0; l1 < 512; l1++) { - cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8); - pml4e = le64_to_cpu(pml4e); + pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL); end = l1 << 39; if (pml4e & PG_PRESENT_MASK) { pdp_addr = pml4e & 0x3fffffffff000ULL; for (l2 = 0; l2 < 512; l2++) { - cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8); - pdpe = le64_to_cpu(pdpe); + pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL); end = (l1 << 39) + (l2 << 30); if (pdpe & PG_PRESENT_MASK) { if (pdpe & PG_PSE_MASK) { @@ -398,8 +394,8 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env) } else { pd_addr = pdpe & 0x3fffffffff000ULL; for (l3 = 0; l3 < 512; l3++) { - cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8); - pde = le64_to_cpu(pde); + pde = address_space_ldq_le(as, pd_addr + l3 * 8, + attrs, NULL); end = (l1 << 39) + (l2 << 30) + (l3 << 21); if (pde & PG_PRESENT_MASK) { if (pde & PG_PSE_MASK) { @@ -411,10 +407,10 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env) } else { pt_addr = pde & 0x3fffffffff000ULL; for (l4 = 0; l4 < 512; l4++) { - cpu_physical_memory_read(pt_addr - + l4 * 8, - &pte, 8); - pte = le64_to_cpu(pte); + pte = address_space_ldq_le(as, + pt_addr + + l4 * 8, + attrs, NULL); end = (l1 << 39) + (l2 << 30) + (l3 << 21) + (l4 << 12); if (pte & PG_PRESENT_MASK) { @@ -449,8 +445,9 @@ static void mem_info_la48(Monitor *mon, CPUArchState *env) mem_print(mon, env, &start, 
&last_prot, (hwaddr)1 << 48, 0); } -static void mem_info_la57(Monitor *mon, CPUArchState *env) +static void mem_info_la57(Monitor *mon, CPUArchState *env, AddressSpace *as) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int prot, last_prot; uint64_t l0, l1, l2, l3, l4; uint64_t pml5e, pml4e, pdpe, pde, pte; @@ -460,8 +457,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) last_prot = 0; start = -1; for (l0 = 0; l0 < 512; l0++) { - cpu_physical_memory_read(pml5_addr + l0 * 8, &pml5e, 8); - pml5e = le64_to_cpu(pml5e); + pml5e = address_space_ldq_le(as, pml5_addr + l0 * 8, attrs, NULL); end = l0 << 48; if (!(pml5e & PG_PRESENT_MASK)) { prot = 0; @@ -471,8 +467,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) pml4_addr = pml5e & 0x3fffffffff000ULL; for (l1 = 0; l1 < 512; l1++) { - cpu_physical_memory_read(pml4_addr + l1 * 8, &pml4e, 8); - pml4e = le64_to_cpu(pml4e); + pml4e = address_space_ldq_le(as, pml4_addr + l1 * 8, attrs, NULL); end = (l0 << 48) + (l1 << 39); if (!(pml4e & PG_PRESENT_MASK)) { prot = 0; @@ -482,8 +477,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) pdp_addr = pml4e & 0x3fffffffff000ULL; for (l2 = 0; l2 < 512; l2++) { - cpu_physical_memory_read(pdp_addr + l2 * 8, &pdpe, 8); - pdpe = le64_to_cpu(pdpe); + pdpe = address_space_ldq_le(as, pdp_addr + l2 * 8, attrs, NULL); end = (l0 << 48) + (l1 << 39) + (l2 << 30); if (pdpe & PG_PRESENT_MASK) { prot = 0; @@ -501,8 +495,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) pd_addr = pdpe & 0x3fffffffff000ULL; for (l3 = 0; l3 < 512; l3++) { - cpu_physical_memory_read(pd_addr + l3 * 8, &pde, 8); - pde = le64_to_cpu(pde); + pde = address_space_ldq_le(as, pd_addr + l3 * 8, + attrs, NULL); end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21); if (pde & PG_PRESENT_MASK) { prot = 0; @@ -520,8 +514,8 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) pt_addr = pde & 0x3fffffffff000ULL; for (l4 = 0; l4 < 512; l4++) { - 
cpu_physical_memory_read(pt_addr + l4 * 8, &pte, 8); - pte = le64_to_cpu(pte); + pte = address_space_ldq_le(as, pt_addr + l4 * 8, + attrs, NULL); end = (l0 << 48) + (l1 << 39) + (l2 << 30) + (l3 << 21) + (l4 << 12); if (pte & PG_PRESENT_MASK) { @@ -545,6 +539,7 @@ static void mem_info_la57(Monitor *mon, CPUArchState *env) void hmp_info_mem(Monitor *mon, const QDict *qdict) { CPUArchState *env; + AddressSpace *as; env = mon_get_cpu_env(mon); if (!env) { @@ -556,21 +551,22 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict) monitor_printf(mon, "PG disabled\n"); return; } + as = cpu_get_address_space(env_cpu(env), X86ASIdx_MEM); if (env->cr[4] & CR4_PAE_MASK) { #ifdef TARGET_X86_64 if (env->hflags & HF_LMA_MASK) { if (env->cr[4] & CR4_LA57_MASK) { - mem_info_la57(mon, env); + mem_info_la57(mon, env, as); } else { - mem_info_la48(mon, env); + mem_info_la48(mon, env, as); } } else #endif { - mem_info_pae32(mon, env); + mem_info_pae32(mon, env, as); } } else { - mem_info_32(mon, env); + mem_info_32(mon, env, as); } } diff --git a/target/i386/mshv/meson.build b/target/i386/mshv/meson.build new file mode 100644 index 0000000000000..647e5dafb77f7 --- /dev/null +++ b/target/i386/mshv/meson.build @@ -0,0 +1,8 @@ +i386_mshv_ss = ss.source_set() + +i386_mshv_ss.add(files( + 'mshv-cpu.c', + 'x86.c', +)) + +i386_system_ss.add_all(when: 'CONFIG_MSHV', if_true: i386_mshv_ss) diff --git a/target/i386/mshv/mshv-cpu.c b/target/i386/mshv/mshv-cpu.c new file mode 100644 index 0000000000000..1f7b9cb37ec23 --- /dev/null +++ b/target/i386/mshv/mshv-cpu.c @@ -0,0 +1,1763 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: Ziqiao Zhou + * Magnus Kulke + * Jinank Jain + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/error-report.h" +#include "qemu/memalign.h" +#include "qemu/typedefs.h" + +#include "system/mshv.h" +#include "system/mshv_int.h" +#include "system/address-spaces.h" +#include "linux/mshv.h" +#include "hw/hyperv/hvgdk.h" +#include "hw/hyperv/hvgdk_mini.h" +#include "hw/hyperv/hvhdk_mini.h" +#include "hw/i386/apic_internal.h" + +#include "cpu.h" +#include "emulate/x86_decode.h" +#include "emulate/x86_emu.h" +#include "emulate/x86_flags.h" + +#include "trace-accel_mshv.h" +#include "trace.h" + +#include + +#define MAX_REGISTER_COUNT (MAX_CONST(ARRAY_SIZE(STANDARD_REGISTER_NAMES), \ + MAX_CONST(ARRAY_SIZE(SPECIAL_REGISTER_NAMES), \ + ARRAY_SIZE(FPU_REGISTER_NAMES)))) + +static enum hv_register_name STANDARD_REGISTER_NAMES[18] = { + HV_X64_REGISTER_RAX, + HV_X64_REGISTER_RBX, + HV_X64_REGISTER_RCX, + HV_X64_REGISTER_RDX, + HV_X64_REGISTER_RSI, + HV_X64_REGISTER_RDI, + HV_X64_REGISTER_RSP, + HV_X64_REGISTER_RBP, + HV_X64_REGISTER_R8, + HV_X64_REGISTER_R9, + HV_X64_REGISTER_R10, + HV_X64_REGISTER_R11, + HV_X64_REGISTER_R12, + HV_X64_REGISTER_R13, + HV_X64_REGISTER_R14, + HV_X64_REGISTER_R15, + HV_X64_REGISTER_RIP, + HV_X64_REGISTER_RFLAGS, +}; + +static enum hv_register_name SPECIAL_REGISTER_NAMES[17] = { + HV_X64_REGISTER_CS, + HV_X64_REGISTER_DS, + HV_X64_REGISTER_ES, + HV_X64_REGISTER_FS, + HV_X64_REGISTER_GS, + HV_X64_REGISTER_SS, + HV_X64_REGISTER_TR, + HV_X64_REGISTER_LDTR, + HV_X64_REGISTER_GDTR, + HV_X64_REGISTER_IDTR, + HV_X64_REGISTER_CR0, + HV_X64_REGISTER_CR2, + HV_X64_REGISTER_CR3, + HV_X64_REGISTER_CR4, + HV_X64_REGISTER_CR8, + HV_X64_REGISTER_EFER, + HV_X64_REGISTER_APIC_BASE, +}; + +static enum hv_register_name FPU_REGISTER_NAMES[26] = { + HV_X64_REGISTER_XMM0, + HV_X64_REGISTER_XMM1, + HV_X64_REGISTER_XMM2, + HV_X64_REGISTER_XMM3, + HV_X64_REGISTER_XMM4, + HV_X64_REGISTER_XMM5, + 
HV_X64_REGISTER_XMM6, + HV_X64_REGISTER_XMM7, + HV_X64_REGISTER_XMM8, + HV_X64_REGISTER_XMM9, + HV_X64_REGISTER_XMM10, + HV_X64_REGISTER_XMM11, + HV_X64_REGISTER_XMM12, + HV_X64_REGISTER_XMM13, + HV_X64_REGISTER_XMM14, + HV_X64_REGISTER_XMM15, + HV_X64_REGISTER_FP_MMX0, + HV_X64_REGISTER_FP_MMX1, + HV_X64_REGISTER_FP_MMX2, + HV_X64_REGISTER_FP_MMX3, + HV_X64_REGISTER_FP_MMX4, + HV_X64_REGISTER_FP_MMX5, + HV_X64_REGISTER_FP_MMX6, + HV_X64_REGISTER_FP_MMX7, + HV_X64_REGISTER_FP_CONTROL_STATUS, + HV_X64_REGISTER_XMM_CONTROL_STATUS, +}; + +static int translate_gva(const CPUState *cpu, uint64_t gva, uint64_t *gpa, + uint64_t flags) +{ + int ret; + int cpu_fd = mshv_vcpufd(cpu); + int vp_index = cpu->cpu_index; + + hv_input_translate_virtual_address in = { 0 }; + hv_output_translate_virtual_address out = { 0 }; + struct mshv_root_hvcall args = {0}; + uint64_t gva_page = gva >> HV_HYP_PAGE_SHIFT; + + in.vp_index = vp_index; + in.control_flags = flags; + in.gva_page = gva_page; + + /* create the hvcall envelope */ + args.code = HVCALL_TRANSLATE_VIRTUAL_ADDRESS; + args.in_sz = sizeof(in); + args.in_ptr = (uint64_t) ∈ + args.out_sz = sizeof(out); + args.out_ptr = (uint64_t) &out; + + /* perform the call */ + ret = mshv_hvcall(cpu_fd, &args); + if (ret < 0) { + error_report("Failed to invoke gva->gpa translation"); + return -errno; + } + + if (out.translation_result.result_code != HV_TRANSLATE_GVA_SUCCESS) { + error_report("Failed to translate gva (" TARGET_FMT_lx ") to gpa", gva); + return -1; + } + + *gpa = ((out.gpa_page << HV_HYP_PAGE_SHIFT) + | (gva & ~(uint64_t)HV_HYP_PAGE_MASK)); + + return 0; +} + +int mshv_set_generic_regs(const CPUState *cpu, const hv_register_assoc *assocs, + size_t n_regs) +{ + int cpu_fd = mshv_vcpufd(cpu); + int vp_index = cpu->cpu_index; + size_t in_sz, assocs_sz; + hv_input_set_vp_registers *in = cpu->accel->hvcall_args.input_page; + struct mshv_root_hvcall args = {0}; + int ret; + + /* find out the size of the struct w/ a flexible array at 
the tail */ + assocs_sz = n_regs * sizeof(hv_register_assoc); + in_sz = sizeof(hv_input_set_vp_registers) + assocs_sz; + + /* fill the input struct */ + memset(in, 0, sizeof(hv_input_set_vp_registers)); + in->vp_index = vp_index; + memcpy(in->elements, assocs, assocs_sz); + + /* create the hvcall envelope */ + args.code = HVCALL_SET_VP_REGISTERS; + args.in_sz = in_sz; + args.in_ptr = (uint64_t) in; + args.reps = (uint16_t) n_regs; + + /* perform the call */ + ret = mshv_hvcall(cpu_fd, &args); + if (ret < 0) { + error_report("Failed to set registers"); + return -1; + } + + /* assert we set all registers */ + if (args.reps != n_regs) { + error_report("Failed to set registers: expected %zu elements" + ", got %u", n_regs, args.reps); + return -1; + } + + return 0; +} + +static int get_generic_regs(CPUState *cpu, hv_register_assoc *assocs, + size_t n_regs) +{ + int cpu_fd = mshv_vcpufd(cpu); + int vp_index = cpu->cpu_index; + hv_input_get_vp_registers *in = cpu->accel->hvcall_args.input_page; + hv_register_value *values = cpu->accel->hvcall_args.output_page; + size_t in_sz, names_sz, values_sz; + int i, ret; + struct mshv_root_hvcall args = {0}; + + /* find out the size of the struct w/ a flexible array at the tail */ + names_sz = n_regs * sizeof(hv_register_name); + in_sz = sizeof(hv_input_get_vp_registers) + names_sz; + + /* fill the input struct */ + memset(in, 0, sizeof(hv_input_get_vp_registers)); + in->vp_index = vp_index; + for (i = 0; i < n_regs; i++) { + in->names[i] = assocs[i].name; + } + + /* determine size of value output buffer */ + values_sz = n_regs * sizeof(union hv_register_value); + + /* create the hvcall envelope */ + args.code = HVCALL_GET_VP_REGISTERS; + args.in_sz = in_sz; + args.in_ptr = (uint64_t) in; + args.out_sz = values_sz; + args.out_ptr = (uint64_t) values; + args.reps = (uint16_t) n_regs; + + /* perform the call */ + ret = mshv_hvcall(cpu_fd, &args); + if (ret < 0) { + error_report("Failed to retrieve registers"); + return -1; + } + + /* 
assert we got all registers */ + if (args.reps != n_regs) { + error_report("Failed to retrieve registers: expected %zu elements" + ", got %u", n_regs, args.reps); + return -1; + } + + /* copy values into assoc */ + for (i = 0; i < n_regs; i++) { + assocs[i].value = values[i]; + } + + return 0; +} + +static int set_standard_regs(const CPUState *cpu) +{ + X86CPU *x86cpu = X86_CPU(cpu); + CPUX86State *env = &x86cpu->env; + hv_register_assoc assocs[ARRAY_SIZE(STANDARD_REGISTER_NAMES)]; + int ret; + size_t n_regs = ARRAY_SIZE(STANDARD_REGISTER_NAMES); + + /* set names */ + for (size_t i = 0; i < ARRAY_SIZE(STANDARD_REGISTER_NAMES); i++) { + assocs[i].name = STANDARD_REGISTER_NAMES[i]; + } + assocs[0].value.reg64 = env->regs[R_EAX]; + assocs[1].value.reg64 = env->regs[R_EBX]; + assocs[2].value.reg64 = env->regs[R_ECX]; + assocs[3].value.reg64 = env->regs[R_EDX]; + assocs[4].value.reg64 = env->regs[R_ESI]; + assocs[5].value.reg64 = env->regs[R_EDI]; + assocs[6].value.reg64 = env->regs[R_ESP]; + assocs[7].value.reg64 = env->regs[R_EBP]; + assocs[8].value.reg64 = env->regs[R_R8]; + assocs[9].value.reg64 = env->regs[R_R9]; + assocs[10].value.reg64 = env->regs[R_R10]; + assocs[11].value.reg64 = env->regs[R_R11]; + assocs[12].value.reg64 = env->regs[R_R12]; + assocs[13].value.reg64 = env->regs[R_R13]; + assocs[14].value.reg64 = env->regs[R_R14]; + assocs[15].value.reg64 = env->regs[R_R15]; + assocs[16].value.reg64 = env->eip; + lflags_to_rflags(env); + assocs[17].value.reg64 = env->eflags; + + ret = mshv_set_generic_regs(cpu, assocs, n_regs); + if (ret < 0) { + error_report("failed to set standard registers"); + return -errno; + } + return 0; +} + +int mshv_store_regs(CPUState *cpu) +{ + int ret; + + ret = set_standard_regs(cpu); + if (ret < 0) { + error_report("Failed to store standard registers"); + return -1; + } + + return 0; +} + +static void populate_standard_regs(const hv_register_assoc *assocs, + CPUX86State *env) +{ + env->regs[R_EAX] = assocs[0].value.reg64; + 
env->regs[R_EBX] = assocs[1].value.reg64; + env->regs[R_ECX] = assocs[2].value.reg64; + env->regs[R_EDX] = assocs[3].value.reg64; + env->regs[R_ESI] = assocs[4].value.reg64; + env->regs[R_EDI] = assocs[5].value.reg64; + env->regs[R_ESP] = assocs[6].value.reg64; + env->regs[R_EBP] = assocs[7].value.reg64; + env->regs[R_R8] = assocs[8].value.reg64; + env->regs[R_R9] = assocs[9].value.reg64; + env->regs[R_R10] = assocs[10].value.reg64; + env->regs[R_R11] = assocs[11].value.reg64; + env->regs[R_R12] = assocs[12].value.reg64; + env->regs[R_R13] = assocs[13].value.reg64; + env->regs[R_R14] = assocs[14].value.reg64; + env->regs[R_R15] = assocs[15].value.reg64; + + env->eip = assocs[16].value.reg64; + env->eflags = assocs[17].value.reg64; + rflags_to_lflags(env); +} + +int mshv_get_standard_regs(CPUState *cpu) +{ + struct hv_register_assoc assocs[ARRAY_SIZE(STANDARD_REGISTER_NAMES)]; + int ret; + X86CPU *x86cpu = X86_CPU(cpu); + CPUX86State *env = &x86cpu->env; + size_t n_regs = ARRAY_SIZE(STANDARD_REGISTER_NAMES); + + for (size_t i = 0; i < n_regs; i++) { + assocs[i].name = STANDARD_REGISTER_NAMES[i]; + } + ret = get_generic_regs(cpu, assocs, n_regs); + if (ret < 0) { + error_report("failed to get standard registers"); + return -1; + } + + populate_standard_regs(assocs, env); + return 0; +} + +static inline void populate_segment_reg(const hv_x64_segment_register *hv_seg, + SegmentCache *seg) +{ + memset(seg, 0, sizeof(SegmentCache)); + + seg->base = hv_seg->base; + seg->limit = hv_seg->limit; + seg->selector = hv_seg->selector; + + seg->flags = (hv_seg->segment_type << DESC_TYPE_SHIFT) + | (hv_seg->present * DESC_P_MASK) + | (hv_seg->descriptor_privilege_level << DESC_DPL_SHIFT) + | (hv_seg->_default << DESC_B_SHIFT) + | (hv_seg->non_system_segment * DESC_S_MASK) + | (hv_seg->_long << DESC_L_SHIFT) + | (hv_seg->granularity * DESC_G_MASK) + | (hv_seg->available * DESC_AVL_MASK); + +} + +static inline void populate_table_reg(const hv_x64_table_register *hv_seg, + 
SegmentCache *tbl) +{ + memset(tbl, 0, sizeof(SegmentCache)); + + tbl->base = hv_seg->base; + tbl->limit = hv_seg->limit; +} + +static void populate_special_regs(const hv_register_assoc *assocs, + X86CPU *x86cpu) +{ + CPUX86State *env = &x86cpu->env; + + populate_segment_reg(&assocs[0].value.segment, &env->segs[R_CS]); + populate_segment_reg(&assocs[1].value.segment, &env->segs[R_DS]); + populate_segment_reg(&assocs[2].value.segment, &env->segs[R_ES]); + populate_segment_reg(&assocs[3].value.segment, &env->segs[R_FS]); + populate_segment_reg(&assocs[4].value.segment, &env->segs[R_GS]); + populate_segment_reg(&assocs[5].value.segment, &env->segs[R_SS]); + + populate_segment_reg(&assocs[6].value.segment, &env->tr); + populate_segment_reg(&assocs[7].value.segment, &env->ldt); + + populate_table_reg(&assocs[8].value.table, &env->gdt); + populate_table_reg(&assocs[9].value.table, &env->idt); + + env->cr[0] = assocs[10].value.reg64; + env->cr[2] = assocs[11].value.reg64; + env->cr[3] = assocs[12].value.reg64; + env->cr[4] = assocs[13].value.reg64; + + cpu_set_apic_tpr(x86cpu->apic_state, assocs[14].value.reg64); + env->efer = assocs[15].value.reg64; + cpu_set_apic_base(x86cpu->apic_state, assocs[16].value.reg64); +} + + +int mshv_get_special_regs(CPUState *cpu) +{ + struct hv_register_assoc assocs[ARRAY_SIZE(SPECIAL_REGISTER_NAMES)]; + int ret; + X86CPU *x86cpu = X86_CPU(cpu); + size_t n_regs = ARRAY_SIZE(SPECIAL_REGISTER_NAMES); + + for (size_t i = 0; i < n_regs; i++) { + assocs[i].name = SPECIAL_REGISTER_NAMES[i]; + } + ret = get_generic_regs(cpu, assocs, n_regs); + if (ret < 0) { + error_report("failed to get special registers"); + return -errno; + } + + populate_special_regs(assocs, x86cpu); + return 0; +} + +int mshv_load_regs(CPUState *cpu) +{ + int ret; + + ret = mshv_get_standard_regs(cpu); + if (ret < 0) { + error_report("Failed to load standard registers"); + return -1; + } + + ret = mshv_get_special_regs(cpu); + if (ret < 0) { + error_report("Failed to load 
special registers"); + return -1; + } + + return 0; +} + +static void add_cpuid_entry(GList *cpuid_entries, + uint32_t function, uint32_t index, + uint32_t eax, uint32_t ebx, + uint32_t ecx, uint32_t edx) +{ + struct hv_cpuid_entry *entry; + + entry = g_malloc0(sizeof(struct hv_cpuid_entry)); + entry->function = function; + entry->index = index; + entry->eax = eax; + entry->ebx = ebx; + entry->ecx = ecx; + entry->edx = edx; + + cpuid_entries = g_list_append(cpuid_entries, entry); +} + +static void collect_cpuid_entries(const CPUState *cpu, GList *cpuid_entries) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + uint32_t eax, ebx, ecx, edx; + uint32_t leaf, subleaf; + size_t max_leaf = 0x1F; + size_t max_subleaf = 0x20; + + uint32_t leaves_with_subleaves[] = {0x4, 0x7, 0xD, 0xF, 0x10}; + int n_subleaf_leaves = ARRAY_SIZE(leaves_with_subleaves); + + /* Regular leaves without subleaves */ + for (leaf = 0; leaf <= max_leaf; leaf++) { + bool has_subleaves = false; + for (int i = 0; i < n_subleaf_leaves; i++) { + if (leaf == leaves_with_subleaves[i]) { + has_subleaves = true; + break; + } + } + + if (!has_subleaves) { + cpu_x86_cpuid(env, leaf, 0, &eax, &ebx, &ecx, &edx); + if (eax == 0 && ebx == 0 && ecx == 0 && edx == 0) { + /* all zeroes indicates no more leaves */ + continue; + } + + add_cpuid_entry(cpuid_entries, leaf, 0, eax, ebx, ecx, edx); + continue; + } + + subleaf = 0; + while (subleaf < max_subleaf) { + cpu_x86_cpuid(env, leaf, subleaf, &eax, &ebx, &ecx, &edx); + + if (eax == 0 && ebx == 0 && ecx == 0 && edx == 0) { + /* all zeroes indicates no more leaves */ + break; + } + add_cpuid_entry(cpuid_entries, leaf, 0, eax, ebx, ecx, edx); + subleaf++; + } + } +} + +static int register_intercept_result_cpuid_entry(const CPUState *cpu, + uint8_t subleaf_specific, + uint8_t always_override, + struct hv_cpuid_entry *entry) +{ + int ret; + int vp_index = cpu->cpu_index; + int cpu_fd = mshv_vcpufd(cpu); + + struct 
hv_register_x64_cpuid_result_parameters cpuid_params = { + .input.eax = entry->function, + .input.ecx = entry->index, + .input.subleaf_specific = subleaf_specific, + .input.always_override = always_override, + .input.padding = 0, + /* + * With regard to masks - these are to specify bits to be overwritten + * The current CpuidEntry structure wouldn't allow to carry the masks + * in addition to the actual register values. For this reason, the + * masks are set to the exact values of the corresponding register bits + * to be registered for an overwrite. To view resulting values the + * hypervisor would return, HvCallGetVpCpuidValues hypercall can be + * used. + */ + .result.eax = entry->eax, + .result.eax_mask = entry->eax, + .result.ebx = entry->ebx, + .result.ebx_mask = entry->ebx, + .result.ecx = entry->ecx, + .result.ecx_mask = entry->ecx, + .result.edx = entry->edx, + .result.edx_mask = entry->edx, + }; + union hv_register_intercept_result_parameters parameters = { + .cpuid = cpuid_params, + }; + + hv_input_register_intercept_result in = {0}; + in.vp_index = vp_index; + in.intercept_type = HV_INTERCEPT_TYPE_X64_CPUID; + in.parameters = parameters; + + struct mshv_root_hvcall args = {0}; + args.code = HVCALL_REGISTER_INTERCEPT_RESULT; + args.in_sz = sizeof(in); + args.in_ptr = (uint64_t)∈ + + ret = mshv_hvcall(cpu_fd, &args); + if (ret < 0) { + error_report("failed to register intercept result for cpuid"); + return -1; + } + + return 0; +} + +static int register_intercept_result_cpuid(const CPUState *cpu, + struct hv_cpuid *cpuid) +{ + int ret = 0, entry_ret; + struct hv_cpuid_entry *entry; + uint8_t subleaf_specific, always_override; + + for (size_t i = 0; i < cpuid->nent; i++) { + entry = &cpuid->entries[i]; + + /* set defaults */ + subleaf_specific = 0; + always_override = 1; + + /* Intel */ + /* 0xb - Extended Topology Enumeration Leaf */ + /* 0x1f - V2 Extended Topology Enumeration Leaf */ + /* AMD */ + /* 0x8000_001e - Processor Topology Information */ + /* 
0x8000_0026 - Extended CPU Topology */ + if (entry->function == 0xb + || entry->function == 0x1f + || entry->function == 0x8000001e + || entry->function == 0x80000026) { + subleaf_specific = 1; + always_override = 1; + } else if (entry->function == 0x00000001 + || entry->function == 0x80000000 + || entry->function == 0x80000001 + || entry->function == 0x80000008) { + subleaf_specific = 0; + always_override = 1; + } + + entry_ret = register_intercept_result_cpuid_entry(cpu, subleaf_specific, + always_override, + entry); + if ((entry_ret < 0) && (ret == 0)) { + ret = entry_ret; + } + } + + return ret; +} + +static int set_cpuid2(const CPUState *cpu) +{ + int ret; + size_t n_entries, cpuid_size; + struct hv_cpuid *cpuid; + struct hv_cpuid_entry *entry; + GList *entries = NULL; + + collect_cpuid_entries(cpu, entries); + n_entries = g_list_length(entries); + + cpuid_size = sizeof(struct hv_cpuid) + + n_entries * sizeof(struct hv_cpuid_entry); + + cpuid = g_malloc0(cpuid_size); + cpuid->nent = n_entries; + cpuid->padding = 0; + + for (size_t i = 0; i < n_entries; i++) { + entry = g_list_nth_data(entries, i); + cpuid->entries[i] = *entry; + g_free(entry); + } + g_list_free(entries); + + ret = register_intercept_result_cpuid(cpu, cpuid); + g_free(cpuid); + if (ret < 0) { + return ret; + } + + return 0; +} + +static inline void populate_hv_segment_reg(SegmentCache *seg, + hv_x64_segment_register *hv_reg) +{ + uint32_t flags = seg->flags; + + hv_reg->base = seg->base; + hv_reg->limit = seg->limit; + hv_reg->selector = seg->selector; + hv_reg->segment_type = (flags >> DESC_TYPE_SHIFT) & 0xF; + hv_reg->non_system_segment = (flags & DESC_S_MASK) != 0; + hv_reg->descriptor_privilege_level = (flags >> DESC_DPL_SHIFT) & 0x3; + hv_reg->present = (flags & DESC_P_MASK) != 0; + hv_reg->reserved = 0; + hv_reg->available = (flags & DESC_AVL_MASK) != 0; + hv_reg->_long = (flags >> DESC_L_SHIFT) & 0x1; + hv_reg->_default = (flags >> DESC_B_SHIFT) & 0x1; + hv_reg->granularity = (flags & 
DESC_G_MASK) != 0; +} + +static inline void populate_hv_table_reg(const struct SegmentCache *seg, + hv_x64_table_register *hv_reg) +{ + memset(hv_reg, 0, sizeof(*hv_reg)); + + hv_reg->base = seg->base; + hv_reg->limit = seg->limit; +} + +static int set_special_regs(const CPUState *cpu) +{ + X86CPU *x86cpu = X86_CPU(cpu); + CPUX86State *env = &x86cpu->env; + struct hv_register_assoc assocs[ARRAY_SIZE(SPECIAL_REGISTER_NAMES)]; + size_t n_regs = ARRAY_SIZE(SPECIAL_REGISTER_NAMES); + int ret; + + /* set names */ + for (size_t i = 0; i < n_regs; i++) { + assocs[i].name = SPECIAL_REGISTER_NAMES[i]; + } + populate_hv_segment_reg(&env->segs[R_CS], &assocs[0].value.segment); + populate_hv_segment_reg(&env->segs[R_DS], &assocs[1].value.segment); + populate_hv_segment_reg(&env->segs[R_ES], &assocs[2].value.segment); + populate_hv_segment_reg(&env->segs[R_FS], &assocs[3].value.segment); + populate_hv_segment_reg(&env->segs[R_GS], &assocs[4].value.segment); + populate_hv_segment_reg(&env->segs[R_SS], &assocs[5].value.segment); + populate_hv_segment_reg(&env->tr, &assocs[6].value.segment); + populate_hv_segment_reg(&env->ldt, &assocs[7].value.segment); + + populate_hv_table_reg(&env->gdt, &assocs[8].value.table); + populate_hv_table_reg(&env->idt, &assocs[9].value.table); + + assocs[10].value.reg64 = env->cr[0]; + assocs[11].value.reg64 = env->cr[2]; + assocs[12].value.reg64 = env->cr[3]; + assocs[13].value.reg64 = env->cr[4]; + assocs[14].value.reg64 = cpu_get_apic_tpr(x86cpu->apic_state); + assocs[15].value.reg64 = env->efer; + assocs[16].value.reg64 = cpu_get_apic_base(x86cpu->apic_state); + + ret = mshv_set_generic_regs(cpu, assocs, n_regs); + if (ret < 0) { + error_report("failed to set special registers"); + return -1; + } + + return 0; +} + +static int set_fpu(const CPUState *cpu, const struct MshvFPU *regs) +{ + struct hv_register_assoc assocs[ARRAY_SIZE(FPU_REGISTER_NAMES)]; + union hv_register_value *value; + size_t fp_i; + union hv_x64_fp_control_status_register 
*ctrl_status; + union hv_x64_xmm_control_status_register *xmm_ctrl_status; + int ret; + size_t n_regs = ARRAY_SIZE(FPU_REGISTER_NAMES); + + /* first 16 registers are xmm0-xmm15 */ + for (size_t i = 0; i < 16; i++) { + assocs[i].name = FPU_REGISTER_NAMES[i]; + value = &assocs[i].value; + memcpy(&value->reg128, ®s->xmm[i], 16); + } + + /* next 8 registers are fp_mmx0-fp_mmx7 */ + for (size_t i = 16; i < 24; i++) { + assocs[i].name = FPU_REGISTER_NAMES[i]; + fp_i = (i - 16); + value = &assocs[i].value; + memcpy(&value->reg128, ®s->fpr[fp_i], 16); + } + + /* last two registers are fp_control_status and xmm_control_status */ + assocs[24].name = FPU_REGISTER_NAMES[24]; + value = &assocs[24].value; + ctrl_status = &value->fp_control_status; + ctrl_status->fp_control = regs->fcw; + ctrl_status->fp_status = regs->fsw; + ctrl_status->fp_tag = regs->ftwx; + ctrl_status->reserved = 0; + ctrl_status->last_fp_op = regs->last_opcode; + ctrl_status->last_fp_rip = regs->last_ip; + + assocs[25].name = FPU_REGISTER_NAMES[25]; + value = &assocs[25].value; + xmm_ctrl_status = &value->xmm_control_status; + xmm_ctrl_status->xmm_status_control = regs->mxcsr; + xmm_ctrl_status->xmm_status_control_mask = 0; + xmm_ctrl_status->last_fp_rdp = regs->last_dp; + + ret = mshv_set_generic_regs(cpu, assocs, n_regs); + if (ret < 0) { + error_report("failed to set fpu registers"); + return -1; + } + + return 0; +} + +static int set_xc_reg(const CPUState *cpu, uint64_t xcr0) +{ + int ret; + struct hv_register_assoc assoc = { + .name = HV_X64_REGISTER_XFEM, + .value.reg64 = xcr0, + }; + + ret = mshv_set_generic_regs(cpu, &assoc, 1); + if (ret < 0) { + error_report("failed to set xcr0"); + return -errno; + } + return 0; +} + +static int set_cpu_state(const CPUState *cpu, const MshvFPU *fpu_regs, + uint64_t xcr0) +{ + int ret; + + ret = set_standard_regs(cpu); + if (ret < 0) { + return ret; + } + ret = set_special_regs(cpu); + if (ret < 0) { + return ret; + } + ret = set_fpu(cpu, fpu_regs); + if (ret < 0) 
{ + return ret; + } + ret = set_xc_reg(cpu, xcr0); + if (ret < 0) { + return ret; + } + return 0; +} + +static int get_vp_state(int cpu_fd, struct mshv_get_set_vp_state *state) +{ + int ret; + + ret = ioctl(cpu_fd, MSHV_GET_VP_STATE, state); + if (ret < 0) { + error_report("failed to get partition state: %s", strerror(errno)); + return -1; + } + + return 0; +} + +static int get_lapic(int cpu_fd, + struct hv_local_interrupt_controller_state *state) +{ + int ret; + size_t size = 4096; + /* buffer aligned to 4k, as *state requires that */ + void *buffer = qemu_memalign(size, size); + struct mshv_get_set_vp_state mshv_state = { 0 }; + + mshv_state.buf_ptr = (uint64_t) buffer; + mshv_state.buf_sz = size; + mshv_state.type = MSHV_VP_STATE_LAPIC; + + ret = get_vp_state(cpu_fd, &mshv_state); + if (ret == 0) { + memcpy(state, buffer, sizeof(*state)); + } + qemu_vfree(buffer); + if (ret < 0) { + error_report("failed to get lapic"); + return -1; + } + + return 0; +} + +static uint32_t set_apic_delivery_mode(uint32_t reg, uint32_t mode) +{ + return ((reg) & ~0x700) | ((mode) << 8); +} + +static int set_vp_state(int cpu_fd, const struct mshv_get_set_vp_state *state) +{ + int ret; + + ret = ioctl(cpu_fd, MSHV_SET_VP_STATE, state); + if (ret < 0) { + error_report("failed to set partition state: %s", strerror(errno)); + return -1; + } + + return 0; +} + +static int set_lapic(int cpu_fd, + const struct hv_local_interrupt_controller_state *state) +{ + int ret; + size_t size = 4096; + /* buffer aligned to 4k, as *state requires that */ + void *buffer = qemu_memalign(size, size); + struct mshv_get_set_vp_state mshv_state = { 0 }; + + if (!state) { + error_report("lapic state is NULL"); + return -1; + } + memcpy(buffer, state, sizeof(*state)); + + mshv_state.buf_ptr = (uint64_t) buffer; + mshv_state.buf_sz = size; + mshv_state.type = MSHV_VP_STATE_LAPIC; + + ret = set_vp_state(cpu_fd, &mshv_state); + qemu_vfree(buffer); + if (ret < 0) { + error_report("failed to set lapic: %s", 
strerror(errno)); + return -1; + } + + return 0; +} + +static int set_lint(int cpu_fd) +{ + int ret; + uint32_t *lvt_lint0, *lvt_lint1; + + struct hv_local_interrupt_controller_state lapic_state = { 0 }; + ret = get_lapic(cpu_fd, &lapic_state); + if (ret < 0) { + return ret; + } + + lvt_lint0 = &lapic_state.apic_lvt_lint0; + *lvt_lint0 = set_apic_delivery_mode(*lvt_lint0, APIC_DM_EXTINT); + + lvt_lint1 = &lapic_state.apic_lvt_lint1; + *lvt_lint1 = set_apic_delivery_mode(*lvt_lint1, APIC_DM_NMI); + + /* TODO: should we skip setting lapic if the values are the same? */ + + return set_lapic(cpu_fd, &lapic_state); +} + +static int setup_msrs(const CPUState *cpu) +{ + int ret; + uint64_t default_type = MSR_MTRR_ENABLE | MSR_MTRR_MEM_TYPE_WB; + + /* boot msr entries */ + MshvMsrEntry msrs[9] = { + { .index = IA32_MSR_SYSENTER_CS, .data = 0x0, }, + { .index = IA32_MSR_SYSENTER_ESP, .data = 0x0, }, + { .index = IA32_MSR_SYSENTER_EIP, .data = 0x0, }, + { .index = IA32_MSR_STAR, .data = 0x0, }, + { .index = IA32_MSR_CSTAR, .data = 0x0, }, + { .index = IA32_MSR_LSTAR, .data = 0x0, }, + { .index = IA32_MSR_KERNEL_GS_BASE, .data = 0x0, }, + { .index = IA32_MSR_SFMASK, .data = 0x0, }, + { .index = IA32_MSR_MTRR_DEF_TYPE, .data = default_type, }, + }; + + ret = mshv_configure_msr(cpu, msrs, 9); + if (ret < 0) { + error_report("failed to setup msrs"); + return -1; + } + + return 0; +} + +/* + * TODO: populate topology info: + * + * X86CPU *x86cpu = X86_CPU(cpu); + * CPUX86State *env = &x86cpu->env; + * X86CPUTopoInfo *topo_info = &env->topo_info; + */ +int mshv_configure_vcpu(const CPUState *cpu, const struct MshvFPU *fpu, + uint64_t xcr0) +{ + int ret; + int cpu_fd = mshv_vcpufd(cpu); + + ret = set_cpuid2(cpu); + if (ret < 0) { + error_report("failed to set cpuid"); + return -1; + } + + ret = setup_msrs(cpu); + if (ret < 0) { + error_report("failed to setup msrs"); + return -1; + } + + ret = set_cpu_state(cpu, fpu, xcr0); + if (ret < 0) { + error_report("failed to set cpu 
state"); + return -1; + } + + ret = set_lint(cpu_fd); + if (ret < 0) { + error_report("failed to set lpic int"); + return -1; + } + + return 0; +} + +static int put_regs(const CPUState *cpu) +{ + X86CPU *x86cpu = X86_CPU(cpu); + CPUX86State *env = &x86cpu->env; + MshvFPU fpu = {0}; + int ret; + + memset(&fpu, 0, sizeof(fpu)); + + ret = mshv_configure_vcpu(cpu, &fpu, env->xcr0); + if (ret < 0) { + error_report("failed to configure vcpu"); + return ret; + } + + return 0; +} + +struct MsrPair { + uint32_t index; + uint64_t value; +}; + +static int put_msrs(const CPUState *cpu) +{ + int ret = 0; + X86CPU *x86cpu = X86_CPU(cpu); + CPUX86State *env = &x86cpu->env; + MshvMsrEntries *msrs = g_malloc0(sizeof(MshvMsrEntries)); + + struct MsrPair pairs[] = { + { MSR_IA32_SYSENTER_CS, env->sysenter_cs }, + { MSR_IA32_SYSENTER_ESP, env->sysenter_esp }, + { MSR_IA32_SYSENTER_EIP, env->sysenter_eip }, + { MSR_EFER, env->efer }, + { MSR_PAT, env->pat }, + { MSR_STAR, env->star }, + { MSR_CSTAR, env->cstar }, + { MSR_LSTAR, env->lstar }, + { MSR_KERNELGSBASE, env->kernelgsbase }, + { MSR_FMASK, env->fmask }, + { MSR_MTRRdefType, env->mtrr_deftype }, + { MSR_VM_HSAVE_PA, env->vm_hsave }, + { MSR_SMI_COUNT, env->msr_smi_count }, + { MSR_IA32_PKRS, env->pkrs }, + { MSR_IA32_BNDCFGS, env->msr_bndcfgs }, + { MSR_IA32_XSS, env->xss }, + { MSR_IA32_UMWAIT_CONTROL, env->umwait }, + { MSR_IA32_TSX_CTRL, env->tsx_ctrl }, + { MSR_AMD64_TSC_RATIO, env->amd_tsc_scale_msr }, + { MSR_TSC_AUX, env->tsc_aux }, + { MSR_TSC_ADJUST, env->tsc_adjust }, + { MSR_IA32_SMBASE, env->smbase }, + { MSR_IA32_SPEC_CTRL, env->spec_ctrl }, + { MSR_VIRT_SSBD, env->virt_ssbd }, + }; + + if (ARRAY_SIZE(pairs) > MSHV_MSR_ENTRIES_COUNT) { + error_report("MSR entries exceed maximum size"); + g_free(msrs); + return -1; + } + + for (size_t i = 0; i < ARRAY_SIZE(pairs); i++) { + MshvMsrEntry *entry = &msrs->entries[i]; + entry->index = pairs[i].index; + entry->reserved = 0; + entry->data = pairs[i].value; + msrs->nmsrs++; 
+ } + + ret = mshv_configure_msr(cpu, &msrs->entries[0], msrs->nmsrs); + g_free(msrs); + return ret; +} + + +int mshv_arch_put_registers(const CPUState *cpu) +{ + int ret; + + ret = put_regs(cpu); + if (ret < 0) { + error_report("Failed to put registers"); + return -1; + } + + ret = put_msrs(cpu); + if (ret < 0) { + error_report("Failed to put msrs"); + return -1; + } + + return 0; +} + +void mshv_arch_amend_proc_features( + union hv_partition_synthetic_processor_features *features) +{ + features->access_guest_idle_reg = 1; +} + +static int set_memory_info(const struct hyperv_message *msg, + struct hv_x64_memory_intercept_message *info) +{ + if (msg->header.message_type != HVMSG_GPA_INTERCEPT + && msg->header.message_type != HVMSG_UNMAPPED_GPA + && msg->header.message_type != HVMSG_UNACCEPTED_GPA) { + error_report("invalid message type"); + return -1; + } + memcpy(info, msg->payload, sizeof(*info)); + + return 0; +} + +static int emulate_instruction(CPUState *cpu, + const uint8_t *insn_bytes, size_t insn_len, + uint64_t gva, uint64_t gpa) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + struct x86_decode decode = { 0 }; + int ret; + x86_insn_stream stream = { .bytes = insn_bytes, .len = insn_len }; + + ret = mshv_load_regs(cpu); + if (ret < 0) { + error_report("failed to load registers"); + return -1; + } + + decode_instruction_stream(env, &decode, &stream); + exec_instruction(env, &decode); + + ret = mshv_store_regs(cpu); + if (ret < 0) { + error_report("failed to store registers"); + return -1; + } + + return 0; +} + +static int handle_mmio(CPUState *cpu, const struct hyperv_message *msg, + MshvVmExit *exit_reason) +{ + struct hv_x64_memory_intercept_message info = { 0 }; + size_t insn_len; + uint8_t access_type; + uint8_t *instruction_bytes; + int ret; + + ret = set_memory_info(msg, &info); + if (ret < 0) { + error_report("failed to convert message to memory info"); + return -1; + } + insn_len = info.instruction_byte_count; + 
access_type = info.header.intercept_access_type; + + if (access_type == HV_X64_INTERCEPT_ACCESS_TYPE_EXECUTE) { + error_report("invalid intercept access type: execute"); + return -1; + } + + if (insn_len > 16) { + error_report("invalid mmio instruction length: %zu", insn_len); + return -1; + } + + trace_mshv_handle_mmio(info.guest_virtual_address, + info.guest_physical_address, + info.instruction_byte_count, access_type); + + instruction_bytes = info.instruction_bytes; + + ret = emulate_instruction(cpu, instruction_bytes, insn_len, + info.guest_virtual_address, + info.guest_physical_address); + if (ret < 0) { + error_report("failed to emulate mmio"); + return -1; + } + + *exit_reason = MshvVmExitIgnore; + + return 0; +} + +static int handle_unmapped_mem(int vm_fd, CPUState *cpu, + const struct hyperv_message *msg, + MshvVmExit *exit_reason) +{ + struct hv_x64_memory_intercept_message info = { 0 }; + uint64_t gpa; + int ret; + enum MshvRemapResult remap_result; + + ret = set_memory_info(msg, &info); + if (ret < 0) { + error_report("failed to convert message to memory info"); + return -1; + } + + gpa = info.guest_physical_address; + + /* attempt to remap the region, in case of overlapping userspace mappings */ + remap_result = mshv_remap_overlap_region(vm_fd, gpa); + *exit_reason = MshvVmExitIgnore; + + switch (remap_result) { + case MshvRemapNoMapping: + /* if we didn't find a mapping, it is probably mmio */ + return handle_mmio(cpu, msg, exit_reason); + case MshvRemapOk: + break; + case MshvRemapNoOverlap: + /* This should not happen, but we are forgiving it */ + warn_report("found no overlap for unmapped region"); + *exit_reason = MshvVmExitSpecial; + break; + } + + return 0; +} + +static int set_ioport_info(const struct hyperv_message *msg, + hv_x64_io_port_intercept_message *info) +{ + if (msg->header.message_type != HVMSG_X64_IO_PORT_INTERCEPT) { + error_report("Invalid message type"); + return -1; + } + memcpy(info, msg->payload, sizeof(*info)); + + return 0; 
+} + +static int set_x64_registers(const CPUState *cpu, const uint32_t *names, + const uint64_t *values) +{ + + hv_register_assoc assocs[2]; + int ret; + + for (size_t i = 0; i < ARRAY_SIZE(assocs); i++) { + assocs[i].name = names[i]; + assocs[i].value.reg64 = values[i]; + } + + ret = mshv_set_generic_regs(cpu, assocs, ARRAY_SIZE(assocs)); + if (ret < 0) { + error_report("failed to set x64 registers"); + return -1; + } + + return 0; +} + +static inline MemTxAttrs get_mem_attrs(bool is_secure_mode) +{ + MemTxAttrs memattr = {0}; + memattr.secure = is_secure_mode; + return memattr; +} + +static void pio_read(uint64_t port, uint8_t *data, uintptr_t size, + bool is_secure_mode) +{ + int ret = 0; + MemTxAttrs memattr = get_mem_attrs(is_secure_mode); + ret = address_space_rw(&address_space_io, port, memattr, (void *)data, size, + false); + if (ret != MEMTX_OK) { + error_report("Failed to read from port %lx: %d", port, ret); + abort(); + } +} + +static int pio_write(uint64_t port, const uint8_t *data, uintptr_t size, + bool is_secure_mode) +{ + int ret = 0; + MemTxAttrs memattr = get_mem_attrs(is_secure_mode); + ret = address_space_rw(&address_space_io, port, memattr, (void *)data, size, + true); + return ret; +} + +static int handle_pio_non_str(const CPUState *cpu, + hv_x64_io_port_intercept_message *info) +{ + size_t len = info->access_info.access_size; + uint8_t access_type = info->header.intercept_access_type; + int ret; + uint32_t val, eax; + const uint32_t eax_mask = 0xffffffffu >> (32 - len * 8); + size_t insn_len; + uint64_t rip, rax; + uint32_t reg_names[2]; + uint64_t reg_values[2]; + uint16_t port = info->port_number; + + if (access_type == HV_X64_INTERCEPT_ACCESS_TYPE_WRITE) { + union { + uint32_t u32; + uint8_t bytes[4]; + } conv; + + /* convert the first 4 bytes of rax to bytes */ + conv.u32 = (uint32_t)info->rax; + /* secure mode is set to false */ + ret = pio_write(port, conv.bytes, len, false); + if (ret < 0) { + error_report("Failed to write to io 
port"); + return -1; + } + } else { + uint8_t data[4] = { 0 }; + /* secure mode is set to false */ + pio_read(info->port_number, data, len, false); + + /* Preserve high bits in EAX, but clear out high bits in RAX */ + val = *(uint32_t *)data; + eax = (((uint32_t)info->rax) & ~eax_mask) | (val & eax_mask); + info->rax = (uint64_t)eax; + } + + insn_len = info->header.instruction_length; + + /* Advance RIP and update RAX */ + rip = info->header.rip + insn_len; + rax = info->rax; + + reg_names[0] = HV_X64_REGISTER_RIP; + reg_values[0] = rip; + reg_names[1] = HV_X64_REGISTER_RAX; + reg_values[1] = rax; + + ret = set_x64_registers(cpu, reg_names, reg_values); + if (ret < 0) { + error_report("Failed to set x64 registers"); + return -1; + } + + cpu->accel->dirty = false; + + return 0; +} + +static int fetch_guest_state(CPUState *cpu) +{ + int ret; + + ret = mshv_get_standard_regs(cpu); + if (ret < 0) { + error_report("Failed to get standard registers"); + return -1; + } + + ret = mshv_get_special_regs(cpu); + if (ret < 0) { + error_report("Failed to get special registers"); + return -1; + } + + return 0; +} + +static int read_memory(const CPUState *cpu, uint64_t initial_gva, + uint64_t initial_gpa, uint64_t gva, uint8_t *data, + size_t len) +{ + int ret; + uint64_t gpa, flags; + + if (gva == initial_gva) { + gpa = initial_gpa; + } else { + flags = HV_TRANSLATE_GVA_VALIDATE_READ; + ret = translate_gva(cpu, gva, &gpa, flags); + if (ret < 0) { + return -1; + } + + ret = mshv_guest_mem_read(gpa, data, len, false, false); + if (ret < 0) { + error_report("failed to read guest mem"); + return -1; + } + } + + return 0; +} + +static int write_memory(const CPUState *cpu, uint64_t initial_gva, + uint64_t initial_gpa, uint64_t gva, const uint8_t *data, + size_t len) +{ + int ret; + uint64_t gpa, flags; + + if (gva == initial_gva) { + gpa = initial_gpa; + } else { + flags = HV_TRANSLATE_GVA_VALIDATE_WRITE; + ret = translate_gva(cpu, gva, &gpa, flags); + if (ret < 0) { + 
error_report("failed to translate gva to gpa"); + return -1; + } + } + ret = mshv_guest_mem_write(gpa, data, len, false); + if (ret != MEMTX_OK) { + error_report("failed to write to mmio"); + return -1; + } + + return 0; +} + +static int handle_pio_str_write(CPUState *cpu, + hv_x64_io_port_intercept_message *info, + size_t repeat, uint16_t port, + bool direction_flag) +{ + int ret; + uint64_t src; + uint8_t data[4] = { 0 }; + size_t len = info->access_info.access_size; + + src = linear_addr(cpu, info->rsi, R_DS); + + for (size_t i = 0; i < repeat; i++) { + ret = read_memory(cpu, 0, 0, src, data, len); + if (ret < 0) { + error_report("Failed to read memory"); + return -1; + } + ret = pio_write(port, data, len, false); + if (ret < 0) { + error_report("Failed to write to io port"); + return -1; + } + src += direction_flag ? -len : len; + info->rsi += direction_flag ? -len : len; + } + + return 0; +} + +static int handle_pio_str_read(CPUState *cpu, + hv_x64_io_port_intercept_message *info, + size_t repeat, uint16_t port, + bool direction_flag) +{ + int ret; + uint64_t dst; + size_t len = info->access_info.access_size; + uint8_t data[4] = { 0 }; + + dst = linear_addr(cpu, info->rdi, R_ES); + + for (size_t i = 0; i < repeat; i++) { + pio_read(port, data, len, false); + + ret = write_memory(cpu, 0, 0, dst, data, len); + if (ret < 0) { + error_report("Failed to write memory"); + return -1; + } + dst += direction_flag ? -len : len; + info->rdi += direction_flag ? -len : len; + } + + return 0; +} + +static int handle_pio_str(CPUState *cpu, hv_x64_io_port_intercept_message *info) +{ + uint8_t access_type = info->header.intercept_access_type; + uint16_t port = info->port_number; + bool repop = info->access_info.rep_prefix == 1; + size_t repeat = repop ? 
info->rcx : 1; + size_t insn_len = info->header.instruction_length; + bool direction_flag; + uint32_t reg_names[3]; + uint64_t reg_values[3]; + int ret; + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + + ret = fetch_guest_state(cpu); + if (ret < 0) { + error_report("Failed to fetch guest state"); + return -1; + } + + direction_flag = (env->eflags & DESC_E_MASK) != 0; + + if (access_type == HV_X64_INTERCEPT_ACCESS_TYPE_WRITE) { + ret = handle_pio_str_write(cpu, info, repeat, port, direction_flag); + if (ret < 0) { + error_report("Failed to handle pio str write"); + return -1; + } + reg_names[0] = HV_X64_REGISTER_RSI; + reg_values[0] = info->rsi; + } else { + ret = handle_pio_str_read(cpu, info, repeat, port, direction_flag); + reg_names[0] = HV_X64_REGISTER_RDI; + reg_values[0] = info->rdi; + } + + reg_names[1] = HV_X64_REGISTER_RIP; + reg_values[1] = info->header.rip + insn_len; + reg_names[2] = HV_X64_REGISTER_RAX; + reg_values[2] = info->rax; + + ret = set_x64_registers(cpu, reg_names, reg_values); + if (ret < 0) { + error_report("Failed to set x64 registers"); + return -1; + } + + cpu->accel->dirty = false; + + return 0; +} + +static int handle_pio(CPUState *cpu, const struct hyperv_message *msg) +{ + struct hv_x64_io_port_intercept_message info = { 0 }; + int ret; + + ret = set_ioport_info(msg, &info); + if (ret < 0) { + error_report("Failed to convert message to ioport info"); + return -1; + } + + if (info.access_info.string_op) { + return handle_pio_str(cpu, &info); + } + + return handle_pio_non_str(cpu, &info); +} + +int mshv_run_vcpu(int vm_fd, CPUState *cpu, hv_message *msg, MshvVmExit *exit) +{ + int ret; + enum MshvVmExit exit_reason; + int cpu_fd = mshv_vcpufd(cpu); + + ret = ioctl(cpu_fd, MSHV_RUN_VP, msg); + if (ret < 0) { + return MshvVmExitShutdown; + } + + switch (msg->header.message_type) { + case HVMSG_UNRECOVERABLE_EXCEPTION: + return MshvVmExitShutdown; + case HVMSG_UNMAPPED_GPA: + ret = handle_unmapped_mem(vm_fd, cpu, 
msg, &exit_reason); + if (ret < 0) { + error_report("failed to handle unmapped memory"); + return -1; + } + return exit_reason; + case HVMSG_GPA_INTERCEPT: + ret = handle_mmio(cpu, msg, &exit_reason); + if (ret < 0) { + error_report("failed to handle mmio"); + return -1; + } + return exit_reason; + case HVMSG_X64_IO_PORT_INTERCEPT: + ret = handle_pio(cpu, msg); + if (ret < 0) { + return MshvVmExitSpecial; + } + return MshvVmExitIgnore; + default: + break; + } + + *exit = MshvVmExitIgnore; + return 0; +} + +void mshv_remove_vcpu(int vm_fd, int cpu_fd) +{ + close(cpu_fd); +} + + +int mshv_create_vcpu(int vm_fd, uint8_t vp_index, int *cpu_fd) +{ + int ret; + struct mshv_create_vp vp_arg = { + .vp_index = vp_index, + }; + ret = ioctl(vm_fd, MSHV_CREATE_VP, &vp_arg); + if (ret < 0) { + error_report("failed to create mshv vcpu: %s", strerror(errno)); + return -1; + } + + *cpu_fd = ret; + + return 0; +} + +static int guest_mem_read_with_gva(const CPUState *cpu, uint64_t gva, + uint8_t *data, uintptr_t size, + bool fetch_instruction) +{ + int ret; + uint64_t gpa, flags; + + flags = HV_TRANSLATE_GVA_VALIDATE_READ; + ret = translate_gva(cpu, gva, &gpa, flags); + if (ret < 0) { + error_report("failed to translate gva to gpa"); + return -1; + } + + ret = mshv_guest_mem_read(gpa, data, size, false, fetch_instruction); + if (ret < 0) { + error_report("failed to read from guest memory"); + return -1; + } + + return 0; +} + +static int guest_mem_write_with_gva(const CPUState *cpu, uint64_t gva, + const uint8_t *data, uintptr_t size) +{ + int ret; + uint64_t gpa, flags; + + flags = HV_TRANSLATE_GVA_VALIDATE_WRITE; + ret = translate_gva(cpu, gva, &gpa, flags); + if (ret < 0) { + error_report("failed to translate gva to gpa"); + return -1; + } + ret = mshv_guest_mem_write(gpa, data, size, false); + if (ret < 0) { + error_report("failed to write to guest memory"); + return -1; + } + return 0; +} + +static void write_mem(CPUState *cpu, void *data, target_ulong addr, int bytes) +{ + if 
(guest_mem_write_with_gva(cpu, addr, data, bytes) < 0) { + error_report("failed to write memory"); + abort(); + } +} + +static void fetch_instruction(CPUState *cpu, void *data, + target_ulong addr, int bytes) +{ + if (guest_mem_read_with_gva(cpu, addr, data, bytes, true) < 0) { + error_report("failed to fetch instruction"); + abort(); + } +} + +static void read_mem(CPUState *cpu, void *data, target_ulong addr, int bytes) +{ + if (guest_mem_read_with_gva(cpu, addr, data, bytes, false) < 0) { + error_report("failed to read memory"); + abort(); + } +} + +static void read_segment_descriptor(CPUState *cpu, + struct x86_segment_descriptor *desc, + enum X86Seg seg_idx) +{ + bool ret; + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + SegmentCache *seg = &env->segs[seg_idx]; + x86_segment_selector sel = { .sel = seg->selector & 0xFFFF }; + + ret = x86_read_segment_descriptor(cpu, desc, sel); + if (ret == false) { + error_report("failed to read segment descriptor"); + abort(); + } +} + +static const struct x86_emul_ops mshv_x86_emul_ops = { + .fetch_instruction = fetch_instruction, + .read_mem = read_mem, + .write_mem = write_mem, + .read_segment_descriptor = read_segment_descriptor, +}; + +void mshv_init_mmio_emu(void) +{ + init_decoder(); + init_emu(&mshv_x86_emul_ops); +} + +void mshv_arch_init_vcpu(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + AccelCPUState *state = cpu->accel; + size_t page = HV_HYP_PAGE_SIZE; + void *mem = qemu_memalign(page, 2 * page); + + /* sanity check, to make sure we don't overflow the page */ + QEMU_BUILD_BUG_ON((MAX_REGISTER_COUNT + * sizeof(hv_register_assoc) + + sizeof(hv_input_get_vp_registers) + > HV_HYP_PAGE_SIZE)); + + state->hvcall_args.base = mem; + state->hvcall_args.input_page = mem; + state->hvcall_args.output_page = (uint8_t *)mem + page; + + env->emu_mmio_buf = g_new(char, 4096); +} + +void mshv_arch_destroy_vcpu(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + 
CPUX86State *env = &x86_cpu->env; + AccelCPUState *state = cpu->accel; + + g_free(state->hvcall_args.base); + state->hvcall_args = (MshvHvCallArgs){0}; + g_clear_pointer(&env->emu_mmio_buf, g_free); +} + +/* + * Default Microsoft Hypervisor behavior for unimplemented MSR is to send a + * fault to the guest if it tries to access it. It is possible to override + * this behavior with a more suitable option i.e., ignore writes from the guest + * and return zero in attempt to read unimplemented. + */ +static int set_unimplemented_msr_action(int vm_fd) +{ + struct hv_input_set_partition_property in = {0}; + struct mshv_root_hvcall args = {0}; + + in.property_code = HV_PARTITION_PROPERTY_UNIMPLEMENTED_MSR_ACTION; + in.property_value = HV_UNIMPLEMENTED_MSR_ACTION_IGNORE_WRITE_READ_ZERO; + + args.code = HVCALL_SET_PARTITION_PROPERTY; + args.in_sz = sizeof(in); + args.in_ptr = (uint64_t)∈ + + trace_mshv_hvcall_args("unimplemented_msr_action", args.code, args.in_sz); + + int ret = mshv_hvcall(vm_fd, &args); + if (ret < 0) { + error_report("Failed to set unimplemented MSR action"); + return -1; + } + return 0; +} + +int mshv_arch_post_init_vm(int vm_fd) +{ + int ret; + + ret = set_unimplemented_msr_action(vm_fd); + if (ret < 0) { + error_report("Failed to set unimplemented MSR action"); + } + + return ret; +} diff --git a/target/i386/mshv/x86.c b/target/i386/mshv/x86.c new file mode 100644 index 0000000000000..d574b3bc52fe1 --- /dev/null +++ b/target/i386/mshv/x86.c @@ -0,0 +1,297 @@ +/* + * QEMU MSHV support + * + * Copyright Microsoft, Corp. 
2025 + * + * Authors: Magnus Kulke + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" + +#include "cpu.h" +#include "emulate/x86_decode.h" +#include "emulate/x86_emu.h" +#include "qemu/typedefs.h" +#include "qemu/error-report.h" +#include "system/mshv.h" + +/* RW or Exec segment */ +static const uint8_t RWRX_SEGMENT_TYPE = 0x2; +static const uint8_t CODE_SEGMENT_TYPE = 0x8; +static const uint8_t EXPAND_DOWN_SEGMENT_TYPE = 0x4; + +typedef enum CpuMode { + REAL_MODE, + PROTECTED_MODE, + LONG_MODE, +} CpuMode; + +static CpuMode cpu_mode(CPUState *cpu) +{ + enum CpuMode m = REAL_MODE; + + if (x86_is_protected(cpu)) { + m = PROTECTED_MODE; + + if (x86_is_long_mode(cpu)) { + m = LONG_MODE; + } + } + + return m; +} + +static bool segment_type_ro(const SegmentCache *seg) +{ + uint32_t type_ = (seg->flags >> DESC_TYPE_SHIFT) & 15; + return (type_ & (~RWRX_SEGMENT_TYPE)) == 0; +} + +static bool segment_type_code(const SegmentCache *seg) +{ + uint32_t type_ = (seg->flags >> DESC_TYPE_SHIFT) & 15; + return (type_ & CODE_SEGMENT_TYPE) != 0; +} + +static bool segment_expands_down(const SegmentCache *seg) +{ + uint32_t type_ = (seg->flags >> DESC_TYPE_SHIFT) & 15; + + if (segment_type_code(seg)) { + return false; + } + + return (type_ & EXPAND_DOWN_SEGMENT_TYPE) != 0; +} + +static uint32_t segment_limit(const SegmentCache *seg) +{ + uint32_t limit = seg->limit; + uint32_t granularity = (seg->flags & DESC_G_MASK) != 0; + + if (granularity != 0) { + limit = (limit << 12) | 0xFFF; + } + + return limit; +} + +static uint8_t segment_db(const SegmentCache *seg) +{ + return (seg->flags >> DESC_B_SHIFT) & 1; +} + +static uint32_t segment_max_limit(const SegmentCache *seg) +{ + if (segment_db(seg) != 0) { + return 0xFFFFFFFF; + } + return 0xFFFF; +} + +static int linearize(CPUState *cpu, + target_ulong logical_addr, target_ulong *linear_addr, + X86Seg seg_idx) +{ + enum CpuMode mode; + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + 
SegmentCache *seg = &env->segs[seg_idx]; + target_ulong base = seg->base; + target_ulong logical_addr_32b; + uint32_t limit; + /* TODO: the emulator will not pass us "write" indicator yet */ + bool write = false; + + mode = cpu_mode(cpu); + + switch (mode) { + case LONG_MODE: + if (__builtin_add_overflow(logical_addr, base, linear_addr)) { + error_report("Address overflow"); + return -1; + } + break; + case PROTECTED_MODE: + case REAL_MODE: + if (segment_type_ro(seg) && write) { + error_report("Cannot write to read-only segment"); + return -1; + } + + logical_addr_32b = logical_addr & 0xFFFFFFFF; + limit = segment_limit(seg); + + if (segment_expands_down(seg)) { + if (logical_addr_32b >= limit) { + error_report("Address exceeds limit (expands down)"); + return -1; + } + + limit = segment_max_limit(seg); + } + + if (logical_addr_32b > limit) { + error_report("Address exceeds limit %u", limit); + return -1; + } + *linear_addr = logical_addr_32b + base; + break; + default: + error_report("Unknown cpu mode: %d", mode); + return -1; + } + + return 0; +} + +bool x86_read_segment_descriptor(CPUState *cpu, + struct x86_segment_descriptor *desc, + x86_segment_selector sel) +{ + target_ulong base; + uint32_t limit; + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + target_ulong gva; + + memset(desc, 0, sizeof(*desc)); + + /* valid gdt descriptors start from index 1 */ + if (!sel.index && GDT_SEL == sel.ti) { + return false; + } + + if (GDT_SEL == sel.ti) { + base = env->gdt.base; + limit = env->gdt.limit; + } else { + base = env->ldt.base; + limit = env->ldt.limit; + } + + if (sel.index * 8 >= limit) { + return false; + } + + gva = base + sel.index * 8; + emul_ops->read_mem(cpu, desc, gva, sizeof(*desc)); + + return true; +} + +bool x86_read_call_gate(CPUState *cpu, struct x86_call_gate *idt_desc, + int gate) +{ + target_ulong base; + uint32_t limit; + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + target_ulong gva; + + base = 
env->idt.base; + limit = env->idt.limit; + + memset(idt_desc, 0, sizeof(*idt_desc)); + if (gate * 8 >= limit) { + perror("call gate exceeds idt limit"); + return false; + } + + gva = base + gate * 8; + emul_ops->read_mem(cpu, idt_desc, gva, sizeof(*idt_desc)); + + return true; +} + +bool x86_is_protected(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + uint64_t cr0 = env->cr[0]; + + return cr0 & CR0_PE_MASK; +} + +bool x86_is_real(CPUState *cpu) +{ + return !x86_is_protected(cpu); +} + +bool x86_is_v8086(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + return x86_is_protected(cpu) && (env->eflags & VM_MASK); +} + +bool x86_is_long_mode(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + uint64_t efer = env->efer; + uint64_t lme_lma = (MSR_EFER_LME | MSR_EFER_LMA); + + return ((efer & lme_lma) == lme_lma); +} + +bool x86_is_long64_mode(CPUState *cpu) +{ + error_report("unimplemented: is_long64_mode()"); + abort(); +} + +bool x86_is_paging_mode(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + uint64_t cr0 = env->cr[0]; + + return cr0 & CR0_PG_MASK; +} + +bool x86_is_pae_enabled(CPUState *cpu) +{ + X86CPU *x86_cpu = X86_CPU(cpu); + CPUX86State *env = &x86_cpu->env; + uint64_t cr4 = env->cr[4]; + + return cr4 & CR4_PAE_MASK; +} + +target_ulong linear_addr(CPUState *cpu, target_ulong addr, X86Seg seg) +{ + int ret; + target_ulong linear_addr; + + ret = linearize(cpu, addr, &linear_addr, seg); + if (ret < 0) { + error_report("failed to linearize address"); + abort(); + } + + return linear_addr; +} + +target_ulong linear_addr_size(CPUState *cpu, target_ulong addr, int size, + X86Seg seg) +{ + switch (size) { + case 2: + addr = (uint16_t)addr; + break; + case 4: + addr = (uint32_t)addr; + break; + default: + break; + } + return linear_addr(cpu, addr, seg); +} + +target_ulong linear_rip(CPUState *cpu, target_ulong 
rip) +{ + return linear_addr(cpu, rip, R_CS); +} diff --git a/target/i386/nvmm/nvmm-accel-ops.c b/target/i386/nvmm/nvmm-accel-ops.c index 3799260bbdef9..dd5d5428b1c00 100644 --- a/target/i386/nvmm/nvmm-accel-ops.c +++ b/target/i386/nvmm/nvmm-accel-ops.c @@ -42,16 +42,14 @@ static void *qemu_nvmm_cpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { + qemu_process_cpu_events(cpu); + if (cpu_can_run(cpu)) { r = nvmm_vcpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } - while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait_bql(cpu->halt_cond); - } - qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); nvmm_destroy_vcpu(cpu); @@ -77,7 +75,7 @@ static void nvmm_start_vcpu_thread(CPUState *cpu) */ static void nvmm_kick_vcpu_thread(CPUState *cpu) { - cpu->exit_request = 1; + qatomic_set(&cpu->exit_request, true); cpus_kick_thread(cpu); } diff --git a/target/i386/nvmm/nvmm-all.c b/target/i386/nvmm/nvmm-all.c index 92e3b8b2f45f5..2e442baf4b715 100644 --- a/target/i386/nvmm/nvmm-all.c +++ b/target/i386/nvmm/nvmm-all.c @@ -15,6 +15,7 @@ #include "accel/accel-ops.h" #include "system/nvmm.h" #include "system/cpus.h" +#include "system/memory.h" #include "system/runstate.h" #include "qemu/main-loop.h" #include "qemu/error-report.h" @@ -413,22 +414,22 @@ nvmm_vcpu_pre_run(CPUState *cpu) * Force the VCPU out of its inner loop to process any INIT requests * or commit pending TPR access. 
*/ - if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { - cpu->exit_request = 1; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { + qatomic_set(&cpu->exit_request, true); } - if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { if (nvmm_can_take_nmi(cpu)) { - cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; + cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI); event->type = NVMM_VCPU_EVENT_INTR; event->vector = 2; has_event = true; } } - if (!has_event && (cpu->interrupt_request & CPU_INTERRUPT_HARD)) { + if (!has_event && cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) { if (nvmm_can_take_int(cpu)) { - cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); event->type = NVMM_VCPU_EVENT_INTR; event->vector = cpu_get_pic_interrupt(env); has_event = true; @@ -436,8 +437,8 @@ nvmm_vcpu_pre_run(CPUState *cpu) } /* Don't want SMIs. */ - if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { - cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI); } if (sync_tpr) { @@ -516,7 +517,9 @@ nvmm_io_callback(struct nvmm_io *io) static void nvmm_mem_callback(struct nvmm_mem *mem) { - cpu_physical_memory_rw(mem->gpa, mem->data, mem->size, mem->write); + /* TODO: Get CPUState via mem->vcpu? */ + address_space_rw(&address_space_memory, mem->gpa, MEMTXATTRS_UNSPECIFIED, + mem->data, mem->size, mem->write); /* Needed, otherwise infinite loop. 
*/ current_cpu->vcpu_dirty = false; @@ -651,9 +654,9 @@ nvmm_handle_halted(struct nvmm_machine *mach, CPUState *cpu, bql_lock(); - if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) && (cpu_env(cpu)->eflags & IF_MASK)) && - !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { cpu->exception_index = EXCP_HLT; cpu->halted = true; ret = 1; @@ -691,26 +694,26 @@ nvmm_vcpu_loop(CPUState *cpu) * Some asynchronous events must be handled outside of the inner * VCPU loop. They are handled here. */ - if (cpu->interrupt_request & CPU_INTERRUPT_INIT) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT)) { nvmm_cpu_synchronize_state(cpu); do_cpu_init(x86_cpu); /* set int/nmi windows back to the reset state */ } - if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { - cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); apic_poll_irq(x86_cpu->apic_state); } - if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || - (cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { cpu->halted = false; } - if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) { nvmm_cpu_synchronize_state(cpu); do_cpu_sipi(x86_cpu); } - if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { - cpu->interrupt_request &= ~CPU_INTERRUPT_TPR; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR); nvmm_cpu_synchronize_state(cpu); apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip, env->tpr_access_type); @@ -743,7 +746,8 @@ nvmm_vcpu_loop(CPUState *cpu) nvmm_vcpu_pre_run(cpu); - if (qatomic_read(&cpu->exit_request)) { + /* Corresponding store-release is in cpu_exit. 
*/ + if (qatomic_load_acquire(&cpu->exit_request)) { #if NVMM_USER_VERSION >= 2 nvmm_vcpu_stop(vcpu); #else @@ -751,8 +755,6 @@ nvmm_vcpu_loop(CPUState *cpu) #endif } - /* Read exit_request before the kernel reads the immediate exit flag */ - smp_rmb(); ret = nvmm_vcpu_run(mach, vcpu); if (ret == -1) { error_report("NVMM: Failed to exec a virtual processor," @@ -818,8 +820,6 @@ nvmm_vcpu_loop(CPUState *cpu) cpu_exec_end(cpu); bql_lock(); - qatomic_set(&cpu->exit_request, false); - return ret < 0; } diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc index 853b1c8bf95e8..a50f57dbaab31 100644 --- a/target/i386/tcg/decode-new.c.inc +++ b/target/i386/tcg/decode-new.c.inc @@ -878,10 +878,10 @@ static const X86OpEntry opcodes_0F3A[256] = { [0x0e] = X86_OP_ENTRY4(VPBLENDW, V,x, H,x, W,x, vex4 cpuid(SSE41) avx2_256 p_66), [0x0f] = X86_OP_ENTRY4(PALIGNR, V,x, H,x, W,x, vex4 cpuid(SSSE3) mmx avx2_256 p_00_66), - [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX) p_66), + [0x18] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,dq, vex6 chk(W0) cpuid(AVX) p_66), [0x19] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX) p_66), - [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,qq, vex6 chk(W0) cpuid(AVX2) p_66), + [0x38] = X86_OP_ENTRY4(VINSERTx128, V,qq, H,qq, W,dq, vex6 chk(W0) cpuid(AVX2) p_66), [0x39] = X86_OP_ENTRY3(VEXTRACTx128, W,dq, V,qq, I,b, vex6 chk(W0) cpuid(AVX2) p_66), /* Listed incorrectly as type 4 */ @@ -1541,7 +1541,7 @@ static void decode_group4_5(DisasContext *s, CPUX86State *env, X86OpEntry *entry [0x0b] = X86_OP_ENTRYr(CALLF_m, M,p), [0x0c] = X86_OP_ENTRYr(JMP_m, E,f64, zextT0), [0x0d] = X86_OP_ENTRYr(JMPF_m, M,p), - [0x0e] = X86_OP_ENTRYr(PUSH, E,f64), + [0x0e] = X86_OP_ENTRYr(PUSH, E,d64), }; int w = (*b & 1); diff --git a/target/i386/tcg/seg_helper.c b/target/i386/tcg/seg_helper.c index 071f3fbd83d24..f49fe851cdffd 100644 --- a/target/i386/tcg/seg_helper.c +++ 
b/target/i386/tcg/seg_helper.c @@ -456,7 +456,7 @@ static void switch_tss_ra(CPUX86State *env, int tss_selector, new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4)); } new_ldt = access_ldw(&new, tss_base + 0x60); - new_trap = access_ldl(&new, tss_base + 0x64); + new_trap = access_ldw(&new, tss_base + 0x64) & 1; } else { /* 16 bit */ new_cr3 = 0; diff --git a/target/i386/tcg/system/excp_helper.c b/target/i386/tcg/system/excp_helper.c index 50040f6fcafab..f622b5d588eb3 100644 --- a/target/i386/tcg/system/excp_helper.c +++ b/target/i386/tcg/system/excp_helper.c @@ -592,7 +592,8 @@ static bool get_physical_address(CPUX86State *env, vaddr addr, if (sext != 0 && sext != -1) { *err = (TranslateFault){ .exception_index = EXCP0D_GPF, - .cr2 = addr, + /* non-canonical #GP doesn't change CR2 */ + .cr2 = env->cr[2], }; return false; } diff --git a/target/i386/tcg/system/misc_helper.c b/target/i386/tcg/system/misc_helper.c index 9c3f5cc99b356..0c32424d36ada 100644 --- a/target/i386/tcg/system/misc_helper.c +++ b/target/i386/tcg/system/misc_helper.c @@ -299,7 +299,7 @@ void helper_wrmsr(CPUX86State *env) int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; bql_lock(); - ret = apic_msr_write(index, val); + ret = apic_msr_write(env_archcpu(env)->apic_state, index, val); bql_unlock(); if (ret < 0) { goto error; @@ -477,7 +477,7 @@ void helper_rdmsr(CPUX86State *env) int index = (uint32_t)env->regs[R_ECX] - MSR_APIC_START; bql_lock(); - ret = apic_msr_read(index, &val); + ret = apic_msr_read(x86_cpu->apic_state, index, &val); bql_unlock(); if (ret < 0) { raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC()); diff --git a/target/i386/tcg/system/seg_helper.c b/target/i386/tcg/system/seg_helper.c index d4ea890c12440..8c7856be81e24 100644 --- a/target/i386/tcg/system/seg_helper.c +++ b/target/i386/tcg/system/seg_helper.c @@ -133,7 +133,7 @@ bool x86_cpu_exec_halt(CPUState *cpu) X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; - if (cpu->interrupt_request & 
CPU_INTERRUPT_POLL) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) { bql_lock(); apic_poll_irq(x86_cpu->apic_state); cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); @@ -178,31 +178,31 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) */ switch (interrupt_request) { case CPU_INTERRUPT_POLL: - cs->interrupt_request &= ~CPU_INTERRUPT_POLL; + cpu_reset_interrupt(cs, CPU_INTERRUPT_POLL); apic_poll_irq(cpu->apic_state); break; case CPU_INTERRUPT_SIPI: + cpu_reset_interrupt(cs, CPU_INTERRUPT_SIPI); do_cpu_sipi(cpu); break; case CPU_INTERRUPT_SMI: cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_SMI; + cpu_reset_interrupt(cs, CPU_INTERRUPT_SMI); do_smm_enter(cpu); break; case CPU_INTERRUPT_NMI: cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0); - cs->interrupt_request &= ~CPU_INTERRUPT_NMI; + cpu_reset_interrupt(cs, CPU_INTERRUPT_NMI); env->hflags2 |= HF2_NMI_MASK; do_interrupt_x86_hardirq(env, EXCP02_NMI, 1); break; case CPU_INTERRUPT_MCE: - cs->interrupt_request &= ~CPU_INTERRUPT_MCE; + cpu_reset_interrupt(cs, CPU_INTERRUPT_MCE); do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0); break; case CPU_INTERRUPT_HARD: cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0); - cs->interrupt_request &= ~(CPU_INTERRUPT_HARD | - CPU_INTERRUPT_VIRQ); + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ); intno = cpu_get_pic_interrupt(env); qemu_log_mask(CPU_LOG_INT, "Servicing hardware INT=0x%02x\n", intno); @@ -215,7 +215,7 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request) qemu_log_mask(CPU_LOG_INT, "Servicing virtual hardware INT=0x%02x\n", intno); do_interrupt_x86_hardirq(env, intno, 1); - cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ); env->int_ctl &= ~V_IRQ_MASK; break; } diff --git a/target/i386/tcg/system/smm_helper.c b/target/i386/tcg/system/smm_helper.c index 251eb7856ce76..fb028a8272f25 100644 --- 
a/target/i386/tcg/system/smm_helper.c +++ b/target/i386/tcg/system/smm_helper.c @@ -168,7 +168,7 @@ void do_smm_enter(X86CPU *cpu) env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK)); cpu_x86_update_cr4(env, 0); - env->dr[7] = 0x00000400; + helper_set_dr(env, 7, 0x00000400); cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase, 0xffffffff, @@ -233,8 +233,8 @@ void helper_rsm(CPUX86State *env) env->eip = x86_ldq_phys(cs, sm_state + 0x7f78); cpu_load_eflags(env, x86_ldl_phys(cs, sm_state + 0x7f70), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); - env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7f68); - env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7f60); + helper_set_dr(env, 6, x86_ldl_phys(cs, sm_state + 0x7f68)); + helper_set_dr(env, 7, x86_ldl_phys(cs, sm_state + 0x7f60)); cpu_x86_update_cr4(env, x86_ldl_phys(cs, sm_state + 0x7f48)); cpu_x86_update_cr3(env, x86_ldq_phys(cs, sm_state + 0x7f50)); @@ -268,8 +268,8 @@ void helper_rsm(CPUX86State *env) env->regs[R_EDX] = x86_ldl_phys(cs, sm_state + 0x7fd8); env->regs[R_ECX] = x86_ldl_phys(cs, sm_state + 0x7fd4); env->regs[R_EAX] = x86_ldl_phys(cs, sm_state + 0x7fd0); - env->dr[6] = x86_ldl_phys(cs, sm_state + 0x7fcc); - env->dr[7] = x86_ldl_phys(cs, sm_state + 0x7fc8); + helper_set_dr(env, 6, x86_ldl_phys(cs, sm_state + 0x7fcc)); + helper_set_dr(env, 7, x86_ldl_phys(cs, sm_state + 0x7fc8)); env->tr.selector = x86_ldl_phys(cs, sm_state + 0x7fc4) & 0xffff; env->tr.base = x86_ldl_phys(cs, sm_state + 0x7f64); diff --git a/target/i386/tcg/system/svm_helper.c b/target/i386/tcg/system/svm_helper.c index b27049b9ed138..505788b0e26c6 100644 --- a/target/i386/tcg/system/svm_helper.c +++ b/target/i386/tcg/system/svm_helper.c @@ -49,7 +49,7 @@ static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr, static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base) { uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env); - *seg_base = ((((long) *seg_base) << 
shift_amt) >> shift_amt); + *seg_base = (((int64_t) *seg_base) << shift_amt) >> shift_amt; } static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr, @@ -403,7 +403,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) env->hflags2 |= HF2_GIF_MASK; if (ctl_has_irq(env)) { - cs->interrupt_request |= CPU_INTERRUPT_VIRQ; + cpu_set_interrupt(cs, CPU_INTERRUPT_VIRQ); } if (virtual_gif_set(env)) { @@ -824,7 +824,7 @@ void do_vmexit(CPUX86State *env) env->intercept_exceptions = 0; /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */ - cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; + cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ); env->int_ctl = 0; /* Clears the TSC_OFFSET inside the processor. */ diff --git a/target/i386/tcg/system/tcg-cpu.c b/target/i386/tcg/system/tcg-cpu.c index 0538a4fd51a35..7255862c2449f 100644 --- a/target/i386/tcg/system/tcg-cpu.c +++ b/target/i386/tcg/system/tcg-cpu.c @@ -74,8 +74,8 @@ bool tcg_cpu_realizefn(CPUState *cs, Error **errp) memory_region_set_enabled(cpu->cpu_as_mem, true); cs->num_ases = 2; - cpu_address_space_init(cs, 0, "cpu-memory", cs->memory); - cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root); + cpu_address_space_init(cs, X86ASIdx_MEM, "cpu-memory", cs->memory); + cpu_address_space_init(cs, X86ASIdx_SMM, "cpu-smm", cpu->cpu_as_root); /* ... SMRAM with higher priority, linked from /machine/smram. 
*/ cpu->machine_done.notify = tcg_cpu_machine_done; diff --git a/target/i386/whpx/whpx-accel-ops.c b/target/i386/whpx/whpx-accel-ops.c index da58805b1a65e..f75886128d0d3 100644 --- a/target/i386/whpx/whpx-accel-ops.c +++ b/target/i386/whpx/whpx-accel-ops.c @@ -42,16 +42,14 @@ static void *whpx_cpu_thread_fn(void *arg) qemu_guest_random_seed_thread_part2(cpu->random_seed); do { + qemu_process_cpu_events(cpu); + if (cpu_can_run(cpu)) { r = whpx_vcpu_exec(cpu); if (r == EXCP_DEBUG) { cpu_handle_guest_debug(cpu); } } - while (cpu_thread_is_idle(cpu)) { - qemu_cond_wait_bql(cpu->halt_cond); - } - qemu_wait_io_event_common(cpu); } while (!cpu->unplug || cpu_can_run(cpu)); whpx_destroy_vcpu(cpu); diff --git a/target/i386/whpx/whpx-all.c b/target/i386/whpx/whpx-all.c index b72dcff3c8d46..256761834c975 100644 --- a/target/i386/whpx/whpx-all.c +++ b/target/i386/whpx/whpx-all.c @@ -788,8 +788,11 @@ static HRESULT CALLBACK whpx_emu_mmio_callback( void *ctx, WHV_EMULATOR_MEMORY_ACCESS_INFO *ma) { - cpu_physical_memory_rw(ma->GpaAddress, ma->Data, ma->AccessSize, - ma->Direction); + CPUState *cs = (CPUState *)ctx; + AddressSpace *as = cpu_addressspace(cs, MEMTXATTRS_UNSPECIFIED); + + address_space_rw(as, ma->GpaAddress, MEMTXATTRS_UNSPECIFIED, + ma->Data, ma->AccessSize, ma->Direction); return S_OK; } @@ -1436,9 +1439,9 @@ static int whpx_handle_halt(CPUState *cpu) int ret = 0; bql_lock(); - if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + if (!(cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) && (cpu_env(cpu)->eflags & IF_MASK)) && - !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + !cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { cpu->exception_index = EXCP_HLT; cpu->halted = true; ret = 1; @@ -1469,16 +1472,16 @@ static void whpx_vcpu_pre_run(CPUState *cpu) /* Inject NMI */ if (!vcpu->interruption_pending && - cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { - if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { - cpu->interrupt_request &= 
~CPU_INTERRUPT_NMI; + cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_NMI); vcpu->interruptable = false; new_int.InterruptionType = WHvX64PendingNmi; new_int.InterruptionPending = 1; new_int.InterruptionVector = 2; } - if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { - cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SMI)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_SMI); } } @@ -1486,13 +1489,13 @@ static void whpx_vcpu_pre_run(CPUState *cpu) * Force the VCPU out of its inner loop to process any INIT requests or * commit pending TPR access. */ - if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { - if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { - cpu->exit_request = 1; + qatomic_set(&cpu->exit_request, true); } - if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { - cpu->exit_request = 1; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) { + qatomic_set(&cpu->exit_request, true); } } @@ -1501,8 +1504,8 @@ static void whpx_vcpu_pre_run(CPUState *cpu) if (!vcpu->interruption_pending && vcpu->interruptable && (env->eflags & IF_MASK)) { assert(!new_int.InterruptionPending); - if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { - cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); irq = cpu_get_pic_interrupt(env); if (irq >= 0) { new_int.InterruptionType = WHvX64PendingInterrupt; @@ -1519,8 +1522,8 @@ static void whpx_vcpu_pre_run(CPUState *cpu) reg_count += 1; } } else if (vcpu->ready_for_pic_interrupt && - (cpu->interrupt_request & CPU_INTERRUPT_HARD)) { - cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; + cpu_test_interrupt(cpu, 
CPU_INTERRUPT_HARD)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_HARD); irq = cpu_get_pic_interrupt(env); if (irq >= 0) { reg_names[reg_count] = WHvRegisterPendingEvent; @@ -1539,14 +1542,14 @@ static void whpx_vcpu_pre_run(CPUState *cpu) if (tpr != vcpu->tpr) { vcpu->tpr = tpr; reg_values[reg_count].Reg64 = tpr; - cpu->exit_request = 1; + qatomic_set(&cpu->exit_request, true); reg_names[reg_count] = WHvX64RegisterCr8; reg_count += 1; } /* Update the state of the interrupt delivery notification */ if (!vcpu->window_registered && - cpu->interrupt_request & CPU_INTERRUPT_HARD) { + cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD)) { reg_values[reg_count].DeliverabilityNotifications = (WHV_X64_DELIVERABILITY_NOTIFICATIONS_REGISTER) { .InterruptNotification = 1 @@ -1599,31 +1602,31 @@ static void whpx_vcpu_process_async_events(CPUState *cpu) CPUX86State *env = &x86_cpu->env; AccelCPUState *vcpu = cpu->accel; - if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { whpx_cpu_synchronize_state(cpu); do_cpu_init(x86_cpu); vcpu->interruptable = true; } - if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { - cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_POLL)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL); apic_poll_irq(x86_cpu->apic_state); } - if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) && + if ((cpu_test_interrupt(cpu, CPU_INTERRUPT_HARD) && (env->eflags & IF_MASK)) || - (cpu->interrupt_request & CPU_INTERRUPT_NMI)) { + cpu_test_interrupt(cpu, CPU_INTERRUPT_NMI)) { cpu->halted = false; } - if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_SIPI)) { whpx_cpu_synchronize_state(cpu); do_cpu_sipi(x86_cpu); } - if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { - cpu->interrupt_request &= ~CPU_INTERRUPT_TPR; + if (cpu_test_interrupt(cpu, CPU_INTERRUPT_TPR)) { + cpu_reset_interrupt(cpu, CPU_INTERRUPT_TPR); 
whpx_cpu_synchronize_state(cpu); apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip, env->tpr_access_type); @@ -1714,7 +1717,8 @@ static int whpx_vcpu_run(CPUState *cpu) if (exclusive_step_mode == WHPX_STEP_NONE) { whpx_vcpu_pre_run(cpu); - if (qatomic_read(&cpu->exit_request)) { + /* Corresponding store-release is in cpu_exit. */ + if (qatomic_load_acquire(&cpu->exit_request)) { whpx_vcpu_kick(cpu); } } @@ -2049,8 +2053,6 @@ static int whpx_vcpu_run(CPUState *cpu) whpx_last_vcpu_stopping(cpu); } - qatomic_set(&cpu->exit_request, false); - return ret < 0; } diff --git a/target/i386/whpx/whpx-apic.c b/target/i386/whpx/whpx-apic.c index e1ef6d4e6d4df..afcb25843b515 100644 --- a/target/i386/whpx/whpx-apic.c +++ b/target/i386/whpx/whpx-apic.c @@ -151,9 +151,8 @@ static void whpx_apic_put(CPUState *cs, run_on_cpu_data data) } } -void whpx_apic_get(DeviceState *dev) +void whpx_apic_get(APICCommonState *s) { - APICCommonState *s = APIC_COMMON(dev); CPUState *cpu = CPU(s->cpu); struct whpx_lapic_state kapic; diff --git a/target/i386/whpx/whpx-internal.h b/target/i386/whpx/whpx-internal.h index 6633e9c4ca31f..2dcad1f56502c 100644 --- a/target/i386/whpx/whpx-internal.h +++ b/target/i386/whpx/whpx-internal.h @@ -5,6 +5,8 @@ #include #include +#include "hw/i386/apic.h" + typedef enum WhpxBreakpointState { WHPX_BP_CLEARED = 0, WHPX_BP_SET_PENDING, @@ -44,7 +46,7 @@ struct whpx_state { }; extern struct whpx_state whpx_global; -void whpx_apic_get(DeviceState *s); +void whpx_apic_get(APICCommonState *s); #define WHV_E_UNKNOWN_CAPABILITY 0x80370300L diff --git a/target/loongarch/cpu-csr.h b/target/loongarch/cpu-csr.h index 0834e91f30e3b..9097fddee109a 100644 --- a/target/loongarch/cpu-csr.h +++ b/target/loongarch/cpu-csr.h @@ -34,11 +34,13 @@ FIELD(CSR_MISC, ALCL, 12, 4) FIELD(CSR_MISC, DWPL, 16, 3) #define LOONGARCH_CSR_ECFG 0x4 /* Exception config */ -FIELD(CSR_ECFG, LIE, 0, 13) +FIELD(CSR_ECFG, LIE, 0, 15) /* bit 15 is msg interrupt enabled */ +FIELD(CSR_ECFG, MSGINT, 
14, 1) FIELD(CSR_ECFG, VS, 16, 3) #define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */ -FIELD(CSR_ESTAT, IS, 0, 13) +FIELD(CSR_ESTAT, IS, 0, 15) /* bit 15 is msg interrupt enabled */ +FIELD(CSR_ESTAT, MSGINT, 14, 1) FIELD(CSR_ESTAT, ECODE, 16, 6) FIELD(CSR_ESTAT, ESUBCODE, 22, 9) @@ -106,6 +108,7 @@ FIELD(CSR_PWCH, DIR4_WIDTH, 18, 6) #define LOONGARCH_CSR_STLBPS 0x1e /* Stlb page size */ FIELD(CSR_STLBPS, PS, 0, 5) +FIELD(CSR_STLBPS, RESERVE, 5, 27) #define LOONGARCH_CSR_RVACFG 0x1f /* Reduced virtual address config */ FIELD(CSR_RVACFG, RBITS, 0, 4) @@ -186,6 +189,9 @@ FIELD(CSR_MERRCTL, ISMERR, 0, 1) #define LOONGARCH_CSR_CTAG 0x98 /* TagLo + TagHi */ +#define LOONGARCH_CSR_MSGIS(N) (0xa0 + N) +#define LOONGARCH_CSR_MSGIR 0xa4 + /* Direct map windows CSRs*/ #define LOONGARCH_CSR_DMW(N) (0x180 + N) FIELD(CSR_DMW, PLV0, 0, 1) diff --git a/target/loongarch/cpu-mmu.h b/target/loongarch/cpu-mmu.h new file mode 100644 index 0000000000000..dbc69c7c0f2d8 --- /dev/null +++ b/target/loongarch/cpu-mmu.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch CPU parameters for QEMU. 
+ * + * Copyright (c) 2025 Loongson Technology Corporation Limited + */ + +#ifndef LOONGARCH_CPU_MMU_H +#define LOONGARCH_CPU_MMU_H + +typedef enum TLBRet { + TLBRET_MATCH, + TLBRET_BADADDR, + TLBRET_NOMATCH, + TLBRET_INVALID, + TLBRET_DIRTY, + TLBRET_RI, + TLBRET_XI, + TLBRET_PE, +} TLBRet; + +typedef struct MMUContext { + vaddr addr; + uint64_t pte; + hwaddr physical; + int ps; /* page size shift */ + int prot; +} MMUContext; + +bool check_ps(CPULoongArchState *ent, uint8_t ps); +TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context, + MMUAccessType access_type, int mmu_idx); +TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context, + MMUAccessType access_type, int mmu_idx, + int is_debug); +void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, + uint64_t *dir_width, unsigned int level); +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); + +#endif /* LOONGARCH_CPU_MMU_H */ diff --git a/target/loongarch/cpu.c b/target/loongarch/cpu.c index abad84c054707..86490e0f7285d 100644 --- a/target/loongarch/cpu.c +++ b/target/loongarch/cpu.c @@ -17,6 +17,7 @@ #include "hw/qdev-properties.h" #include "exec/translation-block.h" #include "cpu.h" +#include "cpu-mmu.h" #include "internals.h" #include "fpu/softfloat-helpers.h" #include "csr.h" @@ -27,11 +28,6 @@ #ifdef CONFIG_KVM #include #endif -#ifdef CONFIG_TCG -#include "accel/tcg/cpu-ldst.h" -#include "accel/tcg/cpu-ops.h" -#include "tcg/tcg.h" -#endif #include "tcg/tcg_loongarch.h" const char * const regnames[32] = { @@ -48,62 +44,6 @@ const char * const fregnames[32] = { "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", }; -struct TypeExcp { - int32_t exccode; - const char * const name; -}; - -static const struct TypeExcp excp_names[] = { - {EXCCODE_INT, "Interrupt"}, - {EXCCODE_PIL, "Page invalid exception for load"}, - {EXCCODE_PIS, "Page invalid exception for store"}, - {EXCCODE_PIF, "Page invalid exception for fetch"}, - {EXCCODE_PME, "Page 
modified exception"}, - {EXCCODE_PNR, "Page Not Readable exception"}, - {EXCCODE_PNX, "Page Not Executable exception"}, - {EXCCODE_PPI, "Page Privilege error"}, - {EXCCODE_ADEF, "Address error for instruction fetch"}, - {EXCCODE_ADEM, "Address error for Memory access"}, - {EXCCODE_SYS, "Syscall"}, - {EXCCODE_BRK, "Break"}, - {EXCCODE_INE, "Instruction Non-Existent"}, - {EXCCODE_IPE, "Instruction privilege error"}, - {EXCCODE_FPD, "Floating Point Disabled"}, - {EXCCODE_FPE, "Floating Point Exception"}, - {EXCCODE_DBP, "Debug breakpoint"}, - {EXCCODE_BCE, "Bound Check Exception"}, - {EXCCODE_SXD, "128 bit vector instructions Disable exception"}, - {EXCCODE_ASXD, "256 bit vector instructions Disable exception"}, - {EXCP_HLT, "EXCP_HLT"}, -}; - -const char *loongarch_exception_name(int32_t exception) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(excp_names); i++) { - if (excp_names[i].exccode == exception) { - return excp_names[i].name; - } - } - return "Unknown"; -} - -void G_NORETURN do_raise_exception(CPULoongArchState *env, - uint32_t exception, - uintptr_t pc) -{ - CPUState *cs = env_cpu(env); - - qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n", - __func__, - exception, - loongarch_exception_name(exception)); - cs->exception_index = exception; - - cpu_loop_exit_restore(cs, pc); -} - static void loongarch_cpu_set_pc(CPUState *cs, vaddr value) { set_pc(cpu_env(cs), value); @@ -139,18 +79,8 @@ void loongarch_cpu_set_irq(void *opaque, int irq, int level) } } -static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env) -{ - bool ret = 0; - - ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) && - !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST))); - - return ret; -} - /* Check if there is pending and not masked out interrupt */ -static inline bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) +bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) { uint32_t pending; uint32_t status; @@ -162,264 +92,156 @@ static inline bool 
cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env) } #endif -#ifdef CONFIG_TCG #ifndef CONFIG_USER_ONLY -static void loongarch_cpu_do_interrupt(CPUState *cs) +bool loongarch_cpu_has_work(CPUState *cs) { - CPULoongArchState *env = cpu_env(cs); - bool update_badinstr = 1; - int cause = -1; - bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR); - uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS); - - if (cs->exception_index != EXCCODE_INT) { - qemu_log_mask(CPU_LOG_INT, - "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx - " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n", - __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA, - cs->exception_index, - loongarch_exception_name(cs->exception_index)); - } - - switch (cs->exception_index) { - case EXCCODE_DBP: - env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1); - env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC); - goto set_DERA; - set_DERA: - env->CSR_DERA = env->pc; - env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1); - set_pc(env, env->CSR_EENTRY + 0x480); - break; - case EXCCODE_INT: - if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) { - env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1); - goto set_DERA; - } - QEMU_FALLTHROUGH; - case EXCCODE_PIF: - case EXCCODE_ADEF: - cause = cs->exception_index; - update_badinstr = 0; - break; - case EXCCODE_SYS: - case EXCCODE_BRK: - case EXCCODE_INE: - case EXCCODE_IPE: - case EXCCODE_FPD: - case EXCCODE_FPE: - case EXCCODE_SXD: - case EXCCODE_ASXD: - env->CSR_BADV = env->pc; - QEMU_FALLTHROUGH; - case EXCCODE_BCE: - case EXCCODE_ADEM: - case EXCCODE_PIL: - case EXCCODE_PIS: - case EXCCODE_PME: - case EXCCODE_PNR: - case EXCCODE_PNX: - case EXCCODE_PPI: - cause = cs->exception_index; - break; - default: - qemu_log("Error: exception(%d) has not been supported\n", - cs->exception_index); - abort(); - } - - if (update_badinstr) { - env->CSR_BADI = cpu_ldl_code(env, env->pc); - } + bool has_work = false; - /* Save PLV and IE 
*/ - if (tlbfill) { - env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV, - FIELD_EX64(env->CSR_CRMD, - CSR_CRMD, PLV)); - env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE, - FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE)); - /* set the DA mode */ - env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1); - env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0); - env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, - PC, (env->pc >> 2)); - } else { - env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE, - EXCODE_MCODE(cause)); - env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE, - EXCODE_SUBCODE(cause)); - env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV, - FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV)); - env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE, - FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE)); - env->CSR_ERA = env->pc; + if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && + cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) { + has_work = true; } - env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0); - env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0); + return has_work; +} +#endif /* !CONFIG_USER_ONLY */ - if (vec_size) { - vec_size = (1 << vec_size) * 4; - } +static void loongarch_la464_init_csr(Object *obj) +{ +#ifndef CONFIG_USER_ONLY + static bool initialized; + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + CPULoongArchState *env = &cpu->env; + int i, num; - if (cs->exception_index == EXCCODE_INT) { - /* Interrupt */ - uint32_t vector = 0; - uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS); - pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE); - - /* Find the highest-priority interrupt. 
*/ - vector = 31 - clz32(pending); - set_pc(env, env->CSR_EENTRY + \ - (EXCCODE_EXTERNAL_INT + vector) * vec_size); - qemu_log_mask(CPU_LOG_INT, - "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx - " cause %d\n" " A " TARGET_FMT_lx " D " - TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS" - TARGET_FMT_lx "\n", - __func__, env->pc, env->CSR_ERA, - cause, env->CSR_BADV, env->CSR_DERA, vector, - env->CSR_ECFG, env->CSR_ESTAT); - } else { - if (tlbfill) { - set_pc(env, env->CSR_TLBRENTRY); - } else { - set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size); + if (!initialized) { + initialized = true; + num = FIELD_EX64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM); + for (i = num; i < 16; i++) { + set_csr_flag(LOONGARCH_CSR_SAVE(i), CSRFL_UNUSED); } - qemu_log_mask(CPU_LOG_INT, - "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx - " cause %d%s\n, ESTAT " TARGET_FMT_lx - " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx - "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu - " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc, - tlbfill ? env->CSR_TLBRERA : env->CSR_ERA, - cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT, - env->CSR_ECFG, - tlbfill ? 
env->CSR_TLBRBADV : env->CSR_BADV, - env->CSR_BADI, env->gpr[11], cs->cpu_index, - env->CSR_ASID); + set_csr_flag(LOONGARCH_CSR_IMPCTL1, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_IMPCTL2, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRCTL, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRINFO1, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRINFO2, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRENTRY, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRERA, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_MERRSAVE, CSRFL_UNUSED); + set_csr_flag(LOONGARCH_CSR_CTAG, CSRFL_UNUSED); } - cs->exception_index = -1; +#endif } -static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, - vaddr addr, unsigned size, - MMUAccessType access_type, - int mmu_idx, MemTxAttrs attrs, - MemTxResult response, - uintptr_t retaddr) +static bool loongarch_get_lsx(Object *obj, Error **errp) { - CPULoongArchState *env = cpu_env(cs); - - if (access_type == MMU_INST_FETCH) { - do_raise_exception(env, EXCCODE_ADEF, retaddr); - } else { - do_raise_exception(env, EXCCODE_ADEM, retaddr); - } + return LOONGARCH_CPU(obj)->lsx != ON_OFF_AUTO_OFF; } -static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +static void loongarch_set_lsx(Object *obj, bool value, Error **errp) { - if (interrupt_request & CPU_INTERRUPT_HARD) { - CPULoongArchState *env = cpu_env(cs); - - if (cpu_loongarch_hw_interrupts_enabled(env) && - cpu_loongarch_hw_interrupts_pending(env)) { - /* Raise it */ - cs->exception_index = EXCCODE_INT; - loongarch_cpu_do_interrupt(cs); - return true; + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + uint32_t val; + + cpu->lsx = value ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + if (cpu->lsx == ON_OFF_AUTO_OFF) { + cpu->lasx = ON_OFF_AUTO_OFF; + if (cpu->lasx == ON_OFF_AUTO_ON) { + error_setg(errp, "Failed to disable LSX since LASX is enabled"); + return; } } - return false; -} -static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx, - vaddr result, vaddr base) -{ - return is_va32(cpu_env(cs)) ? (uint32_t)result : result; -} -#endif - -static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs) -{ - CPULoongArchState *env = cpu_env(cs); - uint32_t flags; + if (kvm_enabled()) { + /* kvm feature detection in function kvm_arch_init_vcpu */ + return; + } - flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK); - flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE; - flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE; - flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE; - flags |= is_va32(env) * HW_FLAGS_VA32; + /* LSX feature detection in TCG mode */ + val = cpu->env.cpucfg[2]; + if (cpu->lsx == ON_OFF_AUTO_ON) { + if (FIELD_EX32(val, CPUCFG2, LSX) == 0) { + error_setg(errp, "Failed to enable LSX in TCG mode"); + return; + } + } else { + cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, 0); + val = cpu->env.cpucfg[2]; + } - return (TCGTBCPUState){ .pc = env->pc, .flags = flags }; + cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LSX, value); } -static void loongarch_cpu_synchronize_from_tb(CPUState *cs, - const TranslationBlock *tb) +static bool loongarch_get_lasx(Object *obj, Error **errp) { - tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL)); - set_pc(cpu_env(cs), tb->pc); + return LOONGARCH_CPU(obj)->lasx != ON_OFF_AUTO_OFF; } -static void loongarch_restore_state_to_opc(CPUState *cs, - const TranslationBlock *tb, - const uint64_t *data) +static void loongarch_set_lasx(Object *obj, bool value, Error **errp) { - set_pc(cpu_env(cs), data[0]); -} -#endif /* CONFIG_TCG */ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + uint32_t val; 
-#ifndef CONFIG_USER_ONLY -static bool loongarch_cpu_has_work(CPUState *cs) -{ - bool has_work = false; + cpu->lasx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + if ((cpu->lsx == ON_OFF_AUTO_OFF) && (cpu->lasx == ON_OFF_AUTO_ON)) { + error_setg(errp, "Failed to enable LASX since LSX is disabled"); + return; + } - if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && - cpu_loongarch_hw_interrupts_pending(cpu_env(cs))) { - has_work = true; + if (kvm_enabled()) { + /* kvm feature detection in function kvm_arch_init_vcpu */ + return; } - return has_work; + /* LASX feature detection in TCG mode */ + val = cpu->env.cpucfg[2]; + if (cpu->lasx == ON_OFF_AUTO_ON) { + if (FIELD_EX32(val, CPUCFG2, LASX) == 0) { + error_setg(errp, "Failed to enable LASX in TCG mode"); + return; + } + } + + cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, value); } -#endif /* !CONFIG_USER_ONLY */ -static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch) +static bool loongarch_get_msgint(Object *obj, Error **errp) { - CPULoongArchState *env = cpu_env(cs); + return LOONGARCH_CPU(obj)->msgint != ON_OFF_AUTO_OFF; +} - if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) { - return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV); +static void loongarch_set_msgint(Object *obj, bool value, Error **errp) +{ + LoongArchCPU *cpu = LOONGARCH_CPU(obj); + + cpu->msgint = value ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + + if (kvm_enabled()) { + /* kvm feature detection in function kvm_arch_init_vcpu */ + return; } - return MMU_DA_IDX; + + cpu->env.cpucfg[1] = FIELD_DP32(cpu->env.cpucfg[1], CPUCFG1, MSG_INT, value); } -static void loongarch_la464_init_csr(Object *obj) +static void loongarch_cpu_post_init(Object *obj) { -#ifndef CONFIG_USER_ONLY - static bool initialized; LoongArchCPU *cpu = LOONGARCH_CPU(obj); - CPULoongArchState *env = &cpu->env; - int i, num; - if (!initialized) { - initialized = true; - num = FIELD_EX64(env->CSR_PRCFG1, CSR_PRCFG1, SAVE_NUM); - for (i = num; i < 16; i++) { - set_csr_flag(LOONGARCH_CSR_SAVE(i), CSRFL_UNUSED); - } - set_csr_flag(LOONGARCH_CSR_IMPCTL1, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_IMPCTL2, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_MERRCTL, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_MERRINFO1, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_MERRINFO2, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_MERRENTRY, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_MERRERA, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_MERRSAVE, CSRFL_UNUSED); - set_csr_flag(LOONGARCH_CSR_CTAG, CSRFL_UNUSED); + cpu->lbt = ON_OFF_AUTO_OFF; + cpu->pmu = ON_OFF_AUTO_OFF; + cpu->lsx = ON_OFF_AUTO_AUTO; + cpu->lasx = ON_OFF_AUTO_AUTO; + object_property_add_bool(obj, "lsx", loongarch_get_lsx, + loongarch_set_lsx); + object_property_add_bool(obj, "lasx", loongarch_get_lasx, + loongarch_set_lasx); + object_property_add_bool(obj, "msgint", loongarch_get_msgint, + loongarch_set_msgint); + /* lbt is enabled only in kvm mode, not supported in tcg mode */ + if (kvm_enabled()) { + kvm_loongarch_cpu_post_init(cpu); } -#endif } static void loongarch_la464_initfn(Object *obj) @@ -523,6 +345,7 @@ static void loongarch_la464_initfn(Object *obj) env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_WAYS, 7); env->CSR_PRCFG3 = FIELD_DP64(env->CSR_PRCFG3, CSR_PRCFG3, STLB_SETS, 8); + cpu->msgint = ON_OFF_AUTO_OFF; loongarch_la464_init_csr(obj); 
loongarch_cpu_post_init(obj); } @@ -553,12 +376,19 @@ static void loongarch_la132_initfn(Object *obj) data = FIELD_DP32(data, CPUCFG1, HP, 1); data = FIELD_DP32(data, CPUCFG1, CRC, 1); env->cpucfg[1] = data; + cpu->msgint = ON_OFF_AUTO_OFF; } static void loongarch_max_initfn(Object *obj) { + LoongArchCPU *cpu = LOONGARCH_CPU(obj); /* '-cpu max' for TCG: we use cpu la464. */ loongarch_la464_initfn(obj); + + if (tcg_enabled()) { + cpu->env.cpucfg[1] = FIELD_DP32(cpu->env.cpucfg[1], CPUCFG1, MSG_INT, 1); + cpu->msgint = ON_OFF_AUTO_AUTO; + } } static void loongarch_cpu_reset_hold(Object *obj, ResetType type) @@ -683,96 +513,6 @@ static void loongarch_cpu_unrealizefn(DeviceState *dev) lacc->parent_unrealize(dev); } -static bool loongarch_get_lsx(Object *obj, Error **errp) -{ - return LOONGARCH_CPU(obj)->lsx != ON_OFF_AUTO_OFF; -} - -static void loongarch_set_lsx(Object *obj, bool value, Error **errp) -{ - LoongArchCPU *cpu = LOONGARCH_CPU(obj); - uint32_t val; - - cpu->lsx = value ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; - if (cpu->lsx == ON_OFF_AUTO_OFF) { - cpu->lasx = ON_OFF_AUTO_OFF; - if (cpu->lasx == ON_OFF_AUTO_ON) { - error_setg(errp, "Failed to disable LSX since LASX is enabled"); - return; - } - } - - if (kvm_enabled()) { - /* kvm feature detection in function kvm_arch_init_vcpu */ - return; - } - - /* LSX feature detection in TCG mode */ - val = cpu->env.cpucfg[2]; - if (cpu->lsx == ON_OFF_AUTO_ON) { - if (FIELD_EX32(val, CPUCFG2, LSX) == 0) { - error_setg(errp, "Failed to enable LSX in TCG mode"); - return; - } - } else { - cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, 0); - val = cpu->env.cpucfg[2]; - } - - cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LSX, value); -} - -static bool loongarch_get_lasx(Object *obj, Error **errp) -{ - return LOONGARCH_CPU(obj)->lasx != ON_OFF_AUTO_OFF; -} - -static void loongarch_set_lasx(Object *obj, bool value, Error **errp) -{ - LoongArchCPU *cpu = LOONGARCH_CPU(obj); - uint32_t val; - - cpu->lasx = value ? 
ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; - if ((cpu->lsx == ON_OFF_AUTO_OFF) && (cpu->lasx == ON_OFF_AUTO_ON)) { - error_setg(errp, "Failed to enable LASX since lSX is disabled"); - return; - } - - if (kvm_enabled()) { - /* kvm feature detection in function kvm_arch_init_vcpu */ - return; - } - - /* LASX feature detection in TCG mode */ - val = cpu->env.cpucfg[2]; - if (cpu->lasx == ON_OFF_AUTO_ON) { - if (FIELD_EX32(val, CPUCFG2, LASX) == 0) { - error_setg(errp, "Failed to enable LASX in TCG mode"); - return; - } - } - - cpu->env.cpucfg[2] = FIELD_DP32(val, CPUCFG2, LASX, value); -} - -void loongarch_cpu_post_init(Object *obj) -{ - LoongArchCPU *cpu = LOONGARCH_CPU(obj); - - cpu->lbt = ON_OFF_AUTO_OFF; - cpu->pmu = ON_OFF_AUTO_OFF; - cpu->lsx = ON_OFF_AUTO_AUTO; - cpu->lasx = ON_OFF_AUTO_AUTO; - object_property_add_bool(obj, "lsx", loongarch_get_lsx, - loongarch_set_lsx); - object_property_add_bool(obj, "lasx", loongarch_get_lasx, - loongarch_set_lasx); - /* lbt is enabled only in kvm mode, not supported in tcg mode */ - if (kvm_enabled()) { - kvm_loongarch_cpu_post_init(cpu); - } -} - static void loongarch_cpu_init(Object *obj) { #ifndef CONFIG_USER_ONLY @@ -881,30 +621,6 @@ static void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } -#ifdef CONFIG_TCG -static const TCGCPUOps loongarch_tcg_ops = { - .guest_default_memory_order = 0, - .mttcg_supported = true, - - .initialize = loongarch_translate_init, - .translate_code = loongarch_translate_code, - .get_tb_cpu_state = loongarch_get_tb_cpu_state, - .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, - .restore_state_to_opc = loongarch_restore_state_to_opc, - .mmu_index = loongarch_cpu_mmu_index, - -#ifndef CONFIG_USER_ONLY - .tlb_fill = loongarch_cpu_tlb_fill, - .pointer_wrap = loongarch_pointer_wrap, - .cpu_exec_interrupt = loongarch_cpu_exec_interrupt, - .cpu_exec_halt = loongarch_cpu_has_work, - .cpu_exec_reset = cpu_reset, - .do_interrupt = loongarch_cpu_do_interrupt, - .do_transaction_failed = 
loongarch_cpu_do_transaction_failed, -#endif -}; -#endif /* CONFIG_TCG */ - #ifndef CONFIG_USER_ONLY #include "hw/core/sysemu-cpu-ops.h" diff --git a/target/loongarch/cpu.h b/target/loongarch/cpu.h index 9538e8d61d75a..b8e3b46c3a012 100644 --- a/target/loongarch/cpu.h +++ b/target/loongarch/cpu.h @@ -21,27 +21,6 @@ #include "cpu-csr.h" #include "cpu-qom.h" -#define IOCSRF_TEMP 0 -#define IOCSRF_NODECNT 1 -#define IOCSRF_MSI 2 -#define IOCSRF_EXTIOI 3 -#define IOCSRF_CSRIPI 4 -#define IOCSRF_FREQCSR 5 -#define IOCSRF_FREQSCALE 6 -#define IOCSRF_DVFSV1 7 -#define IOCSRF_GMOD 9 -#define IOCSRF_VM 11 - -#define VERSION_REG 0x0 -#define FEATURE_REG 0x8 -#define VENDOR_REG 0x10 -#define CPUNAME_REG 0x20 -#define MISC_FUNC_REG 0x420 -#define IOCSRM_EXTIOI_EN 48 -#define IOCSRM_EXTIOI_INT_ENCODE 49 - -#define IOCSR_MEM_SIZE 0x428 - #define FCSR0_M1 0x1f /* FCSR1 mask, Enables */ #define FCSR0_M2 0x1f1f0000 /* FCSR2 mask, Cause and Flags */ #define FCSR0_M3 0x300 /* FCSR3 mask, Round Mode */ @@ -238,9 +217,10 @@ FIELD(CSR_CRMD, WE, 9, 1) extern const char * const regnames[32]; extern const char * const fregnames[32]; -#define N_IRQS 13 +#define N_IRQS 15 #define IRQ_TIMER 11 #define IRQ_IPI 12 +#define INT_DMSI 14 #define LOONGARCH_STLB 2048 /* 2048 STLB */ #define LOONGARCH_MTLB 64 /* 64 MTLB */ @@ -254,6 +234,13 @@ FIELD(TLB_MISC, ASID, 1, 10) FIELD(TLB_MISC, VPPN, 13, 35) FIELD(TLB_MISC, PS, 48, 6) +/*Msg interrupt registers */ +#define N_MSGIS 4 +FIELD(CSR_MSGIS, IS, 0, 63) +FIELD(CSR_MSGIR, INTNUM, 0, 8) +FIELD(CSR_MSGIR, ACTIVE, 31, 1) +FIELD(CSR_MSGIE, PT, 0, 8) + #define LSX_LEN (128) #define LASX_LEN (256) @@ -371,6 +358,10 @@ typedef struct CPUArchState { uint64_t CSR_DBG; uint64_t CSR_DERA; uint64_t CSR_DSAVE; + /* Msg interrupt registers */ + uint64_t CSR_MSGIS[N_MSGIS]; + uint64_t CSR_MSGIR; + uint64_t CSR_MSGIE; struct { uint64_t guest_addr; } stealtime; @@ -387,11 +378,7 @@ typedef struct CPUArchState { #endif AddressSpace *address_space_iocsr; - bool 
load_elf; - uint64_t elf_address; uint32_t mp_state; - - struct loongarch_boot_info *boot_info; #endif } CPULoongArchState; @@ -417,6 +404,7 @@ struct ArchCPU { OnOffAuto pmu; OnOffAuto lsx; OnOffAuto lasx; + OnOffAuto msgint; OnOffAuto kvm_pv_ipi; OnOffAuto kvm_steal_time; int32_t socket_id; /* socket-id of this CPU */ @@ -494,15 +482,4 @@ static inline void set_pc(CPULoongArchState *env, uint64_t value) #define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU -void loongarch_cpu_post_init(Object *obj); - -#ifdef CONFIG_KVM -void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu); -#else -static inline void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu) -{ -} -#endif -void kvm_loongarch_init_irq_routing(void); - #endif /* LOONGARCH_CPU_H */ diff --git a/target/loongarch/cpu_helper.c b/target/loongarch/cpu_helper.c index e172b11ce1546..867e7c88670a7 100644 --- a/target/loongarch/cpu_helper.c +++ b/target/loongarch/cpu_helper.c @@ -13,10 +13,11 @@ #include "exec/target_page.h" #include "internals.h" #include "cpu-csr.h" +#include "cpu-mmu.h" #include "tcg/tcg_loongarch.h" void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, - uint64_t *dir_width, target_ulong level) + uint64_t *dir_width, unsigned int level) { switch (level) { case 1: @@ -43,15 +44,79 @@ void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, } } -static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address) +TLBRet loongarch_check_pte(CPULoongArchState *env, MMUContext *context, + MMUAccessType access_type, int mmu_idx) +{ + uint64_t plv = mmu_idx; + uint64_t tlb_entry, tlb_ppn; + uint8_t tlb_ps, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; + + tlb_entry = context->pte; + tlb_ps = context->ps; + tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); + tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); + tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); + if (is_la64(env)) { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); + tlb_nx = 
FIELD_EX64(tlb_entry, TLBENTRY_64, NX); + tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); + tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); + } else { + tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); + tlb_nx = 0; + tlb_nr = 0; + tlb_rplv = 0; + } + + /* Remove sw bit between bit12 -- bit PS*/ + tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1)); + + /* Check access rights */ + if (!tlb_v) { + return TLBRET_INVALID; + } + + if (access_type == MMU_INST_FETCH && tlb_nx) { + return TLBRET_XI; + } + + if (access_type == MMU_DATA_LOAD && tlb_nr) { + return TLBRET_RI; + } + + if (((tlb_rplv == 0) && (plv > tlb_plv)) || + ((tlb_rplv == 1) && (plv != tlb_plv))) { + return TLBRET_PE; + } + + if ((access_type == MMU_DATA_STORE) && !tlb_d) { + return TLBRET_DIRTY; + } + + context->physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | + (context->addr & MAKE_64BIT_MASK(0, tlb_ps)); + context->prot = PAGE_READ; + if (tlb_d) { + context->prot |= PAGE_WRITE; + } + if (!tlb_nx) { + context->prot |= PAGE_EXEC; + } + return TLBRET_MATCH; +} + +static TLBRet loongarch_page_table_walker(CPULoongArchState *env, + MMUContext *context, + int access_type, int mmu_idx) { CPUState *cs = env_cpu(env); target_ulong index, phys; uint64_t dir_base, dir_width; uint64_t base; int level; + vaddr address; + address = context->addr; if ((address >> 63) & 0x1) { base = env->CSR_PGDH; } else { @@ -93,41 +158,20 @@ static int loongarch_page_table_walker(CPULoongArchState *env, hwaddr *physical, base = ldq_phys(cs->as, phys); } - /* TODO: check plv and other bits? 
*/ - - /* base is pte, in normal pte format */ - if (!FIELD_EX64(base, TLBENTRY, V)) { - return TLBRET_NOMATCH; - } - - if (!FIELD_EX64(base, TLBENTRY, D)) { - *prot = PAGE_READ; - } else { - *prot = PAGE_READ | PAGE_WRITE; - } - - /* get TARGET_PAGE_SIZE aligned physical address */ - base += (address & TARGET_PHYS_MASK) & ((1 << dir_base) - 1); - /* mask RPLV, NX, NR bits */ - base = FIELD_DP64(base, TLBENTRY_64, RPLV, 0); - base = FIELD_DP64(base, TLBENTRY_64, NX, 0); - base = FIELD_DP64(base, TLBENTRY_64, NR, 0); - /* mask other attribute bits */ - *physical = base & TARGET_PAGE_MASK; - - return 0; + context->ps = dir_base; + context->pte = base; + return loongarch_check_pte(env, context, access_type, mmu_idx); } -static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx, - int is_debug) +static TLBRet loongarch_map_address(CPULoongArchState *env, + MMUContext *context, + MMUAccessType access_type, int mmu_idx, + int is_debug) { - int ret; + TLBRet ret; if (tcg_enabled()) { - ret = loongarch_get_addr_from_tlb(env, physical, prot, address, - access_type, mmu_idx); + ret = loongarch_get_addr_from_tlb(env, context, access_type, mmu_idx); if (ret != TLBRET_NOMATCH) { return ret; } @@ -139,14 +183,13 @@ static int loongarch_map_address(CPULoongArchState *env, hwaddr *physical, * legal mapping, even if the mapping is not yet in TLB. return 0 if * there is a valid map, else none zero. 
*/ - return loongarch_page_table_walker(env, physical, prot, address); + return loongarch_page_table_walker(env, context, access_type, mmu_idx); } return TLBRET_NOMATCH; } -static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, - target_ulong dmw) +static hwaddr dmw_va2pa(CPULoongArchState *env, vaddr va, target_ulong dmw) { if (is_la64(env)) { return va & TARGET_VIRT_MASK; @@ -157,9 +200,9 @@ static hwaddr dmw_va2pa(CPULoongArchState *env, target_ulong va, } } -int get_physical_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx, int is_debug) +TLBRet get_physical_address(CPULoongArchState *env, MMUContext *context, + MMUAccessType access_type, int mmu_idx, + int is_debug) { int user_mode = mmu_idx == MMU_USER_IDX; int kernel_mode = mmu_idx == MMU_KERNEL_IDX; @@ -167,11 +210,13 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical, int64_t addr_high; uint8_t da = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, DA); uint8_t pg = FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG); + vaddr address; /* Check PG and DA */ + address = context->addr; if (da & !pg) { - *physical = address & TARGET_PHYS_MASK; - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + context->physical = address & TARGET_PHYS_MASK; + context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TLBRET_MATCH; } @@ -189,32 +234,31 @@ int get_physical_address(CPULoongArchState *env, hwaddr *physical, base_c = FIELD_EX64(env->CSR_DMW[i], CSR_DMW_32, VSEG); } if ((plv & env->CSR_DMW[i]) && (base_c == base_v)) { - *physical = dmw_va2pa(env, address, env->CSR_DMW[i]); - *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + context->physical = dmw_va2pa(env, address, env->CSR_DMW[i]); + context->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TLBRET_MATCH; } } /* Check valid extension */ - addr_high = sextract64(address, TARGET_VIRT_ADDR_SPACE_BITS, 16); - if (!(addr_high == 0 || addr_high == -1)) { + addr_high = (int64_t)address >> 
(TARGET_VIRT_ADDR_SPACE_BITS - 1); + if (!(addr_high == 0 || addr_high == -1ULL)) { return TLBRET_BADADDR; } /* Mapped address */ - return loongarch_map_address(env, physical, prot, address, - access_type, mmu_idx, is_debug); + return loongarch_map_address(env, context, access_type, mmu_idx, is_debug); } hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { CPULoongArchState *env = cpu_env(cs); - hwaddr phys_addr; - int prot; + MMUContext context; - if (get_physical_address(env, &phys_addr, &prot, addr, MMU_DATA_LOAD, - cpu_mmu_index(cs, false), 1) != 0) { + context.addr = addr; + if (get_physical_address(env, &context, MMU_DATA_LOAD, + cpu_mmu_index(cs, false), 1) != TLBRET_MATCH) { return -1; } - return phys_addr; + return context.physical; } diff --git a/target/loongarch/csr.c b/target/loongarch/csr.c index 7ea0a3045066d..f973780bba367 100644 --- a/target/loongarch/csr.c +++ b/target/loongarch/csr.c @@ -97,6 +97,11 @@ static CSRInfo csr_info[] = { CSR_OFF(DBG), CSR_OFF(DERA), CSR_OFF(DSAVE), + CSR_OFF_ARRAY(MSGIS, 0), + CSR_OFF_ARRAY(MSGIS, 1), + CSR_OFF_ARRAY(MSGIS, 2), + CSR_OFF_ARRAY(MSGIS, 3), + CSR_OFF(MSGIR), }; CSRInfo *get_csr(unsigned int csr_num) diff --git a/target/loongarch/gdbstub.c b/target/loongarch/gdbstub.c index 471eda28c7307..23a5eecc20bd0 100644 --- a/target/loongarch/gdbstub.c +++ b/target/loongarch/gdbstub.c @@ -62,7 +62,7 @@ int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { CPULoongArchState *env = cpu_env(cs); - target_ulong tmp; + uint64_t tmp; int length = 0; if (n < 0 || n > 34) { diff --git a/target/loongarch/internals.h b/target/loongarch/internals.h index a7384b0d31515..8793bd9df65a6 100644 --- a/target/loongarch/internals.h +++ b/target/loongarch/internals.h @@ -24,27 +24,12 @@ void G_NORETURN do_raise_exception(CPULoongArchState *env, uint32_t exception, uintptr_t pc); -const char 
*loongarch_exception_name(int32_t exception); - #ifdef CONFIG_TCG int ieee_ex_to_loongarch(int xcpt); void restore_fp_status(CPULoongArchState *env); #endif #ifndef CONFIG_USER_ONLY -enum { - TLBRET_MATCH = 0, - TLBRET_BADADDR = 1, - TLBRET_NOMATCH = 2, - TLBRET_INVALID = 3, - TLBRET_DIRTY = 4, - TLBRET_RI = 5, - TLBRET_XI = 6, - TLBRET_PE = 7, -}; - -bool check_ps(CPULoongArchState *ent, uint8_t ps); - extern const VMStateDescription vmstate_loongarch_cpu; void loongarch_cpu_set_irq(void *opaque, int irq, int level); @@ -54,13 +39,8 @@ uint64_t cpu_loongarch_get_constant_timer_counter(LoongArchCPU *cpu); uint64_t cpu_loongarch_get_constant_timer_ticks(LoongArchCPU *cpu); void cpu_loongarch_store_constant_timer_config(LoongArchCPU *cpu, uint64_t value); -int get_physical_address(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx, int is_debug); -void get_dir_base_width(CPULoongArchState *env, uint64_t *dir_base, - uint64_t *dir_width, target_ulong level); -hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); - +bool loongarch_cpu_has_work(CPUState *cs); +bool cpu_loongarch_hw_interrupts_pending(CPULoongArchState *env); #endif /* !CONFIG_USER_ONLY */ uint64_t read_fcc(CPULoongArchState *env); diff --git a/target/loongarch/kvm/kvm.c b/target/loongarch/kvm/kvm.c index e5ea2dba9daa0..4e4f4e79f6472 100644 --- a/target/loongarch/kvm/kvm.c +++ b/target/loongarch/kvm/kvm.c @@ -325,7 +325,7 @@ static int kvm_loongarch_get_csr(CPUState *cs) return ret; } -static int kvm_loongarch_put_csr(CPUState *cs, int level) +static int kvm_loongarch_put_csr(CPUState *cs, KvmPutState level) { int ret = 0; CPULoongArchState *env = cpu_env(cs); @@ -763,7 +763,7 @@ int kvm_arch_get_registers(CPUState *cs, Error **errp) return ret; } -int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp) { int ret; static int once; diff 
--git a/target/loongarch/kvm/kvm_loongarch.h b/target/loongarch/kvm/kvm_loongarch.h index 1051a341ec263..51475675d6f6b 100644 --- a/target/loongarch/kvm/kvm_loongarch.h +++ b/target/loongarch/kvm/kvm_loongarch.h @@ -5,11 +5,11 @@ * Copyright (c) 2023 Loongson Technology Corporation Limited */ -#include "cpu.h" - #ifndef QEMU_KVM_LOONGARCH_H #define QEMU_KVM_LOONGARCH_H +void kvm_loongarch_cpu_post_init(LoongArchCPU *cpu); +void kvm_loongarch_init_irq_routing(void); int kvm_loongarch_set_interrupt(LoongArchCPU *cpu, int irq, int level); void kvm_arch_reset_vcpu(CPUState *cs); diff --git a/target/loongarch/machine.c b/target/loongarch/machine.c index 4e70f5c8798bd..0366a5076375e 100644 --- a/target/loongarch/machine.c +++ b/target/loongarch/machine.c @@ -45,6 +45,26 @@ static const VMStateDescription vmstate_fpu = { }, }; +static bool msgint_needed(void *opaque) +{ + LoongArchCPU *cpu = opaque; + + return FIELD_EX64(cpu->env.cpucfg[1], CPUCFG1, MSG_INT); +} + +static const VMStateDescription vmstate_msgint = { + .name = "cpu/msgint", + .version_id = 1, + .minimum_version_id = 1, + .needed = msgint_needed, + .fields = (const VMStateField[]) { + VMSTATE_UINT64_ARRAY(env.CSR_MSGIS, LoongArchCPU, N_MSGIS), + VMSTATE_UINT64(env.CSR_MSGIR, LoongArchCPU), + VMSTATE_UINT64(env.CSR_MSGIE, LoongArchCPU), + VMSTATE_END_OF_LIST() + }, +}; + static const VMStateDescription vmstate_lsxh_reg = { .name = "lsxh_reg", .version_id = 1, @@ -168,11 +188,11 @@ static const VMStateDescription vmstate_tlb = { /* LoongArch CPU state */ const VMStateDescription vmstate_loongarch_cpu = { .name = "cpu", - .version_id = 3, - .minimum_version_id = 3, + .version_id = 4, + .minimum_version_id = 4, .fields = (const VMStateField[]) { - VMSTATE_UINTTL_ARRAY(env.gpr, LoongArchCPU, 32), - VMSTATE_UINTTL(env.pc, LoongArchCPU), + VMSTATE_UINT64_ARRAY(env.gpr, LoongArchCPU, 32), + VMSTATE_UINT64(env.pc, LoongArchCPU), /* Remaining CSRs */ VMSTATE_UINT64(env.CSR_CRMD, LoongArchCPU), @@ -245,6 +265,7 @@ 
const VMStateDescription vmstate_loongarch_cpu = { &vmstate_tlb, #endif &vmstate_lbt, + &vmstate_msgint, NULL } }; diff --git a/target/loongarch/tcg/csr_helper.c b/target/loongarch/tcg/csr_helper.c index 28b1bb86bd977..5ebe15f9937bf 100644 --- a/target/loongarch/tcg/csr_helper.c +++ b/target/loongarch/tcg/csr_helper.c @@ -16,6 +16,7 @@ #include "accel/tcg/cpu-ldst.h" #include "hw/irq.h" #include "cpu-csr.h" +#include "cpu-mmu.h" target_ulong helper_csrwr_stlbps(CPULoongArchState *env, target_ulong val) { @@ -25,13 +26,14 @@ target_ulong helper_csrwr_stlbps(CPULoongArchState *env, target_ulong val) * The real hardware only supports the min tlb_ps is 12 * tlb_ps=0 may cause undefined-behavior. */ - uint8_t tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + uint8_t tlb_ps = FIELD_EX64(val, CSR_STLBPS, PS); if (!check_ps(env, tlb_ps)) { qemu_log_mask(LOG_GUEST_ERROR, "Attempted set ps %d\n", tlb_ps); } else { /* Only update PS field, reserved bit keeps zero */ - env->CSR_STLBPS = FIELD_DP64(old_v, CSR_STLBPS, PS, tlb_ps); + val = FIELD_DP64(val, CSR_STLBPS, RESERVE, 0); + env->CSR_STLBPS = val; } return old_v; @@ -72,6 +74,27 @@ target_ulong helper_csrrd_tval(CPULoongArchState *env) return cpu_loongarch_get_constant_timer_ticks(cpu); } +target_ulong helper_csrrd_msgir(CPULoongArchState *env) +{ + int irq, new; + + irq = find_first_bit((unsigned long *)env->CSR_MSGIS, 256); + if (irq < 256) { + clear_bit(irq, (unsigned long *)env->CSR_MSGIS); + new = find_first_bit((unsigned long *)env->CSR_MSGIS, 256); + if (new < 256) { + return irq; + } + + env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, MSGINT, 0); + } else { + /* bit 31 set 1 for no invalid irq */ + irq = BIT(31); + } + + return irq; +} + target_ulong helper_csrwr_estat(CPULoongArchState *env, target_ulong val) { int64_t old_v = env->CSR_ESTAT; diff --git a/target/loongarch/tcg/helper.h b/target/loongarch/tcg/helper.h index 1d5cb0198c971..7e508c5a7b9a3 100644 --- a/target/loongarch/tcg/helper.h +++ 
b/target/loongarch/tcg/helper.h @@ -100,6 +100,7 @@ DEF_HELPER_1(rdtime_d, i64, env) DEF_HELPER_1(csrrd_pgd, i64, env) DEF_HELPER_1(csrrd_cpuid, i64, env) DEF_HELPER_1(csrrd_tval, i64, env) +DEF_HELPER_1(csrrd_msgir, i64, env) DEF_HELPER_2(csrwr_stlbps, i64, env, tl) DEF_HELPER_2(csrwr_estat, i64, env, tl) DEF_HELPER_2(csrwr_asid, i64, env, tl) @@ -128,7 +129,7 @@ DEF_HELPER_2(invtlb_all_asid, void, env, tl) DEF_HELPER_3(invtlb_page_asid, void, env, tl, tl) DEF_HELPER_3(invtlb_page_asid_or_g, void, env, tl, tl) -DEF_HELPER_4(lddir, tl, env, tl, tl, i32) +DEF_HELPER_4(lddir, tl, env, tl, i32, i32) DEF_HELPER_4(ldpte, void, env, tl, tl, i32) DEF_HELPER_1(ertn, void, env) DEF_HELPER_1(idle, void, env) diff --git a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc index 3d70d75941715..77eeedbc42b17 100644 --- a/target/loongarch/tcg/insn_trans/trans_atomic.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_atomic.c.inc @@ -74,38 +74,38 @@ TRANS(sc_w, ALL, gen_sc, MO_TESL) TRANS(ll_d, 64, gen_ll, MO_TEUQ) TRANS(sc_d, 64, gen_sc, MO_TEUQ) TRANS(amswap_w, LAM, gen_am, tcg_gen_atomic_xchg_tl, MO_TESL) -TRANS(amswap_d, LAM, gen_am, tcg_gen_atomic_xchg_tl, MO_TEUQ) +TRANS64(amswap_d, LAM, gen_am, tcg_gen_atomic_xchg_tl, MO_TEUQ) TRANS(amadd_w, LAM, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TESL) -TRANS(amadd_d, LAM, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TEUQ) +TRANS64(amadd_d, LAM, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TEUQ) TRANS(amand_w, LAM, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TESL) -TRANS(amand_d, LAM, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TEUQ) +TRANS64(amand_d, LAM, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TEUQ) TRANS(amor_w, LAM, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TESL) -TRANS(amor_d, LAM, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TEUQ) +TRANS64(amor_d, LAM, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TEUQ) TRANS(amxor_w, LAM, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TESL) -TRANS(amxor_d, LAM, gen_am, 
tcg_gen_atomic_fetch_xor_tl, MO_TEUQ) +TRANS64(amxor_d, LAM, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TEUQ) TRANS(ammax_w, LAM, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TESL) -TRANS(ammax_d, LAM, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TEUQ) +TRANS64(ammax_d, LAM, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TEUQ) TRANS(ammin_w, LAM, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TESL) -TRANS(ammin_d, LAM, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TEUQ) +TRANS64(ammin_d, LAM, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TEUQ) TRANS(ammax_wu, LAM, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TESL) -TRANS(ammax_du, LAM, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TEUQ) +TRANS64(ammax_du, LAM, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TEUQ) TRANS(ammin_wu, LAM, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TESL) -TRANS(ammin_du, LAM, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TEUQ) +TRANS64(ammin_du, LAM, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TEUQ) TRANS(amswap_db_w, LAM, gen_am, tcg_gen_atomic_xchg_tl, MO_TESL) -TRANS(amswap_db_d, LAM, gen_am, tcg_gen_atomic_xchg_tl, MO_TEUQ) +TRANS64(amswap_db_d, LAM, gen_am, tcg_gen_atomic_xchg_tl, MO_TEUQ) TRANS(amadd_db_w, LAM, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TESL) -TRANS(amadd_db_d, LAM, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TEUQ) +TRANS64(amadd_db_d, LAM, gen_am, tcg_gen_atomic_fetch_add_tl, MO_TEUQ) TRANS(amand_db_w, LAM, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TESL) -TRANS(amand_db_d, LAM, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TEUQ) +TRANS64(amand_db_d, LAM, gen_am, tcg_gen_atomic_fetch_and_tl, MO_TEUQ) TRANS(amor_db_w, LAM, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TESL) -TRANS(amor_db_d, LAM, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TEUQ) +TRANS64(amor_db_d, LAM, gen_am, tcg_gen_atomic_fetch_or_tl, MO_TEUQ) TRANS(amxor_db_w, LAM, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TESL) -TRANS(amxor_db_d, LAM, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TEUQ) +TRANS64(amxor_db_d, LAM, gen_am, tcg_gen_atomic_fetch_xor_tl, MO_TEUQ) TRANS(ammax_db_w, LAM, 
gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TESL) -TRANS(ammax_db_d, LAM, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TEUQ) +TRANS64(ammax_db_d, LAM, gen_am, tcg_gen_atomic_fetch_smax_tl, MO_TEUQ) TRANS(ammin_db_w, LAM, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TESL) -TRANS(ammin_db_d, LAM, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TEUQ) +TRANS64(ammin_db_d, LAM, gen_am, tcg_gen_atomic_fetch_smin_tl, MO_TEUQ) TRANS(ammax_db_wu, LAM, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TESL) -TRANS(ammax_db_du, LAM, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TEUQ) +TRANS64(ammax_db_du, LAM, gen_am, tcg_gen_atomic_fetch_umax_tl, MO_TEUQ) TRANS(ammin_db_wu, LAM, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TESL) -TRANS(ammin_db_du, LAM, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TEUQ) +TRANS64(ammin_db_du, LAM, gen_am, tcg_gen_atomic_fetch_umin_tl, MO_TEUQ) diff --git a/target/loongarch/tcg/insn_trans/trans_extra.c.inc b/target/loongarch/tcg/insn_trans/trans_extra.c.inc index eda3d6e56111b..298a80cff55e0 100644 --- a/target/loongarch/tcg/insn_trans/trans_extra.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_extra.c.inc @@ -69,6 +69,10 @@ static bool trans_rdtimeh_w(DisasContext *ctx, arg_rdtimeh_w *a) static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a) { + if (!avail_64(ctx)) { + return false; + } + return gen_rdtime(ctx, a, 0, 0); } @@ -100,8 +104,8 @@ static bool gen_crc(DisasContext *ctx, arg_rrr *a, TRANS(crc_w_b_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(1)) TRANS(crc_w_h_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(2)) TRANS(crc_w_w_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(4)) -TRANS(crc_w_d_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(8)) +TRANS64(crc_w_d_w, CRC, gen_crc, gen_helper_crc32, tcg_constant_tl(8)) TRANS(crcc_w_b_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(1)) TRANS(crcc_w_h_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(2)) TRANS(crcc_w_w_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(4)) -TRANS(crcc_w_d_w, CRC, 
gen_crc, gen_helper_crc32c, tcg_constant_tl(8)) +TRANS64(crcc_w_d_w, CRC, gen_crc, gen_helper_crc32c, tcg_constant_tl(8)) diff --git a/target/loongarch/tcg/insn_trans/trans_farith.c.inc b/target/loongarch/tcg/insn_trans/trans_farith.c.inc index f4a0dea72701a..ff6cf3448e725 100644 --- a/target/loongarch/tcg/insn_trans/trans_farith.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_farith.c.inc @@ -183,16 +183,16 @@ TRANS(fmaxa_s, FP_SP, gen_fff, gen_helper_fmaxa_s) TRANS(fmaxa_d, FP_DP, gen_fff, gen_helper_fmaxa_d) TRANS(fmina_s, FP_SP, gen_fff, gen_helper_fmina_s) TRANS(fmina_d, FP_DP, gen_fff, gen_helper_fmina_d) -TRANS(fscaleb_s, FP_SP, gen_fff, gen_helper_fscaleb_s) -TRANS(fscaleb_d, FP_DP, gen_fff, gen_helper_fscaleb_d) +TRANS64(fscaleb_s, FP_SP, gen_fff, gen_helper_fscaleb_s) +TRANS64(fscaleb_d, FP_DP, gen_fff, gen_helper_fscaleb_d) TRANS(fsqrt_s, FP_SP, gen_ff, gen_helper_fsqrt_s) TRANS(fsqrt_d, FP_DP, gen_ff, gen_helper_fsqrt_d) TRANS(frecip_s, FP_SP, gen_ff, gen_helper_frecip_s) TRANS(frecip_d, FP_DP, gen_ff, gen_helper_frecip_d) TRANS(frsqrt_s, FP_SP, gen_ff, gen_helper_frsqrt_s) TRANS(frsqrt_d, FP_DP, gen_ff, gen_helper_frsqrt_d) -TRANS(flogb_s, FP_SP, gen_ff, gen_helper_flogb_s) -TRANS(flogb_d, FP_DP, gen_ff, gen_helper_flogb_d) +TRANS64(flogb_s, FP_SP, gen_ff, gen_helper_flogb_s) +TRANS64(flogb_d, FP_DP, gen_ff, gen_helper_flogb_d) TRANS(fclass_s, FP_SP, gen_ff, gen_helper_fclass_s) TRANS(fclass_d, FP_DP, gen_ff, gen_helper_fclass_d) TRANS(fmadd_s, FP_SP, gen_muladd, gen_helper_fmuladd_s, 0) diff --git a/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc b/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc index 833c059d6da31..ca1d76a3661c1 100644 --- a/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_fcnv.c.inc @@ -29,5 +29,5 @@ TRANS(ffint_s_w, FP_SP, gen_ff, gen_helper_ffint_s_w) TRANS(ffint_s_l, FP_SP, gen_ff, gen_helper_ffint_s_l) TRANS(ffint_d_w, FP_DP, gen_ff, gen_helper_ffint_d_w) TRANS(ffint_d_l, FP_DP, 
gen_ff, gen_helper_ffint_d_l) -TRANS(frint_s, FP_SP, gen_ff, gen_helper_frint_s) -TRANS(frint_d, FP_DP, gen_ff, gen_helper_frint_d) +TRANS64(frint_s, FP_SP, gen_ff, gen_helper_frint_s) +TRANS64(frint_d, FP_DP, gen_ff, gen_helper_frint_d) diff --git a/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc b/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc index 13452bc7e56ae..79da4718a5606 100644 --- a/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_fmemory.c.inc @@ -148,11 +148,11 @@ TRANS(fldx_s, FP_SP, gen_floadx, MO_TEUL) TRANS(fldx_d, FP_DP, gen_floadx, MO_TEUQ) TRANS(fstx_s, FP_SP, gen_fstorex, MO_TEUL) TRANS(fstx_d, FP_DP, gen_fstorex, MO_TEUQ) -TRANS(fldgt_s, FP_SP, gen_fload_gt, MO_TEUL) -TRANS(fldgt_d, FP_DP, gen_fload_gt, MO_TEUQ) -TRANS(fldle_s, FP_SP, gen_fload_le, MO_TEUL) -TRANS(fldle_d, FP_DP, gen_fload_le, MO_TEUQ) -TRANS(fstgt_s, FP_SP, gen_fstore_gt, MO_TEUL) -TRANS(fstgt_d, FP_DP, gen_fstore_gt, MO_TEUQ) -TRANS(fstle_s, FP_SP, gen_fstore_le, MO_TEUL) -TRANS(fstle_d, FP_DP, gen_fstore_le, MO_TEUQ) +TRANS64(fldgt_s, FP_SP, gen_fload_gt, MO_TEUL) +TRANS64(fldgt_d, FP_DP, gen_fload_gt, MO_TEUQ) +TRANS64(fldle_s, FP_SP, gen_fload_le, MO_TEUL) +TRANS64(fldle_d, FP_DP, gen_fload_le, MO_TEUQ) +TRANS64(fstgt_s, FP_SP, gen_fstore_gt, MO_TEUL) +TRANS64(fstgt_d, FP_DP, gen_fstore_gt, MO_TEUQ) +TRANS64(fstle_s, FP_SP, gen_fstore_le, MO_TEUL) +TRANS64(fstle_d, FP_DP, gen_fstore_le, MO_TEUQ) diff --git a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc index ecbfe23b6362b..64e53a44606e1 100644 --- a/target/loongarch/tcg/insn_trans/trans_privileged.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_privileged.c.inc @@ -83,6 +83,7 @@ void loongarch_csr_translate_init(void) SET_CSR_FUNC(TCFG, NULL, gen_helper_csrwr_tcfg); SET_CSR_FUNC(TVAL, gen_helper_csrrd_tval, NULL); SET_CSR_FUNC(TICLR, NULL, gen_helper_csrwr_ticlr); + SET_CSR_FUNC(MSGIR, 
gen_helper_csrrd_msgir, NULL); } #undef SET_CSR_FUNC @@ -233,11 +234,11 @@ static bool gen_iocsrwr(DisasContext *ctx, arg_rr *a, TRANS(iocsrrd_b, IOCSR, gen_iocsrrd, gen_helper_iocsrrd_b) TRANS(iocsrrd_h, IOCSR, gen_iocsrrd, gen_helper_iocsrrd_h) TRANS(iocsrrd_w, IOCSR, gen_iocsrrd, gen_helper_iocsrrd_w) -TRANS(iocsrrd_d, IOCSR, gen_iocsrrd, gen_helper_iocsrrd_d) +TRANS64(iocsrrd_d, IOCSR, gen_iocsrrd, gen_helper_iocsrrd_d) TRANS(iocsrwr_b, IOCSR, gen_iocsrwr, gen_helper_iocsrwr_b) TRANS(iocsrwr_h, IOCSR, gen_iocsrwr, gen_helper_iocsrwr_h) TRANS(iocsrwr_w, IOCSR, gen_iocsrwr, gen_helper_iocsrwr_w) -TRANS(iocsrwr_d, IOCSR, gen_iocsrwr, gen_helper_iocsrwr_d) +TRANS64(iocsrwr_d, IOCSR, gen_iocsrwr, gen_helper_iocsrwr_d) static void check_mmu_idx(DisasContext *ctx) { @@ -379,7 +380,7 @@ static bool trans_lddir(DisasContext *ctx, arg_lddir *a) if (check_plv(ctx)) { return false; } - gen_helper_lddir(dest, tcg_env, src, tcg_constant_tl(a->imm), mem_idx); + gen_helper_lddir(dest, tcg_env, src, tcg_constant_i32(a->imm), mem_idx); return true; } diff --git a/target/loongarch/tcg/insn_trans/trans_shift.c.inc b/target/loongarch/tcg/insn_trans/trans_shift.c.inc index 377307785aab4..136c4c845527f 100644 --- a/target/loongarch/tcg/insn_trans/trans_shift.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_shift.c.inc @@ -78,7 +78,7 @@ TRANS(sra_w, ALL, gen_rrr, EXT_SIGN, EXT_NONE, EXT_SIGN, gen_sra_w) TRANS(sll_d, 64, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_sll_d) TRANS(srl_d, 64, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_srl_d) TRANS(sra_d, 64, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_sra_d) -TRANS(rotr_w, 64, gen_rrr, EXT_ZERO, EXT_NONE, EXT_SIGN, gen_rotr_w) +TRANS(rotr_w, ALL, gen_rrr, EXT_ZERO, EXT_NONE, EXT_SIGN, gen_rotr_w) TRANS(rotr_d, 64, gen_rrr, EXT_NONE, EXT_NONE, EXT_NONE, gen_rotr_d) TRANS(slli_w, ALL, gen_rri_c, EXT_NONE, EXT_SIGN, tcg_gen_shli_tl) TRANS(slli_d, 64, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_shli_tl) @@ -86,5 +86,5 @@ TRANS(srli_w, ALL, gen_rri_c, 
EXT_ZERO, EXT_SIGN, tcg_gen_shri_tl) TRANS(srli_d, 64, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_shri_tl) TRANS(srai_w, ALL, gen_rri_c, EXT_NONE, EXT_NONE, gen_sari_w) TRANS(srai_d, 64, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_sari_tl) -TRANS(rotri_w, 64, gen_rri_v, EXT_NONE, EXT_NONE, gen_rotr_w) +TRANS(rotri_w, ALL, gen_rri_v, EXT_NONE, EXT_NONE, gen_rotr_w) TRANS(rotri_d, 64, gen_rri_c, EXT_NONE, EXT_NONE, tcg_gen_rotri_tl) diff --git a/target/loongarch/tcg/insn_trans/trans_vec.c.inc b/target/loongarch/tcg/insn_trans/trans_vec.c.inc index 78730029cbaad..38bccf28386fc 100644 --- a/target/loongarch/tcg/insn_trans/trans_vec.c.inc +++ b/target/loongarch/tcg/insn_trans/trans_vec.c.inc @@ -3585,7 +3585,9 @@ static bool gen_vldi(DisasContext *ctx, arg_vldi *a, uint32_t oprsz) int sel, vece; uint64_t value; - if (!check_valid_vldi_mode(a)) { + sel = (a->imm >> 12) & 0x1; + + if (sel && !check_valid_vldi_mode(a)) { generate_exception(ctx, EXCCODE_INE); return true; } @@ -3594,8 +3596,6 @@ static bool gen_vldi(DisasContext *ctx, arg_vldi *a, uint32_t oprsz) return true; } - sel = (a->imm >> 12) & 0x1; - if (sel) { value = vldi_get_value(ctx, a->imm); vece = MO_64; diff --git a/target/loongarch/tcg/meson.build b/target/loongarch/tcg/meson.build index bdf34f9673b68..b7adfe46f1dbf 100644 --- a/target/loongarch/tcg/meson.build +++ b/target/loongarch/tcg/meson.build @@ -7,6 +7,7 @@ loongarch_ss.add([zlib, gen]) loongarch_ss.add(files( 'fpu_helper.c', 'op_helper.c', + 'tcg_cpu.c', 'translate.c', 'vec_helper.c', )) diff --git a/target/loongarch/tcg/tcg_cpu.c b/target/loongarch/tcg/tcg_cpu.c new file mode 100644 index 0000000000000..82b54e6dc3cc3 --- /dev/null +++ b/target/loongarch/tcg/tcg_cpu.c @@ -0,0 +1,322 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * LoongArch CPU parameters for QEMU. 
+ * + * Copyright (c) 2025 Loongson Technology Corporation Limited + */ +#include "qemu/osdep.h" +#include "qemu/accel.h" +#include "qemu/error-report.h" +#include "qemu/log.h" +#include "accel/accel-cpu-target.h" +#include "accel/tcg/cpu-ldst.h" +#include "accel/tcg/cpu-ops.h" +#include "exec/translation-block.h" +#include "exec/target_page.h" +#include "tcg_loongarch.h" +#include "internals.h" + +struct TypeExcp { + int32_t exccode; + const char * const name; +}; + +static const struct TypeExcp excp_names[] = { + {EXCCODE_INT, "Interrupt"}, + {EXCCODE_PIL, "Page invalid exception for load"}, + {EXCCODE_PIS, "Page invalid exception for store"}, + {EXCCODE_PIF, "Page invalid exception for fetch"}, + {EXCCODE_PME, "Page modified exception"}, + {EXCCODE_PNR, "Page Not Readable exception"}, + {EXCCODE_PNX, "Page Not Executable exception"}, + {EXCCODE_PPI, "Page Privilege error"}, + {EXCCODE_ADEF, "Address error for instruction fetch"}, + {EXCCODE_ADEM, "Address error for Memory access"}, + {EXCCODE_SYS, "Syscall"}, + {EXCCODE_BRK, "Break"}, + {EXCCODE_INE, "Instruction Non-Existent"}, + {EXCCODE_IPE, "Instruction privilege error"}, + {EXCCODE_FPD, "Floating Point Disabled"}, + {EXCCODE_FPE, "Floating Point Exception"}, + {EXCCODE_DBP, "Debug breakpoint"}, + {EXCCODE_BCE, "Bound Check Exception"}, + {EXCCODE_SXD, "128 bit vector instructions Disable exception"}, + {EXCCODE_ASXD, "256 bit vector instructions Disable exception"}, + {EXCP_HLT, "EXCP_HLT"}, +}; + +static const char *loongarch_exception_name(int32_t exception) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(excp_names); i++) { + if (excp_names[i].exccode == exception) { + return excp_names[i].name; + } + } + return "Unknown"; +} + +void G_NORETURN do_raise_exception(CPULoongArchState *env, + uint32_t exception, + uintptr_t pc) +{ + CPUState *cs = env_cpu(env); + + qemu_log_mask(CPU_LOG_INT, "%s: exception: %d (%s)\n", + __func__, + exception, + loongarch_exception_name(exception)); + cs->exception_index = 
exception; + + cpu_loop_exit_restore(cs, pc); +} + +#ifndef CONFIG_USER_ONLY +static void loongarch_cpu_do_interrupt(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + bool update_badinstr = 1; + int cause = -1; + bool tlbfill = FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR); + uint32_t vec_size = FIELD_EX64(env->CSR_ECFG, CSR_ECFG, VS); + + if (cs->exception_index != EXCCODE_INT) { + qemu_log_mask(CPU_LOG_INT, + "%s enter: pc " TARGET_FMT_lx " ERA " TARGET_FMT_lx + " TLBRERA " TARGET_FMT_lx " exception: %d (%s)\n", + __func__, env->pc, env->CSR_ERA, env->CSR_TLBRERA, + cs->exception_index, + loongarch_exception_name(cs->exception_index)); + } + + switch (cs->exception_index) { + case EXCCODE_DBP: + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DCL, 1); + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, ECODE, 0xC); + goto set_DERA; + set_DERA: + env->CSR_DERA = env->pc; + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DST, 1); + set_pc(env, env->CSR_EENTRY + 0x480); + break; + case EXCCODE_INT: + if (FIELD_EX64(env->CSR_DBG, CSR_DBG, DST)) { + env->CSR_DBG = FIELD_DP64(env->CSR_DBG, CSR_DBG, DEI, 1); + goto set_DERA; + } + QEMU_FALLTHROUGH; + case EXCCODE_PIF: + case EXCCODE_ADEF: + cause = cs->exception_index; + update_badinstr = 0; + break; + case EXCCODE_SYS: + case EXCCODE_BRK: + case EXCCODE_INE: + case EXCCODE_IPE: + case EXCCODE_FPD: + case EXCCODE_FPE: + case EXCCODE_SXD: + case EXCCODE_ASXD: + env->CSR_BADV = env->pc; + QEMU_FALLTHROUGH; + case EXCCODE_BCE: + case EXCCODE_ADEM: + case EXCCODE_PIL: + case EXCCODE_PIS: + case EXCCODE_PME: + case EXCCODE_PNR: + case EXCCODE_PNX: + case EXCCODE_PPI: + cause = cs->exception_index; + break; + default: + qemu_log("Error: exception(%d) has not been supported\n", + cs->exception_index); + abort(); + } + + if (update_badinstr) { + env->CSR_BADI = cpu_ldl_code(env, env->pc); + } + + /* Save PLV and IE */ + if (tlbfill) { + env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PPLV, + 
FIELD_EX64(env->CSR_CRMD, + CSR_CRMD, PLV)); + env->CSR_TLBRPRMD = FIELD_DP64(env->CSR_TLBRPRMD, CSR_TLBRPRMD, PIE, + FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE)); + /* set the DA mode */ + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, DA, 1); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PG, 0); + env->CSR_TLBRERA = FIELD_DP64(env->CSR_TLBRERA, CSR_TLBRERA, + PC, (env->pc >> 2)); + } else { + env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ECODE, + EXCODE_MCODE(cause)); + env->CSR_ESTAT = FIELD_DP64(env->CSR_ESTAT, CSR_ESTAT, ESUBCODE, + EXCODE_SUBCODE(cause)); + env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PPLV, + FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV)); + env->CSR_PRMD = FIELD_DP64(env->CSR_PRMD, CSR_PRMD, PIE, + FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE)); + env->CSR_ERA = env->pc; + } + + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, PLV, 0); + env->CSR_CRMD = FIELD_DP64(env->CSR_CRMD, CSR_CRMD, IE, 0); + + if (vec_size) { + vec_size = (1 << vec_size) * 4; + } + + if (cs->exception_index == EXCCODE_INT) { + /* Interrupt */ + uint32_t vector = 0; + uint32_t pending = FIELD_EX64(env->CSR_ESTAT, CSR_ESTAT, IS); + pending &= FIELD_EX64(env->CSR_ECFG, CSR_ECFG, LIE); + + /* Find the highest-priority interrupt. 
*/ + vector = 31 - clz32(pending); + set_pc(env, env->CSR_EENTRY + \ + (EXCCODE_EXTERNAL_INT + vector) * vec_size); + qemu_log_mask(CPU_LOG_INT, + "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx + " cause %d\n" " A " TARGET_FMT_lx " D " + TARGET_FMT_lx " vector = %d ExC " TARGET_FMT_lx "ExS" + TARGET_FMT_lx "\n", + __func__, env->pc, env->CSR_ERA, + cause, env->CSR_BADV, env->CSR_DERA, vector, + env->CSR_ECFG, env->CSR_ESTAT); + } else { + if (tlbfill) { + set_pc(env, env->CSR_TLBRENTRY); + } else { + set_pc(env, env->CSR_EENTRY + EXCODE_MCODE(cause) * vec_size); + } + qemu_log_mask(CPU_LOG_INT, + "%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx + " cause %d%s\n, ESTAT " TARGET_FMT_lx + " EXCFG " TARGET_FMT_lx " BADVA " TARGET_FMT_lx + "BADI " TARGET_FMT_lx " SYS_NUM " TARGET_FMT_lu + " cpu %d asid " TARGET_FMT_lx "\n", __func__, env->pc, + tlbfill ? env->CSR_TLBRERA : env->CSR_ERA, + cause, tlbfill ? "(refill)" : "", env->CSR_ESTAT, + env->CSR_ECFG, + tlbfill ? env->CSR_TLBRBADV : env->CSR_BADV, + env->CSR_BADI, env->gpr[11], cs->cpu_index, + env->CSR_ASID); + } + cs->exception_index = -1; +} + +static void loongarch_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, + vaddr addr, unsigned size, + MMUAccessType access_type, + int mmu_idx, MemTxAttrs attrs, + MemTxResult response, + uintptr_t retaddr) +{ + CPULoongArchState *env = cpu_env(cs); + + if (access_type == MMU_INST_FETCH) { + do_raise_exception(env, EXCCODE_ADEF, retaddr); + } else { + do_raise_exception(env, EXCCODE_ADEM, retaddr); + } +} + +static inline bool cpu_loongarch_hw_interrupts_enabled(CPULoongArchState *env) +{ + bool ret = 0; + + ret = (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, IE) && + !(FIELD_EX64(env->CSR_DBG, CSR_DBG, DST))); + + return ret; +} + +static bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + if (interrupt_request & CPU_INTERRUPT_HARD) { + CPULoongArchState *env = cpu_env(cs); + + if (cpu_loongarch_hw_interrupts_enabled(env) && + 
cpu_loongarch_hw_interrupts_pending(env)) { + /* Raise it */ + cs->exception_index = EXCCODE_INT; + loongarch_cpu_do_interrupt(cs); + return true; + } + } + return false; +} + +static vaddr loongarch_pointer_wrap(CPUState *cs, int mmu_idx, + vaddr result, vaddr base) +{ + return is_va32(cpu_env(cs)) ? (uint32_t)result : result; +} +#endif + +static TCGTBCPUState loongarch_get_tb_cpu_state(CPUState *cs) +{ + CPULoongArchState *env = cpu_env(cs); + uint32_t flags; + + flags = env->CSR_CRMD & (R_CSR_CRMD_PLV_MASK | R_CSR_CRMD_PG_MASK); + flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, FPE) * HW_FLAGS_EUEN_FPE; + flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, SXE) * HW_FLAGS_EUEN_SXE; + flags |= FIELD_EX64(env->CSR_EUEN, CSR_EUEN, ASXE) * HW_FLAGS_EUEN_ASXE; + flags |= is_va32(env) * HW_FLAGS_VA32; + + return (TCGTBCPUState){ .pc = env->pc, .flags = flags }; +} + +static void loongarch_cpu_synchronize_from_tb(CPUState *cs, + const TranslationBlock *tb) +{ + tcg_debug_assert(!tcg_cflags_has(cs, CF_PCREL)); + set_pc(cpu_env(cs), tb->pc); +} + +static void loongarch_restore_state_to_opc(CPUState *cs, + const TranslationBlock *tb, + const uint64_t *data) +{ + set_pc(cpu_env(cs), data[0]); +} + +static int loongarch_cpu_mmu_index(CPUState *cs, bool ifetch) +{ + CPULoongArchState *env = cpu_env(cs); + + if (FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PG)) { + return FIELD_EX64(env->CSR_CRMD, CSR_CRMD, PLV); + } + return MMU_DA_IDX; +} + +const TCGCPUOps loongarch_tcg_ops = { + .guest_default_memory_order = 0, + .mttcg_supported = true, + + .initialize = loongarch_translate_init, + .translate_code = loongarch_translate_code, + .get_tb_cpu_state = loongarch_get_tb_cpu_state, + .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, + .restore_state_to_opc = loongarch_restore_state_to_opc, + .mmu_index = loongarch_cpu_mmu_index, + +#ifndef CONFIG_USER_ONLY + .tlb_fill = loongarch_cpu_tlb_fill, + .pointer_wrap = loongarch_pointer_wrap, + .cpu_exec_interrupt = loongarch_cpu_exec_interrupt, 
+ .cpu_exec_halt = loongarch_cpu_has_work, + .cpu_exec_reset = cpu_reset, + .do_interrupt = loongarch_cpu_do_interrupt, + .do_transaction_failed = loongarch_cpu_do_transaction_failed, +#endif +}; diff --git a/target/loongarch/tcg/tcg_loongarch.h b/target/loongarch/tcg/tcg_loongarch.h index fd4e1160229ed..7fb627f2d6303 100644 --- a/target/loongarch/tcg/tcg_loongarch.h +++ b/target/loongarch/tcg/tcg_loongarch.h @@ -7,15 +7,17 @@ #ifndef TARGET_LOONGARCH_TCG_LOONGARCH_H #define TARGET_LOONGARCH_TCG_LOONGARCH_H #include "cpu.h" +#include "cpu-mmu.h" +extern const TCGCPUOps loongarch_tcg_ops; void loongarch_csr_translate_init(void); bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); -int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx); +TLBRet loongarch_get_addr_from_tlb(CPULoongArchState *env, + MMUContext *context, + MMUAccessType access_type, int mmu_idx); #endif /* TARGET_LOONGARCH_TCG_LOONGARCH_H */ diff --git a/target/loongarch/tcg/tlb_helper.c b/target/loongarch/tcg/tlb_helper.c index 8872593ff0105..e119f78d92585 100644 --- a/target/loongarch/tcg/tlb_helper.c +++ b/target/loongarch/tcg/tlb_helper.c @@ -10,6 +10,7 @@ #include "qemu/guest-random.h" #include "cpu.h" +#include "cpu-mmu.h" #include "internals.h" #include "exec/helper-proto.h" #include "exec/cputlb.h" @@ -20,6 +21,18 @@ #include "cpu-csr.h" #include "tcg/tcg_loongarch.h" +typedef bool (*tlb_match)(bool global, int asid, int tlb_asid); + +static bool tlb_match_any(bool global, int asid, int tlb_asid) +{ + return global || tlb_asid == asid; +} + +static bool tlb_match_asid(bool global, int asid, int tlb_asid) +{ + return !global && tlb_asid == asid; +} + bool check_ps(CPULoongArchState *env, uint8_t tlb_ps) { if (tlb_ps >= 64) { @@ -28,8 +41,8 @@ bool check_ps(CPULoongArchState *env, uint8_t tlb_ps) return 
BIT_ULL(tlb_ps) & (env->CSR_PRCFG2); } -static void raise_mmu_exception(CPULoongArchState *env, target_ulong address, - MMUAccessType access_type, int tlb_error) +static void raise_mmu_exception(CPULoongArchState *env, vaddr address, + MMUAccessType access_type, TLBRet tlb_error) { CPUState *cs = env_cpu(env); @@ -100,34 +113,25 @@ static void invalidate_tlb_entry(CPULoongArchState *env, int index) target_ulong addr, mask, pagesize; uint8_t tlb_ps; LoongArchTLB *tlb = &env->tlb[index]; - - int mmu_idx = cpu_mmu_index(env_cpu(env), false); + int idxmap = BIT(MMU_KERNEL_IDX) | BIT(MMU_USER_IDX); uint8_t tlb_v0 = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, V); uint8_t tlb_v1 = FIELD_EX64(tlb->tlb_entry1, TLBENTRY, V); uint64_t tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - if (!tlb_e) { - return; - } - if (index >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); pagesize = MAKE_64BIT_MASK(tlb_ps, 1); mask = MAKE_64BIT_MASK(0, tlb_ps + 1); + addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; + addr = sextract64(addr, 0, TARGET_VIRT_ADDR_SPACE_BITS); if (tlb_v0) { - addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & ~mask; /* even */ tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, - mmu_idx, TARGET_LONG_BITS); + idxmap, TARGET_LONG_BITS); } if (tlb_v1) { - addr = (tlb_vppn << R_TLB_MISC_VPPN_SHIFT) & pagesize; /* odd */ - tlb_flush_range_by_mmuidx(env_cpu(env), addr, pagesize, - mmu_idx, TARGET_LONG_BITS); + tlb_flush_range_by_mmuidx(env_cpu(env), addr + pagesize, pagesize, + idxmap, TARGET_LONG_BITS); } } @@ -135,20 +139,27 @@ static void invalidate_tlb(CPULoongArchState *env, int index) { LoongArchTLB *tlb; uint16_t csr_asid, tlb_asid, tlb_g; + uint8_t tlb_e; csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); tlb = &env->tlb[index]; + tlb_e = 
FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (!tlb_e) { + return; + } + + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + /* QEMU TLB is flushed when asid is changed */ if (tlb_g == 0 && tlb_asid != csr_asid) { return; } invalidate_tlb_entry(env, index); } -static void fill_tlb_entry(CPULoongArchState *env, int index) +static void fill_tlb_entry(CPULoongArchState *env, LoongArchTLB *tlb) { - LoongArchTLB *tlb = &env->tlb[index]; uint64_t lo0, lo1, csr_vppn; uint16_t csr_asid; uint8_t csr_ps; @@ -173,11 +184,8 @@ static void fill_tlb_entry(CPULoongArchState *env, int index) lo1 = env->CSR_TLBELO1; } - /* Only MTLB has the ps fields */ - if (index >= LOONGARCH_STLB) { - tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps); - } - + /* Store page size in field PS */ + tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, PS, csr_ps); tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, VPPN, csr_vppn); tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 1); csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); @@ -203,17 +211,18 @@ static uint32_t get_random_tlb(uint32_t low, uint32_t high) * field in tlb entry contains bit[47:13], so need adjust. 
* virt_vpn = vaddr[47:13] */ -static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, - int *index) +static LoongArchTLB *loongarch_tlb_search_cb(CPULoongArchState *env, + vaddr vaddr, int csr_asid, + tlb_match func) { LoongArchTLB *tlb; - uint16_t csr_asid, tlb_asid, stlb_idx; - uint8_t tlb_e, tlb_ps, tlb_g, stlb_ps; + uint16_t tlb_asid, stlb_idx; + uint8_t tlb_e, tlb_ps, stlb_ps; + bool tlb_g; int i, compare_shift; uint64_t vpn, tlb_vppn; - csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); - stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); vpn = (vaddr & TARGET_VIRT_MASK) >> (stlb_ps + 1); stlb_idx = vpn & 0xff; /* VA[25:15] <==> TLBIDX.index for 16KiB Page */ compare_shift = stlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; @@ -225,12 +234,11 @@ static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, if (tlb_e) { tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + tlb_g = !!FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - if ((tlb_g == 1 || tlb_asid == csr_asid) && + if (func(tlb_g, csr_asid, tlb_asid) && (vpn == (tlb_vppn >> compare_shift))) { - *index = i * 256 + stlb_idx; - return true; + return tlb; } } } @@ -246,13 +254,30 @@ static bool loongarch_tlb_search(CPULoongArchState *env, target_ulong vaddr, tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; vpn = (vaddr & TARGET_VIRT_MASK) >> (tlb_ps + 1); - if ((tlb_g == 1 || tlb_asid == csr_asid) && + if (func(tlb_g, csr_asid, tlb_asid) && (vpn == (tlb_vppn >> compare_shift))) { - *index = i; - return true; + return tlb; } } } + return NULL; +} + +static bool loongarch_tlb_search(CPULoongArchState *env, vaddr vaddr, + int *index) +{ + int csr_asid; + tlb_match func; + LoongArchTLB *tlb; + + func = tlb_match_any; + csr_asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, 
ASID); + tlb = loongarch_tlb_search_cb(env, vaddr, csr_asid, func); + if (tlb) { + *index = tlb - env->tlb; + return true; + } + return false; } @@ -283,12 +308,7 @@ void helper_tlbrd(CPULoongArchState *env) index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); tlb = &env->tlb[index]; - - if (index >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); if (!tlb_e) { @@ -314,23 +334,44 @@ void helper_tlbrd(CPULoongArchState *env) void helper_tlbwr(CPULoongArchState *env) { int index = FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, INDEX); + LoongArchTLB *old, new = {}; + bool skip_inv = false; + uint8_t tlb_v0, tlb_v1; - invalidate_tlb(env, index); - + old = env->tlb + index; if (FIELD_EX64(env->CSR_TLBIDX, CSR_TLBIDX, NE)) { - env->tlb[index].tlb_misc = FIELD_DP64(env->tlb[index].tlb_misc, - TLB_MISC, E, 0); + invalidate_tlb(env, index); return; } - fill_tlb_entry(env, index); + fill_tlb_entry(env, &new); + /* Check whether ASID/VPPN is the same */ + if (old->tlb_misc == new.tlb_misc) { + /* Check whether both even/odd pages is the same or invalid */ + tlb_v0 = FIELD_EX64(old->tlb_entry0, TLBENTRY, V); + tlb_v1 = FIELD_EX64(old->tlb_entry1, TLBENTRY, V); + if ((!tlb_v0 || new.tlb_entry0 == old->tlb_entry0) && + (!tlb_v1 || new.tlb_entry1 == old->tlb_entry1)) { + skip_inv = true; + } + } + + /* flush tlb before updating the entry */ + if (!skip_inv) { + invalidate_tlb(env, index); + } + + *old = new; } void helper_tlbfill(CPULoongArchState *env) { uint64_t address, entryhi; - int index, set, stlb_idx; + int index, set, i, stlb_idx; uint16_t pagesize, stlb_ps; + uint16_t asid, tlb_asid; + LoongArchTLB *tlb; + uint8_t tlb_e, tlb_g; if (FIELD_EX64(env->CSR_TLBRERA, CSR_TLBRERA, ISTLBR)) { entryhi = env->CSR_TLBREHI; @@ -344,24 +385,58 @@ void helper_tlbfill(CPULoongArchState *env) /* 
Validity of stlb_ps is checked in helper_csrwr_stlbps() */ stlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); + asid = FIELD_EX64(env->CSR_ASID, CSR_ASID, ASID); if (pagesize == stlb_ps) { /* Only write into STLB bits [47:13] */ address = entryhi & ~MAKE_64BIT_MASK(0, R_CSR_TLBEHI_64_VPPN_SHIFT); - - /* Choose one set ramdomly */ - set = get_random_tlb(0, 7); - - /* Index in one set */ + set = -1; stlb_idx = (address >> (stlb_ps + 1)) & 0xff; /* [0,255] */ + for (i = 0; i < 8; ++i) { + tlb = &env->tlb[i * 256 + stlb_idx]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + if (!tlb_e) { + set = i; + break; + } + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + if (tlb_g == 0 && asid != tlb_asid) { + set = i; + } + } + + /* Choose one set randomly */ + if (set < 0) { + set = get_random_tlb(0, 7); + } index = set * 256 + stlb_idx; } else { /* Only write into MTLB */ - index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1); + index = -1; + for (i = LOONGARCH_STLB; i < LOONGARCH_TLB_MAX; i++) { + tlb = &env->tlb[i]; + tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); + + if (!tlb_e) { + index = i; + break; + } + + tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); + tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); + if (tlb_g == 0 && asid != tlb_asid) { + index = i; + } + } + + if (index < 0) { + index = get_random_tlb(LOONGARCH_STLB, LOONGARCH_TLB_MAX - 1); + } } invalidate_tlb(env, index); - fill_tlb_entry(env, index); + fill_tlb_entry(env, env->tlb + index); } void helper_tlbclr(CPULoongArchState *env) @@ -463,67 +538,29 @@ void helper_invtlb_all_asid(CPULoongArchState *env, target_ulong info) void helper_invtlb_page_asid(CPULoongArchState *env, target_ulong info, target_ulong addr) { - uint16_t asid = info & 0x3ff; - - for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { - LoongArchTLB *tlb = &env->tlb[i]; - uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - uint16_t tlb_asid = 
FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - uint64_t vpn, tlb_vppn; - uint8_t tlb_ps, compare_shift; - uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - - if (!tlb_e) { - continue; - } - if (i >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1); - compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + int asid = info & 0x3ff; + LoongArchTLB *tlb; + tlb_match func; - if (!tlb_g && (tlb_asid == asid) && - (vpn == (tlb_vppn >> compare_shift))) { - tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); - } + func = tlb_match_asid; + tlb = loongarch_tlb_search_cb(env, addr, asid, func); + if (tlb) { + invalidate_tlb(env, tlb - env->tlb); } - tlb_flush(env_cpu(env)); } void helper_invtlb_page_asid_or_g(CPULoongArchState *env, target_ulong info, target_ulong addr) { - uint16_t asid = info & 0x3ff; - - for (int i = 0; i < LOONGARCH_TLB_MAX; i++) { - LoongArchTLB *tlb = &env->tlb[i]; - uint8_t tlb_g = FIELD_EX64(tlb->tlb_entry0, TLBENTRY, G); - uint16_t tlb_asid = FIELD_EX64(tlb->tlb_misc, TLB_MISC, ASID); - uint64_t vpn, tlb_vppn; - uint8_t tlb_ps, compare_shift; - uint8_t tlb_e = FIELD_EX64(tlb->tlb_misc, TLB_MISC, E); - - if (!tlb_e) { - continue; - } - if (i >= LOONGARCH_STLB) { - tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } - tlb_vppn = FIELD_EX64(tlb->tlb_misc, TLB_MISC, VPPN); - vpn = (addr & TARGET_VIRT_MASK) >> (tlb_ps + 1); - compare_shift = tlb_ps + 1 - R_TLB_MISC_VPPN_SHIFT; + int asid = info & 0x3ff; + LoongArchTLB *tlb; + tlb_match func; - if ((tlb_g || (tlb_asid == asid)) && - (vpn == (tlb_vppn >> compare_shift))) { - tlb->tlb_misc = FIELD_DP64(tlb->tlb_misc, TLB_MISC, E, 0); - } + func = tlb_match_any; + tlb = loongarch_tlb_search_cb(env, addr, asid, func); + if (tlb) { + 
invalidate_tlb(env, tlb - env->tlb); } - tlb_flush(env_cpu(env)); } bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, @@ -533,13 +570,15 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, CPULoongArchState *env = cpu_env(cs); hwaddr physical; int prot; - int ret; + MMUContext context; + TLBRet ret; /* Data access */ - ret = get_physical_address(env, &physical, &prot, address, - access_type, mmu_idx, 0); - + context.addr = address; + ret = get_physical_address(env, &context, access_type, mmu_idx, 0); if (ret == TLBRET_MATCH) { + physical = context.physical; + prot = context.prot; tlb_set_page(cs, address & TARGET_PAGE_MASK, physical & TARGET_PAGE_MASK, prot, mmu_idx, TARGET_PAGE_SIZE); @@ -560,7 +599,7 @@ bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, - target_ulong level, uint32_t mem_idx) + uint32_t level, uint32_t mem_idx) { CPUState *cs = env_cpu(env); target_ulong badvaddr, index, phys; @@ -568,7 +607,7 @@ target_ulong helper_lddir(CPULoongArchState *env, target_ulong base, if (unlikely((level == 0) || (level > 4))) { qemu_log_mask(LOG_GUEST_ERROR, - "Attepted LDDIR with level %"PRId64"\n", level); + "Attepted LDDIR with level %u\n", level); return base; } @@ -664,85 +703,31 @@ void helper_ldpte(CPULoongArchState *env, target_ulong base, target_ulong odd, env->CSR_TLBREHI = FIELD_DP64(env->CSR_TLBREHI, CSR_TLBREHI, PS, ps); } -static int loongarch_map_tlb_entry(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - int access_type, int index, int mmu_idx) +static TLBRet loongarch_map_tlb_entry(CPULoongArchState *env, + MMUContext *context, + MMUAccessType access_type, int index, + int mmu_idx) { LoongArchTLB *tlb = &env->tlb[index]; - uint64_t plv = mmu_idx; - uint64_t tlb_entry, tlb_ppn; - uint8_t tlb_ps, n, tlb_v, tlb_d, tlb_plv, tlb_nx, tlb_nr, tlb_rplv; - - if (index >= LOONGARCH_STLB) { - tlb_ps = 
FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); - } else { - tlb_ps = FIELD_EX64(env->CSR_STLBPS, CSR_STLBPS, PS); - } - n = (address >> tlb_ps) & 0x1;/* Odd or even */ - - tlb_entry = n ? tlb->tlb_entry1 : tlb->tlb_entry0; - tlb_v = FIELD_EX64(tlb_entry, TLBENTRY, V); - tlb_d = FIELD_EX64(tlb_entry, TLBENTRY, D); - tlb_plv = FIELD_EX64(tlb_entry, TLBENTRY, PLV); - if (is_la64(env)) { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_64, PPN); - tlb_nx = FIELD_EX64(tlb_entry, TLBENTRY_64, NX); - tlb_nr = FIELD_EX64(tlb_entry, TLBENTRY_64, NR); - tlb_rplv = FIELD_EX64(tlb_entry, TLBENTRY_64, RPLV); - } else { - tlb_ppn = FIELD_EX64(tlb_entry, TLBENTRY_32, PPN); - tlb_nx = 0; - tlb_nr = 0; - tlb_rplv = 0; - } - - /* Remove sw bit between bit12 -- bit PS*/ - tlb_ppn = tlb_ppn & ~(((0x1UL << (tlb_ps - 12)) - 1)); - - /* Check access rights */ - if (!tlb_v) { - return TLBRET_INVALID; - } - - if (access_type == MMU_INST_FETCH && tlb_nx) { - return TLBRET_XI; - } - - if (access_type == MMU_DATA_LOAD && tlb_nr) { - return TLBRET_RI; - } - - if (((tlb_rplv == 0) && (plv > tlb_plv)) || - ((tlb_rplv == 1) && (plv != tlb_plv))) { - return TLBRET_PE; - } + uint8_t tlb_ps, n; - if ((access_type == MMU_DATA_STORE) && !tlb_d) { - return TLBRET_DIRTY; - } - - *physical = (tlb_ppn << R_TLBENTRY_64_PPN_SHIFT) | - (address & MAKE_64BIT_MASK(0, tlb_ps)); - *prot = PAGE_READ; - if (tlb_d) { - *prot |= PAGE_WRITE; - } - if (!tlb_nx) { - *prot |= PAGE_EXEC; - } - return TLBRET_MATCH; + tlb_ps = FIELD_EX64(tlb->tlb_misc, TLB_MISC, PS); + n = (context->addr >> tlb_ps) & 0x1;/* Odd or even */ + context->pte = n ? 
tlb->tlb_entry1 : tlb->tlb_entry0; + context->ps = tlb_ps; + return loongarch_check_pte(env, context, access_type, mmu_idx); } -int loongarch_get_addr_from_tlb(CPULoongArchState *env, hwaddr *physical, - int *prot, target_ulong address, - MMUAccessType access_type, int mmu_idx) +TLBRet loongarch_get_addr_from_tlb(CPULoongArchState *env, + MMUContext *context, + MMUAccessType access_type, int mmu_idx) { int index, match; - match = loongarch_tlb_search(env, address, &index); + match = loongarch_tlb_search(env, context->addr, &index); if (match) { - return loongarch_map_tlb_entry(env, physical, prot, - address, access_type, index, mmu_idx); + return loongarch_map_tlb_entry(env, context, access_type, index, + mmu_idx); } return TLBRET_NOMATCH; diff --git a/target/loongarch/tcg/translate.c b/target/loongarch/tcg/translate.c index 53a0b4c3ce91e..055f6fb6046b9 100644 --- a/target/loongarch/tcg/translate.c +++ b/target/loongarch/tcg/translate.c @@ -99,16 +99,16 @@ void generate_exception(DisasContext *ctx, int excp) ctx->base.is_jmp = DISAS_NORETURN; } -static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, vaddr dest) { if (ctx->va32) { dest = (uint32_t) dest; } if (translator_use_goto_tb(&ctx->base, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_tl(cpu_pc, dest); - tcg_gen_exit_tb(ctx->base.tb, n); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { tcg_gen_movi_tl(cpu_pc, dest); tcg_gen_lookup_and_goto_ptr(); diff --git a/target/loongarch/translate.h b/target/loongarch/translate.h index 018dc5eb171bb..bbe015ba579f5 100644 --- a/target/loongarch/translate.h +++ b/target/loongarch/translate.h @@ -14,6 +14,10 @@ static bool trans_##NAME(DisasContext *ctx, arg_##NAME * a) \ { return avail_##AVAIL(ctx) && FUNC(ctx, a, __VA_ARGS__); } +#define TRANS64(NAME, AVAIL, FUNC, ...) 
\ + static bool trans_##NAME(DisasContext *ctx, arg_##NAME * a) \ + { return avail_64(ctx) && avail_##AVAIL(ctx) && FUNC(ctx, a, __VA_ARGS__); } + #define avail_ALL(C) true #define avail_64(C) (FIELD_EX32((C)->cpucfg1, CPUCFG1, ARCH) == \ CPUCFG1_ARCH_LA64) diff --git a/target/m68k/cpu.c b/target/m68k/cpu.c index 6a09db3a6f6df..f1b673119d6b4 100644 --- a/target/m68k/cpu.c +++ b/target/m68k/cpu.c @@ -74,7 +74,7 @@ static void m68k_restore_state_to_opc(CPUState *cs, #ifndef CONFIG_USER_ONLY static bool m68k_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & CPU_INTERRUPT_HARD; + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/m68k/translate.c b/target/m68k/translate.c index 97afceb129701..eb1ba150745e9 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -44,9 +44,6 @@ #undef DEFO32 #undef DEFO64 -static TCGv_i32 cpu_halted; -static TCGv_i32 cpu_exception_index; - static char cpu_reg_names[2 * 8 * 3 + 5 * 4]; static TCGv cpu_dregs[8]; static TCGv cpu_aregs[8]; @@ -78,14 +75,6 @@ void m68k_tcg_init(void) #undef DEFO32 #undef DEFO64 - cpu_halted = tcg_global_mem_new_i32(tcg_env, - -offsetof(M68kCPU, env) + - offsetof(CPUState, halted), "HALTED"); - cpu_exception_index = tcg_global_mem_new_i32(tcg_env, - -offsetof(M68kCPU, env) + - offsetof(CPUState, exception_index), - "EXCEPTION"); - p = cpu_reg_names; for (i = 0; i < 8; i++) { sprintf(p, "D%d", i); @@ -4512,7 +4501,8 @@ DISAS_INSN(halt) gen_exception(s, s->pc, EXCP_SEMIHOSTING); return; } - tcg_gen_movi_i32(cpu_halted, 1); + tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, + offsetof(CPUState, halted) - offsetof(M68kCPU, env)); gen_exception(s, s->pc, EXCP_HLT); } @@ -4528,7 +4518,8 @@ DISAS_INSN(stop) ext = read_im16(env, s); gen_set_sr_im(s, ext, 0); - tcg_gen_movi_i32(cpu_halted, 1); + tcg_gen_st_i32(tcg_constant_i32(1), tcg_env, + offsetof(CPUState, halted) - offsetof(M68kCPU, env)); gen_exception(s, s->pc, EXCP_HLT); } diff --git 
a/target/microblaze/cpu.c b/target/microblaze/cpu.c index ee0a869a94aac..22231f09e602a 100644 --- a/target/microblaze/cpu.c +++ b/target/microblaze/cpu.c @@ -129,7 +129,7 @@ static void mb_restore_state_to_opc(CPUState *cs, #ifndef CONFIG_USER_ONLY static bool mb_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_NMI); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/microblaze/cpu.h b/target/microblaze/cpu.h index 3ce28b302fe55..14b107876a432 100644 --- a/target/microblaze/cpu.h +++ b/target/microblaze/cpu.h @@ -259,7 +259,7 @@ struct CPUArchState { /* lwx/swx reserved address */ #define RES_ADDR_NONE 0xffffffff /* Use 0xffffffff to indicate no reservation */ - target_ulong res_addr; + uint32_t res_addr; uint32_t res_val; /* Internal flags. */ diff --git a/target/microblaze/helper.c b/target/microblaze/helper.c index ef0e2f973fa79..cf577a722689e 100644 --- a/target/microblaze/helper.c +++ b/target/microblaze/helper.c @@ -274,7 +274,8 @@ hwaddr mb_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr, MemTxAttrs *attrs) { MicroBlazeCPU *cpu = MICROBLAZE_CPU(cs); - target_ulong vaddr, paddr = 0; + vaddr vaddr; + hwaddr paddr = 0; MicroBlazeMMULookup lu; int mmu_idx = cpu_mmu_index(cs, false); unsigned int hit; diff --git a/target/microblaze/helper.h b/target/microblaze/helper.h index ef4fad9b91eb6..01eba592b2604 100644 --- a/target/microblaze/helper.h +++ b/target/microblaze/helper.h @@ -20,7 +20,7 @@ DEF_HELPER_FLAGS_3(fcmp_ne, TCG_CALL_NO_WG, i32, env, i32, i32) DEF_HELPER_FLAGS_3(fcmp_ge, TCG_CALL_NO_WG, i32, env, i32, i32) DEF_HELPER_FLAGS_2(pcmpbf, TCG_CALL_NO_RWG_SE, i32, i32, i32) -DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, tl) +DEF_HELPER_FLAGS_2(stackprot, TCG_CALL_NO_WG, void, env, i32) DEF_HELPER_FLAGS_2(get, TCG_CALL_NO_RWG, i32, i32, i32) DEF_HELPER_FLAGS_3(put, TCG_CALL_NO_RWG, void, i32, i32, i32) diff --git 
a/target/microblaze/machine.c b/target/microblaze/machine.c index a4cf38dc89177..48efa546d3913 100644 --- a/target/microblaze/machine.c +++ b/target/microblaze/machine.c @@ -78,7 +78,7 @@ static const VMStateField vmstate_env_fields[] = { VMSTATE_UINT32(iflags, CPUMBState), VMSTATE_UINT32(res_val, CPUMBState), - VMSTATE_UINTTL(res_addr, CPUMBState), + VMSTATE_UINT32(res_addr, CPUMBState), VMSTATE_STRUCT(mmu, CPUMBState, 0, vmstate_mmu, MicroBlazeMMU), @@ -87,8 +87,8 @@ static const VMStateField vmstate_env_fields[] = { static const VMStateDescription vmstate_env = { .name = "env", - .version_id = 0, - .minimum_version_id = 0, + .version_id = 1, + .minimum_version_id = 1, .fields = vmstate_env_fields, }; diff --git a/target/microblaze/mmu.c b/target/microblaze/mmu.c index 8703ff5c657ed..db24cb399cec4 100644 --- a/target/microblaze/mmu.c +++ b/target/microblaze/mmu.c @@ -78,7 +78,7 @@ static void mmu_change_pid(CPUMBState *env, unsigned int newpid) /* rw - 0 = read, 1 = write, 2 = fetch. */ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu, - target_ulong vaddr, MMUAccessType rw, int mmu_idx) + vaddr vaddr, MMUAccessType rw, int mmu_idx) { MicroBlazeMMU *mmu = &cpu->env.mmu; unsigned int i, hit = 0; @@ -172,7 +172,7 @@ unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu, } done: qemu_log_mask(CPU_LOG_MMU, - "MMU vaddr=0x" TARGET_FMT_lx + "MMU vaddr=0x%" VADDR_PRIx " rw=%d tlb_wr=%d tlb_ex=%d hit=%d\n", vaddr, rw, tlb_wr, tlb_ex, hit); return hit; diff --git a/target/microblaze/mmu.h b/target/microblaze/mmu.h index 1068bd2d52b77..2aca39c923b81 100644 --- a/target/microblaze/mmu.h +++ b/target/microblaze/mmu.h @@ -86,7 +86,7 @@ typedef struct { } MicroBlazeMMULookup; unsigned int mmu_translate(MicroBlazeCPU *cpu, MicroBlazeMMULookup *lu, - target_ulong vaddr, MMUAccessType rw, int mmu_idx); + vaddr vaddr, MMUAccessType rw, int mmu_idx); uint32_t mmu_read(CPUMBState *env, bool ea, uint32_t rn); void mmu_write(CPUMBState *env, bool 
ea, uint32_t rn, uint32_t v); void mmu_init(MicroBlazeMMU *mmu); diff --git a/target/microblaze/op_helper.c b/target/microblaze/op_helper.c index b8365b3b1d289..df93c4229d69b 100644 --- a/target/microblaze/op_helper.c +++ b/target/microblaze/op_helper.c @@ -365,13 +365,13 @@ uint32_t helper_pcmpbf(uint32_t a, uint32_t b) return 0; } -void helper_stackprot(CPUMBState *env, target_ulong addr) +void helper_stackprot(CPUMBState *env, uint32_t addr) { if (addr < env->slr || addr > env->shr) { CPUState *cs = env_cpu(env); qemu_log_mask(CPU_LOG_INT, "Stack protector violation at " - TARGET_FMT_lx " %x %x\n", + "0x%x 0x%x 0x%x\n", addr, env->slr, env->shr); env->ear = addr; diff --git a/target/microblaze/translate.c b/target/microblaze/translate.c index 5098a1db4dc7d..6442a250c5dd0 100644 --- a/target/microblaze/translate.c +++ b/target/microblaze/translate.c @@ -55,7 +55,7 @@ static TCGv_i32 cpu_imm; static TCGv_i32 cpu_bvalue; static TCGv_i32 cpu_btarget; static TCGv_i32 cpu_iflags; -static TCGv cpu_res_addr; +static TCGv_i32 cpu_res_addr; static TCGv_i32 cpu_res_val; /* This is the state at translation time. 
*/ @@ -116,12 +116,12 @@ static void gen_raise_hw_excp(DisasContext *dc, uint32_t esr_ec) gen_raise_exception_sync(dc, EXCP_HW_EXCP); } -static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest) { if (translator_use_goto_tb(&dc->base, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_i32(cpu_pc, dest); - tcg_gen_exit_tb(dc->base.tb, n); + tcg_gen_exit_tb(dc->base.tb, tb_slot_idx); } else { tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_lookup_and_goto_ptr(); @@ -604,9 +604,9 @@ static bool trans_wdic(DisasContext *dc, arg_wdic *a) DO_TYPEA(xor, false, tcg_gen_xor_i32) DO_TYPEBI(xori, false, tcg_gen_xori_i32) -static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) +static TCGv_i32 compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) { - TCGv ret; + TCGv_i32 ret; /* If any of the regs is r0, set t to the value of the other reg. */ if (ra && rb) { @@ -626,9 +626,9 @@ static TCGv compute_ldst_addr_typea(DisasContext *dc, int ra, int rb) return ret; } -static TCGv compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm) +static TCGv_i32 compute_ldst_addr_typeb(DisasContext *dc, int ra, int imm) { - TCGv ret; + TCGv_i32 ret; /* If any of the regs is r0, set t to the value of the other reg. */ if (ra && imm) { @@ -708,7 +708,7 @@ static inline MemOp mo_endian(DisasContext *dc) return dc->cfg->endi ? 
MO_LE : MO_BE; } -static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, +static bool do_load(DisasContext *dc, int rd, TCGv_i32 addr, MemOp mop, int mem_index, bool rev) { MemOp size = mop & MO_SIZE; @@ -726,7 +726,7 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, mop ^= MO_BSWAP; } if (size < MO_32) { - tcg_gen_xori_tl(addr, addr, 3 - size); + tcg_gen_xori_i32(addr, addr, 3 - size); } } @@ -750,13 +750,13 @@ static bool do_load(DisasContext *dc, int rd, TCGv addr, MemOp mop, static bool trans_lbu(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false); } static bool trans_lbur(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, true); } @@ -776,19 +776,19 @@ static bool trans_lbuea(DisasContext *dc, arg_typea *arg) static bool trans_lbui(DisasContext *dc, arg_typeb *arg) { - TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); + TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); return do_load(dc, arg->rd, addr, MO_UB, dc->mem_index, false); } static bool trans_lhu(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_lhur(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, true); } @@ -810,19 +810,19 @@ static bool trans_lhuea(DisasContext *dc, arg_typea *arg) static bool trans_lhui(DisasContext *dc, arg_typeb *arg) { - TCGv 
addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); + TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); return do_load(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_lw(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_lwr(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, true); } @@ -844,20 +844,20 @@ static bool trans_lwea(DisasContext *dc, arg_typea *arg) static bool trans_lwi(DisasContext *dc, arg_typeb *arg) { - TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); + TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); return do_load(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_lwx(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); /* lwx does not throw unaligned access errors, so force alignment */ - tcg_gen_andi_tl(addr, addr, ~3); + tcg_gen_andi_i32(addr, addr, ~3); tcg_gen_qemu_ld_i32(cpu_res_val, addr, dc->mem_index, mo_endian(dc) | MO_UL); - tcg_gen_mov_tl(cpu_res_addr, addr); + tcg_gen_mov_i32(cpu_res_addr, addr); if (arg->rd) { tcg_gen_mov_i32(cpu_R[arg->rd], cpu_res_val); @@ -868,7 +868,7 @@ static bool trans_lwx(DisasContext *dc, arg_typea *arg) return true; } -static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, +static bool do_store(DisasContext *dc, int rd, TCGv_i32 addr, MemOp mop, int mem_index, bool rev) { MemOp size = mop & MO_SIZE; @@ -886,7 +886,7 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, mop ^= MO_BSWAP; } if (size < MO_32) { - tcg_gen_xori_tl(addr, 
addr, 3 - size); + tcg_gen_xori_i32(addr, addr, 3 - size); } } @@ -910,13 +910,13 @@ static bool do_store(DisasContext *dc, int rd, TCGv addr, MemOp mop, static bool trans_sb(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false); } static bool trans_sbr(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, true); } @@ -936,19 +936,19 @@ static bool trans_sbea(DisasContext *dc, arg_typea *arg) static bool trans_sbi(DisasContext *dc, arg_typeb *arg) { - TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); + TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); return do_store(dc, arg->rd, addr, MO_UB, dc->mem_index, false); } static bool trans_sh(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_shr(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, true); } @@ -970,19 +970,19 @@ static bool trans_shea(DisasContext *dc, arg_typea *arg) static bool trans_shi(DisasContext *dc, arg_typeb *arg) { - TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); + TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); return do_store(dc, arg->rd, addr, MO_UW, dc->mem_index, false); } static bool trans_sw(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, 
arg->rb); return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_swr(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, true); } @@ -1004,19 +1004,19 @@ static bool trans_swea(DisasContext *dc, arg_typea *arg) static bool trans_swi(DisasContext *dc, arg_typeb *arg) { - TCGv addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); + TCGv_i32 addr = compute_ldst_addr_typeb(dc, arg->ra, arg->imm); return do_store(dc, arg->rd, addr, MO_UL, dc->mem_index, false); } static bool trans_swx(DisasContext *dc, arg_typea *arg) { - TCGv addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); + TCGv_i32 addr = compute_ldst_addr_typea(dc, arg->ra, arg->rb); TCGLabel *swx_done = gen_new_label(); TCGLabel *swx_fail = gen_new_label(); TCGv_i32 tval; /* swx does not throw unaligned access errors, so force alignment */ - tcg_gen_andi_tl(addr, addr, ~3); + tcg_gen_andi_i32(addr, addr, ~3); /* * Compare the address vs the one we used during lwx. @@ -1024,7 +1024,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) * branch, but we know we can use the equal version in the global. * In either case, addr is no longer needed. */ - tcg_gen_brcond_tl(TCG_COND_NE, cpu_res_addr, addr, swx_fail); + tcg_gen_brcond_i32(TCG_COND_NE, cpu_res_addr, addr, swx_fail); /* * Compare the value loaded during lwx with current contents of @@ -1052,7 +1052,7 @@ static bool trans_swx(DisasContext *dc, arg_typea *arg) * Prevent the saved address from working again without another ldx. * Akin to the pseudocode setting reservation = 0. 
*/ - tcg_gen_movi_tl(cpu_res_addr, -1); + tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE); return true; } @@ -1173,7 +1173,7 @@ static bool trans_brk(DisasContext *dc, arg_typea_br *arg) tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next); } tcg_gen_ori_i32(cpu_msr, cpu_msr, MSR_BIP); - tcg_gen_movi_tl(cpu_res_addr, -1); + tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE); dc->base.is_jmp = DISAS_EXIT; return true; @@ -1194,7 +1194,7 @@ static bool trans_brki(DisasContext *dc, arg_typeb_br *arg) if (arg->rd) { tcg_gen_movi_i32(cpu_R[arg->rd], dc->base.pc_next); } - tcg_gen_movi_tl(cpu_res_addr, -1); + tcg_gen_movi_i32(cpu_res_addr, RES_ADDR_NONE); #ifdef CONFIG_USER_ONLY switch (imm) { @@ -1885,6 +1885,7 @@ void mb_tcg_init(void) tcg_global_mem_new_i32(tcg_env, i32s[i].ofs, i32s[i].name); } - cpu_res_addr = - tcg_global_mem_new(tcg_env, offsetof(CPUMBState, res_addr), "res_addr"); + cpu_res_addr = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUMBState, res_addr), + "res_addr"); } diff --git a/target/mips/cpu.c b/target/mips/cpu.c index 1f6c41fd3401e..5989c3ba17782 100644 --- a/target/mips/cpu.c +++ b/target/mips/cpu.c @@ -145,7 +145,7 @@ static bool mips_cpu_has_work(CPUState *cs) * check for interrupts that can be taken. For pre-release 6 CPUs, * check for CP0 Config7 'Wait IE ignore' bit. */ - if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && + if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && cpu_mips_hw_interrupts_pending(env)) { if (cpu_mips_hw_interrupts_enabled(env) || (env->CP0_Config7 & (1 << CP0C7_WII)) || @@ -160,7 +160,7 @@ static bool mips_cpu_has_work(CPUState *cs) * The QEMU model will issue an _WAKE request whenever the CPUs * should be woken up. */ - if (cs->interrupt_request & CPU_INTERRUPT_WAKE) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_WAKE)) { has_work = true; } @@ -170,7 +170,7 @@ static bool mips_cpu_has_work(CPUState *cs) } /* MIPS Release 6 has the ability to halt the CPU. 
*/ if (env->CP0_Config5 & (1 << CP0C5_VP)) { - if (cs->interrupt_request & CPU_INTERRUPT_WAKE) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_WAKE)) { has_work = true; } if (!mips_vp_active(env)) { diff --git a/target/mips/kvm.c b/target/mips/kvm.c index ec53acb51a1fd..912cd5dfa0e06 100644 --- a/target/mips/kvm.c +++ b/target/mips/kvm.c @@ -144,7 +144,7 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) bql_lock(); - if ((cs->interrupt_request & CPU_INTERRUPT_HARD) && + if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && cpu_mips_io_interrupts_pending(cpu)) { intr.cpu = -1; intr.irq = 2; @@ -590,7 +590,7 @@ static void kvm_mips_update_state(void *opaque, bool running, RunState state) } } -static int kvm_mips_put_fpu_registers(CPUState *cs, int level) +static int kvm_mips_put_fpu_registers(CPUState *cs, KvmPutState level) { CPUMIPSState *env = cpu_env(cs); int err, ret = 0; @@ -749,7 +749,7 @@ static int kvm_mips_get_fpu_registers(CPUState *cs) } -static int kvm_mips_put_cp0_registers(CPUState *cs, int level) +static int kvm_mips_put_cp0_registers(CPUState *cs, KvmPutState level) { CPUMIPSState *env = cpu_env(cs); int err, ret = 0; @@ -1177,7 +1177,7 @@ static int kvm_mips_get_cp0_registers(CPUState *cs) return ret; } -int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp) { CPUMIPSState *env = cpu_env(cs); struct kvm_regs regs; diff --git a/target/mips/tcg/system/cp0_helper.c b/target/mips/tcg/system/cp0_helper.c index 101b1e65fdd3a..b69e70d7fcf74 100644 --- a/target/mips/tcg/system/cp0_helper.c +++ b/target/mips/tcg/system/cp0_helper.c @@ -1562,12 +1562,14 @@ target_ulong helper_dvpe(CPUMIPSState *env) CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(other_cs); - /* Turn off all VPEs except the one executing the dvpe. 
*/ - if (&other_cpu->env != env) { - other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); - mips_vpe_sleep(other_cpu); + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); + /* Turn off all VPEs except the one executing the dvpe. */ + if (&other_cpu->env != env) { + other_cpu->env.mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP); + mips_vpe_sleep(other_cpu); + } } } return prev; @@ -1578,15 +1580,17 @@ target_ulong helper_evpe(CPUMIPSState *env) CPUState *other_cs = first_cpu; target_ulong prev = env->mvp->CP0_MVPControl; - CPU_FOREACH(other_cs) { - MIPSCPU *other_cpu = MIPS_CPU(other_cs); + if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) { + CPU_FOREACH(other_cs) { + MIPSCPU *other_cpu = MIPS_CPU(other_cs); - if (&other_cpu->env != env - /* If the VPE is WFI, don't disturb its sleep. */ - && !mips_vpe_is_wfi(other_cpu)) { - /* Enable the VPE. */ - other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); - mips_vpe_wake(other_cpu); /* And wake it up. */ + if (&other_cpu->env != env + /* If the VPE is WFI, don't disturb its sleep. */ + && !mips_vpe_is_wfi(other_cpu)) { + /* Enable the VPE. */ + other_cpu->env.mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP); + mips_vpe_wake(other_cpu); /* And wake it up. 
*/ + } } } return prev; diff --git a/target/mips/tcg/system/tlb_helper.c b/target/mips/tcg/system/tlb_helper.c index eccaf3624cb3d..1e8901556d660 100644 --- a/target/mips/tcg/system/tlb_helper.c +++ b/target/mips/tcg/system/tlb_helper.c @@ -652,7 +652,7 @@ static int walk_directory(CPUMIPSState *env, uint64_t *vaddr, return 0; } - if ((entry & (1 << psn)) && hugepg) { + if (extract64(entry, psn, 1) && hugepg) { *huge_page = true; *hgpg_directory_hit = true; entry = get_tlb_entry_layout(env, entry, leaf_mop, pf_ptew); diff --git a/target/mips/tcg/translate.c b/target/mips/tcg/translate.c index d91d6efe02ca3..54849e9ff1a4f 100644 --- a/target/mips/tcg/translate.c +++ b/target/mips/tcg/translate.c @@ -4362,12 +4362,13 @@ static void gen_trap(DisasContext *ctx, uint32_t opc, } } -static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, + target_ulong dest) { if (translator_use_goto_tb(&ctx->base, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); gen_save_pc(dest); - tcg_gen_exit_tb(ctx->base.tb, n); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { gen_save_pc(dest); tcg_gen_lookup_and_goto_ptr(); diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c index dfbb2df643a09..9bbfe22ed3a85 100644 --- a/target/openrisc/cpu.c +++ b/target/openrisc/cpu.c @@ -78,8 +78,7 @@ static void openrisc_restore_state_to_opc(CPUState *cs, #ifndef CONFIG_USER_ONLY static bool openrisc_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & (CPU_INTERRUPT_HARD | - CPU_INTERRUPT_TIMER); + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/openrisc/cpu.h b/target/openrisc/cpu.h index f4bcf00b07357..c8e2827930be8 100644 --- a/target/openrisc/cpu.h +++ b/target/openrisc/cpu.h @@ -220,33 +220,24 @@ typedef struct OpenRISCTLBEntry { typedef struct CPUOpenRISCTLBContext { OpenRISCTLBEntry itlb[TLB_SIZE]; 
OpenRISCTLBEntry dtlb[TLB_SIZE]; - - int (*cpu_openrisc_map_address_code)(OpenRISCCPU *cpu, - hwaddr *physical, - int *prot, - target_ulong address, int rw); - int (*cpu_openrisc_map_address_data)(OpenRISCCPU *cpu, - hwaddr *physical, - int *prot, - target_ulong address, int rw); } CPUOpenRISCTLBContext; #endif typedef struct CPUArchState { - target_ulong shadow_gpr[16][32]; /* Shadow registers */ + uint32_t shadow_gpr[16][32]; /* Shadow registers */ - target_ulong pc; /* Program counter */ - target_ulong ppc; /* Prev PC */ - target_ulong jmp_pc; /* Jump PC */ + uint32_t pc; /* Program counter */ + uint32_t ppc; /* Prev PC */ + uint32_t jmp_pc; /* Jump PC */ uint64_t mac; /* Multiply registers MACHI:MACLO */ - target_ulong epcr; /* Exception PC register */ - target_ulong eear; /* Exception EA register */ + uint32_t epcr; /* Exception PC register */ + uint32_t eear; /* Exception EA register */ - target_ulong sr_f; /* the SR_F bit, values 0, 1. */ - target_ulong sr_cy; /* the SR_CY bit, values 0, 1. */ - target_long sr_ov; /* the SR_OV bit (in the sign bit only) */ + uint32_t sr_f; /* the SR_F bit, values 0, 1. */ + uint32_t sr_cy; /* the SR_CY bit, values 0, 1. 
*/ + int32_t sr_ov; /* the SR_OV bit (in the sign bit only) */ uint32_t sr; /* Supervisor register, without SR_{F,CY,OV} */ uint32_t esr; /* Exception supervisor register */ uint32_t evbar; /* Exception vector base address register */ @@ -254,8 +245,8 @@ typedef struct CPUArchState { uint32_t fpcsr; /* Float register */ float_status fp_status; - target_ulong lock_addr; - target_ulong lock_value; + uint32_t lock_addr; + uint32_t lock_value; uint32_t dflag; /* In delay slot (boolean) */ diff --git a/target/openrisc/fpu_helper.c b/target/openrisc/fpu_helper.c index dba997255c6b8..39b6195dd7d35 100644 --- a/target/openrisc/fpu_helper.c +++ b/target/openrisc/fpu_helper.c @@ -146,10 +146,10 @@ uint32_t helper_float_madd_s(CPUOpenRISCState *env, uint32_t a, #define FLOAT_CMP(name, impl) \ -target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \ +uint32_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \ uint64_t fdt0, uint64_t fdt1) \ { return float64_ ## impl(fdt0, fdt1, &env->fp_status); } \ -target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \ +uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \ uint32_t fdt0, uint32_t fdt1) \ { return float32_ ## impl(fdt0, fdt1, &env->fp_status); } @@ -160,13 +160,13 @@ FLOAT_CMP(un, unordered_quiet) #undef FLOAT_CMP #define FLOAT_UCMP(name, expr) \ -target_ulong helper_float_ ## name ## _d(CPUOpenRISCState *env, \ +uint32_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \ uint64_t fdt0, uint64_t fdt1) \ { \ FloatRelation r = float64_compare_quiet(fdt0, fdt1, &env->fp_status); \ return expr; \ } \ -target_ulong helper_float_ ## name ## _s(CPUOpenRISCState *env, \ +uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \ uint32_t fdt0, uint32_t fdt1) \ { \ FloatRelation r = float32_compare_quiet(fdt0, fdt1, &env->fp_status); \ diff --git a/target/openrisc/helper.h b/target/openrisc/helper.h index d847814a28d01..e0a8d402271bb 100644 --- a/target/openrisc/helper.h +++ 
b/target/openrisc/helper.h @@ -47,8 +47,8 @@ FOP_CALC(rem) #undef FOP_CALC #define FOP_CMP(op) \ -DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, tl, env, i32, i32) \ -DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, tl, env, i64, i64) +DEF_HELPER_FLAGS_3(float_ ## op ## _s, TCG_CALL_NO_RWG, i32, env, i32, i32) \ +DEF_HELPER_FLAGS_3(float_ ## op ## _d, TCG_CALL_NO_RWG, i32, env, i64, i64) FOP_CMP(eq) FOP_CMP(lt) FOP_CMP(le) @@ -62,5 +62,5 @@ FOP_CMP(ult) DEF_HELPER_FLAGS_1(rfe, 0, void, env) /* sys */ -DEF_HELPER_FLAGS_3(mtspr, 0, void, env, tl, tl) -DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, tl, env, tl, tl) +DEF_HELPER_FLAGS_3(mtspr, 0, void, env, i32, i32) +DEF_HELPER_FLAGS_3(mfspr, TCG_CALL_NO_WG, i32, env, i32, i32) diff --git a/target/openrisc/machine.c b/target/openrisc/machine.c index 081c706d02c42..f2853674f0ffa 100644 --- a/target/openrisc/machine.c +++ b/target/openrisc/machine.c @@ -26,8 +26,8 @@ static const VMStateDescription vmstate_tlb_entry = { .version_id = 1, .minimum_version_id = 1, .fields = (const VMStateField[]) { - VMSTATE_UINTTL(mr, OpenRISCTLBEntry), - VMSTATE_UINTTL(tr, OpenRISCTLBEntry), + VMSTATE_UINT32(mr, OpenRISCTLBEntry), + VMSTATE_UINT32(tr, OpenRISCTLBEntry), VMSTATE_END_OF_LIST() } }; @@ -72,14 +72,14 @@ static const VMStateDescription vmstate_env = { .version_id = 6, .minimum_version_id = 6, .fields = (const VMStateField[]) { - VMSTATE_UINTTL_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32), - VMSTATE_UINTTL(pc, CPUOpenRISCState), - VMSTATE_UINTTL(ppc, CPUOpenRISCState), - VMSTATE_UINTTL(jmp_pc, CPUOpenRISCState), - VMSTATE_UINTTL(lock_addr, CPUOpenRISCState), - VMSTATE_UINTTL(lock_value, CPUOpenRISCState), - VMSTATE_UINTTL(epcr, CPUOpenRISCState), - VMSTATE_UINTTL(eear, CPUOpenRISCState), + VMSTATE_UINT32_2DARRAY(shadow_gpr, CPUOpenRISCState, 16, 32), + VMSTATE_UINT32(pc, CPUOpenRISCState), + VMSTATE_UINT32(ppc, CPUOpenRISCState), + VMSTATE_UINT32(jmp_pc, CPUOpenRISCState), + VMSTATE_UINT32(lock_addr, 
CPUOpenRISCState), + VMSTATE_UINT32(lock_value, CPUOpenRISCState), + VMSTATE_UINT32(epcr, CPUOpenRISCState), + VMSTATE_UINT32(eear, CPUOpenRISCState), /* Save the architecture value of the SR, not the internally expanded version. Since this architecture value does not diff --git a/target/openrisc/mmu.c b/target/openrisc/mmu.c index acea50c41eb76..ffb732e0d1f6d 100644 --- a/target/openrisc/mmu.c +++ b/target/openrisc/mmu.c @@ -28,15 +28,14 @@ #include "qemu/host-utils.h" #include "hw/loader.h" -static inline void get_phys_nommu(hwaddr *phys_addr, int *prot, - target_ulong address) +static void get_phys_nommu(hwaddr *phys_addr, int *prot, vaddr address) { *phys_addr = address; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; } static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot, - target_ulong addr, int need, bool super) + vaddr addr, int need, bool super) { int idx = (addr >> TARGET_PAGE_BITS) & TLB_MASK; uint32_t imr = cpu->env.tlb.itlb[idx].mr; @@ -95,7 +94,7 @@ static int get_phys_mmu(OpenRISCCPU *cpu, hwaddr *phys_addr, int *prot, } } -static void raise_mmu_exception(OpenRISCCPU *cpu, target_ulong address, +static void raise_mmu_exception(OpenRISCCPU *cpu, vaddr address, int exception) { CPUState *cs = CPU(cpu); diff --git a/target/openrisc/sys_helper.c b/target/openrisc/sys_helper.c index d96b41a01c2b3..7ad908b632221 100644 --- a/target/openrisc/sys_helper.c +++ b/target/openrisc/sys_helper.c @@ -40,12 +40,12 @@ static inline bool is_user(CPUOpenRISCState *env) #endif } -void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) +void HELPER(mtspr)(CPUOpenRISCState *env, uint32_t spr, uint32_t rb) { OpenRISCCPU *cpu = env_archcpu(env); #ifndef CONFIG_USER_ONLY CPUState *cs = env_cpu(env); - target_ulong mr; + uint32_t mr; int idx; #endif @@ -196,7 +196,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) env->ttmr = (rb & ~TTMR_IP) | ip; } else { /* Clear IP bit. 
*/ env->ttmr = rb & ~TTMR_IP; - cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; + cpu_reset_interrupt(cs, CPU_INTERRUPT_TIMER); } cpu_openrisc_timer_update(cpu); bql_unlock(); @@ -213,8 +213,7 @@ void HELPER(mtspr)(CPUOpenRISCState *env, target_ulong spr, target_ulong rb) #endif } -target_ulong HELPER(mfspr)(CPUOpenRISCState *env, target_ulong rd, - target_ulong spr) +uint32_t HELPER(mfspr)(CPUOpenRISCState *env, uint32_t rd, uint32_t spr) { OpenRISCCPU *cpu = env_archcpu(env); #ifndef CONFIG_USER_ONLY diff --git a/target/openrisc/translate.c b/target/openrisc/translate.c index 5ab3bc7021dc4..6fa4d6cfa709c 100644 --- a/target/openrisc/translate.c +++ b/target/openrisc/translate.c @@ -51,14 +51,20 @@ typedef struct DisasContext { uint32_t avr; /* If not -1, jmp_pc contains this value and so is a direct jump. */ - target_ulong jmp_pc_imm; + vaddr jmp_pc_imm; /* The temporary corresponding to register 0 for this compilation. */ - TCGv R0; + TCGv_i32 R0; /* The constant zero. */ - TCGv zero; + TCGv_i32 zero; } DisasContext; +static inline MemOp mo_endian(DisasContext *dc) +{ + /* The SR_LEE bit sets the (little) endianness, but we don't implement it. */ + return MO_BE; +} + static inline bool is_user(DisasContext *dc) { #ifdef CONFIG_USER_ONLY @@ -71,16 +77,16 @@ static inline bool is_user(DisasContext *dc) /* Include the auto-generated decoder. 
*/ #include "decode-insns.c.inc" -static TCGv cpu_sr; -static TCGv cpu_regs[32]; -static TCGv cpu_pc; -static TCGv jmp_pc; /* l.jr/l.jalr temp pc */ -static TCGv cpu_ppc; -static TCGv cpu_sr_f; /* bf/bnf, F flag taken */ -static TCGv cpu_sr_cy; /* carry (unsigned overflow) */ -static TCGv cpu_sr_ov; /* signed overflow */ -static TCGv cpu_lock_addr; -static TCGv cpu_lock_value; +static TCGv_i32 cpu_sr; +static TCGv_i32 cpu_regs[32]; +static TCGv_i32 cpu_pc; +static TCGv_i32 jmp_pc; /* l.jr/l.jalr temp pc */ +static TCGv_i32 cpu_ppc; +static TCGv_i32 cpu_sr_f; /* bf/bnf, F flag taken */ +static TCGv_i32 cpu_sr_cy; /* carry (unsigned overflow) */ +static TCGv_i32 cpu_sr_ov; /* signed overflow */ +static TCGv_i32 cpu_lock_addr; +static TCGv_i32 cpu_lock_value; static TCGv_i32 fpcsr; static TCGv_i64 cpu_mac; /* MACHI:MACLO */ static TCGv_i32 cpu_dflag; @@ -95,27 +101,27 @@ void openrisc_translate_init(void) }; int i; - cpu_sr = tcg_global_mem_new(tcg_env, + cpu_sr = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, sr), "sr"); cpu_dflag = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, dflag), "dflag"); - cpu_pc = tcg_global_mem_new(tcg_env, + cpu_pc = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, pc), "pc"); - cpu_ppc = tcg_global_mem_new(tcg_env, + cpu_ppc = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, ppc), "ppc"); - jmp_pc = tcg_global_mem_new(tcg_env, + jmp_pc = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc"); - cpu_sr_f = tcg_global_mem_new(tcg_env, + cpu_sr_f = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, sr_f), "sr_f"); - cpu_sr_cy = tcg_global_mem_new(tcg_env, + cpu_sr_cy = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, sr_cy), "sr_cy"); - cpu_sr_ov = tcg_global_mem_new(tcg_env, + cpu_sr_ov = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, sr_ov), "sr_ov"); - cpu_lock_addr = tcg_global_mem_new(tcg_env, + cpu_lock_addr = 
tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, lock_addr), "lock_addr"); - cpu_lock_value = tcg_global_mem_new(tcg_env, + cpu_lock_value = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, lock_value), "lock_value"); fpcsr = tcg_global_mem_new_i32(tcg_env, @@ -125,7 +131,7 @@ void openrisc_translate_init(void) offsetof(CPUOpenRISCState, mac), "mac"); for (i = 0; i < 32; i++) { - cpu_regs[i] = tcg_global_mem_new(tcg_env, + cpu_regs[i] = tcg_global_mem_new_i32(tcg_env, offsetof(CPUOpenRISCState, shadow_gpr[0][i]), regnames[i]); @@ -139,7 +145,7 @@ static void gen_exception(DisasContext *dc, unsigned int excp) static void gen_illegal_exception(DisasContext *dc) { - tcg_gen_movi_tl(cpu_pc, dc->base.pc_next); + tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); gen_exception(dc, EXCP_ILLEGAL); dc->base.is_jmp = DISAS_NORETURN; } @@ -159,7 +165,7 @@ static bool check_of64a32s(DisasContext *dc) return dc->cpucfgr & CPUCFGR_OF64A32S; } -static TCGv cpu_R(DisasContext *dc, int reg) +static TCGv_i32 cpu_R(DisasContext *dc, int reg) { if (reg == 0) { return dc->R0; @@ -200,147 +206,133 @@ static void gen_ove_cyov(DisasContext *dc) } } -static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void gen_add(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - TCGv t0 = tcg_temp_new(); - TCGv res = tcg_temp_new(); + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 res = tcg_temp_new_i32(); - tcg_gen_add2_tl(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero); - tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); - tcg_gen_xor_tl(t0, res, srcb); - tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); + tcg_gen_add2_i32(res, cpu_sr_cy, srca, dc->zero, srcb, dc->zero); + tcg_gen_xor_i32(cpu_sr_ov, srca, srcb); + tcg_gen_xor_i32(t0, res, srcb); + tcg_gen_andc_i32(cpu_sr_ov, t0, cpu_sr_ov); - tcg_gen_mov_tl(dest, res); + tcg_gen_mov_i32(dest, res); gen_ove_cyov(dc); } -static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void 
gen_addc(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - TCGv t0 = tcg_temp_new(); - TCGv res = tcg_temp_new(); + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 res = tcg_temp_new_i32(); - tcg_gen_addcio_tl(res, cpu_sr_cy, srca, srcb, cpu_sr_cy); - tcg_gen_xor_tl(cpu_sr_ov, srca, srcb); - tcg_gen_xor_tl(t0, res, srcb); - tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov); + tcg_gen_addcio_i32(res, cpu_sr_cy, srca, srcb, cpu_sr_cy); + tcg_gen_xor_i32(cpu_sr_ov, srca, srcb); + tcg_gen_xor_i32(t0, res, srcb); + tcg_gen_andc_i32(cpu_sr_ov, t0, cpu_sr_ov); - tcg_gen_mov_tl(dest, res); + tcg_gen_mov_i32(dest, res); gen_ove_cyov(dc); } -static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void gen_sub(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - TCGv res = tcg_temp_new(); + TCGv_i32 res = tcg_temp_new_i32(); - tcg_gen_sub_tl(res, srca, srcb); - tcg_gen_xor_tl(cpu_sr_cy, srca, srcb); - tcg_gen_xor_tl(cpu_sr_ov, res, srcb); - tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy); - tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb); + tcg_gen_sub_i32(res, srca, srcb); + tcg_gen_xor_i32(cpu_sr_cy, srca, srcb); + tcg_gen_xor_i32(cpu_sr_ov, res, srcb); + tcg_gen_and_i32(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy); + tcg_gen_setcond_i32(TCG_COND_LTU, cpu_sr_cy, srca, srcb); - tcg_gen_mov_tl(dest, res); + tcg_gen_mov_i32(dest, res); gen_ove_cyov(dc); } -static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void gen_mul(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - TCGv t0 = tcg_temp_new(); + TCGv_i32 t0 = tcg_temp_new_i32(); - tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb); - tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1); - tcg_gen_negsetcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0); + tcg_gen_muls2_i32(dest, cpu_sr_ov, srca, srcb); + tcg_gen_sari_i32(t0, dest, TARGET_LONG_BITS - 1); + tcg_gen_negsetcond_i32(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0); gen_ove_ov(dc); } -static void 
gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void gen_mulu(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb); - tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0); + tcg_gen_muls2_i32(dest, cpu_sr_cy, srca, srcb); + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0); gen_ove_cy(dc); } -static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void gen_div(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - TCGv t0 = tcg_temp_new(); + TCGv_i32 t0 = tcg_temp_new_i32(); - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_ov, srcb, 0); /* The result of divide-by-zero is undefined. Suppress the host-side exception by dividing by 1. */ - tcg_gen_or_tl(t0, srcb, cpu_sr_ov); - tcg_gen_div_tl(dest, srca, t0); + tcg_gen_or_i32(t0, srcb, cpu_sr_ov); + tcg_gen_div_i32(dest, srca, t0); - tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov); + tcg_gen_neg_i32(cpu_sr_ov, cpu_sr_ov); gen_ove_ov(dc); } -static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) +static void gen_divu(DisasContext *dc, TCGv_i32 dest, + TCGv_i32 srca, TCGv_i32 srcb) { - TCGv t0 = tcg_temp_new(); + TCGv_i32 t0 = tcg_temp_new_i32(); - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_cy, srcb, 0); /* The result of divide-by-zero is undefined. Suppress the host-side exception by dividing by 1. 
*/ - tcg_gen_or_tl(t0, srcb, cpu_sr_cy); - tcg_gen_divu_tl(dest, srca, t0); + tcg_gen_or_i32(t0, srcb, cpu_sr_cy); + tcg_gen_divu_i32(dest, srca, t0); gen_ove_cy(dc); } -static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb) +static void gen_muld(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_ext_tl_i64(t1, srca); - tcg_gen_ext_tl_i64(t2, srcb); - if (TARGET_LONG_BITS == 32) { - tcg_gen_mul_i64(cpu_mac, t1, t2); - tcg_gen_movi_tl(cpu_sr_ov, 0); - } else { - TCGv_i64 high = tcg_temp_new_i64(); - - tcg_gen_muls2_i64(cpu_mac, high, t1, t2); - tcg_gen_sari_i64(t1, cpu_mac, 63); - tcg_gen_negsetcond_i64(TCG_COND_NE, t1, t1, high); - tcg_gen_trunc_i64_tl(cpu_sr_ov, t1); - - gen_ove_ov(dc); - } + tcg_gen_ext_i32_i64(t1, srca); + tcg_gen_ext_i32_i64(t2, srcb); + tcg_gen_mul_i64(cpu_mac, t1, t2); + tcg_gen_movi_i32(cpu_sr_ov, 0); } -static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb) +static void gen_muldu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_extu_tl_i64(t1, srca); - tcg_gen_extu_tl_i64(t2, srcb); - if (TARGET_LONG_BITS == 32) { - tcg_gen_mul_i64(cpu_mac, t1, t2); - tcg_gen_movi_tl(cpu_sr_cy, 0); - } else { - TCGv_i64 high = tcg_temp_new_i64(); - - tcg_gen_mulu2_i64(cpu_mac, high, t1, t2); - tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0); - tcg_gen_trunc_i64_tl(cpu_sr_cy, high); - - gen_ove_cy(dc); - } + tcg_gen_extu_i32_i64(t1, srca); + tcg_gen_extu_i32_i64(t2, srcb); + tcg_gen_mul_i64(cpu_mac, t1, t2); + tcg_gen_movi_i32(cpu_sr_cy, 0); } -static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) +static void gen_mac(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_ext_tl_i64(t1, srca); - tcg_gen_ext_tl_i64(t2, srcb); + tcg_gen_ext_i32_i64(t1, srca); + tcg_gen_ext_i32_i64(t2, srcb); tcg_gen_mul_i64(t1, t1, 
t2); /* Note that overflow is only computed during addition stage. */ @@ -349,39 +341,35 @@ static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb) tcg_gen_xor_i64(t1, t1, cpu_mac); tcg_gen_andc_i64(t1, t1, t2); -#if TARGET_LONG_BITS == 32 tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); -#else - tcg_gen_mov_i64(cpu_sr_ov, t1); -#endif gen_ove_ov(dc); } -static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb) +static void gen_macu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_extu_tl_i64(t1, srca); - tcg_gen_extu_tl_i64(t2, srcb); + tcg_gen_extu_i32_i64(t1, srca); + tcg_gen_extu_i32_i64(t2, srcb); tcg_gen_mul_i64(t1, t1, t2); /* Note that overflow is only computed during addition stage. */ tcg_gen_add_i64(cpu_mac, cpu_mac, t1); tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1); - tcg_gen_trunc_i64_tl(cpu_sr_cy, t1); + tcg_gen_extrl_i64_i32(cpu_sr_cy, t1); gen_ove_cy(dc); } -static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) +static void gen_msb(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_ext_tl_i64(t1, srca); - tcg_gen_ext_tl_i64(t2, srcb); + tcg_gen_ext_i32_i64(t1, srca); + tcg_gen_ext_i32_i64(t2, srcb); tcg_gen_mul_i64(t1, t1, t2); /* Note that overflow is only computed during subtraction stage. */ @@ -399,19 +387,19 @@ static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) gen_ove_ov(dc); } -static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb) +static void gen_msbu(DisasContext *dc, TCGv_i32 srca, TCGv_i32 srcb) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); - tcg_gen_extu_tl_i64(t1, srca); - tcg_gen_extu_tl_i64(t2, srcb); + tcg_gen_extu_i32_i64(t1, srca); + tcg_gen_extu_i32_i64(t2, srcb); tcg_gen_mul_i64(t1, t1, t2); /* Note that overflow is only computed during subtraction stage. 
*/ tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1); tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); - tcg_gen_trunc_i64_tl(cpu_sr_cy, t2); + tcg_gen_extrl_i64_i32(cpu_sr_cy, t2); gen_ove_cy(dc); } @@ -440,84 +428,84 @@ static bool trans_l_sub(DisasContext *dc, arg_dab *a) static bool trans_l_and(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_and_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_or(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_or_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_xor(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_xor_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sll(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_shl_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_srl(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_shr_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sra(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_sar_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_ror(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); + tcg_gen_rotr_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_exths(DisasContext *dc, arg_da *a) { check_r0_write(dc, 
a->d); - tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); + tcg_gen_ext16s_i32(cpu_R(dc, a->d), cpu_R(dc, a->a)); return true; } static bool trans_l_extbs(DisasContext *dc, arg_da *a) { check_r0_write(dc, a->d); - tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); + tcg_gen_ext8s_i32(cpu_R(dc, a->d), cpu_R(dc, a->a)); return true; } static bool trans_l_exthz(DisasContext *dc, arg_da *a) { check_r0_write(dc, a->d); - tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); + tcg_gen_ext16u_i32(cpu_R(dc, a->d), cpu_R(dc, a->a)); return true; } static bool trans_l_extbz(DisasContext *dc, arg_da *a) { check_r0_write(dc, a->d); - tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); + tcg_gen_ext8u_i32(cpu_R(dc, a->d), cpu_R(dc, a->a)); return true; } static bool trans_l_cmov(DisasContext *dc, arg_dab *a) { check_r0_write(dc, a->d); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero, + tcg_gen_movcond_i32(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, dc->zero, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } @@ -525,16 +513,16 @@ static bool trans_l_cmov(DisasContext *dc, arg_dab *a) static bool trans_l_ff1(DisasContext *dc, arg_da *a) { check_r0_write(dc, a->d); - tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1); - tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1); + tcg_gen_ctzi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), -1); + tcg_gen_addi_i32(cpu_R(dc, a->d), cpu_R(dc, a->d), 1); return true; } static bool trans_l_fl1(DisasContext *dc, arg_da *a) { check_r0_write(dc, a->d); - tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS); - tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d)); + tcg_gen_clzi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS); + tcg_gen_subfi_i32(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d)); return true; } @@ -580,9 +568,9 @@ static bool trans_l_muldu(DisasContext *dc, arg_ab *a) static bool trans_l_j(DisasContext *dc, arg_l_j *a) { - target_ulong tmp_pc = dc->base.pc_next + a->n * 4; 
+ vaddr tmp_pc = dc->base.pc_next + a->n * 4; - tcg_gen_movi_tl(jmp_pc, tmp_pc); + tcg_gen_movi_i32(jmp_pc, tmp_pc); dc->jmp_pc_imm = tmp_pc; dc->delayed_branch = 2; return true; @@ -590,13 +578,13 @@ static bool trans_l_j(DisasContext *dc, arg_l_j *a) static bool trans_l_jal(DisasContext *dc, arg_l_jal *a) { - target_ulong tmp_pc = dc->base.pc_next + a->n * 4; - target_ulong ret_pc = dc->base.pc_next + 8; + vaddr tmp_pc = dc->base.pc_next + a->n * 4; + vaddr ret_pc = dc->base.pc_next + 8; - tcg_gen_movi_tl(cpu_regs[9], ret_pc); + tcg_gen_movi_i32(cpu_regs[9], ret_pc); /* Optimize jal being used to load the PC for PIC. */ if (tmp_pc != ret_pc) { - tcg_gen_movi_tl(jmp_pc, tmp_pc); + tcg_gen_movi_i32(jmp_pc, tmp_pc); dc->jmp_pc_imm = tmp_pc; dc->delayed_branch = 2; } @@ -605,11 +593,11 @@ static bool trans_l_jal(DisasContext *dc, arg_l_jal *a) static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond) { - target_ulong tmp_pc = dc->base.pc_next + a->n * 4; - TCGv t_next = tcg_constant_tl(dc->base.pc_next + 8); - TCGv t_true = tcg_constant_tl(tmp_pc); + vaddr tmp_pc = dc->base.pc_next + a->n * 4; + TCGv_i32 t_next = tcg_constant_i32(dc->base.pc_next + 8); + TCGv_i32 t_true = tcg_constant_i32(tmp_pc); - tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next); + tcg_gen_movcond_i32(cond, jmp_pc, cpu_sr_f, dc->zero, t_true, t_next); dc->delayed_branch = 2; } @@ -627,51 +615,54 @@ static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a) static bool trans_l_jr(DisasContext *dc, arg_l_jr *a) { - tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b)); + tcg_gen_mov_i32(jmp_pc, cpu_R(dc, a->b)); dc->delayed_branch = 2; return true; } static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a) { - tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b)); - tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8); + tcg_gen_mov_i32(jmp_pc, cpu_R(dc, a->b)); + tcg_gen_movi_i32(cpu_regs[9], dc->base.pc_next + 8); dc->delayed_branch = 2; return true; } static bool trans_l_lwa(DisasContext *dc, arg_load *a) 
{ - TCGv ea; + TCGv_i32 ea; check_r0_write(dc, a->d); - ea = tcg_temp_new(); - tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i); - tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL); - tcg_gen_mov_tl(cpu_lock_addr, ea); - tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d)); + ea = tcg_temp_new_i32(); + tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i); + tcg_gen_qemu_ld_i32(cpu_R(dc, a->d), ea, dc->mem_idx, + mo_endian(dc) | MO_UL); + tcg_gen_mov_i32(cpu_lock_addr, ea); + tcg_gen_mov_i32(cpu_lock_value, cpu_R(dc, a->d)); return true; } static void do_load(DisasContext *dc, arg_load *a, MemOp mop) { - TCGv ea; + TCGv_i32 ea; + + mop |= mo_endian(dc); check_r0_write(dc, a->d); - ea = tcg_temp_new(); - tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i); - tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop); + ea = tcg_temp_new_i32(); + tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i); + tcg_gen_qemu_ld_i32(cpu_R(dc, a->d), ea, dc->mem_idx, mop); } static bool trans_l_lwz(DisasContext *dc, arg_load *a) { - do_load(dc, a, MO_TEUL); + do_load(dc, a, MO_UL); return true; } static bool trans_l_lws(DisasContext *dc, arg_load *a) { - do_load(dc, a, MO_TESL); + do_load(dc, a, MO_SL); return true; } @@ -689,53 +680,57 @@ static bool trans_l_lbs(DisasContext *dc, arg_load *a) static bool trans_l_lhz(DisasContext *dc, arg_load *a) { - do_load(dc, a, MO_TEUW); + do_load(dc, a, MO_UW); return true; } static bool trans_l_lhs(DisasContext *dc, arg_load *a) { - do_load(dc, a, MO_TESW); + do_load(dc, a, MO_SW); return true; } static bool trans_l_swa(DisasContext *dc, arg_store *a) { - TCGv ea, val; + TCGv_i32 ea, val; TCGLabel *lab_fail, *lab_done; - ea = tcg_temp_new(); - tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i); + ea = tcg_temp_new_i32(); + tcg_gen_addi_i32(ea, cpu_R(dc, a->a), a->i); lab_fail = gen_new_label(); lab_done = gen_new_label(); - tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail); + tcg_gen_brcond_i32(TCG_COND_NE, ea, cpu_lock_addr, lab_fail); - val = tcg_temp_new(); - 
tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value, - cpu_R(dc, a->b), dc->mem_idx, MO_TEUL); - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value); + val = tcg_temp_new_i32(); + tcg_gen_atomic_cmpxchg_i32(val, cpu_lock_addr, cpu_lock_value, + cpu_R(dc, a->b), dc->mem_idx, + mo_endian(dc) | MO_UL); + tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value); tcg_gen_br(lab_done); gen_set_label(lab_fail); - tcg_gen_movi_tl(cpu_sr_f, 0); + tcg_gen_movi_i32(cpu_sr_f, 0); gen_set_label(lab_done); - tcg_gen_movi_tl(cpu_lock_addr, -1); + tcg_gen_movi_i32(cpu_lock_addr, -1); return true; } static void do_store(DisasContext *dc, arg_store *a, MemOp mop) { - TCGv t0 = tcg_temp_new(); - tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i); - tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop); + TCGv_i32 t0 = tcg_temp_new_i32(); + + mop |= mo_endian(dc); + + tcg_gen_addi_i32(t0, cpu_R(dc, a->a), a->i); + tcg_gen_qemu_st_i32(cpu_R(dc, a->b), t0, dc->mem_idx, mop); } static bool trans_l_sw(DisasContext *dc, arg_store *a) { - do_store(dc, a, MO_TEUL); + do_store(dc, a, MO_UL); return true; } @@ -747,7 +742,7 @@ static bool trans_l_sb(DisasContext *dc, arg_store *a) static bool trans_l_sh(DisasContext *dc, arg_store *a) { - do_store(dc, a, MO_TEUW); + do_store(dc, a, MO_UW); return true; } @@ -772,75 +767,75 @@ static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a) static bool trans_l_addi(DisasContext *dc, arg_rri *a) { check_r0_write(dc, a->d); - gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i)); + gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i)); return true; } static bool trans_l_addic(DisasContext *dc, arg_rri *a) { check_r0_write(dc, a->d); - gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i)); + gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i)); return true; } static bool trans_l_muli(DisasContext *dc, arg_rri *a) { check_r0_write(dc, a->d); - gen_mul(dc, cpu_R(dc, 
a->d), cpu_R(dc, a->a), tcg_constant_tl(a->i)); + gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), tcg_constant_i32(a->i)); return true; } static bool trans_l_maci(DisasContext *dc, arg_l_maci *a) { - gen_mac(dc, cpu_R(dc, a->a), tcg_constant_tl(a->i)); + gen_mac(dc, cpu_R(dc, a->a), tcg_constant_i32(a->i)); return true; } static bool trans_l_andi(DisasContext *dc, arg_rrk *a) { check_r0_write(dc, a->d); - tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k); + tcg_gen_andi_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k); return true; } static bool trans_l_ori(DisasContext *dc, arg_rrk *a) { check_r0_write(dc, a->d); - tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k); + tcg_gen_ori_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k); return true; } static bool trans_l_xori(DisasContext *dc, arg_rri *a) { check_r0_write(dc, a->d); - tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i); + tcg_gen_xori_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i); return true; } static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a) { - TCGv spr = tcg_temp_new(); + TCGv_i32 spr = tcg_temp_new_i32(); check_r0_write(dc, a->d); if (translator_io_start(&dc->base)) { if (dc->delayed_branch) { - tcg_gen_mov_tl(cpu_pc, jmp_pc); - tcg_gen_discard_tl(jmp_pc); + tcg_gen_mov_i32(cpu_pc, jmp_pc); + tcg_gen_discard_i32(jmp_pc); } else { - tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4); + tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4); } dc->base.is_jmp = DISAS_EXIT; } - tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); + tcg_gen_ori_i32(spr, cpu_R(dc, a->a), a->k); gen_helper_mfspr(cpu_R(dc, a->d), tcg_env, cpu_R(dc, a->d), spr); return true; } static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a) { - TCGv spr = tcg_temp_new(); + TCGv_i32 spr = tcg_temp_new_i32(); translator_io_start(&dc->base); @@ -851,14 +846,14 @@ static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a) * of the cpu state first, allowing it to be overwritten. 
*/ if (dc->delayed_branch) { - tcg_gen_mov_tl(cpu_pc, jmp_pc); - tcg_gen_discard_tl(jmp_pc); + tcg_gen_mov_i32(cpu_pc, jmp_pc); + tcg_gen_discard_i32(jmp_pc); } else { - tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4); + tcg_gen_movi_i32(cpu_pc, dc->base.pc_next + 4); } dc->base.is_jmp = DISAS_EXIT; - tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k); + tcg_gen_ori_i32(spr, cpu_R(dc, a->a), a->k); gen_helper_mtspr(tcg_env, spr, cpu_R(dc, a->b)); return true; } @@ -890,7 +885,7 @@ static bool trans_l_msbu(DisasContext *dc, arg_ab *a) static bool trans_l_slli(DisasContext *dc, arg_dal *a) { check_r0_write(dc, a->d); - tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), + tcg_gen_shli_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->l & (TARGET_LONG_BITS - 1)); return true; } @@ -898,7 +893,7 @@ static bool trans_l_slli(DisasContext *dc, arg_dal *a) static bool trans_l_srli(DisasContext *dc, arg_dal *a) { check_r0_write(dc, a->d); - tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), + tcg_gen_shri_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->l & (TARGET_LONG_BITS - 1)); return true; } @@ -906,7 +901,7 @@ static bool trans_l_srli(DisasContext *dc, arg_dal *a) static bool trans_l_srai(DisasContext *dc, arg_dal *a) { check_r0_write(dc, a->d); - tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), + tcg_gen_sari_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->l & (TARGET_LONG_BITS - 1)); return true; } @@ -914,7 +909,7 @@ static bool trans_l_srai(DisasContext *dc, arg_dal *a) static bool trans_l_rori(DisasContext *dc, arg_dal *a) { check_r0_write(dc, a->d); - tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), + tcg_gen_rotri_i32(cpu_R(dc, a->d), cpu_R(dc, a->a), a->l & (TARGET_LONG_BITS - 1)); return true; } @@ -922,151 +917,151 @@ static bool trans_l_rori(DisasContext *dc, arg_dal *a) static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a) { check_r0_write(dc, a->d); - tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16); + tcg_gen_movi_i32(cpu_R(dc, a->d), a->k << 16); return true; } static bool 
trans_l_macrc(DisasContext *dc, arg_l_macrc *a) { check_r0_write(dc, a->d); - tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac); + tcg_gen_extrl_i64_i32(cpu_R(dc, a->d), cpu_mac); tcg_gen_movi_i64(cpu_mac, 0); return true; } static bool trans_l_sfeq(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfne(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfltu(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfleu(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfgts(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfges(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sflts(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f, + tcg_gen_setcond_i32(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static 
bool trans_l_sfles(DisasContext *dc, arg_ab *a) { - tcg_gen_setcond_tl(TCG_COND_LE, + tcg_gen_setcond_i32(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b)); return true; } static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfnei(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfltui(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfleui(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, 
a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sflesi(DisasContext *dc, arg_ai *a) { - tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i); + tcg_gen_setcondi_i32(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i); return true; } static bool trans_l_sys(DisasContext *dc, arg_l_sys *a) { - tcg_gen_movi_tl(cpu_pc, dc->base.pc_next); + tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); gen_exception(dc, EXCP_SYSCALL); dc->base.is_jmp = DISAS_NORETURN; return true; @@ -1074,7 +1069,7 @@ static bool trans_l_sys(DisasContext *dc, arg_l_sys *a) static bool trans_l_trap(DisasContext *dc, arg_l_trap *a) { - tcg_gen_movi_tl(cpu_pc, dc->base.pc_next); + tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); gen_exception(dc, EXCP_TRAP); dc->base.is_jmp = DISAS_NORETURN; return true; @@ -1108,7 +1103,7 @@ static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a) } static bool do_fp2(DisasContext *dc, arg_da *a, - void (*fn)(TCGv, TCGv_env, TCGv)) + void (*fn)(TCGv_i32, TCGv_env, TCGv_i32)) { if (!check_of32s(dc)) { return false; @@ -1120,7 +1115,7 @@ static bool do_fp2(DisasContext *dc, arg_da *a, } static bool do_fp3(DisasContext *dc, arg_dab *a, - void (*fn)(TCGv, TCGv_env, TCGv, TCGv)) + void (*fn)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32)) { if (!check_of32s(dc)) { return false; @@ -1132,7 +1127,7 @@ static bool do_fp3(DisasContext *dc, arg_dab *a, } static bool do_fpcmp(DisasContext *dc, arg_ab *a, - void (*fn)(TCGv, TCGv_env, TCGv, TCGv), + void (*fn)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32), bool inv, bool swap) { if (!check_of32s(dc)) { @@ -1144,7 +1139,7 @@ static bool do_fpcmp(DisasContext *dc, arg_ab *a, fn(cpu_sr_f, tcg_env, cpu_R(dc, a->a), cpu_R(dc, a->b)); } if (inv) { - tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1); + tcg_gen_xori_i32(cpu_sr_f, cpu_sr_f, 1); } gen_helper_update_fpcsr(tcg_env); return true; @@ -1337,7 +1332,7 @@ static bool do_dp2(DisasContext *dc, arg_da_pair *a, } static bool 
do_dpcmp(DisasContext *dc, arg_ab_pair *a, - void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64), + void (*fn)(TCGv_i32, TCGv_env, TCGv_i64, TCGv_i64), bool inv, bool swap) { TCGv_i64 t0, t1; @@ -1359,7 +1354,7 @@ static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a, } if (inv) { - tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1); + tcg_gen_xori_i32(cpu_sr_f, cpu_sr_f, 1); } gen_helper_update_fpcsr(tcg_env); return true; @@ -1544,7 +1539,7 @@ static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs) /* Allow the TCG optimizer to see that R0 == 0, when it's true, which is the common case. */ - dc->zero = tcg_constant_tl(0); + dc->zero = tcg_constant_i32(0); if (dc->tb_flags & TB_FLAGS_R0_0) { dc->R0 = dc->zero; } else { @@ -1586,7 +1581,7 @@ static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) { DisasContext *dc = container_of(dcbase, DisasContext, base); - target_ulong jmp_dest; + vaddr jmp_dest; /* If we have already exited the TB, nothing following has effect. */ if (dc->base.is_jmp == DISAS_NORETURN) { @@ -1600,32 +1595,32 @@ static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs) /* For DISAS_TOO_MANY, jump to the next insn. */ jmp_dest = dc->base.pc_next; - tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4); + tcg_gen_movi_i32(cpu_ppc, jmp_dest - 4); switch (dc->base.is_jmp) { case DISAS_JUMP: jmp_dest = dc->jmp_pc_imm; if (jmp_dest == -1) { /* The jump destination is indirect/computed; use jmp_pc. */ - tcg_gen_mov_tl(cpu_pc, jmp_pc); - tcg_gen_discard_tl(jmp_pc); + tcg_gen_mov_i32(cpu_pc, jmp_pc); + tcg_gen_discard_i32(jmp_pc); tcg_gen_lookup_and_goto_ptr(); break; } /* The jump destination is direct; use jmp_pc_imm. However, we will have stored into jmp_pc as well; we know now that it wasn't needed. 
*/ - tcg_gen_discard_tl(jmp_pc); + tcg_gen_discard_i32(jmp_pc); /* fallthru */ case DISAS_TOO_MANY: if (translator_use_goto_tb(&dc->base, jmp_dest)) { tcg_gen_goto_tb(0); - tcg_gen_movi_tl(cpu_pc, jmp_dest); + tcg_gen_movi_i32(cpu_pc, jmp_dest); tcg_gen_exit_tb(dc->base.tb, 0); break; } - tcg_gen_movi_tl(cpu_pc, jmp_dest); + tcg_gen_movi_i32(cpu_pc, jmp_dest); tcg_gen_lookup_and_goto_ptr(); break; diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c index ea86ea202abea..89ae763c7f6e2 100644 --- a/target/ppc/cpu-models.c +++ b/target/ppc/cpu-models.c @@ -32,17 +32,20 @@ /* PowerPC CPU definitions */ #define POWERPC_DEF_PREFIX(pvr, svr, type) \ glue(glue(glue(glue(pvr, _), svr), _), type) -#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ +#define POWERPC_DEF_SVR_DEPR(_name, _desc, _pvr, _svr, _type, _deprecation_note) \ static void \ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_class_init) \ (ObjectClass *oc, const void *data) \ { \ DeviceClass *dc = DEVICE_CLASS(oc); \ + CPUClass *cc = CPU_CLASS(oc); \ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); \ \ pcc->pvr = _pvr; \ pcc->svr = _svr; \ dc->desc = _desc; \ + \ + cc->deprecation_note = _deprecation_note; \ } \ \ static const TypeInfo \ @@ -63,6 +66,13 @@ type_init( \ glue(POWERPC_DEF_PREFIX(_pvr, _svr, _type), _cpu_register_types)) +#define POWERPC_DEF_SVR(_name, _desc, _pvr, _svr, _type) \ + POWERPC_DEF_SVR_DEPR(_name, _desc, _pvr, _svr, _type, NULL) + +#define POWERPC_DEPRECATED_CPU(_name, _pvr, _type, _desc, _deprecation_note)\ + POWERPC_DEF_SVR_DEPR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type, \ + _deprecation_note) + #define POWERPC_DEF(_name, _pvr, _type, _desc) \ POWERPC_DEF_SVR(_name, _desc, _pvr, POWERPC_SVR_NONE, _type) @@ -116,6 +126,13 @@ NULL) POWERPC_DEF("x2vp20", CPU_POWERPC_X2VP20, 405, NULL) + /* PPE42 Embedded Controllers */ + POWERPC_DEF("PPE42", CPU_POWERPC_PPE42, ppe42, + "Generic PPE 42") + POWERPC_DEF("PPE42X", CPU_POWERPC_PPE42X, ppe42x, + "Generic PPE 42X") + 
POWERPC_DEF("PPE42XM", CPU_POWERPC_PPE42XM, ppe42xm, + "Generic PPE 42XM") /* PowerPC 440 family */ #if defined(TODO_USER_ONLY) POWERPC_DEF("440", CPU_POWERPC_440, 440GP, @@ -722,12 +739,12 @@ "POWER7 v2.3") POWERPC_DEF("power7p_v2.1", CPU_POWERPC_POWER7P_v21, POWER7, "POWER7+ v2.1") - POWERPC_DEF("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, - "POWER8E v2.1") + POWERPC_DEPRECATED_CPU("power8e_v2.1", CPU_POWERPC_POWER8E_v21, POWER8, + "POWER8E v2.1", "CPU is unmaintained.") POWERPC_DEF("power8_v2.0", CPU_POWERPC_POWER8_v20, POWER8, "POWER8 v2.0") - POWERPC_DEF("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, - "POWER8NVL v1.0") + POWERPC_DEPRECATED_CPU("power8nvl_v1.0", CPU_POWERPC_POWER8NVL_v10, POWER8, + "POWER8NVL v1.0", "CPU is unmaintained.") POWERPC_DEF("power9_v2.0", CPU_POWERPC_POWER9_DD20, POWER9, "POWER9 v2.0") POWERPC_DEF("power9_v2.2", CPU_POWERPC_POWER9_DD22, POWER9, diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h index 72ad31ba50d73..c6cd27f390e7b 100644 --- a/target/ppc/cpu-models.h +++ b/target/ppc/cpu-models.h @@ -69,6 +69,10 @@ enum { /* Xilinx cores */ CPU_POWERPC_X2VP4 = 0x20010820, CPU_POWERPC_X2VP20 = 0x20010860, + /* IBM PPE42 Family */ + CPU_POWERPC_PPE42 = 0x42000000, + CPU_POWERPC_PPE42X = 0x42100000, + CPU_POWERPC_PPE42XM = 0x42200000, /* PowerPC 440 family */ /* Generic PowerPC 440 */ #define CPU_POWERPC_440 CPU_POWERPC_440GXf diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 6b90543811f05..787020f6f90cb 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -220,6 +220,8 @@ typedef enum powerpc_excp_t { POWERPC_EXCP_POWER10, /* POWER11 exception model */ POWERPC_EXCP_POWER11, + /* PPE42 exception model */ + POWERPC_EXCP_PPE42, } powerpc_excp_t; /*****************************************************************************/ @@ -282,6 +284,8 @@ typedef enum powerpc_input_t { PPC_FLAGS_INPUT_POWER9, /* Freescale RCPU bus */ PPC_FLAGS_INPUT_RCPU, + /* PPE42 bus */ + PPC_FLAGS_INPUT_PPE42, } powerpc_input_t; 
#define PPC_INPUT(env) ((env)->bus_model) @@ -433,39 +437,64 @@ typedef enum { #define MSR_TM PPC_BIT_NR(31) /* Transactional Memory Available (Book3s) */ #define MSR_CM PPC_BIT_NR(32) /* Computation mode for BookE hflags */ #define MSR_ICM PPC_BIT_NR(33) /* Interrupt computation mode for BookE */ +#define MSR_SEM0 PPC_BIT_NR(33) /* SIB Error Mask Bit 0 (PPE42) */ +#define MSR_SEM1 PPC_BIT_NR(34) /* SIB Error Mask Bit 1 (PPE42) */ +#define MSR_SEM2 PPC_BIT_NR(35) /* SIB Error Mask Bit 2 (PPE42) */ #define MSR_GS PPC_BIT_NR(35) /* guest state for BookE */ +#define MSR_SEM3 PPC_BIT_NR(36) /* SIB Error Mask Bit 3 (PPE42) */ +#define MSR_SEM4 PPC_BIT_NR(37) /* SIB Error Mask Bit 4 (PPE42) */ #define MSR_UCLE PPC_BIT_NR(37) /* User-mode cache lock enable for BookE */ #define MSR_VR PPC_BIT_NR(38) /* altivec available x hflags */ #define MSR_SPE PPC_BIT_NR(38) /* SPE enable for BookE x hflags */ +#define MSR_SEM5 PPC_BIT_NR(38) /* SIB Error Mask Bit 5 (PPE42) */ +#define MSR_SEM6 PPC_BIT_NR(39) /* SIB Error Mask Bit 6 (PPE42) */ #define MSR_VSX PPC_BIT_NR(40) /* Vector Scalar Extension (>= 2.06)x hflags */ +#define MSR_IS0 PPC_BIT_NR(40) /* Instance Specific Bit 0 (PPE42) */ #define MSR_S PPC_BIT_NR(41) /* Secure state */ +#define MSR_SIBRC0 PPC_BIT_NR(41) /* Last SIB return code Bit 0 (PPE42) */ +#define MSR_SIBRC1 PPC_BIT_NR(42) /* Last SIB return code Bit 1 (PPE42) */ +#define MSR_SIBRC2 PPC_BIT_NR(43) /* Last SIB return code Bit 2 (PPE42) */ +#define MSR_LP PPC_BIT_NR(44) /* Low Priority (PPE42) */ #define MSR_KEY PPC_BIT_NR(44) /* key bit on 603e */ #define MSR_POW PPC_BIT_NR(45) /* Power management */ #define MSR_WE PPC_BIT_NR(45) /* Wait State Enable on 405 */ +#define MSR_IS1 PPC_BIT_NR(46) /* Instance Specific Bit 1 (PPE42) */ #define MSR_TGPR PPC_BIT_NR(46) /* TGPR usage on 602/603 x */ #define MSR_CE PPC_BIT_NR(46) /* Critical int. 
enable on embedded PPC x */ #define MSR_ILE PPC_BIT_NR(47) /* Interrupt little-endian mode */ +#define MSR_UIE PPC_BIT_NR(47) /* Unmaskable Interrupt Enable (PPE42) */ #define MSR_EE PPC_BIT_NR(48) /* External interrupt enable */ #define MSR_PR PPC_BIT_NR(49) /* Problem state hflags */ #define MSR_FP PPC_BIT_NR(50) /* Floating point available hflags */ #define MSR_ME PPC_BIT_NR(51) /* Machine check interrupt enable */ #define MSR_FE0 PPC_BIT_NR(52) /* Floating point exception mode 0 */ +#define MSR_IS2 PPC_BIT_NR(52) /* Instance Specific Bit 2 (PPE42) */ +#define MSR_IS3 PPC_BIT_NR(53) /* Instance Specific Bit 3 (PPE42) */ #define MSR_SE PPC_BIT_NR(53) /* Single-step trace enable x hflags */ #define MSR_DWE PPC_BIT_NR(53) /* Debug wait enable on 405 x */ #define MSR_UBLE PPC_BIT_NR(53) /* User BTB lock enable on e500 x */ #define MSR_BE PPC_BIT_NR(54) /* Branch trace enable x hflags */ #define MSR_DE PPC_BIT_NR(54) /* Debug int. enable on embedded PPC x */ #define MSR_FE1 PPC_BIT_NR(55) /* Floating point exception mode 1 */ +#define MSR_IPE PPC_BIT_NR(55) /* Imprecise Mode Enable (PPE42) */ #define MSR_AL PPC_BIT_NR(56) /* AL bit on POWER */ +#define MSR_SIBRCA0 PPC_BIT_NR(56) /* SIB Return Code Accumulator 0 (PPE42) */ +#define MSR_SIBRCA1 PPC_BIT_NR(57) /* SIB Return Code Accumulator 1 (PPE42) */ #define MSR_EP PPC_BIT_NR(57) /* Exception prefix on 601 */ #define MSR_IR PPC_BIT_NR(58) /* Instruction relocate */ #define MSR_IS PPC_BIT_NR(58) /* Instruction address space (BookE) */ +#define MSR_SIBRCA2 PPC_BIT_NR(58) /* SIB Return Code Accumulator 2 (PPE42) */ +#define MSR_SIBRCA3 PPC_BIT_NR(59) /* SIB Return Code Accumulator 3 (PPE42) */ #define MSR_DR PPC_BIT_NR(59) /* Data relocate */ #define MSR_DS PPC_BIT_NR(59) /* Data address space (BookE) */ #define MSR_PE PPC_BIT_NR(60) /* Protection enable on 403 */ +#define MSR_SIBRCA4 PPC_BIT_NR(60) /* SIB Return Code Accumulator 4 (PPE42) */ +#define MSR_SIBRCA5 PPC_BIT_NR(61) /* SIB Return Code Accumulator 5 (PPE42) 
*/ #define MSR_PX PPC_BIT_NR(61) /* Protection exclusive on 403 x */ #define MSR_PMM PPC_BIT_NR(61) /* Performance monitor mark on POWER x */ #define MSR_RI PPC_BIT_NR(62) /* Recoverable interrupt 1 */ +#define MSR_SIBRCA6 PPC_BIT_NR(62) /* SIB Return Code Accumulator 6 (PPE42) */ +#define MSR_SIBRCA7 PPC_BIT_NR(63) /* SIB Return Code Accumulator 7 (PPE42) */ #define MSR_LE PPC_BIT_NR(63) /* Little-endian mode 1 hflags */ FIELD(MSR, SF, MSR_SF, 1) @@ -517,6 +546,9 @@ FIELD(MSR, PX, MSR_PX, 1) FIELD(MSR, PMM, MSR_PMM, 1) FIELD(MSR, RI, MSR_RI, 1) FIELD(MSR, LE, MSR_LE, 1) +FIELD(MSR, SEM, MSR_SEM6, 7) +FIELD(MSR, SIBRC, MSR_SIBRC2, 3) +FIELD(MSR, SIBRCA, MSR_SIBRCA7, 8) /* * FE0 and FE1 bits are not side-by-side @@ -730,6 +762,31 @@ FIELD(MSR, LE, MSR_LE, 1) #define ESR_VLEMI PPC_BIT(58) /* VLE operation */ #define ESR_MIF PPC_BIT(62) /* Misaligned instruction (VLE) */ +/* PPE42 Interrupt Status Register bits */ +#define PPE42_ISR_SRSMS0 PPC_BIT_NR(48) /* Sys Reset State Machine State 0 */ +#define PPE42_ISR_SRSMS1 PPC_BIT_NR(49) /* Sys Reset State Machine State 1 */ +#define PPE42_ISR_SRSMS2 PPC_BIT_NR(50) /* Sys Reset State Machine State 2 */ +#define PPE42_ISR_SRSMS3 PPC_BIT_NR(51) /* Sys Reset State Machine State 3 */ +#define PPE42_ISR_EP PPC_BIT_NR(53) /* MSR[EE] Maskable Event Pending */ +#define PPE42_ISR_PTR PPC_BIT_NR(56) /* Program Interrupt from trap */ +#define PPE42_ISR_ST PPC_BIT_NR(57) /* Data Interrupt caused by store */ +#define PPE42_ISR_MFE PPC_BIT_NR(60) /* Multiple Fault Error */ +#define PPE42_ISR_MCS0 PPC_BIT_NR(61) /* Machine Check Status bit0 */ +#define PPE42_ISR_MCS1 PPC_BIT_NR(62) /* Machine Check Status bit1 */ +#define PPE42_ISR_MCS2 PPC_BIT_NR(63) /* Machine Check Status bit2 */ +FIELD(PPE42_ISR, SRSMS, PPE42_ISR_SRSMS3, 4) +FIELD(PPE42_ISR, MCS, PPE42_ISR_MCS2, 3) + +/* PPE42 Machine Check Status field values */ +#define PPE42_ISR_MCS_INSTRUCTION 0 +#define PPE42_ISR_MCS_DATA_LOAD 1 +#define PPE42_ISR_MCS_DATA_PRECISE_STORE 2 
+#define PPE42_ISR_MCS_DATA_IMPRECISE_STORE 3 +#define PPE42_ISR_MCS_PROGRAM 4 +#define PPE42_ISR_MCS_ISI 5 +#define PPE42_ISR_MCS_ALIGNMENT 6 +#define PPE42_ISR_MCS_DSI 7 + /* Transaction EXception And Summary Register bits */ #define TEXASR_FAILURE_PERSISTENT (63 - 7) #define TEXASR_DISALLOWED (63 - 8) @@ -785,6 +842,8 @@ enum { POWERPC_FLAG_SMT_1LPAR = 0x00800000, /* Has BHRB */ POWERPC_FLAG_BHRB = 0x01000000, + /* Use PPE42-specific behavior */ + POWERPC_FLAG_PPE42 = 0x02000000, }; /* @@ -1522,6 +1581,10 @@ struct PowerPCCPUClass { void (*init_proc)(CPUPPCState *env); int (*check_pow)(CPUPPCState *env); int (*check_attn)(CPUPPCState *env); + + /* Handlers to be set by the machine initialising the chips */ + uint64_t (*load_sprd)(CPUPPCState *env); + void (*store_sprd)(CPUPPCState *env, uint64_t val); }; static inline bool ppc_cpu_core_single_threaded(CPUState *cs) @@ -1750,9 +1813,12 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_BOOKE_CSRR0 (0x03A) #define SPR_BOOKE_CSRR1 (0x03B) #define SPR_BOOKE_DEAR (0x03D) +#define SPR_PPE42_EDR (0x03D) #define SPR_IAMR (0x03D) #define SPR_BOOKE_ESR (0x03E) +#define SPR_PPE42_ISR (0x03E) #define SPR_BOOKE_IVPR (0x03F) +#define SPR_PPE42_IVPR (0x03F) #define SPR_MPC_EIE (0x050) #define SPR_MPC_EID (0x051) #define SPR_MPC_NRI (0x052) @@ -1818,6 +1884,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_TBU40 (0x11E) #define SPR_SVR (0x11E) #define SPR_BOOKE_PIR (0x11E) +#define SPR_PPE42_PIR (0x11E) #define SPR_PVR (0x11F) #define SPR_HSPRG0 (0x130) #define SPR_BOOKE_DBSR (0x130) @@ -1827,6 +1894,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_BOOKE_EPCR (0x133) #define SPR_SPURR (0x134) #define SPR_BOOKE_DBCR0 (0x134) +#define SPR_PPE42_DBCR (0x134) #define SPR_IBCR (0x135) #define SPR_PURR (0x135) #define SPR_BOOKE_DBCR1 (0x135) @@ -1844,6 +1912,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_HSRR1 (0x13B) #define 
SPR_BOOKE_IAC4 (0x13B) #define SPR_BOOKE_DAC1 (0x13C) +#define SPR_PPE42_DACR (0x13C) #define SPR_MMCRH (0x13C) #define SPR_DABR2 (0x13D) #define SPR_BOOKE_DAC2 (0x13D) @@ -1853,12 +1922,14 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_BOOKE_DVC2 (0x13F) #define SPR_LPIDR (0x13F) #define SPR_BOOKE_TSR (0x150) +#define SPR_PPE42_TSR (0x150) #define SPR_HMER (0x150) #define SPR_HMEER (0x151) #define SPR_PCR (0x152) #define SPR_HEIR (0x153) #define SPR_BOOKE_LPIDR (0x152) #define SPR_BOOKE_TCR (0x154) +#define SPR_PPE42_TCR (0x154) #define SPR_BOOKE_TLB0PS (0x158) #define SPR_BOOKE_TLB1PS (0x159) #define SPR_BOOKE_TLB2PS (0x15A) @@ -2528,6 +2599,12 @@ enum { PPC2_MEM_LWSYNC = 0x0000000000200000ULL, /* ISA 2.06 BCD assist instructions */ PPC2_BCDA_ISA206 = 0x0000000000400000ULL, + /* PPE42 instructions */ + PPC2_PPE42 = 0x0000000000800000ULL, + /* PPE42X instructions */ + PPC2_PPE42X = 0x0000000001000000ULL, + /* PPE42XM instructions */ + PPC2_PPE42XM = 0x0000000002000000ULL, #define PPC_TCG_INSNS2 (PPC2_BOOKE206 | PPC2_VSX | PPC2_PRCNTL | PPC2_DBRX | \ PPC2_ISA205 | PPC2_VSX207 | PPC2_PERM_ISA206 | \ @@ -2537,7 +2614,8 @@ enum { PPC2_ALTIVEC_207 | PPC2_ISA207S | PPC2_DFP | \ PPC2_FP_CVT_S64 | PPC2_TM | PPC2_PM_ISA206 | \ PPC2_ISA300 | PPC2_ISA310 | PPC2_MEM_LWSYNC | \ - PPC2_BCDA_ISA206) + PPC2_BCDA_ISA206 | PPC2_PPE42 | PPC2_PPE42X | \ + PPC2_PPE42XM) }; /*****************************************************************************/ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index a0e77f2673e56..3aa3aefc1366e 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -1653,6 +1653,47 @@ static void register_8xx_sprs(CPUPPCState *env) * ... and more (thermal management, performance counters, ...) 
*/ +static void register_ppe42_sprs(CPUPPCState *env) +{ + spr_register(env, SPR_PPE42_EDR, "EDR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_PPE42_ISR, "ISR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + spr_register(env, SPR_PPE42_IVPR, "IVPR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + 0xfff80000); + spr_register(env, SPR_PPE42_PIR, "PIR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_pir, + 0x00000000); + spr_register(env, SPR_PPE42_DBCR, "DBCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_40x_dbcr0, + 0x00000000); + spr_register(env, SPR_PPE42_DACR, "DACR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + 0x00000000); + /* Timer */ + spr_register(env, SPR_DECR, "DECR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_decr, &spr_write_decr, + 0x00000000); + spr_register(env, SPR_PPE42_TSR, "TSR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_tsr, + 0x00000000); + spr_register(env, SPR_BOOKE_TCR, "TCR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_booke_tcr, + 0x00000000); +} + /*****************************************************************************/ /* Exception vectors models */ static void init_excp_4xx(CPUPPCState *env) @@ -1679,6 +1720,30 @@ static void init_excp_4xx(CPUPPCState *env) #endif } +static void init_excp_ppe42(CPUPPCState *env) +{ +#if !defined(CONFIG_USER_ONLY) + /* Machine Check vector changed after version 0 */ + if (((env->spr[SPR_PVR] & 0xf00000ul) >> 20) == 0) { + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000000; + } else { + env->excp_vectors[POWERPC_EXCP_MCHECK] = 0x00000020; + } + env->excp_vectors[POWERPC_EXCP_RESET] = 0x00000040; + env->excp_vectors[POWERPC_EXCP_DSI] = 0x00000060; + env->excp_vectors[POWERPC_EXCP_ISI] = 0x00000080; + env->excp_vectors[POWERPC_EXCP_EXTERNAL] = 0x000000A0; + 
env->excp_vectors[POWERPC_EXCP_ALIGN] = 0x000000C0; + env->excp_vectors[POWERPC_EXCP_PROGRAM] = 0x000000E0; + env->excp_vectors[POWERPC_EXCP_DECR] = 0x00000100; + env->excp_vectors[POWERPC_EXCP_FIT] = 0x00000120; + env->excp_vectors[POWERPC_EXCP_WDT] = 0x00000140; + env->ivpr_mask = 0xFFFFFE00UL; + /* Hardware reset vector */ + env->hreset_vector = 0x00000040UL; +#endif +} + static void init_excp_MPC5xx(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) @@ -2200,6 +2265,80 @@ POWERPC_FAMILY(405)(ObjectClass *oc, const void *data) POWERPC_FLAG_DE | POWERPC_FLAG_BUS_CLK; } +static void init_proc_ppe42(CPUPPCState *env) +{ + register_ppe42_sprs(env); + + init_excp_ppe42(env); + env->dcache_line_size = 32; + env->icache_line_size = 32; + /* Allocate hardware IRQ controller */ + ppc40x_irq_init(env_archcpu(env)); + + SET_FIT_PERIOD(8, 12, 16, 20); + SET_WDT_PERIOD(16, 20, 24, 28); +} + +static void ppe42_class_common_init(PowerPCCPUClass *pcc) +{ + pcc->init_proc = init_proc_ppe42; + pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; + pcc->insns_flags = PPC_INSNS_BASE | + PPC_WRTEE | + PPC_CACHE | + PPC_CACHE_DCBZ | + PPC_MEM_SYNC; + pcc->msr_mask = R_MSR_SEM_MASK | + (1ull << MSR_IS0) | + R_MSR_SIBRC_MASK | + (1ull << MSR_LP) | + (1ull << MSR_WE) | + (1ull << MSR_IS1) | + (1ull << MSR_UIE) | + (1ull << MSR_EE) | + (1ull << MSR_ME) | + (1ull << MSR_IS2) | + (1ull << MSR_IS3) | + (1ull << MSR_IPE) | + R_MSR_SIBRCA_MASK; + pcc->mmu_model = POWERPC_MMU_REAL; + pcc->excp_model = POWERPC_EXCP_PPE42; + pcc->bus_model = PPC_FLAGS_INPUT_PPE42; + pcc->bfd_mach = bfd_mach_ppc_403; + pcc->flags = POWERPC_FLAG_PPE42 | POWERPC_FLAG_BUS_CLK; +} + +POWERPC_FAMILY(ppe42)(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + + dc->desc = "PPE 42"; + pcc->insns_flags2 = PPC2_PPE42; + ppe42_class_common_init(pcc); +} + +POWERPC_FAMILY(ppe42x)(ObjectClass *oc, const void *data) +{ + DeviceClass 
*dc = DEVICE_CLASS(oc); + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + + dc->desc = "PPE 42X"; + pcc->insns_flags2 = PPC2_PPE42 | PPC2_PPE42X; + ppe42_class_common_init(pcc); +} + +POWERPC_FAMILY(ppe42xm)(ObjectClass *oc, const void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + + dc->desc = "PPE 42XM"; + pcc->insns_flags2 = PPC2_PPE42 | PPC2_PPE42X | PPC2_PPE42XM; + ppe42_class_common_init(pcc); +} + static void init_proc_440EP(CPUPPCState *env) { register_BookE_sprs(env, 0x000000000000FFFFULL); @@ -6802,53 +6941,64 @@ static void init_ppc_proc(PowerPCCPU *cpu) /* MSR bits & flags consistency checks */ if (env->msr_mask & (1 << 25)) { - switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { + switch (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE | + POWERPC_FLAG_PPE42)) { case POWERPC_FLAG_SPE: case POWERPC_FLAG_VRE: + case POWERPC_FLAG_PPE42: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" - "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n"); + "Should define POWERPC_FLAG_SPE or POWERPC_FLAG_VRE\n" + "or POWERPC_FLAG_PPE42\n"); exit(1); } - } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE)) { + } else if (env->flags & (POWERPC_FLAG_SPE | POWERPC_FLAG_VRE | + POWERPC_FLAG_PPE42)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" - "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n"); + "Should not define POWERPC_FLAG_SPE nor POWERPC_FLAG_VRE\n" + "nor POWERPC_FLAG_PPE42\n"); exit(1); } if (env->msr_mask & (1 << 17)) { - switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { + switch (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE | + POWERPC_FLAG_PPE42)) { case POWERPC_FLAG_TGPR: case POWERPC_FLAG_CE: + case POWERPC_FLAG_PPE42: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" - "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n"); + "Should define POWERPC_FLAG_TGPR or POWERPC_FLAG_CE\n" + "or 
POWERPC_FLAG_PPE42\n"); exit(1); } - } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE)) { + } else if (env->flags & (POWERPC_FLAG_TGPR | POWERPC_FLAG_CE | + POWERPC_FLAG_PPE42)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" - "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n"); + "Should not define POWERPC_FLAG_TGPR nor POWERPC_FLAG_CE\n" + "nor POWERPC_FLAG_PPE42\n"); exit(1); } if (env->msr_mask & (1 << 10)) { switch (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | - POWERPC_FLAG_UBLE)) { + POWERPC_FLAG_UBLE | POWERPC_FLAG_PPE42)) { case POWERPC_FLAG_SE: case POWERPC_FLAG_DWE: case POWERPC_FLAG_UBLE: + case POWERPC_FLAG_PPE42: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should define POWERPC_FLAG_SE or POWERPC_FLAG_DWE or " - "POWERPC_FLAG_UBLE\n"); + "POWERPC_FLAG_UBLE or POWERPC_FLAG_PPE42\n"); exit(1); } } else if (env->flags & (POWERPC_FLAG_SE | POWERPC_FLAG_DWE | - POWERPC_FLAG_UBLE)) { + POWERPC_FLAG_UBLE | POWERPC_FLAG_PPE42)) { fprintf(stderr, "PowerPC MSR definition inconsistency\n" "Should not define POWERPC_FLAG_SE nor POWERPC_FLAG_DWE nor " - "POWERPC_FLAG_UBLE\n"); + "POWERPC_FLAG_UBLE nor POWERPC_FLAG_PPE42\n"); exit(1); } if (env->msr_mask & (1 << 9)) { @@ -6867,18 +7017,23 @@ static void init_ppc_proc(PowerPCCPU *cpu) exit(1); } if (env->msr_mask & (1 << 2)) { - switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { + switch (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM | + POWERPC_FLAG_PPE42)) { case POWERPC_FLAG_PX: case POWERPC_FLAG_PMM: + case POWERPC_FLAG_PPE42: break; default: fprintf(stderr, "PowerPC MSR definition inconsistency\n" - "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n"); + "Should define POWERPC_FLAG_PX or POWERPC_FLAG_PMM\n" + "or POWERPC_FLAG_PPE42\n"); exit(1); } - } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM)) { + } else if (env->flags & (POWERPC_FLAG_PX | POWERPC_FLAG_PMM | + POWERPC_FLAG_PPE42)) { fprintf(stderr, "PowerPC MSR 
definition inconsistency\n" - "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n"); + "Should not define POWERPC_FLAG_PX nor POWERPC_FLAG_PMM\n" + "nor POWERPC_FLAG_PPE42\n"); exit(1); } if ((env->flags & POWERPC_FLAG_BUS_CLK) == 0) { @@ -7143,6 +7298,7 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) { ObjectClass *oc = data; PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); DeviceClass *family = DEVICE_CLASS(ppc_cpu_get_family_class(pcc)); const char *typename = object_class_get_name(oc); char *name; @@ -7153,7 +7309,11 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data) } name = cpu_model_from_type(typename); - qemu_printf(" %-16s PVR %08x\n", name, pcc->pvr); + if (cc->deprecation_note) { + qemu_printf(" %-16s PVR %08x (deprecated)\n", name, pcc->pvr); + } else { + qemu_printf(" %-16s PVR %08x\n", name, pcc->pvr); + } for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) { PowerPCCPUAlias *alias = &ppc_cpu_aliases[i]; ObjectClass *alias_oc = ppc_cpu_class_by_name(alias->model); @@ -7225,7 +7385,7 @@ static int ppc_cpu_mmu_index(CPUState *cs, bool ifetch) #ifndef CONFIG_USER_ONLY static bool ppc_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & CPU_INTERRUPT_HARD; + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD); } #endif /* !CONFIG_USER_ONLY */ @@ -7243,39 +7403,40 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type) } msr = (target_ulong)0; - msr |= (target_ulong)MSR_HVB; - msr |= (target_ulong)1 << MSR_EP; + if (!(env->flags & POWERPC_FLAG_PPE42)) { + msr |= (target_ulong)MSR_HVB; + msr |= (target_ulong)1 << MSR_EP; #if defined(DO_SINGLE_STEP) && 0 - /* Single step trace mode */ - msr |= (target_ulong)1 << MSR_SE; - msr |= (target_ulong)1 << MSR_BE; + /* Single step trace mode */ + msr |= (target_ulong)1 << MSR_SE; + msr |= (target_ulong)1 << MSR_BE; #endif #if defined(CONFIG_USER_ONLY) - msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */ - msr |= 
(target_ulong)1 << MSR_FE0; /* Allow floating point exceptions */ - msr |= (target_ulong)1 << MSR_FE1; - msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */ - msr |= (target_ulong)1 << MSR_VSX; /* Allow VSX usage */ - msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */ - msr |= (target_ulong)1 << MSR_PR; + msr |= (target_ulong)1 << MSR_FP; /* Allow floating point usage */ + msr |= (target_ulong)1 << MSR_FE0; /* Allow floating point exceptions */ + msr |= (target_ulong)1 << MSR_FE1; + msr |= (target_ulong)1 << MSR_VR; /* Allow altivec usage */ + msr |= (target_ulong)1 << MSR_VSX; /* Allow VSX usage */ + msr |= (target_ulong)1 << MSR_SPE; /* Allow SPE usage */ + msr |= (target_ulong)1 << MSR_PR; #if defined(TARGET_PPC64) - msr |= (target_ulong)1 << MSR_TM; /* Transactional memory */ + msr |= (target_ulong)1 << MSR_TM; /* Transactional memory */ #endif #if !TARGET_BIG_ENDIAN - msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */ - if (!((env->msr_mask >> MSR_LE) & 1)) { - fprintf(stderr, "Selected CPU does not support little-endian.\n"); - exit(1); - } + msr |= (target_ulong)1 << MSR_LE; /* Little-endian user mode */ + if (!((env->msr_mask >> MSR_LE) & 1)) { + fprintf(stderr, "Selected CPU does not support little-endian.\n"); + exit(1); + } #endif #endif #if defined(TARGET_PPC64) - if (mmu_is_64bit(env->mmu_model)) { - msr |= (1ULL << MSR_SF); - } + if (mmu_is_64bit(env->mmu_model)) { + msr |= (1ULL << MSR_SF); + } #endif - + } hreg_store_msr(env, msr, 1); #if !defined(CONFIG_USER_ONLY) @@ -7725,6 +7886,18 @@ void ppc_cpu_dump_state(CPUState *cs, FILE *f, int flags) * they can be read with "p $ivor0", "p $ivor1", etc. 
*/ break; + case POWERPC_EXCP_PPE42: + qemu_fprintf(f, "SRR0 " TARGET_FMT_lx " SRR1 " TARGET_FMT_lx "\n", + env->spr[SPR_SRR0], env->spr[SPR_SRR1]); + + qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx + " ISR " TARGET_FMT_lx " EDR " TARGET_FMT_lx "\n", + env->spr[SPR_PPE42_TCR], env->spr[SPR_PPE42_TSR], + env->spr[SPR_PPE42_ISR], env->spr[SPR_PPE42_EDR]); + + qemu_fprintf(f, " PIR " TARGET_FMT_lx " IVPR " TARGET_FMT_lx "\n", + env->spr[SPR_PPE42_PIR], env->spr[SPR_PPE42_IVPR]); + break; case POWERPC_EXCP_40x: qemu_fprintf(f, " TCR " TARGET_FMT_lx " TSR " TARGET_FMT_lx " ESR " TARGET_FMT_lx " DEAR " TARGET_FMT_lx "\n", diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 1efdc4066ebb9..d8bca19fff516 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -949,6 +949,125 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) powerpc_set_excp_state(cpu, vector, new_msr); } +static void powerpc_excp_ppe42(PowerPCCPU *cpu, int excp) +{ + CPUPPCState *env = &cpu->env; + target_ulong msr, new_msr, vector; + target_ulong mcs = PPE42_ISR_MCS_INSTRUCTION; + bool promote_unmaskable; + + msr = env->msr; + + /* + * New interrupt handler msr preserves SIBRC and ME unless explicitly + * overridden by the exception. All other MSR bits are zeroed out. + */ + new_msr = env->msr & (((target_ulong)1 << MSR_ME) | R_MSR_SIBRC_MASK); + + /* HV emu assistance interrupt only exists on server arch 2.05 or later */ + if (excp == POWERPC_EXCP_HV_EMU) { + excp = POWERPC_EXCP_PROGRAM; + } + + /* + * Unmaskable interrupts (Program, ISI, Alignment and DSI) are promoted to + * machine check if MSR_UIE is 0. 
+ */ + promote_unmaskable = !(msr & ((target_ulong)1 << MSR_UIE)); + + + switch (excp) { + case POWERPC_EXCP_MCHECK: /* Machine check exception */ + break; + case POWERPC_EXCP_DSI: /* Data storage exception */ + trace_ppc_excp_dsi(env->spr[SPR_PPE42_ISR], env->spr[SPR_PPE42_EDR]); + if (promote_unmaskable) { + excp = POWERPC_EXCP_MCHECK; + mcs = PPE42_ISR_MCS_DSI; + } + break; + case POWERPC_EXCP_ISI: /* Instruction storage exception */ + trace_ppc_excp_isi(msr, env->nip); + if (promote_unmaskable) { + excp = POWERPC_EXCP_MCHECK; + mcs = PPE42_ISR_MCS_ISI; + } + break; + case POWERPC_EXCP_EXTERNAL: /* External input */ + break; + case POWERPC_EXCP_ALIGN: /* Alignment exception */ + if (promote_unmaskable) { + excp = POWERPC_EXCP_MCHECK; + mcs = PPE42_ISR_MCS_ALIGNMENT; + } + break; + case POWERPC_EXCP_PROGRAM: /* Program exception */ + if (promote_unmaskable) { + excp = POWERPC_EXCP_MCHECK; + mcs = PPE42_ISR_MCS_PROGRAM; + } + switch (env->error_code & ~0xF) { + case POWERPC_EXCP_INVAL: + trace_ppc_excp_inval(env->nip); + env->spr[SPR_PPE42_ISR] &= ~((target_ulong)1 << PPE42_ISR_PTR); + break; + case POWERPC_EXCP_TRAP: + env->spr[SPR_PPE42_ISR] |= ((target_ulong)1 << PPE42_ISR_PTR); + break; + default: + /* Should never occur */ + cpu_abort(env_cpu(env), "Invalid program exception %d. Aborting\n", + env->error_code); + break; + } +#ifdef CONFIG_TCG + env->spr[SPR_PPE42_EDR] = ppc_ldl_code(env, env->nip); +#endif + break; + case POWERPC_EXCP_DECR: /* Decrementer exception */ + break; + case POWERPC_EXCP_FIT: /* Fixed-interval timer interrupt */ + trace_ppc_excp_print("FIT"); + break; + case POWERPC_EXCP_WDT: /* Watchdog timer interrupt */ + trace_ppc_excp_print("WDT"); + break; + case POWERPC_EXCP_RESET: /* System reset exception */ + /* reset exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + break; + default: + cpu_abort(env_cpu(env), "Invalid PPE42 exception %d. 
Aborting\n", + excp); + break; + } + + env->spr[SPR_SRR0] = env->nip; + env->spr[SPR_SRR1] = msr; + + vector = env->excp_vectors[excp]; + if (vector == (target_ulong)-1ULL) { + cpu_abort(env_cpu(env), + "Raised an exception without defined vector %d\n", excp); + } + vector |= env->spr[SPR_PPE42_IVPR]; + + if (excp == POWERPC_EXCP_MCHECK) { + /* Also set the Machine Check Status (MCS) */ + env->spr[SPR_PPE42_ISR] &= ~R_PPE42_ISR_MCS_MASK; + env->spr[SPR_PPE42_ISR] |= (mcs & R_PPE42_ISR_MCS_MASK); + env->spr[SPR_PPE42_ISR] &= ~((target_ulong)1 << PPE42_ISR_MFE); + + /* Machine checks halt execution if MSR_ME is 0 */ + powerpc_mcheck_checkstop(env); + + /* machine check exceptions don't have ME set */ + new_msr &= ~((target_ulong)1 << MSR_ME); + } + + powerpc_set_excp_state(cpu, vector, new_msr); +} + static void powerpc_excp_booke(PowerPCCPU *cpu, int excp) { CPUPPCState *env = &cpu->env; @@ -1589,6 +1708,9 @@ void powerpc_excp(PowerPCCPU *cpu, int excp) case POWERPC_EXCP_POWER11: powerpc_excp_books(cpu, excp); break; + case POWERPC_EXCP_PPE42: + powerpc_excp_ppe42(cpu, excp); + break; default: g_assert_not_reached(); } @@ -1945,6 +2067,43 @@ static int p9_next_unmasked_interrupt(CPUPPCState *env, } #endif /* TARGET_PPC64 */ +static int ppe42_next_unmasked_interrupt(CPUPPCState *env) +{ + bool async_deliver; + + /* External reset */ + if (env->pending_interrupts & PPC_INTERRUPT_RESET) { + return PPC_INTERRUPT_RESET; + } + /* Machine check exception */ + if (env->pending_interrupts & PPC_INTERRUPT_MCK) { + return PPC_INTERRUPT_MCK; + } + + async_deliver = FIELD_EX64(env->msr, MSR, EE); + + if (async_deliver != 0) { + /* Watchdog timer */ + if (env->pending_interrupts & PPC_INTERRUPT_WDT) { + return PPC_INTERRUPT_WDT; + } + /* External Interrupt */ + if (env->pending_interrupts & PPC_INTERRUPT_EXT) { + return PPC_INTERRUPT_EXT; + } + /* Fixed interval timer */ + if (env->pending_interrupts & PPC_INTERRUPT_FIT) { + return PPC_INTERRUPT_FIT; + } + /* Decrementer 
exception */ + if (env->pending_interrupts & PPC_INTERRUPT_DECR) { + return PPC_INTERRUPT_DECR; + } + } + + return 0; +} + static int ppc_next_unmasked_interrupt(CPUPPCState *env) { uint32_t pending_interrupts = env->pending_interrupts; @@ -1970,6 +2129,10 @@ static int ppc_next_unmasked_interrupt(CPUPPCState *env) } #endif + if (env->excp_model == POWERPC_EXCP_PPE42) { + return ppe42_next_unmasked_interrupt(env); + } + /* External reset */ if (pending_interrupts & PPC_INTERRUPT_RESET) { return PPC_INTERRUPT_RESET; diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index 07b782f971be3..850aca6ed13e7 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -562,14 +562,14 @@ uint64_t helper_##op(CPUPPCState *env, float64 arg) \ return ret; \ } -FPU_FCTI(fctiw, int32, 0x80000000U) -FPU_FCTI(fctiwz, int32_round_to_zero, 0x80000000U) -FPU_FCTI(fctiwu, uint32, 0x00000000U) -FPU_FCTI(fctiwuz, uint32_round_to_zero, 0x00000000U) -FPU_FCTI(fctid, int64, 0x8000000000000000ULL) -FPU_FCTI(fctidz, int64_round_to_zero, 0x8000000000000000ULL) -FPU_FCTI(fctidu, uint64, 0x0000000000000000ULL) -FPU_FCTI(fctiduz, uint64_round_to_zero, 0x0000000000000000ULL) +FPU_FCTI(FCTIW, int32, 0x80000000U) +FPU_FCTI(FCTIWZ, int32_round_to_zero, 0x80000000U) +FPU_FCTI(FCTIWU, uint32, 0x00000000U) +FPU_FCTI(FCTIWUZ, uint32_round_to_zero, 0x00000000U) +FPU_FCTI(FCTID, int64, 0x8000000000000000ULL) +FPU_FCTI(FCTIDZ, int64_round_to_zero, 0x8000000000000000ULL) +FPU_FCTI(FCTIDU, uint64, 0x0000000000000000ULL) +FPU_FCTI(FCTIDUZ, uint64_round_to_zero, 0x0000000000000000ULL) #define FPU_FCFI(op, cvtr, is_single) \ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ @@ -586,10 +586,10 @@ uint64_t helper_##op(CPUPPCState *env, uint64_t arg) \ return farg.ll; \ } -FPU_FCFI(fcfid, int64_to_float64, 0) -FPU_FCFI(fcfids, int64_to_float32, 1) -FPU_FCFI(fcfidu, uint64_to_float64, 0) -FPU_FCFI(fcfidus, uint64_to_float32, 1) +FPU_FCFI(FCFID, int64_to_float64, 0) +FPU_FCFI(FCFIDS, 
int64_to_float32, 1) +FPU_FCFI(FCFIDU, uint64_to_float64, 0) +FPU_FCFI(FCFIDUS, uint64_to_float32, 1) static uint64_t do_fri(CPUPPCState *env, uint64_t arg, FloatRoundMode rounding_mode) @@ -613,22 +613,22 @@ static uint64_t do_fri(CPUPPCState *env, uint64_t arg, return arg; } -uint64_t helper_frin(CPUPPCState *env, uint64_t arg) +uint64_t helper_FRIN(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_ties_away); } -uint64_t helper_friz(CPUPPCState *env, uint64_t arg) +uint64_t helper_FRIZ(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_to_zero); } -uint64_t helper_frip(CPUPPCState *env, uint64_t arg) +uint64_t helper_FRIP(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_up); } -uint64_t helper_frim(CPUPPCState *env, uint64_t arg) +uint64_t helper_FRIM(CPUPPCState *env, uint64_t arg) { return do_fri(env, arg, float_round_down); } @@ -697,7 +697,7 @@ static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr) return helper_todouble(f32); } -uint64_t helper_frsp(CPUPPCState *env, uint64_t arg) +uint64_t helper_FRSP(CPUPPCState *env, uint64_t arg) { return do_frsp(env, arg, GETPC()); } @@ -871,7 +871,7 @@ uint32_t helper_FTSQRT(uint64_t frb) return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 
2 : 0); } -void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, +void helper_FCMPU(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint32_t crfD) { CPU_DoubleU farg1, farg2; @@ -902,7 +902,7 @@ void helper_fcmpu(CPUPPCState *env, uint64_t arg1, uint64_t arg2, } } -void helper_fcmpo(CPUPPCState *env, uint64_t arg1, uint64_t arg2, +void helper_FCMPO(CPUPPCState *env, uint64_t arg1, uint64_t arg2, uint32_t crfD) { CPU_DoubleU farg1, farg2; diff --git a/target/ppc/helper.h b/target/ppc/helper.h index ca414f2f43d3a..e99c8c824b452 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -94,26 +94,26 @@ DEF_HELPER_2(fpscr_setbit, void, env, i32) DEF_HELPER_FLAGS_1(todouble, TCG_CALL_NO_RWG_SE, i64, i32) DEF_HELPER_FLAGS_1(tosingle, TCG_CALL_NO_RWG_SE, i32, i64) -DEF_HELPER_4(fcmpo, void, env, i64, i64, i32) -DEF_HELPER_4(fcmpu, void, env, i64, i64, i32) +DEF_HELPER_4(FCMPO, void, env, i64, i64, i32) +DEF_HELPER_4(FCMPU, void, env, i64, i64, i32) -DEF_HELPER_2(fctiw, i64, env, i64) -DEF_HELPER_2(fctiwu, i64, env, i64) -DEF_HELPER_2(fctiwz, i64, env, i64) -DEF_HELPER_2(fctiwuz, i64, env, i64) -DEF_HELPER_2(fcfid, i64, env, i64) -DEF_HELPER_2(fcfidu, i64, env, i64) -DEF_HELPER_2(fcfids, i64, env, i64) -DEF_HELPER_2(fcfidus, i64, env, i64) -DEF_HELPER_2(fctid, i64, env, i64) -DEF_HELPER_2(fctidu, i64, env, i64) -DEF_HELPER_2(fctidz, i64, env, i64) -DEF_HELPER_2(fctiduz, i64, env, i64) -DEF_HELPER_2(frsp, i64, env, i64) -DEF_HELPER_2(frin, i64, env, i64) -DEF_HELPER_2(friz, i64, env, i64) -DEF_HELPER_2(frip, i64, env, i64) -DEF_HELPER_2(frim, i64, env, i64) +DEF_HELPER_2(FCTIW, i64, env, i64) +DEF_HELPER_2(FCTIWU, i64, env, i64) +DEF_HELPER_2(FCTIWZ, i64, env, i64) +DEF_HELPER_2(FCTIWUZ, i64, env, i64) +DEF_HELPER_2(FCFID, i64, env, i64) +DEF_HELPER_2(FCFIDU, i64, env, i64) +DEF_HELPER_2(FCFIDS, i64, env, i64) +DEF_HELPER_2(FCFIDUS, i64, env, i64) +DEF_HELPER_2(FCTID, i64, env, i64) +DEF_HELPER_2(FCTIDU, i64, env, i64) +DEF_HELPER_2(FCTIDZ, i64, env, i64) 
+DEF_HELPER_2(FCTIDUZ, i64, env, i64) +DEF_HELPER_2(FRSP, i64, env, i64) +DEF_HELPER_2(FRIN, i64, env, i64) +DEF_HELPER_2(FRIZ, i64, env, i64) +DEF_HELPER_2(FRIP, i64, env, i64) +DEF_HELPER_2(FRIM, i64, env, i64) DEF_HELPER_3(FADD, f64, env, f64, f64) DEF_HELPER_3(FADDS, f64, env, f64, f64) diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 7e5726871e5b2..a07e6a7b7b646 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -186,6 +186,10 @@ static uint32_t hreg_compute_hflags_value(CPUPPCState *env) if (env->spr[SPR_LPCR] & LPCR_HR) { hflags |= 1 << HFLAGS_HR; } + if (unlikely(ppc_flags & POWERPC_FLAG_PPE42)) { + /* PPE42 has a single address space and no problem state */ + msr = 0; + } #ifndef CONFIG_USER_ONLY if (!env->has_hv_mode || (msr & (1ull << MSR_HV))) { @@ -274,6 +278,7 @@ TCGTBCPUState ppc_get_tb_cpu_state(CPUState *cs) return (TCGTBCPUState){ .pc = env->nip, .flags = hflags_current }; } +#ifndef CONFIG_USER_ONLY void cpu_interrupt_exittb(CPUState *cs) { /* @@ -285,6 +290,7 @@ void cpu_interrupt_exittb(CPUState *cs) cpu_interrupt(cs, CPU_INTERRUPT_EXITTB); } } +#endif int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) { @@ -306,9 +312,6 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) value &= ~(1 << MSR_ME); value |= env->msr & (1 << MSR_ME); } - if ((value ^ env->msr) & (R_MSR_IR_MASK | R_MSR_DR_MASK)) { - cpu_interrupt_exittb(cs); - } if ((env->mmu_model == POWERPC_MMU_BOOKE || env->mmu_model == POWERPC_MMU_BOOKE206) && ((value ^ env->msr) & R_MSR_GS_MASK)) { @@ -319,8 +322,14 @@ int hreg_store_msr(CPUPPCState *env, target_ulong value, int alter_hv) /* Swap temporary saved registers with GPRs */ hreg_swap_gpr_tgpr(env); } - if (unlikely((value ^ env->msr) & R_MSR_EP_MASK)) { - env->excp_prefix = FIELD_EX64(value, MSR, EP) * 0xFFF00000; + /* PPE42 uses IR, DR and EP MSR bits for other purposes */ + if (likely(!(env->flags & POWERPC_FLAG_PPE42))) { + if ((value ^ env->msr) 
& (R_MSR_IR_MASK | R_MSR_DR_MASK)) { + cpu_interrupt_exittb(cs); + } + if (unlikely((value ^ env->msr) & R_MSR_EP_MASK)) { + env->excp_prefix = FIELD_EX64(value, MSR, EP) * 0xFFF00000; + } } /* * If PR=1 then EE, IR and DR must be 1 @@ -462,6 +471,23 @@ void register_generic_sprs(PowerPCCPU *cpu) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); + + spr_register(env, SPR_PVR, "PVR", + /* Linux permits userspace to read PVR */ +#if defined(CONFIG_LINUX_USER) + &spr_read_generic, +#else + SPR_NOACCESS, +#endif + SPR_NOACCESS, + &spr_read_generic, SPR_NOACCESS, + pcc->pvr); + + /* PPE42 doesn't support SPRG1-3, SVR or TB regs */ + if (env->insns_flags2 & PPC2_PPE42) { + return; + } + spr_register(env, SPR_SPRG1, "SPRG1", SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, @@ -475,17 +501,6 @@ void register_generic_sprs(PowerPCCPU *cpu) &spr_read_generic, &spr_write_generic, 0x00000000); - spr_register(env, SPR_PVR, "PVR", - /* Linux permits userspace to read PVR */ -#if defined(CONFIG_LINUX_USER) - &spr_read_generic, -#else - SPR_NOACCESS, -#endif - SPR_NOACCESS, - &spr_read_generic, SPR_NOACCESS, - pcc->pvr); - /* Register SVR if it's defined to anything else than POWERPC_SVR_NONE */ if (pcc->svr != POWERPC_SVR_NONE) { if (pcc->svr & POWERPC_SVR_E500) { diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index e53fd2840d445..0e9c68f2fb407 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -58,6 +58,10 @@ %ds_rtp 22:4 !function=times_2 @DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si +%dd_si 3:s13 +&DD rt ra si:int64_t +@DD ...... rt:5 ra:5 ............. . .. &DD si=%dd_si + &DX_b vrt b %dx_b 6:10 16:5 0:1 @DX_b ...... vrt:5 ..... .......... ..... . &DX_b b=%dx_b @@ -66,6 +70,11 @@ %dx_d 6:s10 16:5 0:1 @DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d +%md_sh 1:1 11:5 +%md_mb 5:1 6:5 +&MD rs ra sh mb rc +@MD ...... rs:5 ra:5 ..... ...... ... . 
rc:1 &MD sh=%md_sh mb=%md_mb + &VA vrt vra vrb rc @VA ...... vrt:5 vra:5 vrb:5 rc:5 ...... &VA @@ -322,6 +331,13 @@ LDUX 011111 ..... ..... ..... 0000110101 - @X LQ 111000 ..... ..... ............ ---- @DQ_rtp +LVD 000101 ..... ..... ................ @D +LVDU 001001 ..... ..... ................ @D +LVDX 011111 ..... ..... ..... 0000010001 - @X +LSKU 111010 ..... ..... ............. 0 11 @DD +LCXU 111010 ..... ..... ............. 1 11 @DD + + ### Fixed-Point Store Instructions STB 100110 ..... ..... ................ @D @@ -346,6 +362,11 @@ STDUX 011111 ..... ..... ..... 0010110101 - @X STQ 111110 ..... ..... ..............10 @DS_rtp +STVDU 010110 ..... ..... ................ @D +STVDX 011111 ..... ..... ..... 0010010001 - @X +STSKU 111110 ..... ..... ............. 0 11 @DD +STCXU 111110 ..... ..... ............. 1 11 @DD + ### Fixed-Point Compare Instructions CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl @@ -461,8 +482,14 @@ PRTYD 011111 ..... ..... ----- 0010111010 - @X_sa BPERMD 011111 ..... ..... ..... 0011111100 - @X CFUGED 011111 ..... ..... ..... 0011011100 - @X -CNTLZDM 011111 ..... ..... ..... 0000111011 - @X -CNTTZDM 011111 ..... ..... ..... 1000111011 - @X +{ + SLVD 011111 ..... ..... ..... 0000111011 . @X_rc + CNTLZDM 011111 ..... ..... ..... 0000111011 - @X +} +{ + SRVD 011111 ..... ..... ..... 1000111011 . @X_rc + CNTTZDM 011111 ..... ..... ..... 1000111011 - @X +} PDEPD 011111 ..... ..... ..... 0010011100 - @X PEXTD 011111 ..... ..... ..... 0010111100 - @X @@ -503,6 +530,17 @@ STFDU 110111 ..... ...... ............... @D STFDX 011111 ..... ...... .... 1011010111 - @X STFDUX 011111 ..... ...... .... 1011110111 - @X +### Floating-Point Move Instructions + +FMR 111111 ..... ----- ..... 0001001000 . @X_tb_rc +FNEG 111111 ..... ----- ..... 0000101000 . @X_tb_rc +FABS 111111 ..... ----- ..... 0100001000 . @X_tb_rc +FNABS 111111 ..... ----- ..... 0010001000 . @X_tb_rc + +FCPSGN 111111 ..... ..... ..... 0000001000 . @X_rc +FMRGEW 111111 ..... ..... ..... 
1111000110 - @X +FMRGOW 111111 ..... ..... ..... 1101000110 - @X + ### Floating-Point Arithmetic Instructions FADD 111111 ..... ..... ..... ----- 10101 . @A_tab @@ -541,6 +579,35 @@ FNMADDS 111011 ..... ..... ..... ..... 11111 . @A FNMSUB 111111 ..... ..... ..... ..... 11110 . @A FNMSUBS 111011 ..... ..... ..... ..... 11110 . @A +### Floating-Point Rounding and Conversion Instructions + +FRSP 111111 ..... ----- ..... 0000001100 . @X_tb_rc + +FRIN 111111 ..... ----- ..... 0110001000 . @X_tb_rc +FRIZ 111111 ..... ----- ..... 0110101000 . @X_tb_rc +FRIP 111111 ..... ----- ..... 0111001000 . @X_tb_rc +FRIM 111111 ..... ----- ..... 0111101000 . @X_tb_rc + +FCTIW 111111 ..... ----- ..... 0000001110 . @X_tb_rc +FCTIWU 111111 ..... ----- ..... 0010001110 . @X_tb_rc +FCTIWZ 111111 ..... ----- ..... 0000001111 . @X_tb_rc +FCTIWUZ 111111 ..... ----- ..... 0010001111 . @X_tb_rc + +FCTID 111111 ..... ----- ..... 1100101110 . @X_tb_rc +FCTIDU 111111 ..... ----- ..... 1110101110 . @X_tb_rc +FCTIDZ 111111 ..... ----- ..... 1100101111 . @X_tb_rc +FCTIDUZ 111111 ..... ----- ..... 1110101111 . @X_tb_rc + +FCFID 111111 ..... ----- ..... 1101001110 . @X_tb_rc +FCFIDS 111011 ..... ----- ..... 1101001110 . @X_tb_rc +FCFIDU 111111 ..... ----- ..... 1111001110 . @X_tb_rc +FCFIDUS 111011 ..... ----- ..... 1111001110 . @X_tb_rc + +### Floating-Point Compare Instructions + +FCMPU 111111 ... -- ..... ..... 0000000000 - @X_bf +FCMPO 111111 ... -- ..... ..... 0000100000 - @X_bf + ### Floating-Point Select Instruction FSEL 111111 ..... ..... ..... ..... 10111 . @A @@ -981,8 +1048,16 @@ LXSSP 111001 ..... ..... .............. 11 @DS STXSSP 111101 ..... ..... .............. 11 @DS LXV 111101 ..... ..... ............ . 001 @DQ_TSX STXV 111101 ..... ..... ............ . 101 @DQ_TSX -LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP -STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP + +# STVD PPE instruction overlaps with the LXVP and STXVP instructions +{ + STVD 000110 ..... ..... ................ 
@D + [ + LXVP 000110 ..... ..... ............ 0000 @DQ_TSXP + STXVP 000110 ..... ..... ............ 0001 @DQ_TSXP + ] +} + LXVX 011111 ..... ..... ..... 0100 - 01100 . @X_TSX STXVX 011111 ..... ..... ..... 0110001100 . @X_TSX LXVPX 011111 ..... ..... ..... 0101001101 - @X_TSXP @@ -1300,3 +1375,26 @@ CLRBHRB 011111 ----- ----- ----- 0110101110 - ## Misc POWER instructions ATTN 000000 00000 00000 00000 0100000000 0 + +# Fused compare-branch instructions for PPE only +%fcb_bdx 1:s10 !function=times_4 +&FCB px:bool ra rb:uint64_t bdx lk:bool +@FCB ...... .. px:1 .. ra:5 rb:5 .......... lk:1 &FCB bdx=%fcb_bdx +&FCB_bix px:bool bix ra rb:uint64_t bdx lk:bool +@FCB_bix ...... .. px:1 bix:2 ra:5 rb:5 .......... lk:1 &FCB_bix bdx=%fcb_bdx + +CMPWBC 000001 00 . .. ..... ..... .......... . @FCB_bix +CMPLWBC 000001 01 . .. ..... ..... .......... . @FCB_bix +CMPWIBC 000001 10 . .. ..... ..... .......... . @FCB_bix +BNBWI 000001 11 . 00 ..... ..... .......... . @FCB +BNBW 000001 11 . 01 ..... ..... .......... . @FCB +CLRBWIBC 000001 11 . 10 ..... ..... .......... . @FCB +CLRBWBC 000001 11 . 11 ..... ..... .......... . @FCB + +# Data Cache Block Query for PPE only +DCBQ 011111 ..... ..... ..... 0110010110 - @X + +# Rotate Doubleword Instructions for PPE only +RLDICL 011110 ..... ..... ..... ...... 000 . . @MD +RLDICR 011110 ..... ..... ..... ...... 001 . . @MD +RLDIMI 011110 ..... ..... ..... ...... 011 . . 
@MD diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 015658049e612..cd60893a17d86 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -907,7 +907,7 @@ int kvmppc_put_books_sregs(PowerPCCPU *cpu) return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); } -int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp) { PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *env = &cpu->env; @@ -1354,7 +1354,7 @@ static int kvmppc_handle_halt(PowerPCCPU *cpu) CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) && + if (!cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && FIELD_EX64(env->msr, MSR, EE)) { cs->halted = 1; cs->exception_index = EXCP_HLT; @@ -2760,11 +2760,11 @@ int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns) int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index, uint16_t n_valid, uint16_t n_invalid, Error **errp) { - struct kvm_get_htab_header *buf; - size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64; + size_t chunksize = sizeof(struct kvm_get_htab_header) + + n_valid * HASH_PTE_SIZE_64; + g_autofree struct kvm_get_htab_header *buf = g_malloc(chunksize); ssize_t rc; - buf = alloca(chunksize); buf->index = index; buf->n_valid = n_valid; buf->n_invalid = n_invalid; diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index e7d94625185c3..0e625cbb704d9 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -328,69 +328,22 @@ target_ulong helper_load_sprd(CPUPPCState *env) * accessed by powernv machines. 
*/ PowerPCCPU *cpu = env_archcpu(env); - PnvCore *pc = pnv_cpu_state(cpu)->pnv_core; - target_ulong sprc = env->spr[SPR_POWER_SPRC]; + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - if (pc->big_core) { - pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1); + if (pcc->load_sprd) { + return pcc->load_sprd(env); } - switch (sprc & 0x3e0) { - case 0: /* SCRATCH0-3 */ - case 1: /* SCRATCH4-7 */ - return pc->scratch[(sprc >> 3) & 0x7]; - - case 0x1e0: /* core thread state */ - if (env->excp_model == POWERPC_EXCP_POWER9) { - /* - * Only implement for POWER9 because skiboot uses it to check - * big-core mode. Other bits are unimplemented so we would - * prefer to get unimplemented message on POWER10 if it were - * used anywhere. - */ - if (pc->big_core) { - return PPC_BIT(63); - } else { - return 0; - } - } - /* fallthru */ - - default: - qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x" - TARGET_FMT_lx"\n", sprc); - break; - } return 0; } void helper_store_sprd(CPUPPCState *env, target_ulong val) { - target_ulong sprc = env->spr[SPR_POWER_SPRC]; PowerPCCPU *cpu = env_archcpu(env); - PnvCore *pc = pnv_cpu_state(cpu)->pnv_core; - int nr; - - if (pc->big_core) { - pc = pnv_chip_find_core(pc->chip, CPU_CORE(pc)->core_id & ~0x1); - } + PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu); - switch (sprc & 0x3e0) { - case 0: /* SCRATCH0-3 */ - case 1: /* SCRATCH4-7 */ - /* - * Log stores to SCRATCH, because some firmware uses these for - * debugging and logging, but they would normally be read by the BMC, - * which is not implemented in QEMU yet. This gives a way to get at the - * information. Could also dump these upon checkstop. 
- */ - nr = (sprc >> 3) & 0x7; - pc->scratch[nr] = val; - break; - default: - qemu_log_mask(LOG_UNIMP, "mtSPRD: Unimplemented SPRC:0x" - TARGET_FMT_lx"\n", sprc); - break; + if (pcc->store_sprd) { + return pcc->store_sprd(env, val); } } diff --git a/target/ppc/tcg-excp_helper.c b/target/ppc/tcg-excp_helper.c index f835be515635a..edecfb857258b 100644 --- a/target/ppc/tcg-excp_helper.c +++ b/target/ppc/tcg-excp_helper.c @@ -229,6 +229,18 @@ void ppc_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, case POWERPC_MMU_BOOKE206: env->spr[SPR_BOOKE_DEAR] = vaddr; break; + case POWERPC_MMU_REAL: + if (env->flags & POWERPC_FLAG_PPE42) { + env->spr[SPR_PPE42_EDR] = vaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_PPE42_ISR] |= PPE42_ISR_ST; + } else { + env->spr[SPR_PPE42_ISR] &= ~PPE42_ISR_ST; + } + } else { + env->spr[SPR_DAR] = vaddr; + } + break; default: env->spr[SPR_DAR] = vaddr; break; diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 27f90c3cc5671..17e6d07c8c2db 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -209,6 +209,11 @@ struct DisasContext { #define DISAS_CHAIN DISAS_TARGET_2 /* lookup next tb, pc updated */ #define DISAS_CHAIN_UPDATE DISAS_TARGET_3 /* lookup next tb, pc stale */ +static inline bool is_ppe(const DisasContext *ctx) +{ + return !!(ctx->flags & POWERPC_FLAG_PPE42); +} + /* Return true iff byteswap is needed in a scalar memop */ static inline bool need_byteswap(const DisasContext *ctx) { @@ -556,11 +561,8 @@ void spr_access_nop(DisasContext *ctx, int sprn, int gprn) #endif -/* SPR common to all PowerPC */ -/* XER */ -void spr_read_xer(DisasContext *ctx, int gprn, int sprn) +static void gen_get_xer(DisasContext *ctx, TCGv dst) { - TCGv dst = cpu_gpr[gprn]; TCGv t0 = tcg_temp_new(); TCGv t1 = tcg_temp_new(); TCGv t2 = tcg_temp_new(); @@ -579,9 +581,16 @@ void spr_read_xer(DisasContext *ctx, int gprn, int sprn) } } -void spr_write_xer(DisasContext *ctx, int sprn, int gprn) +/* SPR common to all 
PowerPC */ +/* XER */ +void spr_read_xer(DisasContext *ctx, int gprn, int sprn) +{ + TCGv dst = cpu_gpr[gprn]; + gen_get_xer(ctx, dst); +} + +static void gen_set_xer(DisasContext *ctx, TCGv src) { - TCGv src = cpu_gpr[gprn]; /* Write all flags, while reading back check for isa300 */ tcg_gen_andi_tl(cpu_xer, src, ~((1u << XER_SO) | @@ -594,6 +603,12 @@ void spr_write_xer(DisasContext *ctx, int sprn, int gprn) tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1); } +void spr_write_xer(DisasContext *ctx, int sprn, int gprn) +{ + TCGv src = cpu_gpr[gprn]; + gen_set_xer(ctx, src); +} + /* LR */ void spr_read_lr(DisasContext *ctx, int gprn, int sprn) { @@ -3653,16 +3668,17 @@ static void gen_lookup_and_goto_ptr(DisasContext *ctx) } /*** Branch ***/ -static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, + target_ulong dest) { if (NARROW_MODE(ctx)) { dest = (uint32_t) dest; } if (use_goto_tb(ctx, dest)) { pmu_count_insns(ctx); - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_tl(cpu_nip, dest & ~3); - tcg_gen_exit_tb(ctx->base.tb, n); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { tcg_gen_movi_tl(cpu_nip, dest & ~3); gen_lookup_and_goto_ptr(ctx); @@ -4264,8 +4280,10 @@ static void gen_mtmsr(DisasContext *ctx) /* L=1 form only updates EE and RI */ mask &= (1ULL << MSR_RI) | (1ULL << MSR_EE); } else { - /* mtmsr does not alter S, ME, or LE */ - mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S)); + if (likely(!(ctx->insns_flags2 & PPC2_PPE42))) { + /* mtmsr does not alter S, ME, or LE */ + mask &= ~((1ULL << MSR_LE) | (1ULL << MSR_ME) | (1ULL << MSR_S)); + } /* * XXX: we need to update nip before the store if we enter @@ -5753,6 +5771,8 @@ static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) #include "translate/bhrb-impl.c.inc" +#include "translate/ppe-impl.c.inc" + /* Handles lfdp */ static void gen_dform39(DisasContext *ctx) { diff --git 
a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index a66b83398b6ca..464fb1d90f738 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -98,28 +98,26 @@ static bool do_helper_ac(DisasContext *ctx, arg_A_tac *a, return true; } -#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ -static void gen_f##name(DisasContext *ctx) \ -{ \ - TCGv_i64 t0; \ - TCGv_i64 t1; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - t1 = tcg_temp_new_i64(); \ - gen_reset_fpstatus(); \ - get_fpr(t0, rB(ctx->opcode)); \ - gen_helper_f##name(t1, tcg_env, t0); \ - set_fpr(rD(ctx->opcode), t1); \ - if (set_fprf) { \ - gen_helper_compute_fprf_float64(tcg_env, t1); \ - } \ - gen_helper_float_check_status(tcg_env); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ +static bool do_round_convert(DisasContext *ctx, arg_X_tb_rc *a, + void (*helper)(TCGv_i64, TCGv_env, TCGv_i64), + bool set_fprf) +{ + TCGv_i64 t0, t1; + REQUIRE_FPU(ctx); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + gen_reset_fpstatus(); + get_fpr(t0, a->rb); + helper(t1, tcg_env, t0); + set_fpr(a->rt, t1); + if (set_fprf) { + gen_helper_compute_fprf_float64(tcg_env, t1); + } + gen_helper_float_check_status(tcg_env); + if (unlikely(a->rc)) { + gen_set_cr1_from_fpscr(ctx); + } + return true; } static bool do_helper_bs(DisasContext *ctx, arg_A_tb *a, @@ -213,41 +211,26 @@ TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT); TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS); /*** Floating-Point round & convert ***/ -/* fctiw */ -GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT); -/* fctiwu */ -GEN_FLOAT_B(ctiwu, 0x0E, 0x04, 0, PPC2_FP_CVT_ISA206); -/* fctiwz */ -GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT); -/* fctiwuz */ -GEN_FLOAT_B(ctiwuz, 0x0F, 0x04, 0, PPC2_FP_CVT_ISA206); -/* frsp */ -GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT); -/* fcfid */ 
-GEN_FLOAT_B(cfid, 0x0E, 0x1A, 1, PPC2_FP_CVT_S64); -/* fcfids */ -GEN_FLOAT_B(cfids, 0x0E, 0x1A, 0, PPC2_FP_CVT_ISA206); -/* fcfidu */ -GEN_FLOAT_B(cfidu, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206); -/* fcfidus */ -GEN_FLOAT_B(cfidus, 0x0E, 0x1E, 0, PPC2_FP_CVT_ISA206); -/* fctid */ -GEN_FLOAT_B(ctid, 0x0E, 0x19, 0, PPC2_FP_CVT_S64); -/* fctidu */ -GEN_FLOAT_B(ctidu, 0x0E, 0x1D, 0, PPC2_FP_CVT_ISA206); -/* fctidz */ -GEN_FLOAT_B(ctidz, 0x0F, 0x19, 0, PPC2_FP_CVT_S64); -/* fctidu */ -GEN_FLOAT_B(ctiduz, 0x0F, 0x1D, 0, PPC2_FP_CVT_ISA206); - -/* frin */ -GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT); -/* friz */ -GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT); -/* frip */ -GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT); -/* frim */ -GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT); +TRANS_FLAGS(FLOAT, FRSP, do_round_convert, gen_helper_FRSP, true); +TRANS_FLAGS(FLOAT_EXT, FRIN, do_round_convert, gen_helper_FRIN, true); +TRANS_FLAGS(FLOAT_EXT, FRIZ, do_round_convert, gen_helper_FRIZ, true); +TRANS_FLAGS(FLOAT_EXT, FRIP, do_round_convert, gen_helper_FRIP, true); +TRANS_FLAGS(FLOAT_EXT, FRIM, do_round_convert, gen_helper_FRIM, true); + +TRANS_FLAGS(FLOAT, FCTIW, do_round_convert, gen_helper_FCTIW, false); +TRANS_FLAGS2(FP_CVT_ISA206, FCTIWU, do_round_convert, gen_helper_FCTIWU, false); +TRANS_FLAGS(FLOAT, FCTIWZ, do_round_convert, gen_helper_FCTIWZ, false); +TRANS_FLAGS2(FP_CVT_ISA206, FCTIWUZ, do_round_convert, gen_helper_FCTIWUZ, false); + +TRANS_FLAGS2(FP_CVT_S64, FCTID, do_round_convert, gen_helper_FCTID, false); +TRANS_FLAGS2(FP_CVT_ISA206, FCTIDU, do_round_convert, gen_helper_FCTIDU, false); +TRANS_FLAGS2(FP_CVT_S64, FCTIDZ, do_round_convert, gen_helper_FCTIDZ, false); +TRANS_FLAGS2(FP_CVT_ISA206, FCTIDUZ, do_round_convert, gen_helper_FCTIDUZ, false); + +TRANS_FLAGS2(FP_CVT_S64, FCFID, do_round_convert, gen_helper_FCFID, true); +TRANS_FLAGS2(FP_CVT_ISA206, FCFIDS, do_round_convert, gen_helper_FCFIDS, false); +TRANS_FLAGS2(FP_CVT_ISA206, FCFIDU, do_round_convert, 
gen_helper_FCFIDU, false); +TRANS_FLAGS2(FP_CVT_ISA206, FCFIDUS, do_round_convert, gen_helper_FCFIDUS, false); static bool trans_FTDIV(DisasContext *ctx, arg_X_bf *a) { @@ -274,183 +257,117 @@ static bool trans_FTSQRT(DisasContext *ctx, arg_X_bf_b *a) } /*** Floating-Point compare ***/ - -/* fcmpo */ -static void gen_fcmpo(DisasContext *ctx) +static bool do_helper_cmp(DisasContext *ctx, arg_X_bf *a, + void (*helper)(TCGv_env, TCGv_i64, TCGv_i64, + TCGv_i32)) { TCGv_i32 crf; - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_reset_fpstatus(); - crf = tcg_constant_i32(crfD(ctx->opcode)); - get_fpr(t0, rA(ctx->opcode)); - get_fpr(t1, rB(ctx->opcode)); - gen_helper_fcmpo(tcg_env, t0, t1, crf); + crf = tcg_constant_i32(a->bf); + get_fpr(t0, a->ra); + get_fpr(t1, a->rb); + helper(tcg_env, t0, t1, crf); gen_helper_float_check_status(tcg_env); + return true; } -/* fcmpu */ -static void gen_fcmpu(DisasContext *ctx) -{ - TCGv_i32 crf; - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - gen_reset_fpstatus(); - crf = tcg_constant_i32(crfD(ctx->opcode)); - get_fpr(t0, rA(ctx->opcode)); - get_fpr(t1, rB(ctx->opcode)); - gen_helper_fcmpu(tcg_env, t0, t1, crf); - gen_helper_float_check_status(tcg_env); -} +TRANS(FCMPU, do_helper_cmp, gen_helper_FCMPU); +TRANS(FCMPO, do_helper_cmp, gen_helper_FCMPO); /*** Floating-point move ***/ -/* fabs */ -/* XXX: beware that fabs never checks for NaNs nor update FPSCR */ -static void gen_fabs(DisasContext *ctx) -{ - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - 
tcg_gen_andi_i64(t1, t0, ~(1ULL << 63)); - set_fpr(rD(ctx->opcode), t1); - if (unlikely(Rc(ctx->opcode))) { - gen_set_cr1_from_fpscr(ctx); - } -} /* fmr - fmr. */ /* XXX: beware that fmr never checks for NaNs nor update FPSCR */ -static void gen_fmr(DisasContext *ctx) +static bool trans_FMR(DisasContext *ctx, arg_FMR *a) { TCGv_i64 t0; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - set_fpr(rD(ctx->opcode), t0); - if (unlikely(Rc(ctx->opcode))) { + get_fpr(t0, a->rb); + set_fpr(a->rt, t0); + if (unlikely(a->rc)) { gen_set_cr1_from_fpscr(ctx); } + return true; } -/* fnabs */ -/* XXX: beware that fnabs never checks for NaNs nor update FPSCR */ -static void gen_fnabs(DisasContext *ctx) +/* XXX: beware that f{neg, abs, nabs} never checks for NaNs nor update FPSCR */ +static bool do_move_b(DisasContext *ctx, arg_X_tb_rc *a, int64_t val, + void (*tcg_op)(TCGv_i64, TCGv_i64, int64_t)) { - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - tcg_gen_ori_i64(t1, t0, 1ULL << 63); - set_fpr(rD(ctx->opcode), t1); - if (unlikely(Rc(ctx->opcode))) { + get_fpr(t0, a->rb); + tcg_op(t1, t0, val); + set_fpr(a->rt, t1); + if (unlikely(a->rc)) { gen_set_cr1_from_fpscr(ctx); } + return true; } -/* fneg */ -/* XXX: beware that fneg never checks for NaNs nor update FPSCR */ -static void gen_fneg(DisasContext *ctx) -{ - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - tcg_gen_xori_i64(t1, t0, 1ULL << 63); - set_fpr(rD(ctx->opcode), t1); - if 
(unlikely(Rc(ctx->opcode))) { - gen_set_cr1_from_fpscr(ctx); - } -} +TRANS(FNEG, do_move_b, 1ULL << 63, tcg_gen_xori_i64); +TRANS(FABS, do_move_b, ~(1ULL << 63), tcg_gen_andi_i64); +TRANS(FNABS, do_move_b, 1ULL << 63, tcg_gen_ori_i64); /* fcpsgn: PowerPC 2.05 specification */ /* XXX: beware that fcpsgn never checks for NaNs nor update FPSCR */ -static void gen_fcpsgn(DisasContext *ctx) +static bool trans_FCPSGN(DisasContext *ctx, arg_FCPSGN *a) { - TCGv_i64 t0; - TCGv_i64 t1; - TCGv_i64 t2; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1, t2; + REQUIRE_INSNS_FLAGS2(ctx, ISA205); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); t2 = tcg_temp_new_i64(); - get_fpr(t0, rA(ctx->opcode)); - get_fpr(t1, rB(ctx->opcode)); + get_fpr(t0, a->ra); + get_fpr(t1, a->rb); tcg_gen_deposit_i64(t2, t0, t1, 0, 63); - set_fpr(rD(ctx->opcode), t2); - if (unlikely(Rc(ctx->opcode))) { + set_fpr(a->rt, t2); + if (unlikely(a->rc)) { gen_set_cr1_from_fpscr(ctx); } + return true; } -static void gen_fmrgew(DisasContext *ctx) +static bool trans_FMRGEW(DisasContext *ctx, arg_FMRGEW *a) { - TCGv_i64 b0; - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } - b0 = tcg_temp_new_i64(); + TCGv_i64 t0, t1, t2; + REQUIRE_INSNS_FLAGS2(ctx, VSX207); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - tcg_gen_shri_i64(b0, t0, 32); - get_fpr(t0, rA(ctx->opcode)); - tcg_gen_deposit_i64(t1, t0, b0, 0, 32); - set_fpr(rD(ctx->opcode), t1); + t2 = tcg_temp_new_i64(); + get_fpr(t1, a->rb); + tcg_gen_shri_i64(t0, t1, 32); + get_fpr(t1, a->ra); + tcg_gen_deposit_i64(t2, t1, t0, 0, 32); + set_fpr(a->rt, t2); + return true; } -static void gen_fmrgow(DisasContext *ctx) +static bool trans_FMRGOW(DisasContext *ctx, arg_FMRGOW *a) { - TCGv_i64 t0; - TCGv_i64 t1; - TCGv_i64 t2; - if (unlikely(!ctx->fpu_enabled)) { - 
gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1, t2; + REQUIRE_INSNS_FLAGS2(ctx, VSX207); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); t2 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - get_fpr(t1, rA(ctx->opcode)); + get_fpr(t0, a->rb); + get_fpr(t1, a->ra); tcg_gen_deposit_i64(t2, t0, t1, 32, 32); - set_fpr(rD(ctx->opcode), t2); + set_fpr(a->rt, t2); + return true; } /*** Floating-Point status & ctrl register ***/ @@ -479,7 +396,7 @@ static void gen_mcrfs(DisasContext *ctx) tcg_gen_extu_tl_i64(tnew_fpscr, cpu_fpscr); /* Only the exception bits (including FX) should be cleared if read */ tcg_gen_andi_i64(tnew_fpscr, tnew_fpscr, - ~((0xF << shift) & FP_EX_CLEAR_BITS)); + ~(MAKE_64BIT_MASK(shift, 4) & FP_EX_CLEAR_BITS)); /* FEX and VX need to be updated, so don't set fpscr directly */ tmask = tcg_constant_i32(1 << nibble); gen_helper_store_fpscr(tcg_env, tnew_fpscr, tmask); @@ -1051,8 +968,6 @@ TRANS(STFDX, do_lsfp_X, false, true, false) TRANS(STFDUX, do_lsfp_X, true, true, false) TRANS(PSTFD, do_lsfp_PLS_D, false, true, false) -#undef GEN_FLOAT_B - #undef GEN_LDF #undef GEN_LDUF #undef GEN_LDUXF diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc index cef4b5dfcbe6d..9bc9c3a3c3ffb 100644 --- a/target/ppc/translate/fp-ops.c.inc +++ b/target/ppc/translate/fp-ops.c.inc @@ -1,24 +1,3 @@ -#define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ -GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type) - -GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT), -GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT), -GEN_HANDLER_E(fctiwuz, 0x3F, 0x0F, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_FLOAT_B(rsp, 0x0C, 0x00, 1, PPC_FLOAT), -GEN_HANDLER_E(fcfid, 0x3F, 0x0E, 0x1A, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), -GEN_HANDLER_E(fcfids, 0x3B, 0x0E, 0x1A, 0, PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_HANDLER_E(fcfidu, 0x3F, 0x0E, 0x1E, 0, 
PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_HANDLER_E(fcfidus, 0x3B, 0x0E, 0x1E, 0, PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_HANDLER_E(fctid, 0x3F, 0x0E, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), -GEN_HANDLER_E(fctidu, 0x3F, 0x0E, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_HANDLER_E(fctidz, 0x3F, 0x0F, 0x19, 0x001F0000, PPC_NONE, PPC2_FP_CVT_S64), -GEN_HANDLER_E(fctiduz, 0x3F, 0x0F, 0x1D, 0, PPC_NONE, PPC2_FP_CVT_ISA206), -GEN_FLOAT_B(rin, 0x08, 0x0C, 1, PPC_FLOAT_EXT), -GEN_FLOAT_B(riz, 0x08, 0x0D, 1, PPC_FLOAT_EXT), -GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT), -GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT), - GEN_HANDLER_E(lfdepx, 0x1F, 0x1F, 0x12, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(lfiwax, 0x1f, 0x17, 0x1a, 0x00000001, PPC_NONE, PPC2_ISA205), GEN_HANDLER_E(lfiwzx, 0x1f, 0x17, 0x1b, 0x1, PPC_NONE, PPC2_FP_CVT_ISA206), @@ -31,15 +10,6 @@ GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), -GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT), -GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT), -GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT), -GEN_HANDLER(fmr, 0x3F, 0x08, 0x02, 0x001F0000, PPC_FLOAT), -GEN_HANDLER(fnabs, 0x3F, 0x08, 0x04, 0x001F0000, PPC_FLOAT), -GEN_HANDLER(fneg, 0x3F, 0x08, 0x01, 0x001F0000, PPC_FLOAT), -GEN_HANDLER_E(fcpsgn, 0x3F, 0x08, 0x00, 0x00000000, PPC_NONE, PPC2_ISA205), -GEN_HANDLER_E(fmrgew, 0x3F, 0x06, 0x1E, 0x00000001, PPC_NONE, PPC2_VSX207), -GEN_HANDLER_E(fmrgow, 0x3F, 0x06, 0x1A, 0x00000001, PPC_NONE, PPC2_VSX207), GEN_HANDLER(mcrfs, 0x3F, 0x00, 0x02, 0x0063F801, PPC_FLOAT), GEN_HANDLER(mtfsb0, 0x3F, 0x06, 0x02, 0x001FF800, PPC_FLOAT), GEN_HANDLER(mtfsb1, 0x3F, 0x06, 0x01, 0x001FF800, PPC_FLOAT), diff --git a/target/ppc/translate/ppe-impl.c.inc b/target/ppc/translate/ppe-impl.c.inc new file mode 100644 index 
0000000000000..0a0590344ea5c --- /dev/null +++ b/target/ppc/translate/ppe-impl.c.inc @@ -0,0 +1,609 @@ +/* + * IBM PPE Instructions + * + * Copyright (c) 2025, IBM Corporation. + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + + +static bool vdr_is_valid(uint32_t vdr) +{ + const uint32_t valid_bitmap = 0xf00003ff; + return !!((1ul << (vdr & 0x1f)) & valid_bitmap); +} + +static bool ppe_gpr_is_valid(uint32_t reg) +{ + const uint32_t valid_bitmap = 0xf00027ff; + return !!((1ul << (reg & 0x1f)) & valid_bitmap); +} + +#define CHECK_VDR(CTX, VDR) \ + do { \ + if (unlikely(!vdr_is_valid(VDR))) { \ + gen_invalid(CTX); \ + return true; \ + } \ + } while (0) + +#define CHECK_PPE_GPR(CTX, REG) \ + do { \ + if (unlikely(!ppe_gpr_is_valid(REG))) { \ + gen_invalid(CTX); \ + return true; \ + } \ + } while (0) + +#define VDR_PAIR_REG(VDR) (((VDR) + 1) & 0x1f) + +#define CHECK_PPE_LEVEL(CTX, LVL) \ + do { \ + if (unlikely(!((CTX)->insns_flags2 & (LVL)))) { \ + gen_invalid(CTX); \ + return true; \ + } \ + } while (0) + +static bool trans_LCXU(DisasContext *ctx, arg_LCXU *a) +{ + int i; + TCGv base, EA; + TCGv lo, hi; + TCGv_i64 t8; + const uint8_t vd_list[] = {9, 7, 5, 3, 0}; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_PPE_GPR(ctx, a->rt); + + if (unlikely((a->rt != a->ra) || (a->ra == 0) || (a->si < 0xB))) { + gen_invalid(ctx); + return true; + } + + EA = tcg_temp_new(); + base = tcg_temp_new(); + + tcg_gen_addi_tl(base, cpu_gpr[a->ra], a->si * 8); + gen_store_spr(SPR_PPE42_EDR, base); + + t8 = tcg_temp_new_i64(); + + tcg_gen_addi_tl(EA, base, -8); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + tcg_gen_extr_i64_tl(cpu_gpr[31], cpu_gpr[30], t8); + + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + tcg_gen_extr_i64_tl(cpu_gpr[29], cpu_gpr[28], t8); + + lo = tcg_temp_new(); + hi = tcg_temp_new(); + + tcg_gen_addi_tl(EA, EA, -8); + 
tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + tcg_gen_extr_i64_tl(lo, hi, t8); + gen_store_spr(SPR_SRR0, hi); + gen_store_spr(SPR_SRR1, lo); + + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + tcg_gen_extr_i64_tl(lo, hi, t8); + gen_set_xer(ctx, hi); + tcg_gen_mov_tl(cpu_ctr, lo); + + for (i = 0; i < sizeof(vd_list); i++) { + int vd = vd_list[i]; + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + tcg_gen_extr_i64_tl(cpu_gpr[VDR_PAIR_REG(vd)], cpu_gpr[vd], t8); + } + + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + tcg_gen_extr_i64_tl(lo, hi, t8); + tcg_gen_shri_tl(hi, hi, 28); + tcg_gen_trunc_tl_i32(cpu_crf[0], hi); + gen_store_spr(SPR_SPRG0, lo); + + tcg_gen_addi_tl(EA, base, 4); + tcg_gen_qemu_ld_tl(cpu_lr, EA, ctx->mem_idx, DEF_MEMOP(MO_32) | MO_ALIGN); + tcg_gen_mov_tl(cpu_gpr[a->ra], base); + return true; +} + +static bool trans_LSKU(DisasContext *ctx, arg_LSKU *a) +{ + int64_t n; + TCGv base, EA; + TCGv lo, hi; + TCGv_i64 t8; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_PPE_GPR(ctx, a->rt); + + if (unlikely((a->rt != a->ra) || (a->ra == 0) || + (a->si & PPC_BIT(0)) || (a->si == 0))) { + gen_invalid(ctx); + return true; + } + + EA = tcg_temp_new(); + base = tcg_temp_new(); + gen_addr_register(ctx, base); + + + tcg_gen_addi_tl(base, base, a->si * 8); + gen_store_spr(SPR_PPE42_EDR, base); + + n = a->si - 1; + t8 = tcg_temp_new_i64(); + if (n > 0) { + tcg_gen_addi_tl(EA, base, -8); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + hi = cpu_gpr[30]; + lo = cpu_gpr[31]; + tcg_gen_extr_i64_tl(lo, hi, t8); + } + if (n > 1) { + tcg_gen_addi_tl(EA, base, -16); + tcg_gen_qemu_ld_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + hi = cpu_gpr[28]; + lo = cpu_gpr[29]; + 
tcg_gen_extr_i64_tl(lo, hi, t8); + } + tcg_gen_addi_tl(EA, base, 4); + tcg_gen_qemu_ld_tl(cpu_lr, EA, ctx->mem_idx, DEF_MEMOP(MO_32) | MO_ALIGN); + tcg_gen_mov_tl(cpu_gpr[a->ra], base); + return true; +} + +static bool trans_STCXU(DisasContext *ctx, arg_STCXU *a) +{ + TCGv EA; + TCGv lo, hi; + TCGv_i64 t8; + int i; + const uint8_t vd_list[] = {9, 7, 5, 3, 0}; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_PPE_GPR(ctx, a->rt); + + if (unlikely((a->rt != a->ra) || (a->ra == 0) || !(a->si & PPC_BIT(0)))) { + gen_invalid(ctx); + return true; + } + + EA = tcg_temp_new(); + tcg_gen_addi_tl(EA, cpu_gpr[a->ra], 4); + tcg_gen_qemu_st_tl(cpu_lr, EA, ctx->mem_idx, DEF_MEMOP(MO_32) | MO_ALIGN); + + gen_store_spr(SPR_PPE42_EDR, cpu_gpr[a->ra]); + + t8 = tcg_temp_new_i64(); + + tcg_gen_concat_tl_i64(t8, cpu_gpr[31], cpu_gpr[30]); + tcg_gen_addi_tl(EA, cpu_gpr[a->ra], -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + + tcg_gen_concat_tl_i64(t8, cpu_gpr[29], cpu_gpr[28]); + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + + lo = tcg_temp_new(); + hi = tcg_temp_new(); + + gen_load_spr(hi, SPR_SRR0); + gen_load_spr(lo, SPR_SRR1); + tcg_gen_concat_tl_i64(t8, lo, hi); + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + + gen_get_xer(ctx, hi); + tcg_gen_mov_tl(lo, cpu_ctr); + tcg_gen_concat_tl_i64(t8, lo, hi); + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + + for (i = 0; i < sizeof(vd_list); i++) { + int vd = vd_list[i]; + tcg_gen_concat_tl_i64(t8, cpu_gpr[VDR_PAIR_REG(vd)], cpu_gpr[vd]); + tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + } + + gen_load_spr(lo, SPR_SPRG0); + tcg_gen_extu_i32_tl(hi, cpu_crf[0]); + tcg_gen_shli_tl(hi, hi, 28); + tcg_gen_concat_tl_i64(t8, lo, hi); + 
tcg_gen_addi_tl(EA, EA, -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + + tcg_gen_addi_tl(EA, cpu_gpr[a->ra], a->si * 8); + tcg_gen_qemu_st_tl(cpu_gpr[a->rt], EA, ctx->mem_idx, DEF_MEMOP(MO_32) | + MO_ALIGN); + tcg_gen_mov_tl(cpu_gpr[a->ra], EA); + return true; +} + +static bool trans_STSKU(DisasContext *ctx, arg_STSKU *a) +{ + int64_t n; + TCGv base, EA; + TCGv lo, hi; + TCGv_i64 t8; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_PPE_GPR(ctx, a->rt); + + if (unlikely((a->rt != a->ra) || (a->ra == 0) || !(a->si & PPC_BIT(0)))) { + gen_invalid(ctx); + return true; + } + + EA = tcg_temp_new(); + base = tcg_temp_new(); + gen_addr_register(ctx, base); + tcg_gen_addi_tl(EA, base, 4); + tcg_gen_qemu_st_tl(cpu_lr, EA, ctx->mem_idx, DEF_MEMOP(MO_32) | MO_ALIGN); + + gen_store_spr(SPR_PPE42_EDR, base); + + n = ~(a->si); + + t8 = tcg_temp_new_i64(); + if (n > 0) { + hi = cpu_gpr[30]; + lo = cpu_gpr[31]; + tcg_gen_concat_tl_i64(t8, lo, hi); + tcg_gen_addi_tl(EA, base, -8); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + } + if (n > 1) { + hi = cpu_gpr[28]; + lo = cpu_gpr[29]; + tcg_gen_concat_tl_i64(t8, lo, hi); + tcg_gen_addi_tl(EA, base, -16); + tcg_gen_qemu_st_i64(t8, EA, ctx->mem_idx, DEF_MEMOP(MO_64) | MO_ALIGN); + } + + tcg_gen_addi_tl(EA, base, a->si * 8); + tcg_gen_qemu_st_tl(cpu_gpr[a->rt], EA, ctx->mem_idx, DEF_MEMOP(MO_32) | + MO_ALIGN); + tcg_gen_mov_tl(cpu_gpr[a->ra], EA); + return true; +} + +static bool do_ppe_ldst(DisasContext *ctx, int rt, int ra, TCGv disp, + bool update, bool store) +{ + TCGv ea; + int rt_lo; + TCGv_i64 t8; + + CHECK_VDR(ctx, rt); + CHECK_PPE_GPR(ctx, ra); + rt_lo = VDR_PAIR_REG(rt); + if (update && (ra == 0 || (!store && ((ra == rt) || (ra == rt_lo))))) { + gen_invalid(ctx); + return true; + } + gen_set_access_type(ctx, ACCESS_INT); + + ea = do_ea_calc(ctx, ra, disp); + t8 = tcg_temp_new_i64(); + if (store) { + 
tcg_gen_concat_tl_i64(t8, cpu_gpr[rt_lo], cpu_gpr[rt]); + tcg_gen_qemu_st_i64(t8, ea, ctx->mem_idx, DEF_MEMOP(MO_64)); + } else { + tcg_gen_qemu_ld_i64(t8, ea, ctx->mem_idx, DEF_MEMOP(MO_64)); + tcg_gen_extr_i64_tl(cpu_gpr[rt_lo], cpu_gpr[rt], t8); + } + if (update) { + tcg_gen_mov_tl(cpu_gpr[ra], ea); + } + return true; +} + +static bool do_ppe_ldst_D(DisasContext *ctx, arg_D *a, bool update, bool store) +{ + if (unlikely(!is_ppe(ctx))) { + return false; + } + return do_ppe_ldst(ctx, a->rt, a->ra, tcg_constant_tl(a->si), update, + store); +} + +static bool do_ppe_ldst_X(DisasContext *ctx, arg_X *a, bool store) +{ + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_GPR(ctx, a->rb); + return do_ppe_ldst(ctx, a->rt, a->ra, cpu_gpr[a->rb], false, store); +} + +TRANS(LVD, do_ppe_ldst_D, false, false) +TRANS(LVDU, do_ppe_ldst_D, true, false) +TRANS(STVD, do_ppe_ldst_D, false, true) +TRANS(STVDU, do_ppe_ldst_D, true, true) +TRANS(LVDX, do_ppe_ldst_X, false) +TRANS(STVDX, do_ppe_ldst_X, true) + + +static bool do_fcb(DisasContext *ctx, TCGv ra_val, TCGv rb_val, int bix, + int32_t bdx, bool s, bool px, bool lk) +{ + TCGCond cond; + uint32_t mask; + TCGLabel *no_branch; + target_ulong dest; + + /* Update CR0 */ + gen_op_cmp32(ra_val, rb_val, s, 0); + + if (lk) { + gen_setlr(ctx, ctx->base.pc_next); + } + + + mask = PPC_BIT32(28 + bix); + cond = (px) ? 
TCG_COND_TSTEQ : TCG_COND_TSTNE; + no_branch = gen_new_label(); + dest = ctx->cia + bdx; + + /* Do the branch if CR0[bix] == PX */ + tcg_gen_brcondi_i32(cond, cpu_crf[0], mask, no_branch); + gen_goto_tb(ctx, 0, dest); + gen_set_label(no_branch); + gen_goto_tb(ctx, 1, ctx->base.pc_next); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +static bool do_cmp_branch(DisasContext *ctx, arg_FCB_bix *a, bool s, + bool rb_is_gpr) +{ + TCGv old_ra; + TCGv rb_val; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_GPR(ctx, a->ra); + if (rb_is_gpr) { + CHECK_PPE_GPR(ctx, a->rb); + rb_val = cpu_gpr[a->rb]; + } else { + rb_val = tcg_constant_tl(a->rb); + } + if (a->bix == 3) { + old_ra = tcg_temp_new(); + tcg_gen_mov_tl(old_ra, cpu_gpr[a->ra]); + tcg_gen_sub_tl(cpu_gpr[a->ra], cpu_gpr[a->ra], rb_val); + return do_fcb(ctx, old_ra, rb_val, 2, + a->bdx, s, a->px, a->lk); + } else { + return do_fcb(ctx, cpu_gpr[a->ra], rb_val, a->bix, + a->bdx, s, a->px, a->lk); + } +} + +TRANS(CMPWBC, do_cmp_branch, true, true) +TRANS(CMPLWBC, do_cmp_branch, false, true) +TRANS(CMPWIBC, do_cmp_branch, true, false) + +static bool do_mask_branch(DisasContext *ctx, arg_FCB * a, bool invert, + bool update, bool rb_is_gpr) +{ + TCGv r; + TCGv mask, shift; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_GPR(ctx, a->ra); + if (rb_is_gpr) { + CHECK_PPE_GPR(ctx, a->rb); + mask = tcg_temp_new(); + shift = tcg_temp_new(); + tcg_gen_andi_tl(shift, cpu_gpr[a->rb], 0x1f); + tcg_gen_shr_tl(mask, tcg_constant_tl(0x80000000), shift); + } else { + mask = tcg_constant_tl(PPC_BIT32(a->rb)); + } + if (invert) { + tcg_gen_not_tl(mask, mask); + } + + /* apply mask to ra */ + r = tcg_temp_new(); + tcg_gen_and_tl(r, cpu_gpr[a->ra], mask); + if (update) { + tcg_gen_mov_tl(cpu_gpr[a->ra], r); + } + return do_fcb(ctx, r, tcg_constant_tl(0), 2, + a->bdx, false, a->px, a->lk); +} + +TRANS(BNBWI, do_mask_branch, false, false, false) +TRANS(BNBW, do_mask_branch, false, false, true) 
+TRANS(CLRBWIBC, do_mask_branch, true, true, false) +TRANS(CLRBWBC, do_mask_branch, true, true, true) + +static void gen_set_Rc0_i64(DisasContext *ctx, TCGv_i64 reg) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i32 t = tcg_temp_new_i32(); + + tcg_gen_movi_i64(t0, CRF_EQ); + tcg_gen_movi_i64(t1, CRF_LT); + tcg_gen_movcond_i64(TCG_COND_LT, t0, reg, tcg_constant_i64(0), t1, t0); + tcg_gen_movi_i64(t1, CRF_GT); + tcg_gen_movcond_i64(TCG_COND_GT, t0, reg, tcg_constant_i64(0), t1, t0); + tcg_gen_extrl_i64_i32(t, t0); + tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so); + tcg_gen_or_i32(cpu_crf[0], cpu_crf[0], t); +} + +static bool do_shift64(DisasContext *ctx, arg_X_rc *a, bool left) +{ + int rt_lo, ra_lo; + TCGv_i64 t0, t8; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_VDR(ctx, a->rt); + CHECK_VDR(ctx, a->ra); + CHECK_PPE_GPR(ctx, a->rb); + rt_lo = VDR_PAIR_REG(a->rt); + ra_lo = VDR_PAIR_REG(a->ra); + t8 = tcg_temp_new_i64(); + + /* AND rt with a mask that is 0 when rb >= 0x40 */ + t0 = tcg_temp_new_i64(); + tcg_gen_extu_tl_i64(t0, cpu_gpr[a->rb]); + tcg_gen_shli_i64(t0, t0, 0x39); + tcg_gen_sari_i64(t0, t0, 0x3f); + + /* form 64bit value from two 32bit regs */ + tcg_gen_concat_tl_i64(t8, cpu_gpr[rt_lo], cpu_gpr[a->rt]); + + /* apply mask */ + tcg_gen_andc_i64(t8, t8, t0); + + /* do the shift */ + tcg_gen_extu_tl_i64(t0, cpu_gpr[a->rb]); + tcg_gen_andi_i64(t0, t0, 0x3f); + if (left) { + tcg_gen_shl_i64(t8, t8, t0); + } else { + tcg_gen_shr_i64(t8, t8, t0); + } + + /* split the 64bit word back into two 32bit regs */ + tcg_gen_extr_i64_tl(cpu_gpr[ra_lo], cpu_gpr[a->ra], t8); + + /* update CR0 if requested */ + if (unlikely(a->rc != 0)) { + gen_set_Rc0_i64(ctx, t8); + } + return true; +} + +TRANS(SRVD, do_shift64, false) +TRANS(SLVD, do_shift64, true) + +static bool trans_DCBQ(DisasContext *ctx, arg_DCBQ * a) +{ + if (unlikely(!is_ppe(ctx))) { + return false; + } + + CHECK_PPE_GPR(ctx, a->rt); 
+ CHECK_PPE_GPR(ctx, a->ra); + CHECK_PPE_GPR(ctx, a->rb); + + /* No cache exists, so just set RT to 0 */ + tcg_gen_movi_tl(cpu_gpr[a->rt], 0); + return true; +} + +static bool trans_RLDIMI(DisasContext *ctx, arg_RLDIMI *a) +{ + TCGv_i64 t_rs, t_ra; + int ra_lo, rs_lo; + uint32_t sh = a->sh; + uint32_t mb = a->mb; + uint32_t me = 63 - sh; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_VDR(ctx, a->rs); + CHECK_VDR(ctx, a->ra); + + rs_lo = VDR_PAIR_REG(a->rs); + ra_lo = VDR_PAIR_REG(a->ra); + + t_rs = tcg_temp_new_i64(); + t_ra = tcg_temp_new_i64(); + + tcg_gen_concat_tl_i64(t_rs, cpu_gpr[rs_lo], cpu_gpr[a->rs]); + tcg_gen_concat_tl_i64(t_ra, cpu_gpr[ra_lo], cpu_gpr[a->ra]); + + if (mb <= me) { + tcg_gen_deposit_i64(t_ra, t_ra, t_rs, sh, me - mb + 1); + } else { + uint64_t mask = mask_u64(mb, me); + TCGv_i64 t1 = tcg_temp_new_i64(); + + tcg_gen_rotli_i64(t1, t_rs, sh); + tcg_gen_andi_i64(t1, t1, mask); + tcg_gen_andi_i64(t_ra, t_ra, ~mask); + tcg_gen_or_i64(t_ra, t_ra, t1); + } + + tcg_gen_extr_i64_tl(cpu_gpr[ra_lo], cpu_gpr[a->ra], t_ra); + + if (unlikely(a->rc != 0)) { + gen_set_Rc0_i64(ctx, t_ra); + } + return true; +} + + +static bool gen_rldinm_i64(DisasContext *ctx, arg_MD *a, int mb, int me, int sh) +{ + int len = me - mb + 1; + int rsh = (64 - sh) & 63; + int ra_lo, rs_lo; + TCGv_i64 t8; + + if (unlikely(!is_ppe(ctx))) { + return false; + } + CHECK_PPE_LEVEL(ctx, PPC2_PPE42X); + CHECK_VDR(ctx, a->rs); + CHECK_VDR(ctx, a->ra); + + rs_lo = VDR_PAIR_REG(a->rs); + ra_lo = VDR_PAIR_REG(a->ra); + t8 = tcg_temp_new_i64(); + tcg_gen_concat_tl_i64(t8, cpu_gpr[rs_lo], cpu_gpr[a->rs]); + if (sh != 0 && len > 0 && me == (63 - sh)) { + tcg_gen_deposit_z_i64(t8, t8, sh, len); + } else if (me == 63 && rsh + len <= 64) { + tcg_gen_extract_i64(t8, t8, rsh, len); + } else { + tcg_gen_rotli_i64(t8, t8, sh); + tcg_gen_andi_i64(t8, t8, mask_u64(mb, me)); + } + tcg_gen_extr_i64_tl(cpu_gpr[ra_lo], cpu_gpr[a->ra], t8); + if 
(unlikely(a->rc != 0)) { + gen_set_Rc0_i64(ctx, t8); + } + return true; +} + +TRANS(RLDICL, gen_rldinm_i64, a->mb, 63, a->sh) +TRANS(RLDICR, gen_rldinm_i64, 0, a->mb, a->sh) diff --git a/target/riscv/common-semi-target.h b/target/riscv/common-semi-target.c similarity index 53% rename from target/riscv/common-semi-target.h rename to target/riscv/common-semi-target.c index 7c8a59e0cc3cd..aeaeb88d5360c 100644 --- a/target/riscv/common-semi-target.h +++ b/target/riscv/common-semi-target.c @@ -8,43 +8,42 @@ * SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef TARGET_RISCV_COMMON_SEMI_TARGET_H -#define TARGET_RISCV_COMMON_SEMI_TARGET_H +#include "qemu/osdep.h" +#include "cpu.h" +#include "semihosting/common-semi.h" -static inline target_ulong common_semi_arg(CPUState *cs, int argno) +uint64_t common_semi_arg(CPUState *cs, int argno) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; return env->gpr[xA0 + argno]; } -static inline void common_semi_set_ret(CPUState *cs, target_ulong ret) +void common_semi_set_ret(CPUState *cs, uint64_t ret) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; env->gpr[xA0] = ret; } -static inline bool common_semi_sys_exit_extended(CPUState *cs, int nr) +bool is_64bit_semihosting(CPUArchState *env) { - return (nr == TARGET_SYS_EXIT_EXTENDED || sizeof(target_ulong) == 8); + return riscv_cpu_mxl(env) != MXL_RV32; } -static inline bool is_64bit_semihosting(CPUArchState *env) +bool common_semi_sys_exit_is_extended(CPUState *cs) { - return riscv_cpu_mxl(env) != MXL_RV32; + return is_64bit_semihosting(cpu_env(cs)); } -static inline target_ulong common_semi_stack_bottom(CPUState *cs) +uint64_t common_semi_stack_bottom(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; return env->gpr[xSP]; } -static inline bool common_semi_has_synccache(CPUArchState *env) +bool common_semi_has_synccache(CPUArchState *env) { return true; } - -#endif diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 
d055ddf462311..a877018ab0c29 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -604,7 +604,7 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } } - if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) { + if (riscv_cpu_cfg(env)->ext_zve32x && (flags & CPU_DUMP_VPU)) { static const int dump_rvv_csrs[] = { CSR_VSTART, CSR_VXSAT, diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 4a862da61582c..4c13012442d98 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -50,7 +50,7 @@ typedef struct CPUArchState CPURISCVState; */ #define RISCV_UW2_ALWAYS_STORE_AMO 1 -#define RV(x) ((target_ulong)1 << (x - 'A')) +#define RV(x) BIT(x - 'A') /* * Update misa_bits[], misa_ext_info_arr[] and misa_ext_cfgs[] @@ -582,7 +582,7 @@ struct RISCVCPUClass { RISCVCPUDef *def; }; -static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) +static inline int riscv_has_ext(CPURISCVState *env, uint32_t ext) { return (env->misa_ext & ext) != 0; } @@ -592,6 +592,7 @@ static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) extern const char * const riscv_int_regnames[]; extern const char * const riscv_int_regnamesh[]; extern const char * const riscv_fpr_regnames[]; +extern const char * const riscv_rvv_regnames[]; const char *riscv_cpu_get_trap_name(target_ulong cause, bool async); int riscv_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, @@ -873,7 +874,7 @@ static inline void riscv_csr_write(CPURISCVState *env, int csrno, static inline target_ulong riscv_csr_read(CPURISCVState *env, int csrno) { target_ulong val = 0; - riscv_csrrw(env, csrno, &val, 0, 0, 0); + riscv_csrr(env, csrno, &val); return val; } diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 8631be97c58d1..5c91658c3dc41 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -25,7 +25,6 @@ #include "pmu.h" #include "time_helper.h" #include "exec/cputlb.h" -#include "exec/tb-flush.h" #include "exec/icount.h" #include "accel/tcg/getpc.h" 
#include "qemu/guest-random.h" @@ -204,6 +203,8 @@ static RISCVException cfi_ss(CPURISCVState *env, int csrno) #if !defined(CONFIG_USER_ONLY) if (env->debugger) { return RISCV_EXCP_NONE; + } else if (env->virt_enabled) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } #endif return RISCV_EXCP_ILLEGAL_INST; @@ -374,8 +375,11 @@ static RISCVException aia_smode(CPURISCVState *env, int csrno) static RISCVException aia_smode32(CPURISCVState *env, int csrno) { int ret; + int csr_priv = get_field(csrno, 0x300); - if (!riscv_cpu_cfg(env)->ext_ssaia) { + if (csr_priv == PRV_M && !riscv_cpu_cfg(env)->ext_smaia) { + return RISCV_EXCP_ILLEGAL_INST; + } else if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -2001,7 +2005,8 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno, if (riscv_has_ext(env, RVF)) { mask |= MSTATUS_FS; } - if (riscv_has_ext(env, RVV)) { + + if (riscv_cpu_cfg(env)->ext_zve32x) { mask |= MSTATUS_VS; } @@ -2170,8 +2175,6 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, env->mstatus &= ~MSTATUS_FS; } - /* flush translation cache */ - tb_flush(env_cpu(env)); env->xl = riscv_cpu_mxl(env); return RISCV_EXCP_NONE; } @@ -5577,7 +5580,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, csr_priv = get_field(csrno, 0x300); if (!env->debugger && (effective_priv < csr_priv)) { - if (csr_priv == (PRV_S + 1) && env->virt_enabled) { + if (csr_priv <= (PRV_S + 1) && env->virt_enabled) { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; } return RISCV_EXCP_ILLEGAL_INST; @@ -5862,8 +5865,8 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { NULL, read_mstatus_i128 }, [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL, read_misa_i128 }, - [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, - [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, + [CSR_MIDELEG] = { "mideleg", smode, NULL, NULL, rmw_mideleg }, + [CSR_MEDELEG] = { "medeleg", smode, read_medeleg, write_medeleg }, 
[CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, [CSR_MCOUNTEREN] = { "mcounteren", umode, read_mcounteren, @@ -5871,7 +5874,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MSTATUSH] = { "mstatush", any32, read_mstatush, write_mstatush }, - [CSR_MEDELEGH] = { "medelegh", any32, read_zero, write_ignore, + [CSR_MEDELEGH] = { "medelegh", smode32, read_zero, write_ignore, .min_priv_ver = PRIV_VERSION_1_13_0 }, [CSR_HEDELEGH] = { "hedelegh", hmode32, read_hedelegh, write_hedelegh, .min_priv_ver = PRIV_VERSION_1_13_0 }, @@ -5911,7 +5914,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip }, /* Machine-Level High-Half CSRs (AIA) */ - [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, + [CSR_MIDELEGH] = { "midelegh", aia_smode32, NULL, NULL, rmw_midelegh }, [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh }, [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph }, diff --git a/target/riscv/helper.h b/target/riscv/helper.h index f712b1c368ece..b785456ee08d1 100644 --- a/target/riscv/helper.h +++ b/target/riscv/helper.h @@ -1101,14 +1101,14 @@ DEF_HELPER_6(vslidedown_vx_b, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vslidedown_vx_h, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vslidedown_vx_w, void, ptr, ptr, tl, ptr, env, i32) DEF_HELPER_6(vslidedown_vx_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, tl, ptr, env, i32) -DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, tl, ptr, env, i32) 
-DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, tl, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_b, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1up_vx_d, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_b, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_h, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_w, void, ptr, ptr, i64, ptr, env, i32) +DEF_HELPER_6(vslide1down_vx_d, void, ptr, ptr, i64, ptr, env, i32) DEF_HELPER_6(vfslide1up_vf_h, void, ptr, ptr, i64, ptr, env, i32) DEF_HELPER_6(vfslide1up_vf_w, void, ptr, ptr, i64, ptr, env, i32) @@ -1284,3 +1284,8 @@ DEF_HELPER_4(vgmul_vv, void, ptr, ptr, env, i32) DEF_HELPER_5(vsm4k_vi, void, ptr, ptr, i32, env, i32) DEF_HELPER_4(vsm4r_vv, void, ptr, ptr, env, i32) DEF_HELPER_4(vsm4r_vs, void, ptr, ptr, env, i32) + +/* CFI (zicfiss) helpers */ +#ifndef CONFIG_USER_ONLY +DEF_HELPER_1(ssamoswap_disabled, void, env) +#endif diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index 610bf9ff30cef..2a487179f6388 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -864,286 +864,32 @@ GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check) GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check) /* - * MAXSZ returns the maximum vector size can be operated in bytes, - * which is used in GVEC IR when vl_eq_vlmax flag is set to true - * to accelerate vector operation. 
- */ -static inline uint32_t MAXSZ(DisasContext *s) -{ - int max_sz = s->cfg_ptr->vlenb << 3; - return max_sz >> (3 - s->lmul); -} - -static inline uint32_t get_log2(uint32_t a) -{ - uint32_t i = 0; - for (; a > 0;) { - a >>= 1; - i++; - } - return i; -} - -typedef void gen_tl_ldst(TCGv, TCGv_ptr, tcg_target_long); - -/* - * Simulate the strided load/store main loop: - * - * for (i = env->vstart; i < env->vl; env->vstart = ++i) { - * k = 0; - * while (k < nf) { - * if (!vm && !vext_elem_mask(v0, i)) { - * vext_set_elems_1s(vd, vma, (i + k * max_elems) * esz, - * (i + k * max_elems + 1) * esz); - * k++; - * continue; - * } - * target_ulong addr = base + stride * i + (k << log2_esz); - * ldst(env, adjust_addr(env, addr), i + k * max_elems, vd, ra); - * k++; - * } - * } - */ -static void gen_ldst_stride_main_loop(DisasContext *s, TCGv dest, uint32_t rs1, - uint32_t rs2, uint32_t vm, uint32_t nf, - gen_tl_ldst *ld_fn, gen_tl_ldst *st_fn, - bool is_load) -{ - TCGv addr = tcg_temp_new(); - TCGv base = get_gpr(s, rs1, EXT_NONE); - TCGv stride = get_gpr(s, rs2, EXT_NONE); - - TCGv i = tcg_temp_new(); - TCGv i_esz = tcg_temp_new(); - TCGv k = tcg_temp_new(); - TCGv k_esz = tcg_temp_new(); - TCGv k_max = tcg_temp_new(); - TCGv mask = tcg_temp_new(); - TCGv mask_offs = tcg_temp_new(); - TCGv mask_offs_64 = tcg_temp_new(); - TCGv mask_elem = tcg_temp_new(); - TCGv mask_offs_rem = tcg_temp_new(); - TCGv vreg = tcg_temp_new(); - TCGv dest_offs = tcg_temp_new(); - TCGv stride_offs = tcg_temp_new(); - - uint32_t max_elems = MAXSZ(s) >> s->sew; - - TCGLabel *start = gen_new_label(); - TCGLabel *end = gen_new_label(); - TCGLabel *start_k = gen_new_label(); - TCGLabel *inc_k = gen_new_label(); - TCGLabel *end_k = gen_new_label(); - - MemOp atomicity = MO_ATOM_NONE; - if (s->sew == 0) { - atomicity = MO_ATOM_NONE; - } else { - atomicity = MO_ATOM_IFALIGN_PAIR; - } - - mark_vs_dirty(s); - - tcg_gen_addi_tl(mask, (TCGv)tcg_env, vreg_ofs(s, 0)); - - /* Start of outer loop. 
*/ - tcg_gen_mov_tl(i, cpu_vstart); - gen_set_label(start); - tcg_gen_brcond_tl(TCG_COND_GE, i, cpu_vl, end); - tcg_gen_shli_tl(i_esz, i, s->sew); - /* Start of inner loop. */ - tcg_gen_movi_tl(k, 0); - gen_set_label(start_k); - tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end_k); - /* - * If we are in mask agnostic regime and the operation is not unmasked we - * set the inactive elements to 1. - */ - if (!vm && s->vma) { - TCGLabel *active_element = gen_new_label(); - /* (i + k * max_elems) * esz */ - tcg_gen_shli_tl(mask_offs, k, get_log2(max_elems << s->sew)); - tcg_gen_add_tl(mask_offs, mask_offs, i_esz); - - /* - * Check whether the i bit of the mask is 0 or 1. - * - * static inline int vext_elem_mask(void *v0, int index) - * { - * int idx = index / 64; - * int pos = index % 64; - * return (((uint64_t *)v0)[idx] >> pos) & 1; - * } - */ - tcg_gen_shri_tl(mask_offs_64, mask_offs, 3); - tcg_gen_add_tl(mask_offs_64, mask_offs_64, mask); - tcg_gen_ld_i64((TCGv_i64)mask_elem, (TCGv_ptr)mask_offs_64, 0); - tcg_gen_rem_tl(mask_offs_rem, mask_offs, tcg_constant_tl(8)); - tcg_gen_shr_tl(mask_elem, mask_elem, mask_offs_rem); - tcg_gen_andi_tl(mask_elem, mask_elem, 1); - tcg_gen_brcond_tl(TCG_COND_NE, mask_elem, tcg_constant_tl(0), - active_element); - /* - * Set masked-off elements in the destination vector register to 1s. - * Store instructions simply skip this bit as memory ops access memory - * only for active elements. 
- */ - if (is_load) { - tcg_gen_shli_tl(mask_offs, mask_offs, s->sew); - tcg_gen_add_tl(mask_offs, mask_offs, dest); - st_fn(tcg_constant_tl(-1), (TCGv_ptr)mask_offs, 0); - } - tcg_gen_br(inc_k); - gen_set_label(active_element); - } - /* - * The element is active, calculate the address with stride: - * target_ulong addr = base + stride * i + (k << log2_esz); - */ - tcg_gen_mul_tl(stride_offs, stride, i); - tcg_gen_shli_tl(k_esz, k, s->sew); - tcg_gen_add_tl(stride_offs, stride_offs, k_esz); - tcg_gen_add_tl(addr, base, stride_offs); - /* Calculate the offset in the dst/src vector register. */ - tcg_gen_shli_tl(k_max, k, get_log2(max_elems)); - tcg_gen_add_tl(dest_offs, i, k_max); - tcg_gen_shli_tl(dest_offs, dest_offs, s->sew); - tcg_gen_add_tl(dest_offs, dest_offs, dest); - if (is_load) { - tcg_gen_qemu_ld_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity); - st_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0); - } else { - ld_fn((TCGv)vreg, (TCGv_ptr)dest_offs, 0); - tcg_gen_qemu_st_tl(vreg, addr, s->mem_idx, MO_LE | s->sew | atomicity); - } - /* - * We don't execute the load/store above if the element was inactive. - * We jump instead directly to incrementing k and continuing the loop. - */ - if (!vm && s->vma) { - gen_set_label(inc_k); - } - tcg_gen_addi_tl(k, k, 1); - tcg_gen_br(start_k); - /* End of the inner loop. */ - gen_set_label(end_k); - - tcg_gen_addi_tl(i, i, 1); - tcg_gen_mov_tl(cpu_vstart, i); - tcg_gen_br(start); - - /* End of the outer loop. 
*/ - gen_set_label(end); - - return; -} - - -/* - * Set the tail bytes of the strided loads/stores to 1: - * - * for (k = 0; k < nf; ++k) { - * cnt = (k * max_elems + vl) * esz; - * tot = (k * max_elems + max_elems) * esz; - * for (i = cnt; i < tot; i += esz) { - * store_1s(-1, vd[vl+i]); - * } - * } + *** stride load and store */ -static void gen_ldst_stride_tail_loop(DisasContext *s, TCGv dest, uint32_t nf, - gen_tl_ldst *st_fn) -{ - TCGv i = tcg_temp_new(); - TCGv k = tcg_temp_new(); - TCGv tail_cnt = tcg_temp_new(); - TCGv tail_tot = tcg_temp_new(); - TCGv tail_addr = tcg_temp_new(); - - TCGLabel *start = gen_new_label(); - TCGLabel *end = gen_new_label(); - TCGLabel *start_i = gen_new_label(); - TCGLabel *end_i = gen_new_label(); - - uint32_t max_elems_b = MAXSZ(s); - uint32_t esz = 1 << s->sew; - - /* Start of the outer loop. */ - tcg_gen_movi_tl(k, 0); - tcg_gen_shli_tl(tail_cnt, cpu_vl, s->sew); - tcg_gen_movi_tl(tail_tot, max_elems_b); - tcg_gen_add_tl(tail_addr, dest, tail_cnt); - gen_set_label(start); - tcg_gen_brcond_tl(TCG_COND_GE, k, tcg_constant_tl(nf), end); - /* Start of the inner loop. */ - tcg_gen_mov_tl(i, tail_cnt); - gen_set_label(start_i); - tcg_gen_brcond_tl(TCG_COND_GE, i, tail_tot, end_i); - /* store_1s(-1, vd[vl+i]); */ - st_fn(tcg_constant_tl(-1), (TCGv_ptr)tail_addr, 0); - tcg_gen_addi_tl(tail_addr, tail_addr, esz); - tcg_gen_addi_tl(i, i, esz); - tcg_gen_br(start_i); - /* End of the inner loop. */ - gen_set_label(end_i); - /* Update the counts */ - tcg_gen_addi_tl(tail_cnt, tail_cnt, max_elems_b); - tcg_gen_addi_tl(tail_tot, tail_cnt, max_elems_b); - tcg_gen_addi_tl(k, k, 1); - tcg_gen_br(start); - /* End of the outer loop. 
*/ - gen_set_label(end); - - return; -} +typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv, + TCGv, TCGv_env, TCGv_i32); static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, - uint32_t data, DisasContext *s, bool is_load) + uint32_t data, gen_helper_ldst_stride *fn, + DisasContext *s) { - if (!s->vstart_eq_zero) { - return false; - } - - TCGv dest = tcg_temp_new(); - - uint32_t nf = FIELD_EX32(data, VDATA, NF); - uint32_t vm = FIELD_EX32(data, VDATA, VM); - - /* Destination register and mask register */ - tcg_gen_addi_tl(dest, (TCGv)tcg_env, vreg_ofs(s, vd)); - - /* - * Select the appropriate load/tore to retrieve data from the vector - * register given a specific sew. - */ - static gen_tl_ldst * const ld_fns[4] = { - tcg_gen_ld8u_tl, tcg_gen_ld16u_tl, - tcg_gen_ld32u_tl, tcg_gen_ld_tl - }; - - static gen_tl_ldst * const st_fns[4] = { - tcg_gen_st8_tl, tcg_gen_st16_tl, - tcg_gen_st32_tl, tcg_gen_st_tl - }; + TCGv_ptr dest, mask; + TCGv base, stride; + TCGv_i32 desc; - gen_tl_ldst *ld_fn = ld_fns[s->sew]; - gen_tl_ldst *st_fn = st_fns[s->sew]; + dest = tcg_temp_new_ptr(); + mask = tcg_temp_new_ptr(); + base = get_gpr(s, rs1, EXT_NONE); + stride = get_gpr(s, rs2, EXT_NONE); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, + s->cfg_ptr->vlenb, data)); - if (ld_fn == NULL || st_fn == NULL) { - return false; - } + tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd)); + tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); mark_vs_dirty(s); - gen_ldst_stride_main_loop(s, dest, rs1, rs2, vm, nf, ld_fn, st_fn, is_load); - - tcg_gen_movi_tl(cpu_vstart, 0); - - /* - * Set the tail bytes to 1 if tail agnostic: - */ - if (s->vta != 0 && is_load) { - gen_ldst_stride_tail_loop(s, dest, nf, st_fn); - } + fn(dest, mask, base, stride, tcg_env, desc); finalize_rvv_inst(s); return true; @@ -1152,6 +898,16 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t 
data = 0; + gen_helper_ldst_stride *fn; + static gen_helper_ldst_stride * const fns[4] = { + gen_helper_vlse8_v, gen_helper_vlse16_v, + gen_helper_vlse32_v, gen_helper_vlse64_v + }; + + fn = fns[eew]; + if (fn == NULL) { + return false; + } uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); @@ -1159,7 +915,7 @@ static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) data = FIELD_DP32(data, VDATA, NF, a->nf); data = FIELD_DP32(data, VDATA, VTA, s->vta); data = FIELD_DP32(data, VDATA, VMA, s->vma); - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, true); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); } static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -1177,13 +933,23 @@ GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check) static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew) { uint32_t data = 0; + gen_helper_ldst_stride *fn; + static gen_helper_ldst_stride * const fns[4] = { + /* masked stride store */ + gen_helper_vsse8_v, gen_helper_vsse16_v, + gen_helper_vsse32_v, gen_helper_vsse64_v + }; uint8_t emul = vext_get_emul(s, eew); data = FIELD_DP32(data, VDATA, VM, a->vm); data = FIELD_DP32(data, VDATA, LMUL, emul); data = FIELD_DP32(data, VDATA, NF, a->nf); + fn = fns[eew]; + if (fn == NULL) { + return false; + } - return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, s, false); + return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s); } static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew) @@ -1534,6 +1300,17 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, int8_t, 8, false) *** Vector Integer Arithmetic Instructions */ +/* + * MAXSZ returns the maximum vector size can be operated in bytes, + * which is used in GVEC IR when vl_eq_vlmax flag is set to true + * to accelerate vector operation. 
+ */ +static inline uint32_t MAXSZ(DisasContext *s) +{ + int max_sz = s->cfg_ptr->vlenb * 8; + return max_sz >> (3 - s->lmul); +} + static bool opivv_check(DisasContext *s, arg_rmrr *a) { return require_rvv(s) && @@ -3574,19 +3351,19 @@ static void load_element(TCGv_i64 dest, TCGv_ptr base, /* offset of the idx element with base register r */ static uint32_t endian_ofs(DisasContext *s, int r, int idx) { -#if HOST_BIG_ENDIAN - return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew); -#else - return vreg_ofs(s, r) + (idx << s->sew); -#endif + if (HOST_BIG_ENDIAN) { + return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew); + } else { + return vreg_ofs(s, r) + (idx << s->sew); + } } /* adjust the index according to the endian */ static void endian_adjust(TCGv_i32 ofs, int sew) { -#if HOST_BIG_ENDIAN - tcg_gen_xori_i32(ofs, ofs, 7 >> sew); -#endif + if (HOST_BIG_ENDIAN) { + tcg_gen_xori_i32(ofs, ofs, 7 >> sew); + } } /* Load idx >= VLMAX ? 0 : vreg[idx] */ @@ -3784,7 +3561,6 @@ static bool slideup_check(DisasContext *s, arg_rmrr *a) } GEN_OPIVX_TRANS(vslideup_vx, slideup_check) -GEN_OPIVX_TRANS(vslide1up_vx, slideup_check) GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check) static bool slidedown_check(DisasContext *s, arg_rmrr *a) @@ -3795,9 +3571,56 @@ static bool slidedown_check(DisasContext *s, arg_rmrr *a) } GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check) -GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check) GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check) +typedef void gen_helper_vslide1_vx(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr, + TCGv_env, TCGv_i32); + +#define GEN_OPIVX_VSLIDE1_TRANS(NAME, CHECK) \ +static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ +{ \ + if (CHECK(s, a)) { \ + static gen_helper_vslide1_vx * const fns[4] = { \ + gen_helper_##NAME##_b, gen_helper_##NAME##_h, \ + gen_helper_##NAME##_w, gen_helper_##NAME##_d, \ + }; \ + \ + TCGv_ptr dest, src2, mask; \ + TCGv_i64 src1; \ + TCGv_i32 desc; \ + uint32_t data 
= 0; \ + \ + dest = tcg_temp_new_ptr(); \ + mask = tcg_temp_new_ptr(); \ + src2 = tcg_temp_new_ptr(); \ + src1 = tcg_temp_new_i64(); \ + \ + data = FIELD_DP32(data, VDATA, VM, a->vm); \ + data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ + data = FIELD_DP32(data, VDATA, VTA, s->vta); \ + data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s); \ + data = FIELD_DP32(data, VDATA, VMA, s->vma); \ + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb, \ + s->cfg_ptr->vlenb, data)); \ + \ + tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd)); \ + tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2)); \ + tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0)); \ + tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN)); \ + \ + fns[s->sew](dest, mask, src1, src2, tcg_env, desc); \ + \ + tcg_gen_movi_tl(cpu_vstart, 0); \ + finalize_rvv_inst(s); \ + \ + return true; \ + } \ + return false; \ +} + +GEN_OPIVX_VSLIDE1_TRANS(vslide1up_vx, slideup_check) +GEN_OPIVX_VSLIDE1_TRANS(vslide1down_vx, slidedown_check) + /* Vector Floating-Point Slide Instructions */ static bool fslideup_check(DisasContext *s, arg_rmrr *a) { diff --git a/target/riscv/insn_trans/trans_rvzce.c.inc b/target/riscv/insn_trans/trans_rvzce.c.inc index c77c2b927b0cf..dd15af0f54b96 100644 --- a/target/riscv/insn_trans/trans_rvzce.c.inc +++ b/target/riscv/insn_trans/trans_rvzce.c.inc @@ -88,13 +88,13 @@ static bool trans_c_lbu(DisasContext *ctx, arg_c_lbu *a) static bool trans_c_lhu(DisasContext *ctx, arg_c_lhu *a) { REQUIRE_ZCB(ctx); - return gen_load(ctx, a, MO_UW); + return gen_load(ctx, a, MO_TEUW); } static bool trans_c_lh(DisasContext *ctx, arg_c_lh *a) { REQUIRE_ZCB(ctx); - return gen_load(ctx, a, MO_SW); + return gen_load(ctx, a, MO_TESW); } static bool trans_c_sb(DisasContext *ctx, arg_c_sb *a) @@ -106,7 +106,7 @@ static bool trans_c_sb(DisasContext *ctx, arg_c_sb *a) static bool trans_c_sh(DisasContext *ctx, arg_c_sh *a) { REQUIRE_ZCB(ctx); - return gen_store(ctx, a, MO_UW); + return gen_store(ctx, a, 
MO_TEUW); } #define X_S0 8 diff --git a/target/riscv/insn_trans/trans_rvzicfiss.c.inc b/target/riscv/insn_trans/trans_rvzicfiss.c.inc index b0096adcd0e50..f4a1c12ca0bcc 100644 --- a/target/riscv/insn_trans/trans_rvzicfiss.c.inc +++ b/target/riscv/insn_trans/trans_rvzicfiss.c.inc @@ -40,6 +40,7 @@ static bool trans_sspopchk(DisasContext *ctx, arg_sspopchk *a) tcg_gen_brcond_tl(TCG_COND_EQ, data, rs1, skip); tcg_gen_st_tl(tcg_constant_tl(RISCV_EXCP_SW_CHECK_BCFI_TVAL), tcg_env, offsetof(CPURISCVState, sw_check_code)); + gen_update_pc(ctx, 0); gen_helper_raise_exception(tcg_env, tcg_constant_i32(RISCV_EXCP_SW_CHECK)); gen_set_label(skip); @@ -90,7 +91,11 @@ static bool trans_ssamoswap_w(DisasContext *ctx, arg_amoswap_w *a) } if (!ctx->bcfi_enabled) { +#ifndef CONFIG_USER_ONLY + gen_helper_ssamoswap_disabled(tcg_env); +#else return false; +#endif } TCGv dest = dest_gpr(ctx, a->rd); @@ -115,7 +120,11 @@ static bool trans_ssamoswap_d(DisasContext *ctx, arg_amoswap_w *a) } if (!ctx->bcfi_enabled) { +#ifndef CONFIG_USER_ONLY + gen_helper_ssamoswap_disabled(tcg_env); +#else return false; +#endif } TCGv dest = dest_gpr(ctx, a->rd); diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 5c19062c19b60..0dd0d59d41ad6 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -36,6 +36,7 @@ #include "hw/pci/pci.h" #include "exec/memattrs.h" #include "system/address-spaces.h" +#include "system/memory.h" #include "hw/boards.h" #include "hw/irq.h" #include "hw/intc/riscv_imsic.h" @@ -1369,7 +1370,7 @@ int kvm_riscv_sync_mpstate_to_kvm(RISCVCPU *cpu, int state) return 0; } -int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp) { int ret = 0; @@ -1564,6 +1565,7 @@ bool kvm_arch_stop_on_emulation_error(CPUState *cs) static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; g_autofree uint8_t *buf 
= NULL; RISCVCPU *cpu = RISCV_CPU(cs); target_ulong num_bytes; @@ -1588,7 +1590,7 @@ static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run) * Handle the case where a 32 bit CPU is running in a * 64 bit addressing env. */ - if (riscv_cpu_mxl(&cpu->env) == MXL_RV32) { + if (riscv_cpu_is_32bit(cpu)) { addr |= (uint64_t)run->riscv_sbi.args[2] << 32; } @@ -1602,9 +1604,9 @@ static void kvm_riscv_handle_sbi_dbcn(CPUState *cs, struct kvm_run *run) exit(1); } - cpu_physical_memory_write(addr, buf, ret); + address_space_write(cs->as, addr, attrs, buf, ret); } else { - cpu_physical_memory_read(addr, buf, num_bytes); + address_space_read(cs->as, addr, attrs, buf, num_bytes); ret = qemu_chr_fe_write_all(serial_hd(0)->be, buf, num_bytes); if (ret < 0) { diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 1600ec44f0b75..18d790af0d073 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -131,7 +131,8 @@ static bool vector_needed(void *opaque) RISCVCPU *cpu = opaque; CPURISCVState *env = &cpu->env; - return riscv_has_ext(env, RVV); + return kvm_enabled() ? 
riscv_has_ext(env, RVV) : + riscv_cpu_cfg(env)->ext_zve32x; } static const VMStateDescription vmstate_vector = { @@ -400,6 +401,30 @@ static const VMStateDescription vmstate_ssp = { } }; +static bool sstc_timer_needed(void *opaque) +{ + RISCVCPU *cpu = opaque; + CPURISCVState *env = &cpu->env; + + if (!cpu->cfg.ext_sstc) { + return false; + } + + return env->stimer != NULL || env->vstimer != NULL; +} + +static const VMStateDescription vmstate_sstc = { + .name = "cpu/timer", + .version_id = 1, + .minimum_version_id = 1, + .needed = sstc_timer_needed, + .fields = (const VMStateField[]) { + VMSTATE_TIMER_PTR(env.stimer, RISCVCPU), + VMSTATE_TIMER_PTR(env.vstimer, RISCVCPU), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_riscv_cpu = { .name = "cpu", .version_id = 10, @@ -476,6 +501,7 @@ const VMStateDescription vmstate_riscv_cpu = { &vmstate_elp, &vmstate_ssp, &vmstate_ctr, + &vmstate_sstc, NULL } }; diff --git a/target/riscv/meson.build b/target/riscv/meson.build index a4bd61e52a940..fdefe88ccdd3a 100644 --- a/target/riscv/meson.build +++ b/target/riscv/meson.build @@ -8,6 +8,10 @@ gen = [ riscv_ss = ss.source_set() riscv_ss.add(gen) + +riscv_ss.add(when: 'CONFIG_ARM_COMPATIBLE_SEMIHOSTING', + if_true: files('common-semi-target.c')) + riscv_ss.add(files( 'cpu.c', 'cpu_helper.c', diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c index 100005ea4e99f..8a77476db93dd 100644 --- a/target/riscv/monitor.c +++ b/target/riscv/monitor.c @@ -23,6 +23,7 @@ #include "cpu_bits.h" #include "monitor/monitor.h" #include "monitor/hmp-target.h" +#include "system/memory.h" #ifdef TARGET_RISCV64 #define PTE_HEADER_FIELDS "vaddr paddr "\ @@ -77,11 +78,13 @@ static void print_pte(Monitor *mon, int va_bits, target_ulong vaddr, attr & PTE_D ? 
'd' : '-'); } -static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, +static void walk_pte(Monitor *mon, AddressSpace *as, + hwaddr base, target_ulong start, int level, int ptidxbits, int ptesize, int va_bits, target_ulong *vbase, hwaddr *pbase, hwaddr *last_paddr, target_ulong *last_size, int *last_attr) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; hwaddr pte_addr; hwaddr paddr; target_ulong last_start = -1; @@ -100,7 +103,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, for (idx = 0; idx < (1UL << ptidxbits); idx++) { pte_addr = base + idx * ptesize; - cpu_physical_memory_read(pte_addr, &pte, ptesize); + address_space_read(as, pte_addr, attrs, &pte, ptesize); paddr = (hwaddr)(pte >> PTE_PPN_SHIFT) << PGSHIFT; attr = pte & 0xff; @@ -132,7 +135,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, *last_size = pgsize; } else { /* pointer to the next level of the page table */ - walk_pte(mon, paddr, start, level - 1, ptidxbits, ptesize, + walk_pte(mon, as, paddr, start, level - 1, ptidxbits, ptesize, va_bits, vbase, pbase, last_paddr, last_size, last_attr); } @@ -145,6 +148,7 @@ static void walk_pte(Monitor *mon, hwaddr base, target_ulong start, static void mem_info_svxx(Monitor *mon, CPUArchState *env) { + AddressSpace *as = env_cpu(env)->as; int levels, ptidxbits, ptesize, vm, va_bits; hwaddr base; target_ulong vbase; @@ -199,7 +203,7 @@ static void mem_info_svxx(Monitor *mon, CPUArchState *env) last_attr = 0; /* walk page tables, starting from address 0 */ - walk_pte(mon, base, 0, levels - 1, ptidxbits, ptesize, va_bits, + walk_pte(mon, as, base, 0, levels - 1, ptidxbits, ptesize, va_bits, &vbase, &pbase, &last_paddr, &last_size, &last_attr); /* don't forget the last one */ diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 15460bf84bd37..8382aa94cb208 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -355,21 +355,22 @@ target_ulong helper_sret(CPURISCVState 
*env) } static void check_ret_from_m_mode(CPURISCVState *env, target_ulong retpc, - target_ulong prev_priv) + target_ulong prev_priv, + uintptr_t ra) { if (!(env->priv >= PRV_M)) { - riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); + riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, ra); } if (!riscv_cpu_allow_16bit_insn(&env_archcpu(env)->cfg, env->priv_ver, env->misa_ext) && (retpc & 0x3)) { - riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, GETPC()); + riscv_raise_exception(env, RISCV_EXCP_INST_ADDR_MIS, ra); } if (riscv_cpu_cfg(env)->pmp && !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { - riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC()); + riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, ra); } } static target_ulong ssdbltrp_mxret(CPURISCVState *env, target_ulong mstatus, @@ -394,8 +395,9 @@ target_ulong helper_mret(CPURISCVState *env) target_ulong retpc = env->mepc & get_xepc_mask(env); uint64_t mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); + uintptr_t ra = GETPC(); - check_ret_from_m_mode(env, retpc, prev_priv); + check_ret_from_m_mode(env, retpc, prev_priv, ra); target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) && (prev_priv != PRV_M); @@ -443,8 +445,9 @@ target_ulong helper_mnret(CPURISCVState *env) target_ulong retpc = env->mnepc; target_ulong prev_priv = get_field(env->mnstatus, MNSTATUS_MNPP); target_ulong prev_virt; + uintptr_t ra = GETPC(); - check_ret_from_m_mode(env, retpc, prev_priv); + check_ret_from_m_mode(env, retpc, prev_priv, ra); prev_virt = get_field(env->mnstatus, MNSTATUS_MNPV) && (prev_priv != PRV_M); @@ -714,4 +717,53 @@ target_ulong helper_hyp_hlvx_wu(CPURISCVState *env, target_ulong addr) return cpu_ldl_code_mmu(env, addr, oi, ra); } +void helper_ssamoswap_disabled(CPURISCVState *env) +{ + int exception = RISCV_EXCP_ILLEGAL_INST; + + /* + * Here we follow the RISC-V CFI spec [1] to implement the exception type + * of ssamoswap* instruction. 
+ * + * [1] RISC-V CFI spec v1.0, ch2.7 Atomic Swap from a Shadow Stack Location + * + * Note: We have already checked some conditions in trans_* functions: + * 1. The effective priv mode is not M-mode. + * 2. The xSSE specific to the effictive priv mode is disabled. + */ + if (!get_field(env->menvcfg, MENVCFG_SSE)) { + /* + * Disabled M-mode SSE always trigger illegal instruction when + * current priv mode is not M-mode. + */ + exception = RISCV_EXCP_ILLEGAL_INST; + goto done; + } + + if (!riscv_has_ext(env, RVS)) { + /* S-mode is not implemented */ + exception = RISCV_EXCP_ILLEGAL_INST; + goto done; + } else if (env->virt_enabled) { + /* + * VU/VS-mode with disabled xSSE will trigger the virtual instruction + * exception. + */ + exception = RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + goto done; + } else { + /* + * U-mode with disabled S-mode SSE will trigger the illegal instruction + * exception. + * + * Note: S-mode is already handled in the disabled M-mode SSE case. + */ + exception = RISCV_EXCP_ILLEGAL_INST; + goto done; + } + +done: + riscv_raise_exception(env, exception, GETPC()); +} + #endif /* !CONFIG_USER_ONLY */ diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index 3540327c9af0a..72f1372a4958c 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -211,11 +211,12 @@ void pmp_update_rule_addr(CPURISCVState *env, uint32_t pmp_index) break; case PMP_AMATCH_TOR: - sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ - ea = (this_addr << 2) - 1u; - if (sa > ea) { + if (prev_addr >= this_addr) { sa = ea = 0u; + break; } + sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */ + ea = (this_addr << 2) - 1u; break; case PMP_AMATCH_NA4: diff --git a/target/riscv/riscv-qmp-cmds.c b/target/riscv/riscv-qmp-cmds.c index 8a1856c50e0f6..c499f9b9a7d6b 100644 --- a/target/riscv/riscv-qmp-cmds.c +++ b/target/riscv/riscv-qmp-cmds.c @@ -31,6 +31,10 @@ #include "qapi/qobject-input-visitor.h" #include "qapi/visitor.h" #include "qom/qom-qobject.h" +#include 
"qemu/ctype.h" +#include "qemu/qemu-print.h" +#include "monitor/hmp.h" +#include "monitor/hmp-target.h" #include "system/kvm.h" #include "system/tcg.h" #include "cpu-qom.h" @@ -240,3 +244,147 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, return expansion_info; } + +/* + * We have way too many potential CSRs and regs being added + * regularly to register them in a static array. + * + * Declare an empty array instead, making get_monitor_def() use + * the target_get_monitor_def() API directly. + */ +const MonitorDef monitor_defs[] = { { } }; +const MonitorDef *target_monitor_defs(void) +{ + return monitor_defs; +} + +static bool reg_is_ulong_integer(CPURISCVState *env, const char *name, + target_ulong *val, bool is_gprh) +{ + const char * const *reg_names; + target_ulong *vals; + + if (is_gprh) { + reg_names = riscv_int_regnamesh; + vals = env->gprh; + } else { + reg_names = riscv_int_regnames; + vals = env->gpr; + } + + for (int i = 0; i < 32; i++) { + g_autofree char *reg_name = g_strdup(reg_names[i]); + char *reg1 = strtok(reg_name, "/"); + char *reg2 = strtok(NULL, "/"); + + if (strcasecmp(reg1, name) == 0 || + (reg2 && strcasecmp(reg2, name) == 0)) { + *val = vals[i]; + return true; + } + } + + return false; +} + +static bool reg_is_u64_fpu(CPURISCVState *env, const char *name, uint64_t *val) +{ + if (qemu_tolower(name[0]) != 'f') { + return false; + } + + for (int i = 0; i < 32; i++) { + g_autofree char *reg_name = g_strdup(riscv_fpr_regnames[i]); + char *reg1 = strtok(reg_name, "/"); + char *reg2 = strtok(NULL, "/"); + + if (strcasecmp(reg1, name) == 0 || + (reg2 && strcasecmp(reg2, name) == 0)) { + *val = env->fpr[i]; + return true; + } + } + + return false; +} + +static bool reg_is_vreg(const char *name) +{ + if (qemu_tolower(name[0]) != 'v' || strlen(name) > 3) { + return false; + } + + for (int i = 0; i < 32; i++) { + if (strcasecmp(name, riscv_rvv_regnames[i]) == 0) { + return true; + } + } + + return false; +} + +int 
target_get_monitor_def(CPUState *cs, const char *name, uint64_t *pval) +{ + CPURISCVState *env = &RISCV_CPU(cs)->env; + target_ulong val = 0; + uint64_t val64 = 0; + int i; + + if (reg_is_ulong_integer(env, name, &val, false) || + reg_is_ulong_integer(env, name, &val, true)) { + *pval = val; + return 0; + } + + if (reg_is_u64_fpu(env, name, &val64)) { + *pval = val64; + return 0; + } + + if (reg_is_vreg(name)) { + if (!riscv_cpu_cfg(env)->ext_zve32x) { + return -EINVAL; + } + + qemu_printf("Unable to print the value of vector " + "vreg '%s' from this API\n", name); + + /* + * We're returning 0 because returning -EINVAL triggers + * an 'unknown register' message in exp_unary() later, + * which feels ankward after our own error message. + */ + *pval = 0; + return 0; + } + + for (i = 0; i < ARRAY_SIZE(csr_ops); i++) { + RISCVException res; + int csrno = i; + + /* + * Early skip when possible since we're going + * through a lot of NULL entries. + */ + if (csr_ops[csrno].predicate == NULL) { + continue; + } + + if (strcasecmp(csr_ops[csrno].name, name) != 0) { + continue; + } + + res = riscv_csrrw_debug(env, csrno, &val, 0, 0); + + /* + * Rely on the smode, hmode, etc, predicates within csr.c + * to do the filtering of the registers that are present. + */ + if (res == RISCV_EXCP_NONE) { + *pval = val; + return 0; + } + } + + return -EINVAL; +} diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index 78fb2791847e3..1150bd14697cc 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -191,7 +191,8 @@ static TCGTBCPUState riscv_get_tb_cpu_state(CPUState *cs) return (TCGTBCPUState){ .pc = env->xl == MXL_RV32 ? 
env->pc & UINT32_MAX : env->pc, - .flags = flags + .flags = flags, + .cs_base = env->misa_ext, }; } @@ -416,12 +417,21 @@ static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp) static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, Error **errp) { + uint32_t min_vlen; uint32_t vlen = cfg->vlenb << 3; - if (vlen > RV_VLEN_MAX || vlen < 128) { + if (riscv_has_ext(env, RVV)) { + min_vlen = 128; + } else if (cfg->ext_zve64x) { + min_vlen = 64; + } else if (cfg->ext_zve32x) { + min_vlen = 32; + } + + if (vlen > RV_VLEN_MAX || vlen < min_vlen) { error_setg(errp, "Vector extension implementation only supports VLEN " - "in the range [128, %d]", RV_VLEN_MAX); + "in the range [%d, %d]", min_vlen, RV_VLEN_MAX); return; } @@ -431,6 +441,12 @@ static void riscv_cpu_validate_v(CPURISCVState *env, RISCVCPUConfig *cfg, "in the range [8, 64]"); return; } + + if (vlen < cfg->elen) { + error_setg(errp, "Vector extension implementation requires VLEN " + "to be greater than or equal to ELEN"); + return; + } } static void riscv_cpu_disable_priv_spec_isa_exts(RISCVCPU *cpu) @@ -660,7 +676,7 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (riscv_has_ext(env, RVV)) { + if (cpu->cfg.ext_zve32x) { riscv_cpu_validate_v(env, &cpu->cfg, &local_err); if (local_err != NULL) { error_propagate(errp, local_err); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 9ddef2d6e2a91..9a53aecbfe998 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -24,6 +24,7 @@ #include "exec/helper-gen.h" #include "exec/target_page.h" #include "exec/translator.h" +#include "accel/tcg/cpu-ldst.h" #include "exec/translation-block.h" #include "exec/log.h" #include "semihosting/semihost.h" @@ -285,7 +286,8 @@ static void exit_tb(DisasContext *ctx) tcg_gen_exit_tb(NULL, 0); } -static void gen_goto_tb(DisasContext *ctx, int n, target_long diff) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, + 
target_long diff) { target_ulong dest = ctx->base.pc_next + diff; @@ -304,12 +306,12 @@ static void gen_goto_tb(DisasContext *ctx, int n, target_long diff) */ if (tb_cflags(ctx->base.tb) & CF_PCREL) { gen_update_pc(ctx, diff); - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); } else { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); gen_update_pc(ctx, diff); } - tcg_gen_exit_tb(ctx->base.tb, n); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { gen_update_pc(ctx, diff); lookup_and_goto_ptr(ctx); @@ -1166,7 +1168,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) CPUState *cpu = ctx->cs; CPURISCVState *env = cpu_env(cpu); - return translator_ldl(env, &ctx->base, pc); + return cpu_ldl_code(env, pc); } #define SS_MMU_INDEX(ctx) (ctx->mem_idx | MMU_IDX_SS_WRITE) diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 7c67d67a13f6c..2de3358ee8640 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -235,26 +235,26 @@ vext_continuous_ldst_host(CPURISCVState *env, vext_ldst_elem_fn_host *ldst_host, void *vd, uint32_t evl, uint32_t reg_start, void *host, uint32_t esz, bool is_load) { -#if HOST_BIG_ENDIAN - for (; reg_start < evl; reg_start++, host += esz) { - ldst_host(vd, reg_start, host); - } -#else - if (esz == 1) { - uint32_t byte_offset = reg_start * esz; - uint32_t size = (evl - reg_start) * esz; - - if (is_load) { - memcpy(vd + byte_offset, host, size); - } else { - memcpy(host, vd + byte_offset, size); - } - } else { + if (HOST_BIG_ENDIAN) { for (; reg_start < evl; reg_start++, host += esz) { ldst_host(vd, reg_start, host); } + } else { + if (esz == 1) { + uint32_t byte_offset = reg_start * esz; + uint32_t size = (evl - reg_start) * esz; + + if (is_load) { + memcpy(vd + byte_offset, host, size); + } else { + memcpy(host, vd + byte_offset, size); + } + } else { + for (; reg_start < evl; reg_start++, host += esz) { + ldst_host(vd, reg_start, host); + } + } } -#endif } static void 
vext_set_tail_elems_1s(target_ulong vl, void *vd, @@ -5198,11 +5198,11 @@ GEN_VEXT_VSLIE1UP(16, H2) GEN_VEXT_VSLIE1UP(32, H4) GEN_VEXT_VSLIE1UP(64, H8) -#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ +#define GEN_VEXT_VSLIDE1UP_VX(NAME, BITWIDTH) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vslide1up_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ } /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */ @@ -5249,11 +5249,11 @@ GEN_VEXT_VSLIDE1DOWN(16, H2) GEN_VEXT_VSLIDE1DOWN(32, H4) GEN_VEXT_VSLIDE1DOWN(64, H8) -#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH) \ -void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \ - CPURISCVState *env, uint32_t desc) \ -{ \ - vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ +#define GEN_VEXT_VSLIDE1DOWN_VX(NAME, BITWIDTH) \ +void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \ + CPURISCVState *env, uint32_t desc) \ +{ \ + vslide1down_##BITWIDTH(vd, v0, s1, vs2, env, desc); \ } /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */ diff --git a/target/rx/cpu.c b/target/rx/cpu.c index c6dd5d6f832ee..da02ae7bf888c 100644 --- a/target/rx/cpu.c +++ b/target/rx/cpu.c @@ -75,8 +75,7 @@ static void rx_restore_state_to_opc(CPUState *cs, static bool rx_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & - (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR); + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIR); } static int rx_cpu_mmu_index(CPUState *cs, bool ifunc) diff --git a/target/rx/helper.c b/target/rx/helper.c index 0640ab322b587..41c9606fd1d27 100644 --- a/target/rx/helper.c +++ b/target/rx/helper.c @@ -44,7 +44,7 @@ void rx_cpu_unpack_psw(CPURXState *env, uint32_t psw, int rte) void rx_cpu_do_interrupt(CPUState *cs) { 
CPURXState *env = cpu_env(cs); - int do_irq = cs->interrupt_request & INT_FLAGS; + int do_irq = cpu_test_interrupt(cs, INT_FLAGS); uint32_t save_psw; env->in_sleep = 0; @@ -63,7 +63,7 @@ void rx_cpu_do_interrupt(CPUState *cs) env->bpsw = save_psw; env->pc = env->fintv; env->psw_ipl = 15; - cs->interrupt_request &= ~CPU_INTERRUPT_FIR; + cpu_reset_interrupt(cs, CPU_INTERRUPT_FIR); qemu_set_irq(env->ack, env->ack_irq); qemu_log_mask(CPU_LOG_INT, "fast interrupt raised\n"); } else if (do_irq & CPU_INTERRUPT_HARD) { @@ -73,7 +73,7 @@ void rx_cpu_do_interrupt(CPUState *cs) cpu_stl_data(env, env->isp, env->pc); env->pc = cpu_ldl_data(env, env->intb + env->ack_irq * 4); env->psw_ipl = env->ack_ipl; - cs->interrupt_request &= ~CPU_INTERRUPT_HARD; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); qemu_set_irq(env->ack, env->ack_irq); qemu_log_mask(CPU_LOG_INT, "interrupt 0x%02x raised\n", env->ack_irq); diff --git a/target/rx/translate.c b/target/rx/translate.c index 19a9584a82974..ef865f14bf5ee 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -40,8 +40,8 @@ typedef struct DisasContext { } DisasContext; typedef struct DisasCompare { - TCGv value; - TCGv temp; + TCGv_i32 value; + TCGv_i32 temp; TCGCond cond; } DisasCompare; @@ -63,15 +63,20 @@ const char *rx_crname(uint8_t cr) #define DISAS_EXIT DISAS_TARGET_2 /* global register indexes */ -static TCGv cpu_regs[16]; -static TCGv cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c; -static TCGv cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl; -static TCGv cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp; -static TCGv cpu_fintv, cpu_intb, cpu_pc; +static TCGv_i32 cpu_regs[16]; +static TCGv_i32 cpu_psw_o, cpu_psw_s, cpu_psw_z, cpu_psw_c; +static TCGv_i32 cpu_psw_i, cpu_psw_pm, cpu_psw_u, cpu_psw_ipl; +static TCGv_i32 cpu_usp, cpu_fpsw, cpu_bpsw, cpu_bpc, cpu_isp; +static TCGv_i32 cpu_fintv, cpu_intb, cpu_pc; static TCGv_i64 cpu_acc; #define cpu_sp cpu_regs[0] +static inline MemOp mo_endian(DisasContext *dc) +{ + return MO_LE; +} 
+ /* decoder helper */ static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn, int i, int n) @@ -85,7 +90,7 @@ static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn, static uint32_t li(DisasContext *ctx, int sz) { - target_ulong addr; + vaddr addr; uint32_t tmp; CPURXState *env = ctx->env; addr = ctx->base.pc_next; @@ -147,12 +152,12 @@ void rx_cpu_dump_state(CPUState *cs, FILE *f, int flags) } } -static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *dc, unsigned tb_slot_idx, vaddr dest) { if (translator_use_goto_tb(&dc->base, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_i32(cpu_pc, dest); - tcg_gen_exit_tb(dc->base.tb, n); + tcg_gen_exit_tb(dc->base.tb, tb_slot_idx); } else { tcg_gen_movi_i32(cpu_pc, dest); tcg_gen_lookup_and_goto_ptr(); @@ -161,34 +166,34 @@ static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest) } /* generic load wrapper */ -static inline void rx_gen_ld(unsigned int size, TCGv reg, TCGv mem) +static void rx_gen_ld(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem) { - tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | MO_TE); + tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_SIGN | mo_endian(ctx)); } /* unsigned load wrapper */ -static inline void rx_gen_ldu(unsigned int size, TCGv reg, TCGv mem) +static void rx_gen_ldu(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem) { - tcg_gen_qemu_ld_i32(reg, mem, 0, size | MO_TE); + tcg_gen_qemu_ld_i32(reg, mem, 0, size | mo_endian(ctx)); } /* generic store wrapper */ -static inline void rx_gen_st(unsigned int size, TCGv reg, TCGv mem) +static void rx_gen_st(DisasContext *ctx, MemOp size, TCGv_i32 reg, TCGv_i32 mem) { - tcg_gen_qemu_st_i32(reg, mem, 0, size | MO_TE); + tcg_gen_qemu_st_i32(reg, mem, 0, size | mo_endian(ctx)); } /* [ri, rb] */ -static inline void rx_gen_regindex(DisasContext *ctx, TCGv mem, - int size, int ri, int rb) +static void rx_gen_regindex(DisasContext 
*ctx, TCGv_i32 mem, + int size, int ri, int rb) { tcg_gen_shli_i32(mem, cpu_regs[ri], size); tcg_gen_add_i32(mem, mem, cpu_regs[rb]); } /* dsp[reg] */ -static inline TCGv rx_index_addr(DisasContext *ctx, TCGv mem, - int ld, int size, int reg) +static TCGv_i32 rx_index_addr(DisasContext *ctx, TCGv_i32 mem, + int ld, int size, int reg) { uint32_t dsp; @@ -218,15 +223,15 @@ static inline MemOp mi_to_mop(unsigned mi) } /* load source operand */ -static inline TCGv rx_load_source(DisasContext *ctx, TCGv mem, - int ld, int mi, int rs) +static TCGv_i32 rx_load_source(DisasContext *ctx, TCGv_i32 mem, + int ld, int mi, int rs) { - TCGv addr; + TCGv_i32 addr; MemOp mop; if (ld < 3) { mop = mi_to_mop(mi); addr = rx_index_addr(ctx, mem, ld, mop & MO_SIZE, rs); - tcg_gen_qemu_ld_i32(mem, addr, 0, mop | MO_TE); + tcg_gen_qemu_ld_i32(mem, addr, 0, mop | mo_endian(ctx)); return mem; } else { return cpu_regs[rs]; @@ -315,7 +320,7 @@ static void psw_cond(DisasCompare *dc, uint32_t cond) } } -static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc) +static void move_from_cr(DisasContext *ctx, TCGv_i32 ret, int cr, uint32_t pc) { switch (cr) { case 0: /* PSW */ @@ -361,7 +366,7 @@ static void move_from_cr(DisasContext *ctx, TCGv ret, int cr, uint32_t pc) } } -static void move_to_cr(DisasContext *ctx, TCGv val, int cr) +static void move_to_cr(DisasContext *ctx, TCGv_i32 val, int cr) { if (cr >= 8 && !is_privileged(ctx, 0)) { /* Some control registers can only be written in privileged mode. */ @@ -414,35 +419,35 @@ static void move_to_cr(DisasContext *ctx, TCGv val, int cr) } } -static void push(TCGv val) +static void push(DisasContext *ctx, TCGv_i32 val) { tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); - rx_gen_st(MO_32, val, cpu_sp); + rx_gen_st(ctx, MO_32, val, cpu_sp); } -static void pop(TCGv ret) +static void pop(DisasContext *ctx, TCGv_i32 ret) { - rx_gen_ld(MO_32, ret, cpu_sp); + rx_gen_ld(ctx, MO_32, ret, cpu_sp); tcg_gen_addi_i32(cpu_sp, cpu_sp, 4); } /* mov. 
rs,dsp5[rd] */ static bool trans_MOV_rm(DisasContext *ctx, arg_MOV_rm *a) { - TCGv mem; - mem = tcg_temp_new(); + TCGv_i32 mem; + mem = tcg_temp_new_i32(); tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); - rx_gen_st(a->sz, cpu_regs[a->rs], mem); + rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem); return true; } /* mov. dsp5[rs],rd */ static bool trans_MOV_mr(DisasContext *ctx, arg_MOV_mr *a) { - TCGv mem; - mem = tcg_temp_new(); + TCGv_i32 mem; + mem = tcg_temp_new_i32(); tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); - rx_gen_ld(a->sz, cpu_regs[a->rd], mem); + rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem); return true; } @@ -459,31 +464,31 @@ static bool trans_MOV_ir(DisasContext *ctx, arg_MOV_ir *a) /* mov. #imm, dsp[rd] */ static bool trans_MOV_im(DisasContext *ctx, arg_MOV_im *a) { - TCGv imm, mem; + TCGv_i32 imm, mem; imm = tcg_constant_i32(a->imm); - mem = tcg_temp_new(); + mem = tcg_temp_new_i32(); tcg_gen_addi_i32(mem, cpu_regs[a->rd], a->dsp << a->sz); - rx_gen_st(a->sz, imm, mem); + rx_gen_st(ctx, a->sz, imm, mem); return true; } /* mov. [ri,rb],rd */ static bool trans_MOV_ar(DisasContext *ctx, arg_MOV_ar *a) { - TCGv mem; - mem = tcg_temp_new(); + TCGv_i32 mem; + mem = tcg_temp_new_i32(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); - rx_gen_ld(a->sz, cpu_regs[a->rd], mem); + rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], mem); return true; } /* mov. rd,[ri,rb] */ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) { - TCGv mem; - mem = tcg_temp_new(); + TCGv_i32 mem; + mem = tcg_temp_new_i32(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); - rx_gen_st(a->sz, cpu_regs[a->rs], mem); + rx_gen_st(ctx, a->sz, cpu_regs[a->rs], mem); return true; } @@ -493,7 +498,7 @@ static bool trans_MOV_ra(DisasContext *ctx, arg_MOV_ra *a) /* mov. rs,rd */ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) { - TCGv tmp, mem, addr; + TCGv_i32 tmp, mem, addr; if (a->lds == 3 && a->ldd == 3) { /* mov. 
rs,rd */ @@ -501,22 +506,22 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) return true; } - mem = tcg_temp_new(); + mem = tcg_temp_new_i32(); if (a->lds == 3) { /* mov. rs,dsp[rd] */ addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rs); - rx_gen_st(a->sz, cpu_regs[a->rd], addr); + rx_gen_st(ctx, a->sz, cpu_regs[a->rd], addr); } else if (a->ldd == 3) { /* mov. dsp[rs],rd */ addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs); - rx_gen_ld(a->sz, cpu_regs[a->rd], addr); + rx_gen_ld(ctx, a->sz, cpu_regs[a->rd], addr); } else { /* mov. dsp[rs],dsp[rd] */ - tmp = tcg_temp_new(); + tmp = tcg_temp_new_i32(); addr = rx_index_addr(ctx, mem, a->lds, a->sz, a->rs); - rx_gen_ld(a->sz, tmp, addr); + rx_gen_ld(ctx, a->sz, tmp, addr); addr = rx_index_addr(ctx, mem, a->ldd, a->sz, a->rd); - rx_gen_st(a->sz, tmp, addr); + rx_gen_st(ctx, a->sz, tmp, addr); } return true; } @@ -525,13 +530,13 @@ static bool trans_MOV_mm(DisasContext *ctx, arg_MOV_mm *a) /* mov. rs,[-rd] */ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) { - TCGv val; - val = tcg_temp_new(); + TCGv_i32 val; + val = tcg_temp_new_i32(); tcg_gen_mov_i32(val, cpu_regs[a->rs]); if (a->ad == 1) { tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } - rx_gen_st(a->sz, val, cpu_regs[a->rd]); + rx_gen_st(ctx, a->sz, val, cpu_regs[a->rd]); if (a->ad == 0) { tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } @@ -542,12 +547,12 @@ static bool trans_MOV_rp(DisasContext *ctx, arg_MOV_rp *a) /* mov. 
[-rd],rs */ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) { - TCGv val; - val = tcg_temp_new(); + TCGv_i32 val; + val = tcg_temp_new_i32(); if (a->ad == 1) { tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } - rx_gen_ld(a->sz, val, cpu_regs[a->rd]); + rx_gen_ld(ctx, a->sz, val, cpu_regs[a->rd]); if (a->ad == 0) { tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } @@ -559,10 +564,10 @@ static bool trans_MOV_pr(DisasContext *ctx, arg_MOV_pr *a) /* movu. dsp[rs],rd */ static bool trans_MOVU_mr(DisasContext *ctx, arg_MOVU_mr *a) { - TCGv mem; - mem = tcg_temp_new(); + TCGv_i32 mem; + mem = tcg_temp_new_i32(); tcg_gen_addi_i32(mem, cpu_regs[a->rs], a->dsp << a->sz); - rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); + rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem); return true; } @@ -576,10 +581,10 @@ static bool trans_MOVU_rr(DisasContext *ctx, arg_MOVU_rr *a) /* movu. [ri,rb],rd */ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a) { - TCGv mem; - mem = tcg_temp_new(); + TCGv_i32 mem; + mem = tcg_temp_new_i32(); rx_gen_regindex(ctx, mem, a->sz, a->ri, a->rb); - rx_gen_ldu(a->sz, cpu_regs[a->rd], mem); + rx_gen_ldu(ctx, a->sz, cpu_regs[a->rd], mem); return true; } @@ -587,12 +592,12 @@ static bool trans_MOVU_ar(DisasContext *ctx, arg_MOVU_ar *a) /* mov. 
[-rd],rs */ static bool trans_MOVU_pr(DisasContext *ctx, arg_MOVU_pr *a) { - TCGv val; - val = tcg_temp_new(); + TCGv_i32 val; + val = tcg_temp_new_i32(); if (a->ad == 1) { tcg_gen_subi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } - rx_gen_ldu(a->sz, val, cpu_regs[a->rd]); + rx_gen_ldu(ctx, a->sz, val, cpu_regs[a->rd]); if (a->ad == 0) { tcg_gen_addi_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1 << a->sz); } @@ -617,9 +622,9 @@ static bool trans_POP(DisasContext *ctx, arg_POP *a) /* popc cr */ static bool trans_POPC(DisasContext *ctx, arg_POPC *a) { - TCGv val; - val = tcg_temp_new(); - pop(val); + TCGv_i32 val; + val = tcg_temp_new_i32(); + pop(ctx, val); move_to_cr(ctx, val, a->cr); return true; } @@ -634,7 +639,7 @@ static bool trans_POPM(DisasContext *ctx, arg_POPM *a) } r = a->rd; while (r <= a->rd2 && r < 16) { - pop(cpu_regs[r++]); + pop(ctx, cpu_regs[r++]); } return true; } @@ -643,34 +648,34 @@ static bool trans_POPM(DisasContext *ctx, arg_POPM *a) /* push. rs */ static bool trans_PUSH_r(DisasContext *ctx, arg_PUSH_r *a) { - TCGv val; - val = tcg_temp_new(); + TCGv_i32 val; + val = tcg_temp_new_i32(); tcg_gen_mov_i32(val, cpu_regs[a->rs]); tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); - rx_gen_st(a->sz, val, cpu_sp); + rx_gen_st(ctx, a->sz, val, cpu_sp); return true; } /* push. 
dsp[rs] */ static bool trans_PUSH_m(DisasContext *ctx, arg_PUSH_m *a) { - TCGv mem, val, addr; - mem = tcg_temp_new(); - val = tcg_temp_new(); + TCGv_i32 mem, val, addr; + mem = tcg_temp_new_i32(); + val = tcg_temp_new_i32(); addr = rx_index_addr(ctx, mem, a->ld, a->sz, a->rs); - rx_gen_ld(a->sz, val, addr); + rx_gen_ld(ctx, a->sz, val, addr); tcg_gen_subi_i32(cpu_sp, cpu_sp, 4); - rx_gen_st(a->sz, val, cpu_sp); + rx_gen_st(ctx, a->sz, val, cpu_sp); return true; } /* pushc rx */ static bool trans_PUSHC(DisasContext *ctx, arg_PUSHC *a) { - TCGv val; - val = tcg_temp_new(); + TCGv_i32 val; + val = tcg_temp_new_i32(); move_from_cr(ctx, val, a->cr, ctx->pc); - push(val); + push(ctx, val); return true; } @@ -685,7 +690,7 @@ static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a) } r = a->rs2; while (r >= a->rs && r >= 0) { - push(cpu_regs[r--]); + push(ctx, cpu_regs[r--]); } return true; } @@ -693,8 +698,8 @@ static bool trans_PUSHM(DisasContext *ctx, arg_PUSHM *a) /* xchg rs,rd */ static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a) { - TCGv tmp; - tmp = tcg_temp_new(); + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(); tcg_gen_mov_i32(tmp, cpu_regs[a->rs]); tcg_gen_mov_i32(cpu_regs[a->rs], cpu_regs[a->rd]); tcg_gen_mov_i32(cpu_regs[a->rd], tmp); @@ -704,8 +709,8 @@ static bool trans_XCHG_rr(DisasContext *ctx, arg_XCHG_rr *a) /* xchg dsp[rs].,rd */ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a) { - TCGv mem, addr; - mem = tcg_temp_new(); + TCGv_i32 mem, addr; + mem = tcg_temp_new_i32(); switch (a->mi) { case 0: /* dsp[rs].b */ case 1: /* dsp[rs].w */ @@ -724,10 +729,10 @@ static bool trans_XCHG_mr(DisasContext *ctx, arg_XCHG_mr *a) return true; } -static inline void stcond(TCGCond cond, int rd, int imm) +static void stcond(TCGCond cond, int rd, int imm) { - TCGv z; - TCGv _imm; + TCGv_i32 z; + TCGv_i32 _imm; z = tcg_constant_i32(0); _imm = tcg_constant_i32(imm); tcg_gen_movcond_i32(cond, cpu_regs[rd], cpu_psw_z, z, @@ -753,15 +758,15 @@ static bool 
trans_STNZ(DisasContext *ctx, arg_STNZ *a) static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) { DisasCompare dc; - TCGv val, mem, addr; - dc.temp = tcg_temp_new(); + TCGv_i32 val, mem, addr; + dc.temp = tcg_temp_new_i32(); psw_cond(&dc, a->cd); if (a->ld < 3) { - val = tcg_temp_new(); - mem = tcg_temp_new(); + val = tcg_temp_new_i32(); + mem = tcg_temp_new_i32(); tcg_gen_setcondi_i32(dc.cond, val, dc.value, 0); addr = rx_index_addr(ctx, mem, a->sz, a->ld, a->rd); - rx_gen_st(a->sz, val, addr); + rx_gen_st(ctx, a->sz, val, addr); } else { tcg_gen_setcondi_i32(dc.cond, cpu_regs[a->rd], dc.value, 0); } @@ -772,7 +777,7 @@ static bool trans_SCCnd(DisasContext *ctx, arg_SCCnd *a) static bool trans_RTSD_i(DisasContext *ctx, arg_RTSD_i *a) { tcg_gen_addi_i32(cpu_sp, cpu_sp, a->imm << 2); - pop(cpu_pc); + pop(ctx, cpu_pc); ctx->base.is_jmp = DISAS_JUMP; return true; } @@ -792,42 +797,42 @@ static bool trans_RTSD_irr(DisasContext *ctx, arg_RTSD_irr *a) tcg_gen_addi_i32(cpu_sp, cpu_sp, adj << 2); dst = a->rd; while (dst <= a->rd2 && dst < 16) { - pop(cpu_regs[dst++]); + pop(ctx, cpu_regs[dst++]); } - pop(cpu_pc); + pop(ctx, cpu_pc); ctx->base.is_jmp = DISAS_JUMP; return true; } -typedef void (*op2fn)(TCGv ret, TCGv arg1); -typedef void (*op3fn)(TCGv ret, TCGv arg1, TCGv arg2); +typedef void (*op2fn)(TCGv_i32 ret, TCGv_i32 arg1); +typedef void (*op3fn)(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2); -static inline void rx_gen_op_rr(op2fn opr, int dst, int src) +static void rx_gen_op_rr(op2fn opr, int dst, int src) { opr(cpu_regs[dst], cpu_regs[src]); } -static inline void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2) +static void rx_gen_op_rrr(op3fn opr, int dst, int src, int src2) { opr(cpu_regs[dst], cpu_regs[src], cpu_regs[src2]); } -static inline void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2) +static void rx_gen_op_irr(op3fn opr, int dst, int src, uint32_t src2) { - TCGv imm = tcg_constant_i32(src2); + TCGv_i32 imm = tcg_constant_i32(src2); 
opr(cpu_regs[dst], cpu_regs[src], imm); } -static inline void rx_gen_op_mr(op3fn opr, DisasContext *ctx, - int dst, int src, int ld, int mi) +static void rx_gen_op_mr(op3fn opr, DisasContext *ctx, + int dst, int src, int ld, int mi) { - TCGv val, mem; - mem = tcg_temp_new(); + TCGv_i32 val, mem; + mem = tcg_temp_new_i32(); val = rx_load_source(ctx, mem, ld, mi, src); opr(cpu_regs[dst], cpu_regs[dst], val); } -static void rx_and(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_and(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_and_i32(cpu_psw_s, arg1, arg2); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); @@ -857,7 +862,7 @@ static bool trans_AND_rrr(DisasContext *ctx, arg_AND_rrr *a) return true; } -static void rx_or(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_or(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_or_i32(cpu_psw_s, arg1, arg2); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); @@ -887,7 +892,7 @@ static bool trans_OR_rrr(DisasContext *ctx, arg_OR_rrr *a) return true; } -static void rx_xor(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_xor(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_xor_i32(cpu_psw_s, arg1, arg2); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); @@ -909,7 +914,7 @@ static bool trans_XOR_mr(DisasContext *ctx, arg_XOR_mr *a) return true; } -static void rx_tst(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_tst(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_and_i32(cpu_psw_s, arg1, arg2); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_s); @@ -930,7 +935,7 @@ static bool trans_TST_mr(DisasContext *ctx, arg_TST_mr *a) return true; } -static void rx_not(TCGv ret, TCGv arg1) +static void rx_not(TCGv_i32 ret, TCGv_i32 arg1) { tcg_gen_not_i32(ret, arg1); tcg_gen_mov_i32(cpu_psw_z, ret); @@ -945,7 +950,7 @@ static bool trans_NOT_rr(DisasContext *ctx, arg_NOT_rr *a) return true; } -static void rx_neg(TCGv ret, TCGv arg1) +static void rx_neg(TCGv_i32 ret, TCGv_i32 arg1) { tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_psw_o, arg1, 0x80000000); tcg_gen_neg_i32(ret, 
arg1); @@ -964,9 +969,9 @@ static bool trans_NEG_rr(DisasContext *ctx, arg_NEG_rr *a) } /* ret = arg1 + arg2 + psw_c */ -static void rx_adc(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_adc(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - TCGv z = tcg_constant_i32(0); + TCGv_i32 z = tcg_constant_i32(0); tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, cpu_psw_c, z); tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, cpu_psw_s, cpu_psw_c, arg2, z); tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); @@ -1002,9 +1007,9 @@ static bool trans_ADC_mr(DisasContext *ctx, arg_ADC_mr *a) } /* ret = arg1 + arg2 */ -static void rx_add(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_add(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - TCGv z = tcg_constant_i32(0); + TCGv_i32 z = tcg_constant_i32(0); tcg_gen_add2_i32(cpu_psw_s, cpu_psw_c, arg1, z, arg2, z); tcg_gen_xor_i32(cpu_psw_o, cpu_psw_s, arg1); tcg_gen_xor_i32(cpu_psw_z, arg1, arg2); @@ -1037,7 +1042,7 @@ static bool trans_ADD_rrr(DisasContext *ctx, arg_ADD_rrr *a) } /* ret = arg1 - arg2 */ -static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_sub(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { tcg_gen_sub_i32(cpu_psw_s, arg1, arg2); tcg_gen_setcond_i32(TCG_COND_GEU, cpu_psw_c, arg1, arg2); @@ -1051,17 +1056,17 @@ static void rx_sub(TCGv ret, TCGv arg1, TCGv arg2) } } -static void rx_cmp(TCGv dummy, TCGv arg1, TCGv arg2) +static void rx_cmp(TCGv_i32 dummy, TCGv_i32 arg1, TCGv_i32 arg2) { rx_sub(NULL, arg1, arg2); } /* ret = arg1 - arg2 - !psw_c */ /* -> ret = arg1 + ~arg2 + psw_c */ -static void rx_sbb(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_sbb(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { - TCGv temp; - temp = tcg_temp_new(); + TCGv_i32 temp; + temp = tcg_temp_new_i32(); tcg_gen_not_i32(temp, arg2); rx_adc(ret, arg1, temp); } @@ -1187,7 +1192,7 @@ static bool trans_MUL_rrr(DisasContext *ctx, arg_MUL_rrr *a) /* emul #imm, rd */ static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) { - TCGv imm = 
tcg_constant_i32(a->imm); + TCGv_i32 imm = tcg_constant_i32(a->imm); if (a->rd > 14) { qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); } @@ -1200,11 +1205,11 @@ static bool trans_EMUL_ir(DisasContext *ctx, arg_EMUL_ir *a) /* emul dsp[rs], rd */ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) { - TCGv val, mem; + TCGv_i32 val, mem; if (a->rd > 14) { qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); } - mem = tcg_temp_new(); + mem = tcg_temp_new_i32(); val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); tcg_gen_muls2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], val); @@ -1214,7 +1219,7 @@ static bool trans_EMUL_mr(DisasContext *ctx, arg_EMUL_mr *a) /* emulu #imm, rd */ static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) { - TCGv imm = tcg_constant_i32(a->imm); + TCGv_i32 imm = tcg_constant_i32(a->imm); if (a->rd > 14) { qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); } @@ -1227,23 +1232,23 @@ static bool trans_EMULU_ir(DisasContext *ctx, arg_EMULU_ir *a) /* emulu dsp[rs], rd */ static bool trans_EMULU_mr(DisasContext *ctx, arg_EMULU_mr *a) { - TCGv val, mem; + TCGv_i32 val, mem; if (a->rd > 14) { qemu_log_mask(LOG_GUEST_ERROR, "rd too large %d", a->rd); } - mem = tcg_temp_new(); + mem = tcg_temp_new_i32(); val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); tcg_gen_mulu2_i32(cpu_regs[a->rd], cpu_regs[(a->rd + 1) & 15], cpu_regs[a->rd], val); return true; } -static void rx_div(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_div(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { gen_helper_div(ret, tcg_env, arg1, arg2); } -static void rx_divu(TCGv ret, TCGv arg1, TCGv arg2) +static void rx_divu(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { gen_helper_divu(ret, tcg_env, arg1, arg2); } @@ -1283,8 +1288,8 @@ static bool trans_DIVU_mr(DisasContext *ctx, arg_DIVU_mr *a) /* shll #imm:5, rs2, rd */ static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a) { - TCGv tmp; - tmp = tcg_temp_new(); + 
TCGv_i32 tmp; + tmp = tcg_temp_new_i32(); if (a->imm) { tcg_gen_sari_i32(cpu_psw_c, cpu_regs[a->rs2], 32 - a->imm); tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rs2], a->imm); @@ -1306,14 +1311,14 @@ static bool trans_SHLL_irr(DisasContext *ctx, arg_SHLL_irr *a) static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) { TCGLabel *noshift, *done; - TCGv count, tmp; + TCGv_i32 count, tmp; noshift = gen_new_label(); done = gen_new_label(); /* if (cpu_regs[a->rs]) { */ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[a->rs], 0, noshift); - count = tcg_temp_new(); - tmp = tcg_temp_new(); + count = tcg_temp_new_i32(); + tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 31); tcg_gen_sub_i32(count, tcg_constant_i32(32), tmp); tcg_gen_sar_i32(cpu_psw_c, cpu_regs[a->rd], count); @@ -1334,10 +1339,10 @@ static bool trans_SHLL_rr(DisasContext *ctx, arg_SHLL_rr *a) return true; } -static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm, - unsigned int alith) +static void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm, + unsigned int alith) { - static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = { + static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = { tcg_gen_shri_i32, tcg_gen_sari_i32, }; tcg_debug_assert(alith < 2); @@ -1354,20 +1359,21 @@ static inline void shiftr_imm(uint32_t rd, uint32_t rs, uint32_t imm, tcg_gen_mov_i32(cpu_psw_s, cpu_regs[rd]); } -static inline void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith) +static void shiftr_reg(uint32_t rd, uint32_t rs, unsigned int alith) { TCGLabel *noshift, *done; - TCGv count; - static void (* const gen_sXri[])(TCGv ret, TCGv arg1, int arg2) = { + TCGv_i32 count; + static void (* const gen_sXri[])(TCGv_i32 ret, TCGv_i32 arg1, int arg2) = { tcg_gen_shri_i32, tcg_gen_sari_i32, }; - static void (* const gen_sXr[])(TCGv ret, TCGv arg1, TCGv arg2) = { + static void (* const gen_sXr[])(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2) = { tcg_gen_shr_i32, 
tcg_gen_sar_i32, }; tcg_debug_assert(alith < 2); noshift = gen_new_label(); done = gen_new_label(); - count = tcg_temp_new(); + count = tcg_temp_new_i32(); /* if (cpu_regs[rs]) { */ tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_regs[rs], 0, noshift); tcg_gen_andi_i32(count, cpu_regs[rs], 31); @@ -1419,8 +1425,8 @@ static bool trans_SHLR_rr(DisasContext *ctx, arg_SHLR_rr *a) /* rolc rd */ static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a) { - TCGv tmp; - tmp = tcg_temp_new(); + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(); tcg_gen_shri_i32(tmp, cpu_regs[a->rd], 31); tcg_gen_shli_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1); tcg_gen_or_i32(cpu_regs[a->rd], cpu_regs[a->rd], cpu_psw_c); @@ -1433,8 +1439,8 @@ static bool trans_ROLC(DisasContext *ctx, arg_ROLC *a) /* rorc rd */ static bool trans_RORC(DisasContext *ctx, arg_RORC *a) { - TCGv tmp; - tmp = tcg_temp_new(); + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_regs[a->rd], 0x00000001); tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rd], 1); tcg_gen_shli_i32(cpu_psw_c, cpu_psw_c, 31); @@ -1447,7 +1453,7 @@ static bool trans_RORC(DisasContext *ctx, arg_RORC *a) enum {ROTR = 0, ROTL = 1}; enum {ROT_IMM = 0, ROT_REG = 1}; -static inline void rx_rot(int ir, int dir, int rd, int src) +static void rx_rot(int ir, int dir, int rd, int src) { switch (dir) { case ROTL: @@ -1509,8 +1515,8 @@ static bool trans_REVL(DisasContext *ctx, arg_REVL *a) /* revw rs, rd */ static bool trans_REVW(DisasContext *ctx, arg_REVW *a) { - TCGv tmp; - tmp = tcg_temp_new(); + TCGv_i32 tmp; + tmp = tcg_temp_new_i32(); tcg_gen_andi_i32(tmp, cpu_regs[a->rs], 0x00ff00ff); tcg_gen_shli_i32(tmp, tmp, 8); tcg_gen_shri_i32(cpu_regs[a->rd], cpu_regs[a->rs], 8); @@ -1527,7 +1533,7 @@ static void rx_bcnd_main(DisasContext *ctx, int cd, int dst) switch (cd) { case 0 ... 
13: - dc.temp = tcg_temp_new(); + dc.temp = tcg_temp_new_i32(); psw_cond(&dc, cd); t = gen_new_label(); done = gen_new_label(); @@ -1582,10 +1588,10 @@ static bool trans_BRA_l(DisasContext *ctx, arg_BRA_l *a) return true; } -static inline void rx_save_pc(DisasContext *ctx) +static void rx_save_pc(DisasContext *ctx) { - TCGv pc = tcg_constant_i32(ctx->base.pc_next); - push(pc); + TCGv_i32 pc = tcg_constant_i32(ctx->base.pc_next); + push(ctx, pc); } /* jmp rs */ @@ -1626,7 +1632,7 @@ static bool trans_BSR_l(DisasContext *ctx, arg_BSR_l *a) /* rts */ static bool trans_RTS(DisasContext *ctx, arg_RTS *a) { - pop(cpu_pc); + pop(ctx, cpu_pc); ctx->base.is_jmp = DISAS_JUMP; return true; } @@ -1667,7 +1673,7 @@ static bool trans_SMOVB(DisasContext *ctx, arg_SMOVB *a) #define STRING(op) \ do { \ - TCGv size = tcg_constant_i32(a->sz); \ + TCGv_i32 size = tcg_constant_i32(a->sz); \ gen_helper_##op(tcg_env, size); \ } while (0) @@ -1798,7 +1804,7 @@ static bool trans_MVTACLO(DisasContext *ctx, arg_MVTACLO *a) /* racw #imm */ static bool trans_RACW(DisasContext *ctx, arg_RACW *a) { - TCGv imm = tcg_constant_i32(a->imm + 1); + TCGv_i32 imm = tcg_constant_i32(a->imm + 1); gen_helper_racw(tcg_env, imm); return true; } @@ -1806,8 +1812,8 @@ static bool trans_RACW(DisasContext *ctx, arg_RACW *a) /* sat rd */ static bool trans_SAT(DisasContext *ctx, arg_SAT *a) { - TCGv tmp, z; - tmp = tcg_temp_new(); + TCGv_i32 tmp, z; + tmp = tcg_temp_new_i32(); z = tcg_constant_i32(0); /* S == 1 -> 0xffffffff / S == 0 -> 0x00000000 */ tcg_gen_sari_i32(tmp, cpu_psw_s, 31); @@ -1830,7 +1836,7 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ cat3(arg_, name, _ir) * a) \ { \ - TCGv imm = tcg_constant_i32(li(ctx, 0)); \ + TCGv_i32 imm = tcg_constant_i32(li(ctx, 0)); \ gen_helper_##op(cpu_regs[a->rd], tcg_env, \ cpu_regs[a->rd], imm); \ return true; \ @@ -1838,8 +1844,8 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) static bool 
cat3(trans_, name, _mr)(DisasContext *ctx, \ cat3(arg_, name, _mr) * a) \ { \ - TCGv val, mem; \ - mem = tcg_temp_new(); \ + TCGv_i32 val, mem; \ + mem = tcg_temp_new_i32(); \ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ gen_helper_##op(cpu_regs[a->rd], tcg_env, \ cpu_regs[a->rd], val); \ @@ -1849,8 +1855,8 @@ static bool trans_SATR(DisasContext *ctx, arg_SATR *a) #define FCONVOP(name, op) \ static bool trans_##name(DisasContext *ctx, arg_##name * a) \ { \ - TCGv val, mem; \ - mem = tcg_temp_new(); \ + TCGv_i32 val, mem; \ + mem = tcg_temp_new_i32(); \ val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); \ gen_helper_##op(cpu_regs[a->rd], tcg_env, val); \ return true; \ @@ -1864,7 +1870,7 @@ FOP(FDIV, fdiv) /* fcmp #imm, rd */ static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a) { - TCGv imm = tcg_constant_i32(li(ctx, 0)); + TCGv_i32 imm = tcg_constant_i32(li(ctx, 0)); gen_helper_fcmp(tcg_env, cpu_regs[a->rd], imm); return true; } @@ -1873,8 +1879,8 @@ static bool trans_FCMP_ir(DisasContext *ctx, arg_FCMP_ir * a) /* fcmp rs, rd */ static bool trans_FCMP_mr(DisasContext *ctx, arg_FCMP_mr *a) { - TCGv val, mem; - mem = tcg_temp_new(); + TCGv_i32 val, mem; + mem = tcg_temp_new_i32(); val = rx_load_source(ctx, mem, a->ld, MO_32, a->rs); gen_helper_fcmp(tcg_env, cpu_regs[a->rd], val); return true; @@ -1887,70 +1893,70 @@ FCONVOP(ROUND, round) /* itof dsp[rs], rd */ static bool trans_ITOF(DisasContext *ctx, arg_ITOF * a) { - TCGv val, mem; - mem = tcg_temp_new(); + TCGv_i32 val, mem; + mem = tcg_temp_new_i32(); val = rx_load_source(ctx, mem, a->ld, a->mi, a->rs); gen_helper_itof(cpu_regs[a->rd], tcg_env, val); return true; } -static void rx_bsetm(TCGv mem, TCGv mask) +static void rx_bsetm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask) { - TCGv val; - val = tcg_temp_new(); - rx_gen_ld(MO_8, val, mem); + TCGv_i32 val; + val = tcg_temp_new_i32(); + rx_gen_ld(ctx, MO_8, val, mem); tcg_gen_or_i32(val, val, mask); - rx_gen_st(MO_8, val, mem); + 
rx_gen_st(ctx, MO_8, val, mem); } -static void rx_bclrm(TCGv mem, TCGv mask) +static void rx_bclrm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask) { - TCGv val; - val = tcg_temp_new(); - rx_gen_ld(MO_8, val, mem); + TCGv_i32 val; + val = tcg_temp_new_i32(); + rx_gen_ld(ctx, MO_8, val, mem); tcg_gen_andc_i32(val, val, mask); - rx_gen_st(MO_8, val, mem); + rx_gen_st(ctx, MO_8, val, mem); } -static void rx_btstm(TCGv mem, TCGv mask) +static void rx_btstm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask) { - TCGv val; - val = tcg_temp_new(); - rx_gen_ld(MO_8, val, mem); + TCGv_i32 val; + val = tcg_temp_new_i32(); + rx_gen_ld(ctx, MO_8, val, mem); tcg_gen_and_i32(val, val, mask); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, val, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); } -static void rx_bnotm(TCGv mem, TCGv mask) +static void rx_bnotm(DisasContext *ctx, TCGv_i32 mem, TCGv_i32 mask) { - TCGv val; - val = tcg_temp_new(); - rx_gen_ld(MO_8, val, mem); + TCGv_i32 val; + val = tcg_temp_new_i32(); + rx_gen_ld(ctx, MO_8, val, mem); tcg_gen_xor_i32(val, val, mask); - rx_gen_st(MO_8, val, mem); + rx_gen_st(ctx, MO_8, val, mem); } -static void rx_bsetr(TCGv reg, TCGv mask) +static void rx_bsetr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask) { tcg_gen_or_i32(reg, reg, mask); } -static void rx_bclrr(TCGv reg, TCGv mask) +static void rx_bclrr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask) { tcg_gen_andc_i32(reg, reg, mask); } -static inline void rx_btstr(TCGv reg, TCGv mask) +static void rx_btstr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask) { - TCGv t0; - t0 = tcg_temp_new(); + TCGv_i32 t0; + t0 = tcg_temp_new_i32(); tcg_gen_and_i32(t0, reg, mask); tcg_gen_setcondi_i32(TCG_COND_NE, cpu_psw_c, t0, 0); tcg_gen_mov_i32(cpu_psw_z, cpu_psw_c); } -static inline void rx_bnotr(TCGv reg, TCGv mask) +static void rx_bnotr(DisasContext *ctx, TCGv_i32 reg, TCGv_i32 mask) { tcg_gen_xor_i32(reg, reg, mask); } @@ -1959,43 +1965,43 @@ static inline void rx_bnotr(TCGv reg, TCGv mask) static bool 
cat3(trans_, name, _im)(DisasContext *ctx, \ cat3(arg_, name, _im) * a) \ { \ - TCGv mask, mem, addr; \ - mem = tcg_temp_new(); \ + TCGv_i32 mask, mem, addr; \ + mem = tcg_temp_new_i32(); \ mask = tcg_constant_i32(1 << a->imm); \ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ - cat3(rx_, op, m)(addr, mask); \ + cat3(rx_, op, m)(ctx, addr, mask); \ return true; \ } \ static bool cat3(trans_, name, _ir)(DisasContext *ctx, \ cat3(arg_, name, _ir) * a) \ { \ - TCGv mask; \ + TCGv_i32 mask; \ mask = tcg_constant_i32(1 << a->imm); \ - cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ + cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \ return true; \ } \ static bool cat3(trans_, name, _rr)(DisasContext *ctx, \ cat3(arg_, name, _rr) * a) \ { \ - TCGv mask, b; \ - mask = tcg_temp_new(); \ - b = tcg_temp_new(); \ + TCGv_i32 mask, b; \ + mask = tcg_temp_new_i32(); \ + b = tcg_temp_new_i32(); \ tcg_gen_andi_i32(b, cpu_regs[a->rs], 31); \ tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \ - cat3(rx_, op, r)(cpu_regs[a->rd], mask); \ + cat3(rx_, op, r)(ctx, cpu_regs[a->rd], mask); \ return true; \ } \ static bool cat3(trans_, name, _rm)(DisasContext *ctx, \ cat3(arg_, name, _rm) * a) \ { \ - TCGv mask, mem, addr, b; \ - mask = tcg_temp_new(); \ - b = tcg_temp_new(); \ + TCGv_i32 mask, mem, addr, b; \ + mask = tcg_temp_new_i32(); \ + b = tcg_temp_new_i32(); \ tcg_gen_andi_i32(b, cpu_regs[a->rd], 7); \ tcg_gen_shl_i32(mask, tcg_constant_i32(1), b); \ - mem = tcg_temp_new(); \ + mem = tcg_temp_new_i32(); \ addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rs); \ - cat3(rx_, op, m)(addr, mask); \ + cat3(rx_, op, m)(ctx, addr, mask); \ return true; \ } @@ -2004,12 +2010,12 @@ BITOP(BCLR, bclr) BITOP(BTST, btst) BITOP(BNOT, bnot) -static inline void bmcnd_op(TCGv val, TCGCond cond, int pos) +static void bmcnd_op(TCGv_i32 val, TCGCond cond, int pos) { - TCGv bit; + TCGv_i32 bit; DisasCompare dc; - dc.temp = tcg_temp_new(); - bit = tcg_temp_new(); + dc.temp = tcg_temp_new_i32(); + bit = 
tcg_temp_new_i32(); psw_cond(&dc, cond); tcg_gen_andi_i32(val, val, ~(1 << pos)); tcg_gen_setcondi_i32(dc.cond, bit, dc.value, 0); @@ -2019,13 +2025,13 @@ static inline void bmcnd_op(TCGv val, TCGCond cond, int pos) /* bmcnd #imm, dsp[rd] */ static bool trans_BMCnd_im(DisasContext *ctx, arg_BMCnd_im *a) { - TCGv val, mem, addr; - val = tcg_temp_new(); - mem = tcg_temp_new(); + TCGv_i32 val, mem, addr; + val = tcg_temp_new_i32(); + mem = tcg_temp_new_i32(); addr = rx_index_addr(ctx, mem, a->ld, MO_8, a->rd); - rx_gen_ld(MO_8, val, addr); + rx_gen_ld(ctx, MO_8, val, addr); bmcnd_op(val, a->cd, a->imm); - rx_gen_st(MO_8, val, addr); + rx_gen_st(ctx, MO_8, val, addr); return true; } @@ -2045,7 +2051,7 @@ enum { PSW_U = 9, }; -static inline void clrsetpsw(DisasContext *ctx, int cb, int val) +static void clrsetpsw(DisasContext *ctx, int cb, int val) { if (cb < 8) { switch (cb) { @@ -2113,7 +2119,7 @@ static bool trans_MVTIPL(DisasContext *ctx, arg_MVTIPL *a) /* mvtc #imm, rd */ static bool trans_MVTC_i(DisasContext *ctx, arg_MVTC_i *a) { - TCGv imm; + TCGv_i32 imm; imm = tcg_constant_i32(a->imm); move_to_cr(ctx, imm, a->cr); @@ -2137,9 +2143,9 @@ static bool trans_MVFC(DisasContext *ctx, arg_MVFC *a) /* rtfi */ static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a) { - TCGv psw; + TCGv_i32 psw; if (is_privileged(ctx, 1)) { - psw = tcg_temp_new(); + psw = tcg_temp_new_i32(); tcg_gen_mov_i32(cpu_pc, cpu_bpc); tcg_gen_mov_i32(psw, cpu_bpsw); gen_helper_set_psw_rte(tcg_env, psw); @@ -2151,11 +2157,11 @@ static bool trans_RTFI(DisasContext *ctx, arg_RTFI *a) /* rte */ static bool trans_RTE(DisasContext *ctx, arg_RTE *a) { - TCGv psw; + TCGv_i32 psw; if (is_privileged(ctx, 1)) { - psw = tcg_temp_new(); - pop(cpu_pc); - pop(psw); + psw = tcg_temp_new_i32(); + pop(ctx, cpu_pc); + pop(ctx, psw); gen_helper_set_psw_rte(tcg_env, psw); ctx->base.is_jmp = DISAS_EXIT; } @@ -2174,7 +2180,7 @@ static bool trans_BRK(DisasContext *ctx, arg_BRK *a) /* int #imm */ static bool 
trans_INT(DisasContext *ctx, arg_INT *a) { - TCGv vec; + TCGv_i32 vec; tcg_debug_assert(a->imm < 0x100); vec = tcg_constant_i32(a->imm); diff --git a/target/s390x/cpu-system.c b/target/s390x/cpu-system.c index 709ccd52992ce..f3a9ffb2a2772 100644 --- a/target/s390x/cpu-system.c +++ b/target/s390x/cpu-system.c @@ -49,7 +49,7 @@ bool s390_cpu_has_work(CPUState *cs) return false; } - if (!(cs->interrupt_request & CPU_INTERRUPT_HARD)) { + if (!cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) { return false; } diff --git a/target/s390x/helper.c b/target/s390x/helper.c index 5c127da1a6a89..184428c6d9de8 100644 --- a/target/s390x/helper.c +++ b/target/s390x/helper.c @@ -24,8 +24,8 @@ #include "gdbstub/helpers.h" #include "qemu/timer.h" #include "hw/s390x/ioinst.h" -#include "target/s390x/kvm/pv.h" #include "system/hw_accel.h" +#include "system/memory.h" #include "system/runstate.h" #include "exec/target_page.h" #include "exec/watchpoint.h" @@ -107,19 +107,23 @@ LowCore *cpu_map_lowcore(CPUS390XState *env) { LowCore *lowcore; hwaddr len = sizeof(LowCore); + CPUState *cs = env_cpu(env); + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; - lowcore = cpu_physical_memory_map(env->psa, &len, true); + lowcore = address_space_map(cs->as, env->psa, &len, true, attrs); if (len < sizeof(LowCore)) { - cpu_abort(env_cpu(env), "Could not map lowcore\n"); + cpu_abort(cs, "Could not map lowcore\n"); } return lowcore; } -void cpu_unmap_lowcore(LowCore *lowcore) +void cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore) { - cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore)); + AddressSpace *as = env_cpu(env)->as; + + address_space_unmap(as, lowcore, sizeof(LowCore), true, sizeof(LowCore)); } void do_restart_interrupt(CPUS390XState *env) @@ -134,7 +138,7 @@ void do_restart_interrupt(CPUS390XState *env) mask = be64_to_cpu(lowcore->restart_new_psw.mask); addr = be64_to_cpu(lowcore->restart_new_psw.addr); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); 
env->pending_int &= ~INTERRUPT_RESTART; s390_cpu_set_psw(env, mask, addr); @@ -177,109 +181,3 @@ void s390_cpu_recompute_watchpoints(CPUState *cs) wp_flags, NULL); } } - -typedef struct SigpSaveArea { - uint64_t fprs[16]; /* 0x0000 */ - uint64_t grs[16]; /* 0x0080 */ - PSW psw; /* 0x0100 */ - uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */ - uint32_t prefix; /* 0x0118 */ - uint32_t fpc; /* 0x011c */ - uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */ - uint32_t todpr; /* 0x0124 */ - uint64_t cputm; /* 0x0128 */ - uint64_t ckc; /* 0x0130 */ - uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */ - uint32_t ars[16]; /* 0x0140 */ - uint64_t crs[16]; /* 0x0384 */ -} SigpSaveArea; -QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512); - -int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch) -{ - static const uint8_t ar_id = 1; - SigpSaveArea *sa; - hwaddr len = sizeof(*sa); - int i; - - /* For PVMs storing will occur when this cpu enters SIE again */ - if (s390_is_pv()) { - return 0; - } - - sa = cpu_physical_memory_map(addr, &len, true); - if (!sa) { - return -EFAULT; - } - if (len != sizeof(*sa)) { - cpu_physical_memory_unmap(sa, len, 1, 0); - return -EFAULT; - } - - if (store_arch) { - cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1); - } - for (i = 0; i < 16; ++i) { - sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i)); - } - for (i = 0; i < 16; ++i) { - sa->grs[i] = cpu_to_be64(cpu->env.regs[i]); - } - sa->psw.addr = cpu_to_be64(cpu->env.psw.addr); - sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env)); - sa->prefix = cpu_to_be32(cpu->env.psa); - sa->fpc = cpu_to_be32(cpu->env.fpc); - sa->todpr = cpu_to_be32(cpu->env.todpr); - sa->cputm = cpu_to_be64(cpu->env.cputm); - sa->ckc = cpu_to_be64(cpu->env.ckc >> 8); - for (i = 0; i < 16; ++i) { - sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]); - } - for (i = 0; i < 16; ++i) { - sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]); - } - - cpu_physical_memory_unmap(sa, len, 1, len); - - return 0; -} - 
-typedef struct SigpAdtlSaveArea { - uint64_t vregs[32][2]; /* 0x0000 */ - uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */ - uint64_t gscb[4]; /* 0x0400 */ - uint8_t pad_0x0420[0x1000 - 0x0420]; /* 0x0420 */ -} SigpAdtlSaveArea; -QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096); - -#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */ -int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len) -{ - SigpAdtlSaveArea *sa; - hwaddr save = len; - int i; - - sa = cpu_physical_memory_map(addr, &save, true); - if (!sa) { - return -EFAULT; - } - if (save != len) { - cpu_physical_memory_unmap(sa, len, 1, 0); - return -EFAULT; - } - - if (s390_has_feat(S390_FEAT_VECTOR)) { - for (i = 0; i < 32; i++) { - sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]); - sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]); - } - } - if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) { - for (i = 0; i < 4; i++) { - sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]); - } - } - - cpu_physical_memory_unmap(sa, len, 1, len); - return 0; -} diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 491cc5f9756ac..916dac1f14ec9 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -468,7 +468,7 @@ static int can_sync_regs(CPUState *cs, int regs) #define KVM_SYNC_REQUIRED_REGS (KVM_SYNC_GPRS | KVM_SYNC_ACRS | \ KVM_SYNC_CRS | KVM_SYNC_PREFIX) -int kvm_arch_put_registers(CPUState *cs, int level, Error **errp) +int kvm_arch_put_registers(CPUState *cs, KvmPutState level, Error **errp) { CPUS390XState *env = cpu_env(cs); struct kvm_fpu fpu = {}; diff --git a/target/s390x/mmu_helper.c b/target/s390x/mmu_helper.c index 00946e9c0fe05..3b1e75f78336c 100644 --- a/target/s390x/mmu_helper.c +++ b/target/s390x/mmu_helper.c @@ -23,6 +23,7 @@ #include "kvm/kvm_s390x.h" #include "system/kvm.h" #include "system/tcg.h" +#include "system/memory.h" #include "exec/page-protection.h" #include "exec/target_page.h" #include "hw/hw.h" @@ -522,6 +523,7 
@@ int s390_cpu_pv_mem_rw(S390CPU *cpu, unsigned int offset, void *hostbuf, int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, int len, bool is_write) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int currlen, nr_pages, i; target_ulong *pages; uint64_t tec; @@ -539,19 +541,28 @@ int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf, pages = g_malloc(nr_pages * sizeof(*pages)); ret = translate_pages(cpu, laddr, nr_pages, pages, is_write, &tec); - if (ret) { - trigger_access_exception(&cpu->env, ret, tec); - } else if (hostbuf != NULL) { + if (ret == 0 && hostbuf != NULL) { + AddressSpace *as = CPU(cpu)->as; + /* Copy data by stepping through the area page by page */ for (i = 0; i < nr_pages; i++) { + MemTxResult res; + currlen = MIN(len, TARGET_PAGE_SIZE - (laddr % TARGET_PAGE_SIZE)); - cpu_physical_memory_rw(pages[i] | (laddr & ~TARGET_PAGE_MASK), - hostbuf, currlen, is_write); + res = address_space_rw(as, pages[i] | (laddr & ~TARGET_PAGE_MASK), + attrs, hostbuf, currlen, is_write); + if (res != MEMTX_OK) { + ret = PGM_ADDRESSING; + break; + } laddr += currlen; hostbuf += currlen; len -= currlen; } } + if (ret) { + trigger_access_exception(&cpu->env, ret, tec); + } g_free(pages); return ret; diff --git a/target/s390x/s390x-internal.h b/target/s390x/s390x-internal.h index 56cce2e7f5060..9691366ec916e 100644 --- a/target/s390x/s390x-internal.h +++ b/target/s390x/s390x-internal.h @@ -323,11 +323,8 @@ void s390x_cpu_timer(void *opaque); void s390_handle_wait(S390CPU *cpu); hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr); -#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area) -int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch); -int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len); LowCore *cpu_map_lowcore(CPUS390XState *env); -void cpu_unmap_lowcore(LowCore *lowcore); +void 
cpu_unmap_lowcore(CPUS390XState *env, LowCore *lowcore); #endif /* CONFIG_USER_ONLY */ diff --git a/target/s390x/sigp.c b/target/s390x/sigp.c index 5e95c4978f962..f5d7bc0fa2256 100644 --- a/target/s390x/sigp.c +++ b/target/s390x/sigp.c @@ -13,12 +13,14 @@ #include "s390x-internal.h" #include "hw/boards.h" #include "system/hw_accel.h" +#include "system/memory.h" #include "system/runstate.h" #include "system/address-spaces.h" #include "exec/cputlb.h" #include "system/tcg.h" #include "trace.h" #include "qapi/qapi-types-machine.h" +#include "target/s390x/kvm/pv.h" QemuMutex qemu_sigp_mutex; @@ -126,6 +128,78 @@ static void sigp_stop(CPUState *cs, run_on_cpu_data arg) si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } +typedef struct SigpSaveArea { + uint64_t fprs[16]; /* 0x0000 */ + uint64_t grs[16]; /* 0x0080 */ + PSW psw; /* 0x0100 */ + uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */ + uint32_t prefix; /* 0x0118 */ + uint32_t fpc; /* 0x011c */ + uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */ + uint32_t todpr; /* 0x0124 */ + uint64_t cputm; /* 0x0128 */ + uint64_t ckc; /* 0x0130 */ + uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */ + uint32_t ars[16]; /* 0x0140 */ + uint64_t crs[16]; /* 0x0384 */ +} SigpSaveArea; +QEMU_BUILD_BUG_ON(sizeof(SigpSaveArea) != 512); + +#define S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area) +static int s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch) +{ + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + AddressSpace *as = CPU(cpu)->as; + SigpSaveArea *sa; + hwaddr len = sizeof(*sa); + int i; + + /* For PVMs storing will occur when this cpu enters SIE again */ + if (s390_is_pv()) { + return 0; + } + + sa = address_space_map(as, addr, &len, true, attrs); + if (!sa) { + return -EFAULT; + } + if (len != sizeof(*sa)) { + address_space_unmap(as, sa, len, true, 0); + return -EFAULT; + } + + if (store_arch) { + static const uint8_t ar_id = 1; + + address_space_stb(as, offsetof(LowCore, ar_access_id), + ar_id, attrs, 
NULL); + + } + for (i = 0; i < 16; ++i) { + sa->fprs[i] = cpu_to_be64(*get_freg(&cpu->env, i)); + } + for (i = 0; i < 16; ++i) { + sa->grs[i] = cpu_to_be64(cpu->env.regs[i]); + } + sa->psw.addr = cpu_to_be64(cpu->env.psw.addr); + sa->psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(&cpu->env)); + sa->prefix = cpu_to_be32(cpu->env.psa); + sa->fpc = cpu_to_be32(cpu->env.fpc); + sa->todpr = cpu_to_be32(cpu->env.todpr); + sa->cputm = cpu_to_be64(cpu->env.cputm); + sa->ckc = cpu_to_be64(cpu->env.ckc >> 8); + for (i = 0; i < 16; ++i) { + sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]); + } + for (i = 0; i < 16; ++i) { + sa->crs[i] = cpu_to_be64(cpu->env.cregs[i]); + } + + address_space_unmap(as, sa, len, true, len); + + return 0; +} + static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg) { S390CPU *cpu = S390_CPU(cs); @@ -172,6 +246,49 @@ static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg) si->cc = SIGP_CC_ORDER_CODE_ACCEPTED; } +typedef struct SigpAdtlSaveArea { + uint64_t vregs[32][2]; /* 0x0000 */ + uint8_t pad_0x0200[0x0400 - 0x0200]; /* 0x0200 */ + uint64_t gscb[4]; /* 0x0400 */ + uint8_t pad_0x0420[0x1000 - 0x0420]; /* 0x0420 */ +} SigpAdtlSaveArea; +QEMU_BUILD_BUG_ON(sizeof(SigpAdtlSaveArea) != 4096); + +#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */ +static int s390_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len) +{ + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + AddressSpace *as = CPU(cpu)->as; + SigpAdtlSaveArea *sa; + hwaddr save = len; + int i; + + sa = address_space_map(as, addr, &save, true, attrs); + if (!sa) { + return -EFAULT; + } + if (save != len) { + address_space_unmap(as, sa, len, true, 0); + return -EFAULT; + } + + if (s390_has_feat(S390_FEAT_VECTOR)) { + for (i = 0; i < 32; i++) { + sa->vregs[i][0] = cpu_to_be64(cpu->env.vregs[i][0]); + sa->vregs[i][1] = cpu_to_be64(cpu->env.vregs[i][1]); + } + } + if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) { 
+ for (i = 0; i < 4; i++) { + sa->gscb[i] = cpu_to_be64(cpu->env.gscb[i]); + } + } + + address_space_unmap(as, sa, len, true, len); + + return 0; +} + #define ADTL_SAVE_LC_MASK 0xfUL static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg) { diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index e4c75d0ce01b9..0ae4e26606594 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -30,6 +30,7 @@ #ifndef CONFIG_USER_ONLY #include "qemu/timer.h" #include "system/address-spaces.h" +#include "system/memory.h" #include "hw/s390x/ioinst.h" #include "hw/s390x/s390_flic.h" #include "hw/boards.h" @@ -284,7 +285,7 @@ static void do_program_interrupt(CPUS390XState *env) addr = be64_to_cpu(lowcore->program_new_psw.addr); lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); s390_cpu_set_psw(env, mask, addr); } @@ -303,7 +304,7 @@ static void do_svc_interrupt(CPUS390XState *env) mask = be64_to_cpu(lowcore->svc_new_psw.mask); addr = be64_to_cpu(lowcore->svc_new_psw.addr); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); s390_cpu_set_psw(env, mask, addr); @@ -377,7 +378,7 @@ static void do_ext_interrupt(CPUS390XState *env) lowcore->external_old_psw.mask = cpu_to_be64(s390_cpu_get_psw_mask(env)); lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); s390_cpu_set_psw(env, mask, addr); } @@ -404,7 +405,7 @@ static void do_io_interrupt(CPUS390XState *env) mask = be64_to_cpu(lowcore->io_new_psw.mask); addr = be64_to_cpu(lowcore->io_new_psw.addr); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); g_free(io); s390_cpu_set_psw(env, mask, addr); @@ -418,16 +419,18 @@ QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024); static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + AddressSpace *as 
= env_cpu(env)->as; hwaddr len = sizeof(MchkExtSaveArea); MchkExtSaveArea *sa; int i; - sa = cpu_physical_memory_map(mcesao, &len, true); + sa = address_space_map(as, mcesao, &len, true, attrs); if (!sa) { return -EFAULT; } if (len != sizeof(MchkExtSaveArea)) { - cpu_physical_memory_unmap(sa, len, 1, 0); + address_space_unmap(as, sa, len, true, 0); return -EFAULT; } @@ -436,7 +439,7 @@ static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao) sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]); } - cpu_physical_memory_unmap(sa, len, 1, len); + address_space_unmap(as, sa, len, true, len); return 0; } @@ -488,7 +491,7 @@ static void do_mchk_interrupt(CPUS390XState *env) mask = be64_to_cpu(lowcore->mcck_new_psw.mask); addr = be64_to_cpu(lowcore->mcck_new_psw.addr); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); s390_cpu_set_psw(env, mask, addr); } @@ -559,7 +562,7 @@ void s390_cpu_do_interrupt(CPUState *cs) /* we might still have pending interrupts, but not deliverable */ if (!env->pending_int && !qemu_s390_flic_has_any(flic)) { - cs->interrupt_request &= ~CPU_INTERRUPT_HARD; + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } /* WAIT PSW during interrupt injection or STOP interrupt */ diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index f7101be5745c2..6d9d601d29aef 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -570,7 +570,7 @@ uint32_t HELPER(tpi)(CPUS390XState *env, uint64_t addr) lowcore->subchannel_nr = cpu_to_be16(io->nr); lowcore->io_int_parm = cpu_to_be32(io->parm); lowcore->io_int_word = cpu_to_be32(io->word); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); } g_free(io); @@ -700,7 +700,7 @@ void HELPER(stfl)(CPUS390XState *env) lowcore = cpu_map_lowcore(env); prepare_stfl(); memcpy(&lowcore->stfl_fac_list, stfl_bytes, sizeof(lowcore->stfl_fac_list)); - cpu_unmap_lowcore(lowcore); + cpu_unmap_lowcore(env, lowcore); } #endif diff --git 
a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index c7e8574438c25..ec9e5a07516af 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -258,9 +258,9 @@ static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es) * 16 byte operations to handle it in a special way. */ g_assert(es <= MO_64); -#if !HOST_BIG_ENDIAN - offs ^= (8 - bytes); -#endif + if (!HOST_BIG_ENDIAN) { + offs ^= (8 - bytes); + } return offs + vec_full_reg_offset(reg); } diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc index e073e5ad3aa58..f3b4b48ab7b76 100644 --- a/target/s390x/tcg/translate_vx.c.inc +++ b/target/s390x/tcg/translate_vx.c.inc @@ -175,9 +175,9 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr, /* convert it to an element offset relative to tcg_env (vec_reg_offset() */ tcg_gen_shli_i64(tmp, tmp, es); -#if !HOST_BIG_ENDIAN - tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); -#endif + if (!HOST_BIG_ENDIAN) { + tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es)); + } tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg)); /* generate the final ptr by adding tcg_env */ diff --git a/target/sh4/cpu.c b/target/sh4/cpu.c index 4f561e8c912f3..21ccb86df48ad 100644 --- a/target/sh4/cpu.c +++ b/target/sh4/cpu.c @@ -108,7 +108,7 @@ static bool superh_io_recompile_replay_branch(CPUState *cs, static bool superh_cpu_has_work(CPUState *cs) { - return cs->interrupt_request & CPU_INTERRUPT_HARD; + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/sh4/cpu.h b/target/sh4/cpu.h index c41ab70dd7c02..b0759010c47d8 100644 --- a/target/sh4/cpu.h +++ b/target/sh4/cpu.h @@ -277,7 +277,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr, uint32_t mem_value); #endif -int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr); +int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr); void cpu_load_tlb(CPUSH4State * 
env); @@ -365,14 +365,14 @@ static inline int cpu_ptel_pr (uint32_t ptel) #define PTEA_TC (1 << 3) #define cpu_ptea_tc(ptea) (((ptea) & PTEA_TC) >> 3) -static inline target_ulong cpu_read_sr(CPUSH4State *env) +static inline uint32_t cpu_read_sr(CPUSH4State *env) { return env->sr | (env->sr_m << SR_M) | (env->sr_q << SR_Q) | (env->sr_t << SR_T); } -static inline void cpu_write_sr(CPUSH4State *env, target_ulong sr) +static inline void cpu_write_sr(CPUSH4State *env, uint32_t sr) { env->sr_m = (sr >> SR_M) & 1; env->sr_q = (sr >> SR_Q) & 1; diff --git a/target/sh4/helper.c b/target/sh4/helper.c index fb7642bda1b74..3b18a320b86db 100644 --- a/target/sh4/helper.c +++ b/target/sh4/helper.c @@ -47,7 +47,7 @@ #if defined(CONFIG_USER_ONLY) -int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr) +int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr) { /* For user mode, only U0 area is cacheable. */ return !(addr & 0x80000000); @@ -58,7 +58,7 @@ int cpu_sh4_is_cached(CPUSH4State *env, target_ulong addr) void superh_cpu_do_interrupt(CPUState *cs) { CPUSH4State *env = cpu_env(cs); - int do_irq = cs->interrupt_request & CPU_INTERRUPT_HARD; + int do_irq = cpu_test_interrupt(cs, CPU_INTERRUPT_HARD); int do_exp, irq_vector = cs->exception_index; /* prioritize exceptions over interrupts */ @@ -231,11 +231,11 @@ static int itlb_replacement(CPUSH4State * env) /* Find the corresponding entry in the right TLB Return entry, MMU_DTLB_MISS or MMU_DTLB_MULTIPLE */ -static int find_tlb_entry(CPUSH4State * env, target_ulong address, +static int find_tlb_entry(CPUSH4State *env, vaddr address, tlb_t * entries, uint8_t nbtlb, int use_asid) { int match = MMU_DTLB_MISS; - uint32_t start, end; + vaddr start, end; uint8_t asid; int i; @@ -291,7 +291,7 @@ static int copy_utlb_entry_itlb(CPUSH4State *env, int utlb) /* Find itlb entry Return entry, MMU_ITLB_MISS, MMU_ITLB_MULTIPLE or MMU_DTLB_MULTIPLE */ -static int find_itlb_entry(CPUSH4State * env, target_ulong address, +static int 
find_itlb_entry(CPUSH4State *env, vaddr address, int use_asid) { int e; @@ -309,7 +309,7 @@ static int find_itlb_entry(CPUSH4State * env, target_ulong address, /* Find utlb entry Return entry, MMU_DTLB_MISS, MMU_DTLB_MULTIPLE */ -static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid) +static int find_utlb_entry(CPUSH4State *env, vaddr address, int use_asid) { /* per utlb access */ increment_urc(env); @@ -325,8 +325,8 @@ static int find_utlb_entry(CPUSH4State * env, target_ulong address, int use_asid MMU_ITLB_MULTIPLE, MMU_ITLB_VIOLATION, MMU_IADDR_ERROR, MMU_DADDR_ERROR_READ, MMU_DADDR_ERROR_WRITE. */ -static int get_mmu_address(CPUSH4State * env, target_ulong * physical, - int *prot, target_ulong address, +static int get_mmu_address(CPUSH4State *env, hwaddr *physical, + int *prot, vaddr address, MMUAccessType access_type) { int use_asid, n; @@ -392,8 +392,8 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical, return n; } -static int get_physical_address(CPUSH4State * env, target_ulong * physical, - int *prot, target_ulong address, +static int get_physical_address(CPUSH4State *env, hwaddr* physical, + int *prot, vaddr address, MMUAccessType access_type) { /* P1, P2 and P4 areas do not use translation */ @@ -433,7 +433,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical, hwaddr superh_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { - target_ulong physical; + hwaddr physical; int prot; if (get_physical_address(cpu_env(cs), &physical, &prot, addr, MMU_DATA_LOAD) @@ -452,7 +452,7 @@ void cpu_load_tlb(CPUSH4State * env) if (entry->v) { /* Overwriting valid entry in utlb. */ - target_ulong address = entry->vpn << 10; + vaddr address = entry->vpn << 10; tlb_flush_page(cs, address); } @@ -528,7 +528,7 @@ void cpu_sh4_write_mmaped_itlb_addr(CPUSH4State *s, hwaddr addr, tlb_t * entry = &s->itlb[index]; if (entry->v) { /* Overwriting valid entry in itlb. 
*/ - target_ulong address = entry->vpn << 10; + vaddr address = entry->vpn << 10; tlb_flush_page(env_cpu(s), address); } entry->asid = asid; @@ -570,7 +570,7 @@ void cpu_sh4_write_mmaped_itlb_data(CPUSH4State *s, hwaddr addr, /* ITLB Data Array 1 */ if (entry->v) { /* Overwriting valid entry in utlb. */ - target_ulong address = entry->vpn << 10; + vaddr address = entry->vpn << 10; tlb_flush_page(env_cpu(s), address); } entry->ppn = (mem_value & 0x1ffffc00) >> 10; @@ -665,7 +665,7 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr, CPUState *cs = env_cpu(s); /* Overwriting valid entry in utlb. */ - target_ulong address = entry->vpn << 10; + vaddr address = entry->vpn << 10; tlb_flush_page(cs, address); } entry->asid = asid; @@ -716,7 +716,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr, /* UTLB Data Array 1 */ if (entry->v) { /* Overwriting valid entry in utlb. */ - target_ulong address = entry->vpn << 10; + vaddr address = entry->vpn << 10; tlb_flush_page(env_cpu(s), address); } entry->ppn = (mem_value & 0x1ffffc00) >> 10; @@ -735,7 +735,7 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr, } } -int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr) +int cpu_sh4_is_cached(CPUSH4State *env, uint32_t addr) { int n; int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD)); @@ -800,7 +800,7 @@ bool superh_cpu_tlb_fill(CPUState *cs, vaddr address, int size, CPUSH4State *env = cpu_env(cs); int ret; - target_ulong physical; + hwaddr physical; int prot; ret = get_physical_address(env, &physical, &prot, address, access_type); diff --git a/target/sh4/translate.c b/target/sh4/translate.c index 70fd13aa3f55e..b3ae0a3814c7e 100644 --- a/target/sh4/translate.c +++ b/target/sh4/translate.c @@ -223,7 +223,7 @@ static inline bool use_exit_tb(DisasContext *ctx) return (ctx->tbflags & TB_FLAG_GUSA_EXCLUSIVE) != 0; } -static bool use_goto_tb(DisasContext *ctx, target_ulong dest) +static bool 
use_goto_tb(DisasContext *ctx, vaddr dest) { if (use_exit_tb(ctx)) { return false; @@ -231,12 +231,12 @@ static bool use_goto_tb(DisasContext *ctx, target_ulong dest) return translator_use_goto_tb(&ctx->base, dest); } -static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_idx, vaddr dest) { if (use_goto_tb(ctx, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_i32(cpu_pc, dest); - tcg_gen_exit_tb(ctx->base.tb, n); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_idx); } else { tcg_gen_movi_i32(cpu_pc, dest); if (use_exit_tb(ctx)) { @@ -267,7 +267,7 @@ static void gen_jump(DisasContext * ctx) } /* Immediate conditional jump (bt or bf) */ -static void gen_conditional_jump(DisasContext *ctx, target_ulong dest, +static void gen_conditional_jump(DisasContext *ctx, vaddr dest, bool jump_if_true) { TCGLabel *l1 = gen_new_label(); diff --git a/target/sparc/cpu.c b/target/sparc/cpu.c index 245caf2de0218..c9773f15401a9 100644 --- a/target/sparc/cpu.c +++ b/target/sparc/cpu.c @@ -783,7 +783,7 @@ static void sparc_restore_state_to_opc(CPUState *cs, #ifndef CONFIG_USER_ONLY static bool sparc_cpu_has_work(CPUState *cs) { - return (cs->interrupt_request & CPU_INTERRUPT_HARD) && + return cpu_test_interrupt(cs, CPU_INTERRUPT_HARD) && cpu_interrupts_enabled(cpu_env(cs)); } #endif /* !CONFIG_USER_ONLY */ diff --git a/target/sparc/cpu.h b/target/sparc/cpu.h index 31cb3d97eb1b9..7169a502432da 100644 --- a/target/sparc/cpu.h +++ b/target/sparc/cpu.h @@ -3,7 +3,6 @@ #include "qemu/bswap.h" #include "cpu-qom.h" -#include "exec/cpu-common.h" #include "exec/cpu-defs.h" #include "exec/cpu-interrupt.h" #include "qemu/cpu-float.h" diff --git a/target/sparc/helper.c b/target/sparc/helper.c index 9163b9d46ad01..c5d88de37c994 100644 --- a/target/sparc/helper.c +++ b/target/sparc/helper.c @@ -21,6 +21,7 @@ #include "cpu.h" #include "qemu/timer.h" #include "qemu/host-utils.h" +#include "exec/cpu-common.h" 
#include "exec/helper-proto.h" void cpu_raise_exception_ra(CPUSPARCState *env, int tt, uintptr_t ra) diff --git a/target/sparc/insns.decode b/target/sparc/insns.decode index 9e39d232735e6..242ec420161fc 100644 --- a/target/sparc/insns.decode +++ b/target/sparc/insns.decode @@ -88,9 +88,10 @@ CALL 01 i:s30 { [ - STBAR 10 00000 101000 01111 0 0000000000000 + STBAR_v9 10 00000 101000 01111 0 0000000000000 MEMBAR 10 00000 101000 01111 1 000000 cmask:3 mmask:4 + RDY_v9 10 rd:5 101000 00000 0 0000000000000 RDCCR 10 rd:5 101000 00010 0 0000000000000 RDASI 10 rd:5 101000 00011 0 0000000000000 RDTICK 10 rd:5 101000 00100 0 0000000000000 @@ -107,8 +108,26 @@ CALL 01 i:s30 RDSTICK_CMPR 10 rd:5 101000 11001 0 0000000000000 RDSTRAND_STATUS 10 rd:5 101000 11010 0 0000000000000 ] - # Before v8, all rs1 accepted; otherwise rs1==0. - RDY 10 rd:5 101000 rs1:5 0 0000000000000 + + # The v8 manual, section B.30 STBAR instruction, says + # bits [12:0] are ignored, but bit 13 must be 0. + # However, section B.28 Read State Register Instruction has a + # comment that RDASR with rs1 = 15, rd = 0 is STBAR. Here, + # bit 13 is also ignored and rd != 0 is merely reserved. + # + # Solaris 8 executes v9 MEMBAR instruction 0x8143e008 during boot. + # This confirms that bit 13 is ignored, as 0x8143c000 is STBAR. + STBAR_v8 10 ----- 101000 01111 - ------------- + + # For v7, bits [18:0] are ignored. + # For v8, bits [18:14], aka rs1, are repurposed and rs1 = 0 is RDY, + # and other values are RDASR. However, the v8 manual explicitly + # says that rs1 in 1..14 yield undefined results and do not cause + # an illegal instruction trap, and rs1 in 16..31 are available for + # implementation specific usage. + # Implement not causing an illegal instruction trap for v8 by + # continuing to interpret unused values per v7, i.e. as RDY. 
+ RDY_v7 10 rd:5 101000 ----- - ------------- } { @@ -139,14 +158,16 @@ CALL 01 i:s30 } { - RDPSR 10 rd:5 101001 00000 0 0000000000000 - RDHPR_hpstate 10 rd:5 101001 00000 0 0000000000000 + [ + RDHPR_hpstate 10 rd:5 101001 00000 0 0000000000000 + RDHPR_htstate 10 rd:5 101001 00001 0 0000000000000 + RDHPR_hintp 10 rd:5 101001 00011 0 0000000000000 + RDHPR_htba 10 rd:5 101001 00101 0 0000000000000 + RDHPR_hver 10 rd:5 101001 00110 0 0000000000000 + RDHPR_hstick_cmpr 10 rd:5 101001 11111 0 0000000000000 + ] + RDPSR 10 rd:5 101001 ----- - ------------- } -RDHPR_htstate 10 rd:5 101001 00001 0 0000000000000 -RDHPR_hintp 10 rd:5 101001 00011 0 0000000000000 -RDHPR_htba 10 rd:5 101001 00101 0 0000000000000 -RDHPR_hver 10 rd:5 101001 00110 0 0000000000000 -RDHPR_hstick_cmpr 10 rd:5 101001 11111 0 0000000000000 { WRPSR 10 00000 110001 ..... . ............. @n_r_ri @@ -159,26 +180,28 @@ RESTORED 10 00001 110001 00000 0 0000000000000 # UA2005 INVALW { - RDWIM 10 rd:5 101010 00000 0 0000000000000 - RDPR_tpc 10 rd:5 101010 00000 0 0000000000000 + [ + RDPR_tpc 10 rd:5 101010 00000 0 0000000000000 + RDPR_tnpc 10 rd:5 101010 00001 0 0000000000000 + RDPR_tstate 10 rd:5 101010 00010 0 0000000000000 + RDPR_tt 10 rd:5 101010 00011 0 0000000000000 + RDPR_tick 10 rd:5 101010 00100 0 0000000000000 + RDPR_tba 10 rd:5 101010 00101 0 0000000000000 + RDPR_pstate 10 rd:5 101010 00110 0 0000000000000 + RDPR_tl 10 rd:5 101010 00111 0 0000000000000 + RDPR_pil 10 rd:5 101010 01000 0 0000000000000 + RDPR_cwp 10 rd:5 101010 01001 0 0000000000000 + RDPR_cansave 10 rd:5 101010 01010 0 0000000000000 + RDPR_canrestore 10 rd:5 101010 01011 0 0000000000000 + RDPR_cleanwin 10 rd:5 101010 01100 0 0000000000000 + RDPR_otherwin 10 rd:5 101010 01101 0 0000000000000 + RDPR_wstate 10 rd:5 101010 01110 0 0000000000000 + RDPR_gl 10 rd:5 101010 10000 0 0000000000000 + RDPR_strand_status 10 rd:5 101010 11010 0 0000000000000 + RDPR_ver 10 rd:5 101010 11111 0 0000000000000 + ] + RDWIM 10 rd:5 101010 ----- - 
------------- } -RDPR_tnpc 10 rd:5 101010 00001 0 0000000000000 -RDPR_tstate 10 rd:5 101010 00010 0 0000000000000 -RDPR_tt 10 rd:5 101010 00011 0 0000000000000 -RDPR_tick 10 rd:5 101010 00100 0 0000000000000 -RDPR_tba 10 rd:5 101010 00101 0 0000000000000 -RDPR_pstate 10 rd:5 101010 00110 0 0000000000000 -RDPR_tl 10 rd:5 101010 00111 0 0000000000000 -RDPR_pil 10 rd:5 101010 01000 0 0000000000000 -RDPR_cwp 10 rd:5 101010 01001 0 0000000000000 -RDPR_cansave 10 rd:5 101010 01010 0 0000000000000 -RDPR_canrestore 10 rd:5 101010 01011 0 0000000000000 -RDPR_cleanwin 10 rd:5 101010 01100 0 0000000000000 -RDPR_otherwin 10 rd:5 101010 01101 0 0000000000000 -RDPR_wstate 10 rd:5 101010 01110 0 0000000000000 -RDPR_gl 10 rd:5 101010 10000 0 0000000000000 -RDPR_strand_status 10 rd:5 101010 11010 0 0000000000000 -RDPR_ver 10 rd:5 101010 11111 0 0000000000000 { WRWIM 10 00000 110010 ..... . ............. @n_r_ri @@ -203,7 +226,7 @@ WRPR_strand_status 10 11010 110010 ..... . ............. @n_r_ri { FLUSHW 10 00000 101011 00000 0 0000000000000 - RDTBR 10 rd:5 101011 00000 0 0000000000000 + RDTBR 10 rd:5 101011 ----- - ------------- } { diff --git a/target/sparc/int32_helper.c b/target/sparc/int32_helper.c index 39db4ffa70a88..fdcaa0a578b2c 100644 --- a/target/sparc/int32_helper.c +++ b/target/sparc/int32_helper.c @@ -65,6 +65,7 @@ static const char *excp_name_str(int32_t exception_index) return excp_names[exception_index]; } +#if !defined(CONFIG_USER_ONLY) void cpu_check_irqs(CPUSPARCState *env) { CPUState *cs; @@ -96,6 +97,7 @@ void cpu_check_irqs(CPUSPARCState *env) cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } +#endif void sparc_cpu_do_interrupt(CPUState *cs) { diff --git a/target/sparc/int64_helper.c b/target/sparc/int64_helper.c index bd14c7a0db9ae..96ef81c26cd12 100644 --- a/target/sparc/int64_helper.c +++ b/target/sparc/int64_helper.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "cpu.h" +#include "exec/cpu-common.h" #include 
"exec/helper-proto.h" #include "exec/log.h" #include "trace.h" @@ -62,6 +63,7 @@ static const char * const excp_names[0x80] = { }; #endif +#if !defined(CONFIG_USER_ONLY) void cpu_check_irqs(CPUSPARCState *env) { CPUState *cs; @@ -89,7 +91,7 @@ void cpu_check_irqs(CPUSPARCState *env) * the next bit is (2 << psrpil). */ if (pil < (2 << env->psrpil)) { - if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) { trace_sparc64_cpu_check_irqs_reset_irq(env->interrupt_index); env->interrupt_index = 0; cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); @@ -120,13 +122,14 @@ void cpu_check_irqs(CPUSPARCState *env) break; } } - } else if (cs->interrupt_request & CPU_INTERRUPT_HARD) { + } else if (cpu_test_interrupt(cs, CPU_INTERRUPT_HARD)) { trace_sparc64_cpu_check_irqs_disabled(pil, env->pil_in, env->softint, env->interrupt_index); env->interrupt_index = 0; cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); } } +#endif void sparc_cpu_do_interrupt(CPUState *cs) { diff --git a/target/sparc/translate.c b/target/sparc/translate.c index b922e53bf1200..d6b599b71fee8 100644 --- a/target/sparc/translate.c +++ b/target/sparc/translate.c @@ -363,15 +363,15 @@ static bool use_goto_tb(DisasContext *s, target_ulong pc, target_ulong npc) translator_use_goto_tb(&s->base, npc); } -static void gen_goto_tb(DisasContext *s, int tb_num, +static void gen_goto_tb(DisasContext *s, unsigned tb_slot_idx, target_ulong pc, target_ulong npc) { if (use_goto_tb(s, pc, npc)) { /* jump to same page: we can use a direct jump */ - tcg_gen_goto_tb(tb_num); + tcg_gen_goto_tb(tb_slot_idx); tcg_gen_movi_tl(cpu_pc, pc); tcg_gen_movi_tl(cpu_npc, npc); - tcg_gen_exit_tb(s->base.tb, tb_num); + tcg_gen_exit_tb(s->base.tb, tb_slot_idx); } else { /* jump to another page: we can use an indirect jump */ tcg_gen_movi_tl(cpu_pc, pc); @@ -2487,7 +2487,7 @@ static int extract_qfpreg(DisasContext *dc, int x) #define TRANS(NAME, AVAIL, FUNC, ...) 
\ static bool trans_##NAME(DisasContext *dc, arg_##NAME *a) \ - { return avail_##AVAIL(dc) && FUNC(dc, __VA_ARGS__); } + { return avail_##AVAIL(dc) && FUNC(dc, ## __VA_ARGS__); } #define avail_ALL(C) true #ifdef TARGET_SPARC64 @@ -2526,6 +2526,32 @@ static int extract_qfpreg(DisasContext *dc, int x) # define avail_VIS4(C) false #endif +/* + * We decoded bit 13 as imm, and bits [12:0] as rs2_or_imm. + * For v9, if !imm, then the unused bits [12:5] must be zero. + * For v7 and v8, the unused bits are ignored; clear them here. + */ +static bool check_rs2(DisasContext *dc, int *rs2) +{ + if (unlikely(*rs2 & ~0x1f)) { + if (avail_64(dc)) { + return false; + } + *rs2 &= 0x1f; + } + return true; +} + +static bool check_r_r_ri(DisasContext *dc, arg_r_r_ri *a) +{ + return a->imm || check_rs2(dc, &a->rs2_or_imm); +} + +static bool check_r_r_ri_cc(DisasContext *dc, arg_r_r_ri_cc *a) +{ + return a->imm || check_rs2(dc, &a->rs2_or_imm); +} + /* Default case for non jump instructions. */ static bool advance_pc(DisasContext *dc) { @@ -2823,12 +2849,15 @@ static bool trans_Tcc_i_v9(DisasContext *dc, arg_Tcc_i_v9 *a) return do_tcc(dc, a->cond, a->cc, a->rs1, true, a->i); } -static bool trans_STBAR(DisasContext *dc, arg_STBAR *a) +static bool do_stbar(DisasContext *dc) { tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC); return advance_pc(dc); } +TRANS(STBAR_v8, 32, do_stbar) +TRANS(STBAR_v9, 64, do_stbar) + static bool trans_MEMBAR(DisasContext *dc, arg_MEMBAR *a) { if (avail_32(dc)) { @@ -2860,18 +2889,8 @@ static TCGv do_rdy(DisasContext *dc, TCGv dst) return cpu_y; } -static bool trans_RDY(DisasContext *dc, arg_RDY *a) -{ - /* - * TODO: Need a feature bit for sparcv8. In the meantime, treat all - * 32-bit cpus like sparcv7, which ignores the rs1 field. - * This matches after all other ASR, so Leon3 Asr17 is handled first. 
- */ - if (avail_64(dc) && a->rs1 != 0) { - return false; - } - return do_rd_special(dc, true, a->rd, do_rdy); -} +TRANS(RDY_v7, 32, do_rd_special, true, a->rd, do_rdy) +TRANS(RDY_v9, 64, do_rd_special, true, a->rd, do_rdy) static TCGv do_rd_leon3_config(DisasContext *dc, TCGv dst) { @@ -3256,8 +3275,7 @@ static bool do_wr_special(DisasContext *dc, arg_r_r_ri *a, bool priv, { TCGv src; - /* For simplicity, we under-decoded the rs2 form. */ - if (!a->imm && (a->rs2_or_imm & ~0x1f)) { + if (!check_r_r_ri(dc, a)) { return false; } if (!priv) { @@ -3700,8 +3718,7 @@ static bool do_arith_int(DisasContext *dc, arg_r_r_ri_cc *a, { TCGv dst, src1; - /* For simplicity, we under-decoded the rs2 form. */ - if (!a->imm && a->rs2_or_imm & ~0x1f) { + if (!check_r_r_ri_cc(dc, a)) { return false; } @@ -3785,11 +3802,11 @@ static bool trans_OR(DisasContext *dc, arg_r_r_ri_cc *a) { /* OR with %g0 is the canonical alias for MOV. */ if (!a->cc && a->rs1 == 0) { + if (!check_r_r_ri_cc(dc, a)) { + return false; + } if (a->imm || a->rs2_or_imm == 0) { gen_store_gpr(dc, a->rd, tcg_constant_tl(a->rs2_or_imm)); - } else if (a->rs2_or_imm & ~0x1f) { - /* For simplicity, we under-decoded the rs2 form. */ - return false; } else { gen_store_gpr(dc, a->rd, cpu_regs[a->rs2_or_imm]); } @@ -3806,8 +3823,7 @@ static bool trans_UDIV(DisasContext *dc, arg_r_r_ri *a) if (!avail_DIV(dc)) { return false; } - /* For simplicity, we under-decoded the rs2 form. */ - if (!a->imm && a->rs2_or_imm & ~0x1f) { + if (!check_r_r_ri(dc, a)) { return false; } @@ -3858,8 +3874,7 @@ static bool trans_UDIVX(DisasContext *dc, arg_r_r_ri *a) if (!avail_64(dc)) { return false; } - /* For simplicity, we under-decoded the rs2 form. */ - if (!a->imm && a->rs2_or_imm & ~0x1f) { + if (!check_r_r_ri(dc, a)) { return false; } @@ -3896,8 +3911,7 @@ static bool trans_SDIVX(DisasContext *dc, arg_r_r_ri *a) if (!avail_64(dc)) { return false; } - /* For simplicity, we under-decoded the rs2 form. 
*/ - if (!a->imm && a->rs2_or_imm & ~0x1f) { + if (!check_r_r_ri(dc, a)) { return false; } @@ -4193,8 +4207,7 @@ TRANS(SRA_i, ALL, do_shift_i, a, false, false) static TCGv gen_rs2_or_imm(DisasContext *dc, bool imm, int rs2_or_imm) { - /* For simplicity, we under-decoded the rs2 form. */ - if (!imm && rs2_or_imm & ~0x1f) { + if (!imm && !check_rs2(dc, &rs2_or_imm)) { return NULL; } if (imm || rs2_or_imm == 0) { @@ -4257,8 +4270,7 @@ static bool do_add_special(DisasContext *dc, arg_r_r_ri *a, { TCGv src1, sum; - /* For simplicity, we under-decoded the rs2 form. */ - if (!a->imm && a->rs2_or_imm & ~0x1f) { + if (!check_r_r_ri(dc, a)) { return false; } @@ -4376,8 +4388,7 @@ static TCGv gen_ldst_addr(DisasContext *dc, int rs1, bool imm, int rs2_or_imm) { TCGv addr, tmp = NULL; - /* For simplicity, we under-decoded the rs2 form. */ - if (!imm && rs2_or_imm & ~0x1f) { + if (!imm && !check_rs2(dc, &rs2_or_imm)) { return NULL; } diff --git a/target/tricore/cpu.c b/target/tricore/cpu.c index 4f035b6f76849..04319e107ba2c 100644 --- a/target/tricore/cpu.c +++ b/target/tricore/cpu.c @@ -37,7 +37,7 @@ static const gchar *tricore_gdb_arch_name(CPUState *cs) static void tricore_cpu_set_pc(CPUState *cs, vaddr value) { - cpu_env(cs)->PC = value & ~(target_ulong)1; + cpu_env(cs)->PC = value & ~1; } static vaddr tricore_cpu_get_pc(CPUState *cs) diff --git a/target/tricore/helper.c b/target/tricore/helper.c index e4c53d453dddd..7574111c87df8 100644 --- a/target/tricore/helper.c +++ b/target/tricore/helper.c @@ -35,7 +35,7 @@ enum { }; static int get_physical_address(CPUTriCoreState *env, hwaddr *physical, - int *prot, target_ulong address, + int *prot, vaddr address, MMUAccessType access_type, int mmu_idx) { int ret = TLBRET_MATCH; @@ -61,7 +61,7 @@ hwaddr tricore_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) } /* TODO: Add exception support */ -static void raise_mmu_exception(CPUTriCoreState *env, target_ulong address, +static void raise_mmu_exception(CPUTriCoreState *env, vaddr 
address, int rw, int tlb_error) { } diff --git a/target/tricore/op_helper.c b/target/tricore/op_helper.c index 9910c13f4b50b..610f148a237d3 100644 --- a/target/tricore/op_helper.c +++ b/target/tricore/op_helper.c @@ -149,15 +149,15 @@ static uint32_t ssov32(CPUTriCoreState *env, int64_t arg) if (arg > max_pos) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); - ret = (target_ulong)max_pos; + ret = (uint32_t)max_pos; } else { if (arg < max_neg) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); - ret = (target_ulong)max_neg; + ret = (uint32_t)max_neg; } else { env->PSW_USB_V = 0; - ret = (target_ulong)arg; + ret = (uint32_t)arg; } } env->PSW_USB_AV = arg ^ arg * 2u; @@ -172,10 +172,10 @@ static uint32_t suov32_pos(CPUTriCoreState *env, uint64_t arg) if (arg > max_pos) { env->PSW_USB_V = (1 << 31); env->PSW_USB_SV = (1 << 31); - ret = (target_ulong)max_pos; + ret = (uint32_t)max_pos; } else { env->PSW_USB_V = 0; - ret = (target_ulong)arg; + ret = (uint32_t)arg; } env->PSW_USB_AV = arg ^ arg * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; @@ -192,7 +192,7 @@ static uint32_t suov32_neg(CPUTriCoreState *env, int64_t arg) ret = 0; } else { env->PSW_USB_V = 0; - ret = (target_ulong)arg; + ret = (uint32_t)arg; } env->PSW_USB_AV = arg ^ arg * 2u; env->PSW_USB_SAV |= env->PSW_USB_AV; @@ -260,8 +260,7 @@ static uint32_t suov16(CPUTriCoreState *env, int32_t hw0, int32_t hw1) return (hw0 & 0xffff) | (hw1 << 16); } -target_ulong helper_add_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_add_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); @@ -294,8 +293,7 @@ uint64_t helper_add64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) return result; } -target_ulong helper_add_h_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_add_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t ret_hw0, ret_hw1; @@ -397,8 +395,7 @@ 
uint32_t helper_addsur_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, } -target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_add_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); @@ -406,8 +403,7 @@ target_ulong helper_add_suov(CPUTriCoreState *env, target_ulong r1, return suov32_pos(env, result); } -target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_add_h_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t ret_hw0, ret_hw1; @@ -416,8 +412,7 @@ target_ulong helper_add_h_suov(CPUTriCoreState *env, target_ulong r1, return suov16(env, ret_hw0, ret_hw1); } -target_ulong helper_sub_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_sub_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); @@ -450,8 +445,7 @@ uint64_t helper_sub64_ssov(CPUTriCoreState *env, uint64_t r1, uint64_t r2) return result; } -target_ulong helper_sub_h_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_sub_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t ret_hw0, ret_hw1; @@ -552,8 +546,7 @@ uint32_t helper_subadr_h_ssov(CPUTriCoreState *env, uint64_t r1, uint32_t r2_l, return (result1 & 0xffff0000ULL) | ((result0 >> 16) & 0xffffULL); } -target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_sub_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); @@ -561,8 +554,7 @@ target_ulong helper_sub_suov(CPUTriCoreState *env, target_ulong r1, return suov32_neg(env, result); } -target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_sub_h_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { 
int32_t ret_hw0, ret_hw1; @@ -571,8 +563,7 @@ target_ulong helper_sub_h_suov(CPUTriCoreState *env, target_ulong r1, return suov16(env, ret_hw0, ret_hw1); } -target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_mul_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); @@ -580,8 +571,7 @@ target_ulong helper_mul_ssov(CPUTriCoreState *env, target_ulong r1, return ssov32(env, result); } -target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_mul_suov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = extract64(r1, 0, 32); int64_t t2 = extract64(r2, 0, 32); @@ -590,8 +580,7 @@ target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1, return suov32_pos(env, result); } -target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_sha_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = sextract64(r1, 0, 32); int32_t t2 = sextract64(r2, 0, 6); @@ -606,14 +595,14 @@ target_ulong helper_sha_ssov(CPUTriCoreState *env, target_ulong r1, return ssov32(env, result); } -uint32_t helper_abs_ssov(CPUTriCoreState *env, target_ulong r1) +uint32_t helper_abs_ssov(CPUTriCoreState *env, uint32_t r1) { - target_ulong result; + uint32_t result; result = ((int32_t)r1 >= 0) ? 
r1 : (0 - r1); return ssov32(env, result); } -uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1) +uint32_t helper_abs_h_ssov(CPUTriCoreState *env, uint32_t r1) { int32_t ret_h0, ret_h1; @@ -626,8 +615,7 @@ uint32_t helper_abs_h_ssov(CPUTriCoreState *env, target_ulong r1) return ssov16(env, ret_h0, ret_h1); } -target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_absdif_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); @@ -641,8 +629,7 @@ target_ulong helper_absdif_ssov(CPUTriCoreState *env, target_ulong r1, return ssov32(env, result); } -uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2) +uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t t1, t2; int32_t ret_h0, ret_h1; @@ -666,8 +653,8 @@ uint32_t helper_absdif_h_ssov(CPUTriCoreState *env, target_ulong r1, return ssov16(env, ret_h0, ret_h1); } -target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2, target_ulong r3) +uint32_t helper_madd32_ssov(CPUTriCoreState *env, uint32_t r1, + uint32_t r2, uint32_t r3) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); @@ -678,8 +665,8 @@ target_ulong helper_madd32_ssov(CPUTriCoreState *env, target_ulong r1, return ssov32(env, result); } -target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2, target_ulong r3) +uint32_t helper_madd32_suov(CPUTriCoreState *env, uint32_t r1, + uint32_t r2, uint32_t r3) { uint64_t t1 = extract64(r1, 0, 32); uint64_t t2 = extract64(r2, 0, 32); @@ -690,8 +677,8 @@ target_ulong helper_madd32_suov(CPUTriCoreState *env, target_ulong r1, return suov32_pos(env, result); } -uint64_t helper_madd64_ssov(CPUTriCoreState *env, target_ulong r1, - uint64_t r2, target_ulong r3) +uint64_t helper_madd64_ssov(CPUTriCoreState *env, uint32_t r1, + uint64_t r2, 
uint32_t r3) { uint64_t ret, ovf; int64_t t1 = sextract64(r1, 0, 32); @@ -848,8 +835,8 @@ uint32_t helper_maddr_q_ssov(CPUTriCoreState *env, uint32_t r1, uint32_t r2, return ret & 0xffff0000ll; } -uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1, - uint64_t r2, target_ulong r3) +uint64_t helper_madd64_suov(CPUTriCoreState *env, uint32_t r1, + uint64_t r2, uint32_t r3) { uint64_t ret, mul; uint64_t t1 = extract64(r1, 0, 32); @@ -873,8 +860,8 @@ uint64_t helper_madd64_suov(CPUTriCoreState *env, target_ulong r1, return ret; } -target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2, target_ulong r3) +uint32_t helper_msub32_ssov(CPUTriCoreState *env, uint32_t r1, + uint32_t r2, uint32_t r3) { int64_t t1 = sextract64(r1, 0, 32); int64_t t2 = sextract64(r2, 0, 32); @@ -885,8 +872,8 @@ target_ulong helper_msub32_ssov(CPUTriCoreState *env, target_ulong r1, return ssov32(env, result); } -target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1, - target_ulong r2, target_ulong r3) +uint32_t helper_msub32_suov(CPUTriCoreState *env, uint32_t r1, + uint32_t r2, uint32_t r3) { uint64_t t1 = extract64(r1, 0, 32); uint64_t t2 = extract64(r2, 0, 32); @@ -912,8 +899,8 @@ target_ulong helper_msub32_suov(CPUTriCoreState *env, target_ulong r1, return result; } -uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1, - uint64_t r2, target_ulong r3) +uint64_t helper_msub64_ssov(CPUTriCoreState *env, uint32_t r1, + uint64_t r2, uint32_t r3) { uint64_t ret, ovf; int64_t t1 = sextract64(r1, 0, 32); @@ -944,8 +931,8 @@ uint64_t helper_msub64_ssov(CPUTriCoreState *env, target_ulong r1, return ret; } -uint64_t helper_msub64_suov(CPUTriCoreState *env, target_ulong r1, - uint64_t r2, target_ulong r3) +uint64_t helper_msub64_suov(CPUTriCoreState *env, uint32_t r1, + uint64_t r2, uint32_t r3) { uint64_t ret, mul; uint64_t t1 = extract64(r1, 0, 32); @@ -1097,7 +1084,7 @@ uint32_t helper_msubr_q_ssov(CPUTriCoreState *env, 
uint32_t r1, uint32_t r2, return ret & 0xffff0000ll; } -uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg) +uint32_t helper_abs_b(CPUTriCoreState *env, uint32_t arg) { int32_t b, i; int32_t ovf = 0; @@ -1120,7 +1107,7 @@ uint32_t helper_abs_b(CPUTriCoreState *env, target_ulong arg) return ret; } -uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg) +uint32_t helper_abs_h(CPUTriCoreState *env, uint32_t arg) { int32_t h, i; int32_t ovf = 0; @@ -1143,7 +1130,7 @@ uint32_t helper_abs_h(CPUTriCoreState *env, target_ulong arg) return ret; } -uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_absdif_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t b, i; int32_t extr_r2; @@ -1167,7 +1154,7 @@ uint32_t helper_absdif_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_absdif_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_absdif_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t h, i; int32_t extr_r2; @@ -1296,7 +1283,7 @@ uint32_t helper_maddr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, return ret & 0xffff0000ll; } -uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_add_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t b, i; int32_t extr_r1, extr_r2; @@ -1322,7 +1309,7 @@ uint32_t helper_add_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_add_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_add_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t h, i; int32_t extr_r1, extr_r2; @@ -1451,7 +1438,7 @@ uint32_t helper_msubr_q(CPUTriCoreState *env, uint32_t r1, uint32_t r2, return ret & 0xffff0000ll; } -uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_sub_b(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t b, i; int32_t extr_r1, extr_r2; @@ 
-1477,7 +1464,7 @@ uint32_t helper_sub_b(CPUTriCoreState *env, target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_sub_h(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t h, i; int32_t extr_r1, extr_r2; @@ -1502,7 +1489,7 @@ uint32_t helper_sub_h(CPUTriCoreState *env, target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_eq_b(target_ulong r1, target_ulong r2) +uint32_t helper_eq_b(uint32_t r1, uint32_t r2) { uint32_t ret, msk; int32_t i; @@ -1519,7 +1506,7 @@ uint32_t helper_eq_b(target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_eq_h(target_ulong r1, target_ulong r2) +uint32_t helper_eq_h(uint32_t r1, uint32_t r2) { int32_t ret = 0; @@ -1534,7 +1521,7 @@ uint32_t helper_eq_h(target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_eqany_b(target_ulong r1, target_ulong r2) +uint32_t helper_eqany_b(uint32_t r1, uint32_t r2) { int32_t i; uint32_t ret = 0; @@ -1546,7 +1533,7 @@ uint32_t helper_eqany_b(target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_eqany_h(target_ulong r1, target_ulong r2) +uint32_t helper_eqany_h(uint32_t r1, uint32_t r2) { uint32_t ret; @@ -1556,7 +1543,7 @@ uint32_t helper_eqany_h(target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_lt_b(target_ulong r1, target_ulong r2) +uint32_t helper_lt_b(uint32_t r1, uint32_t r2) { int32_t i; uint32_t ret = 0; @@ -1570,7 +1557,7 @@ uint32_t helper_lt_b(target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_lt_bu(target_ulong r1, target_ulong r2) +uint32_t helper_lt_bu(uint32_t r1, uint32_t r2) { int32_t i; uint32_t ret = 0; @@ -1584,7 +1571,7 @@ uint32_t helper_lt_bu(target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_lt_h(target_ulong r1, target_ulong r2) +uint32_t helper_lt_h(uint32_t r1, uint32_t r2) { uint32_t ret = 0; @@ -1599,7 +1586,7 @@ uint32_t helper_lt_h(target_ulong r1, target_ulong r2) return ret; } -uint32_t 
helper_lt_hu(target_ulong r1, target_ulong r2) +uint32_t helper_lt_hu(uint32_t r1, uint32_t r2) { uint32_t ret = 0; @@ -1615,7 +1602,7 @@ uint32_t helper_lt_hu(target_ulong r1, target_ulong r2) } #define EXTREMA_H_B(name, op) \ -uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \ +uint32_t helper_##name ##_b(uint32_t r1, uint32_t r2) \ { \ int32_t i, extr_r1, extr_r2; \ uint32_t ret = 0; \ @@ -1629,7 +1616,7 @@ uint32_t helper_##name ##_b(target_ulong r1, target_ulong r2) \ return ret; \ } \ \ -uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\ +uint32_t helper_##name ##_bu(uint32_t r1, uint32_t r2) \ { \ int32_t i; \ uint32_t extr_r1, extr_r2; \ @@ -1644,7 +1631,7 @@ uint32_t helper_##name ##_bu(target_ulong r1, target_ulong r2)\ return ret; \ } \ \ -uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \ +uint32_t helper_##name ##_h(uint32_t r1, uint32_t r2) \ { \ int32_t extr_r1, extr_r2; \ uint32_t ret = 0; \ @@ -1662,7 +1649,7 @@ uint32_t helper_##name ##_h(target_ulong r1, target_ulong r2) \ return ret; \ } \ \ -uint32_t helper_##name ##_hu(target_ulong r1, target_ulong r2)\ +uint32_t helper_##name ##_hu(uint32_t r1, uint32_t r2) \ { \ uint32_t extr_r1, extr_r2; \ uint32_t ret = 0; \ @@ -1729,7 +1716,7 @@ EXTREMA_H_B(min, <) #undef EXTREMA_H_B -uint32_t helper_clo_h(target_ulong r1) +uint32_t helper_clo_h(uint32_t r1) { uint32_t ret_hw0 = extract32(r1, 0, 16); uint32_t ret_hw1 = extract32(r1, 16, 16); @@ -1747,7 +1734,7 @@ uint32_t helper_clo_h(target_ulong r1) return ret_hw0 | (ret_hw1 << 16); } -uint32_t helper_clz_h(target_ulong r1) +uint32_t helper_clz_h(uint32_t r1) { uint32_t ret_hw0 = extract32(r1, 0, 16); uint32_t ret_hw1 = extract32(r1, 16, 16); @@ -1765,7 +1752,7 @@ uint32_t helper_clz_h(target_ulong r1) return ret_hw0 | (ret_hw1 << 16); } -uint32_t helper_cls_h(target_ulong r1) +uint32_t helper_cls_h(uint32_t r1) { uint32_t ret_hw0 = extract32(r1, 0, 16); uint32_t ret_hw1 = extract32(r1, 16, 16); @@ -1783,7 +1770,7 
@@ uint32_t helper_cls_h(target_ulong r1) return ret_hw0 | (ret_hw1 << 16); } -uint32_t helper_sh(target_ulong r1, target_ulong r2) +uint32_t helper_sh(uint32_t r1, uint32_t r2) { int32_t shift_count = sextract32(r2, 0, 6); @@ -1796,7 +1783,7 @@ uint32_t helper_sh(target_ulong r1, target_ulong r2) } } -uint32_t helper_sh_h(target_ulong r1, target_ulong r2) +uint32_t helper_sh_h(uint32_t r1, uint32_t r2) { int32_t ret_hw0, ret_hw1; int32_t shift_count; @@ -1816,7 +1803,7 @@ uint32_t helper_sh_h(target_ulong r1, target_ulong r2) } } -uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2) +uint32_t helper_sha(CPUTriCoreState *env, uint32_t r1, uint32_t r2) { int32_t shift_count; int64_t result, t1; @@ -1854,7 +1841,7 @@ uint32_t helper_sha(CPUTriCoreState *env, target_ulong r1, target_ulong r2) return ret; } -uint32_t helper_sha_h(target_ulong r1, target_ulong r2) +uint32_t helper_sha_h(uint32_t r1, uint32_t r2) { int32_t shift_count; int32_t ret_hw0, ret_hw1; @@ -1874,7 +1861,7 @@ uint32_t helper_sha_h(target_ulong r1, target_ulong r2) } } -uint32_t helper_bmerge(target_ulong r1, target_ulong r2) +uint32_t helper_bmerge(uint32_t r1, uint32_t r2) { uint32_t i, ret; @@ -1905,7 +1892,7 @@ uint64_t helper_bsplit(uint32_t r1) return ret; } -uint32_t helper_parity(target_ulong r1) +uint32_t helper_parity(uint32_t r1) { uint32_t ret; uint32_t nOnes, i; @@ -1942,7 +1929,7 @@ uint32_t helper_parity(target_ulong r1) } uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high, - target_ulong r2) + uint32_t r2) { uint32_t ret; int32_t fp_exp, fp_frac, temp_exp, fp_exp_frac; @@ -1983,7 +1970,7 @@ uint32_t helper_pack(uint32_t carry, uint32_t r1_low, uint32_t r1_high, return ret; } -uint64_t helper_unpack(target_ulong arg1) +uint64_t helper_unpack(uint32_t arg1) { int32_t fp_exp = extract32(arg1, 23, 8); int32_t fp_frac = extract32(arg1, 0, 23); @@ -2408,7 +2395,7 @@ uint32_t helper_shuffle(uint32_t arg0, uint32_t arg1) /* context save area (CSA) 
related helpers */ -static int cdc_increment(target_ulong *psw) +static int cdc_increment(uint32_t *psw) { if ((*psw & MASK_PSW_CDC) == 0x7f) { return 0; @@ -2426,7 +2413,7 @@ static int cdc_increment(target_ulong *psw) return 0; } -static int cdc_decrement(target_ulong *psw) +static int cdc_decrement(uint32_t *psw) { if ((*psw & MASK_PSW_CDC) == 0x7f) { return 0; @@ -2442,7 +2429,7 @@ static int cdc_decrement(target_ulong *psw) return 0; } -static bool cdc_zero(target_ulong *psw) +static bool cdc_zero(uint32_t *psw) { int cdc = *psw & MASK_PSW_CDC; /* Returns TRUE if PSW.CDC.COUNT == 0 or if PSW.CDC == @@ -2457,7 +2444,7 @@ static bool cdc_zero(target_ulong *psw) return count == 0; } -static void save_context_upper(CPUTriCoreState *env, target_ulong ea) +static void save_context_upper(CPUTriCoreState *env, uint32_t ea) { cpu_stl_data(env, ea, env->PCXI); cpu_stl_data(env, ea+4, psw_read(env)); @@ -2477,7 +2464,7 @@ static void save_context_upper(CPUTriCoreState *env, target_ulong ea) cpu_stl_data(env, ea+60, env->gpr_d[15]); } -static void save_context_lower(CPUTriCoreState *env, target_ulong ea) +static void save_context_lower(CPUTriCoreState *env, uint32_t ea) { cpu_stl_data(env, ea, env->PCXI); cpu_stl_data(env, ea+4, env->gpr_a[11]); @@ -2497,8 +2484,8 @@ static void save_context_lower(CPUTriCoreState *env, target_ulong ea) cpu_stl_data(env, ea+60, env->gpr_d[7]); } -static void restore_context_upper(CPUTriCoreState *env, target_ulong ea, - target_ulong *new_PCXI, target_ulong *new_PSW) +static void restore_context_upper(CPUTriCoreState *env, uint32_t ea, + uint32_t *new_PCXI, uint32_t *new_PSW) { *new_PCXI = cpu_ldl_data(env, ea); *new_PSW = cpu_ldl_data(env, ea+4); @@ -2518,8 +2505,8 @@ static void restore_context_upper(CPUTriCoreState *env, target_ulong ea, env->gpr_d[15] = cpu_ldl_data(env, ea+60); } -static void restore_context_lower(CPUTriCoreState *env, target_ulong ea, - target_ulong *ra, target_ulong *pcxi) +static void 
restore_context_lower(CPUTriCoreState *env, uint32_t ea, + uint32_t *ra, uint32_t *pcxi) { *pcxi = cpu_ldl_data(env, ea); *ra = cpu_ldl_data(env, ea+4); @@ -2541,10 +2528,10 @@ static void restore_context_lower(CPUTriCoreState *env, target_ulong ea, void helper_call(CPUTriCoreState *env, uint32_t next_pc) { - target_ulong tmp_FCX; - target_ulong ea; - target_ulong new_FCX; - target_ulong psw; + uint32_t tmp_FCX; + uint32_t ea; + uint32_t new_FCX; + uint32_t psw; psw = psw_read(env); /* if (FCX == 0) trap(FCU); */ @@ -2604,9 +2591,9 @@ void helper_call(CPUTriCoreState *env, uint32_t next_pc) void helper_ret(CPUTriCoreState *env) { - target_ulong ea; - target_ulong new_PCXI; - target_ulong new_PSW, psw; + uint32_t ea; + uint32_t new_PCXI; + uint32_t new_PSW, psw; psw = psw_read(env); /* if (PSW.CDE) then if (cdc_decrement()) then trap(CDU);*/ @@ -2657,9 +2644,9 @@ void helper_ret(CPUTriCoreState *env) void helper_bisr(CPUTriCoreState *env, uint32_t const9) { - target_ulong tmp_FCX; - target_ulong ea; - target_ulong new_FCX; + uint32_t tmp_FCX; + uint32_t ea; + uint32_t new_FCX; if (env->FCX == 0) { /* FCU trap */ @@ -2701,9 +2688,9 @@ void helper_bisr(CPUTriCoreState *env, uint32_t const9) void helper_rfe(CPUTriCoreState *env) { - target_ulong ea; - target_ulong new_PCXI; - target_ulong new_PSW; + uint32_t ea; + uint32_t new_PCXI; + uint32_t new_PSW; /* if (PCXI[19: 0] == 0) then trap(CSU); */ if ((env->PCXI & 0xfffff) == 0) { /* raise csu trap */ @@ -2762,35 +2749,35 @@ void helper_rfm(CPUTriCoreState *env) } } -void helper_ldlcx(CPUTriCoreState *env, target_ulong ea) +void helper_ldlcx(CPUTriCoreState *env, uint32_t ea) { uint32_t dummy; /* insn doesn't load PCXI and RA */ restore_context_lower(env, ea, &dummy, &dummy); } -void helper_lducx(CPUTriCoreState *env, target_ulong ea) +void helper_lducx(CPUTriCoreState *env, uint32_t ea) { uint32_t dummy; /* insn doesn't load PCXI and PSW */ restore_context_upper(env, ea, &dummy, &dummy); } -void 
helper_stlcx(CPUTriCoreState *env, target_ulong ea) +void helper_stlcx(CPUTriCoreState *env, uint32_t ea) { save_context_lower(env, ea); } -void helper_stucx(CPUTriCoreState *env, target_ulong ea) +void helper_stucx(CPUTriCoreState *env, uint32_t ea) { save_context_upper(env, ea); } void helper_svlcx(CPUTriCoreState *env) { - target_ulong tmp_FCX; - target_ulong ea; - target_ulong new_FCX; + uint32_t tmp_FCX; + uint32_t ea; + uint32_t new_FCX; if (env->FCX == 0) { /* FCU trap */ @@ -2831,9 +2818,9 @@ void helper_svlcx(CPUTriCoreState *env) void helper_svucx(CPUTriCoreState *env) { - target_ulong tmp_FCX; - target_ulong ea; - target_ulong new_FCX; + uint32_t tmp_FCX; + uint32_t ea; + uint32_t new_FCX; if (env->FCX == 0) { /* FCU trap */ @@ -2874,8 +2861,8 @@ void helper_svucx(CPUTriCoreState *env) void helper_rslcx(CPUTriCoreState *env) { - target_ulong ea; - target_ulong new_PCXI; + uint32_t ea; + uint32_t new_PCXI; /* if (PCXI[19: 0] == 0) then trap(CSU); */ if ((env->PCXI & 0xfffff) == 0) { /* CSU trap */ diff --git a/target/tricore/translate.c b/target/tricore/translate.c index 3d0e7a10bd8ac..fbe05a93a8a26 100644 --- a/target/tricore/translate.c +++ b/target/tricore/translate.c @@ -44,19 +44,19 @@ /* * TCG registers */ -static TCGv cpu_PC; -static TCGv cpu_PCXI; -static TCGv cpu_PSW; -static TCGv cpu_ICR; +static TCGv_i32 cpu_PC; +static TCGv_i32 cpu_PCXI; +static TCGv_i32 cpu_PSW; +static TCGv_i32 cpu_ICR; /* GPR registers */ -static TCGv cpu_gpr_a[16]; -static TCGv cpu_gpr_d[16]; +static TCGv_i32 cpu_gpr_a[16]; +static TCGv_i32 cpu_gpr_d[16]; /* PSW Flag cache */ -static TCGv cpu_PSW_C; -static TCGv cpu_PSW_V; -static TCGv cpu_PSW_SV; -static TCGv cpu_PSW_AV; -static TCGv cpu_PSW_SAV; +static TCGv_i32 cpu_PSW_C; +static TCGv_i32 cpu_PSW_V; +static TCGv_i32 cpu_PSW_SV; +static TCGv_i32 cpu_PSW_AV; +static TCGv_i32 cpu_PSW_SAV; static const char *regnames_a[] = { "a0" , "a1" , "a2" , "a3" , "a4" , "a5" , @@ -72,7 +72,8 @@ static const char *regnames_d[] = { 
typedef struct DisasContext { DisasContextBase base; - target_ulong pc_succ_insn; + + vaddr pc_succ_insn; uint32_t opcode; /* Routine used to access memory */ int mem_idx; @@ -135,46 +136,46 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) } while (0) #define GEN_HELPER_LL(name, ret, arg0, arg1, n) do { \ - TCGv arg00 = tcg_temp_new(); \ - TCGv arg01 = tcg_temp_new(); \ - TCGv arg11 = tcg_temp_new(); \ - tcg_gen_sari_tl(arg00, arg0, 16); \ - tcg_gen_ext16s_tl(arg01, arg0); \ - tcg_gen_ext16s_tl(arg11, arg1); \ + TCGv_i32 arg00 = tcg_temp_new_i32(); \ + TCGv_i32 arg01 = tcg_temp_new_i32(); \ + TCGv_i32 arg11 = tcg_temp_new_i32(); \ + tcg_gen_sari_i32(arg00, arg0, 16); \ + tcg_gen_ext16s_i32(arg01, arg0); \ + tcg_gen_ext16s_i32(arg11, arg1); \ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \ } while (0) #define GEN_HELPER_LU(name, ret, arg0, arg1, n) do { \ - TCGv arg00 = tcg_temp_new(); \ - TCGv arg01 = tcg_temp_new(); \ - TCGv arg10 = tcg_temp_new(); \ - TCGv arg11 = tcg_temp_new(); \ - tcg_gen_sari_tl(arg00, arg0, 16); \ - tcg_gen_ext16s_tl(arg01, arg0); \ - tcg_gen_sari_tl(arg11, arg1, 16); \ - tcg_gen_ext16s_tl(arg10, arg1); \ + TCGv_i32 arg00 = tcg_temp_new_i32(); \ + TCGv_i32 arg01 = tcg_temp_new_i32(); \ + TCGv_i32 arg10 = tcg_temp_new_i32(); \ + TCGv_i32 arg11 = tcg_temp_new_i32(); \ + tcg_gen_sari_i32(arg00, arg0, 16); \ + tcg_gen_ext16s_i32(arg01, arg0); \ + tcg_gen_sari_i32(arg11, arg1, 16); \ + tcg_gen_ext16s_i32(arg10, arg1); \ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \ } while (0) #define GEN_HELPER_UL(name, ret, arg0, arg1, n) do { \ - TCGv arg00 = tcg_temp_new(); \ - TCGv arg01 = tcg_temp_new(); \ - TCGv arg10 = tcg_temp_new(); \ - TCGv arg11 = tcg_temp_new(); \ - tcg_gen_sari_tl(arg00, arg0, 16); \ - tcg_gen_ext16s_tl(arg01, arg0); \ - tcg_gen_sari_tl(arg10, arg1, 16); \ - tcg_gen_ext16s_tl(arg11, arg1); \ + TCGv_i32 arg00 = tcg_temp_new_i32(); \ + TCGv_i32 arg01 = tcg_temp_new_i32(); \ + TCGv_i32 arg10 = 
tcg_temp_new_i32(); \ + TCGv_i32 arg11 = tcg_temp_new_i32(); \ + tcg_gen_sari_i32(arg00, arg0, 16); \ + tcg_gen_ext16s_i32(arg01, arg0); \ + tcg_gen_sari_i32(arg10, arg1, 16); \ + tcg_gen_ext16s_i32(arg11, arg1); \ gen_helper_##name(ret, arg00, arg01, arg10, arg11, n); \ } while (0) #define GEN_HELPER_UU(name, ret, arg0, arg1, n) do { \ - TCGv arg00 = tcg_temp_new(); \ - TCGv arg01 = tcg_temp_new(); \ - TCGv arg11 = tcg_temp_new(); \ - tcg_gen_sari_tl(arg01, arg0, 16); \ - tcg_gen_ext16s_tl(arg00, arg0); \ - tcg_gen_sari_tl(arg11, arg1, 16); \ + TCGv_i32 arg00 = tcg_temp_new_i32(); \ + TCGv_i32 arg01 = tcg_temp_new_i32(); \ + TCGv_i32 arg11 = tcg_temp_new_i32(); \ + tcg_gen_sari_i32(arg01, arg0, 16); \ + tcg_gen_ext16s_i32(arg00, arg0); \ + tcg_gen_sari_i32(arg11, arg1, 16); \ gen_helper_##name(ret, arg00, arg01, arg11, arg11, n); \ } while (0) @@ -200,7 +201,7 @@ void tricore_cpu_dump_state(CPUState *cs, FILE *f, int flags) /* For two 32-bit registers used a 64-bit register, the first registernumber needs to be even. Otherwise we trap. 
*/ -static inline void generate_trap(DisasContext *ctx, int class, int tin); +static void generate_trap(DisasContext *ctx, int class, int tin); #define CHECK_REG_PAIR(reg) do { \ if (reg & 0x1) { \ generate_trap(ctx, TRAPC_INSN_ERR, TIN2_OPD); \ @@ -209,23 +210,24 @@ static inline void generate_trap(DisasContext *ctx, int class, int tin); /* Functions for load/save to/from memory */ -static inline void gen_offset_ld(DisasContext *ctx, TCGv r1, TCGv r2, - int16_t con, MemOp mop) +static void gen_offset_ld(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2, + int16_t con, MemOp mop) { - TCGv temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, r2, con); - tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop); + TCGv_i32 temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, r2, con); + tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop); } -static inline void gen_offset_st(DisasContext *ctx, TCGv r1, TCGv r2, - int16_t con, MemOp mop) +static void gen_offset_st(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2, + int16_t con, MemOp mop) { - TCGv temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, r2, con); - tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); + TCGv_i32 temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, r2, con); + tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop); } -static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) +static void gen_st_2regs_64(DisasContext *ctx, TCGv_i32 rh, TCGv_i32 rl, + TCGv_i32 address) { TCGv_i64 temp = tcg_temp_new_i64(); @@ -233,15 +235,17 @@ static void gen_st_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) tcg_gen_qemu_st_i64(temp, address, ctx->mem_idx, MO_LEUQ); } -static void gen_offset_st_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, - DisasContext *ctx) +static void gen_offset_st_2regs(DisasContext *ctx, + TCGv_i32 rh, TCGv_i32 rl, + TCGv_i32 base, int16_t con) { - TCGv temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, base, con); - gen_st_2regs_64(rh, rl, temp, ctx); + TCGv_i32 temp = tcg_temp_new_i32(); + 
tcg_gen_addi_i32(temp, base, con); + gen_st_2regs_64(ctx, rh, rl, temp); } -static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) +static void gen_ld_2regs_64(DisasContext *ctx, TCGv_i32 rh, TCGv_i32 rl, + TCGv_i32 address) { TCGv_i64 temp = tcg_temp_new_i64(); @@ -250,87 +254,88 @@ static void gen_ld_2regs_64(TCGv rh, TCGv rl, TCGv address, DisasContext *ctx) tcg_gen_extr_i64_i32(rl, rh, temp); } -static void gen_offset_ld_2regs(TCGv rh, TCGv rl, TCGv base, int16_t con, - DisasContext *ctx) +static void gen_offset_ld_2regs(DisasContext *ctx, + TCGv_i32 rh, TCGv_i32 rl, + TCGv_i32 base, int16_t con) { - TCGv temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, base, con); - gen_ld_2regs_64(rh, rl, temp, ctx); + TCGv_i32 temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, base, con); + gen_ld_2regs_64(ctx, rh, rl, temp); } -static void gen_st_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, - MemOp mop) +static void gen_st_preincr(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2, + int16_t off, MemOp mop) { - TCGv temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, r2, off); - tcg_gen_qemu_st_tl(r1, temp, ctx->mem_idx, mop); - tcg_gen_mov_tl(r2, temp); + TCGv_i32 temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, r2, off); + tcg_gen_qemu_st_i32(r1, temp, ctx->mem_idx, mop); + tcg_gen_mov_i32(r2, temp); } -static void gen_ld_preincr(DisasContext *ctx, TCGv r1, TCGv r2, int16_t off, - MemOp mop) +static void gen_ld_preincr(DisasContext *ctx, TCGv_i32 r1, TCGv_i32 r2, + int16_t off, MemOp mop) { - TCGv temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, r2, off); - tcg_gen_qemu_ld_tl(r1, temp, ctx->mem_idx, mop); - tcg_gen_mov_tl(r2, temp); + TCGv_i32 temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, r2, off); + tcg_gen_qemu_ld_i32(r1, temp, ctx->mem_idx, mop); + tcg_gen_mov_i32(r2, temp); } /* M(EA, word) = (M(EA, word) & ~E[a][63:32]) | (E[a][31:0] & E[a][63:32]); */ -static void gen_ldmst(DisasContext *ctx, int ereg, TCGv ea) +static void 
gen_ldmst(DisasContext *ctx, int ereg, TCGv_i32 ea) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); CHECK_REG_PAIR(ereg); /* temp = (M(EA, word) */ - tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL); /* temp = temp & ~E[a][63:32]) */ - tcg_gen_andc_tl(temp, temp, cpu_gpr_d[ereg+1]); + tcg_gen_andc_i32(temp, temp, cpu_gpr_d[ereg + 1]); /* temp2 = (E[a][31:0] & E[a][63:32]); */ - tcg_gen_and_tl(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg+1]); + tcg_gen_and_i32(temp2, cpu_gpr_d[ereg], cpu_gpr_d[ereg + 1]); /* temp = temp | temp2; */ - tcg_gen_or_tl(temp, temp, temp2); + tcg_gen_or_i32(temp, temp, temp2); /* M(EA, word) = temp; */ - tcg_gen_qemu_st_tl(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_st_i32(temp, ea, ctx->mem_idx, MO_LEUL); } /* tmp = M(EA, word); M(EA, word) = D[a]; D[a] = tmp[31:0];*/ -static void gen_swap(DisasContext *ctx, int reg, TCGv ea) +static void gen_swap(DisasContext *ctx, int reg, TCGv_i32 ea) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); - tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); - tcg_gen_qemu_st_tl(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); - tcg_gen_mov_tl(cpu_gpr_d[reg], temp); + tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_st_i32(cpu_gpr_d[reg], ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_i32(cpu_gpr_d[reg], temp); } -static void gen_cmpswap(DisasContext *ctx, int reg, TCGv ea) +static void gen_cmpswap(DisasContext *ctx, int reg, TCGv_i32 ea) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); CHECK_REG_PAIR(reg); - tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); - tcg_gen_movcond_tl(TCG_COND_EQ, temp2, cpu_gpr_d[reg+1], temp, + tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_movcond_i32(TCG_COND_EQ, temp2, cpu_gpr_d[reg + 1], 
temp, cpu_gpr_d[reg], temp); - tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); - tcg_gen_mov_tl(cpu_gpr_d[reg], temp); + tcg_gen_qemu_st_i32(temp2, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_i32(cpu_gpr_d[reg], temp); } -static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) +static void gen_swapmsk(DisasContext *ctx, int reg, TCGv_i32 ea) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); CHECK_REG_PAIR(reg); - tcg_gen_qemu_ld_tl(temp, ea, ctx->mem_idx, MO_LEUL); - tcg_gen_and_tl(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg+1]); - tcg_gen_andc_tl(temp3, temp, cpu_gpr_d[reg+1]); - tcg_gen_or_tl(temp2, temp2, temp3); - tcg_gen_qemu_st_tl(temp2, ea, ctx->mem_idx, MO_LEUL); - tcg_gen_mov_tl(cpu_gpr_d[reg], temp); + tcg_gen_qemu_ld_i32(temp, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_and_i32(temp2, cpu_gpr_d[reg], cpu_gpr_d[reg + 1]); + tcg_gen_andc_i32(temp3, temp, cpu_gpr_d[reg + 1]); + tcg_gen_or_i32(temp2, temp2, temp3); + tcg_gen_qemu_st_i32(temp2, ea, ctx->mem_idx, MO_LEUL); + tcg_gen_mov_i32(cpu_gpr_d[reg], temp); } /* We generate loads and store to core special function register (csfr) through @@ -340,12 +345,12 @@ static void gen_swapmsk(DisasContext *ctx, int reg, TCGv ea) #define R(ADDRESS, REG, FEATURE) \ case ADDRESS: \ if (has_feature(ctx, FEATURE)) { \ - tcg_gen_ld_tl(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \ + tcg_gen_ld_i32(ret, tcg_env, offsetof(CPUTriCoreState, REG)); \ } \ break; #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) #define E(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) -static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset) +static void gen_mfcr(DisasContext *ctx, TCGv_i32 ret, int32_t offset) { /* since we're caching PSW make this a special case */ if (offset == 0xfe04) { @@ -365,7 +370,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv 
ret, int32_t offset) #define A(ADDRESS, REG, FEATURE) R(ADDRESS, REG, FEATURE) \ case ADDRESS: \ if (has_feature(ctx, FEATURE)) { \ - tcg_gen_st_tl(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \ + tcg_gen_st_i32(r1, tcg_env, offsetof(CPUTriCoreState, REG)); \ } \ break; /* Endinit protected registers @@ -373,8 +378,7 @@ static inline void gen_mfcr(DisasContext *ctx, TCGv ret, int32_t offset) watchdog device, we handle endinit protected registers like all-access registers for now. */ #define E(ADDRESS, REG, FEATURE) A(ADDRESS, REG, FEATURE) -static inline void gen_mtcr(DisasContext *ctx, TCGv r1, - int32_t offset) +static void gen_mtcr(DisasContext *ctx, TCGv_i32 r1, int32_t offset) { if (ctx->priv == TRICORE_PRIV_SM) { /* since we're caching PSW make this a special case */ @@ -393,31 +397,30 @@ static inline void gen_mtcr(DisasContext *ctx, TCGv r1, /* Functions for arithmetic instructions */ -static inline void gen_add_d(TCGv ret, TCGv r1, TCGv r2) +static void gen_add_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv t0 = tcg_temp_new_i32(); - TCGv result = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); /* Addition and set V/SV bits */ - tcg_gen_add_tl(result, r1, r2); + tcg_gen_add_i32(result, r1, r2); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, result, r1); - tcg_gen_xor_tl(t0, r1, r2); - tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + tcg_gen_xor_i32(cpu_PSW_V, result, r1); + tcg_gen_xor_i32(t0, r1, r2); + tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, result, result); - tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, result, result); + tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* 
write back result */ - tcg_gen_mov_tl(ret, result); + tcg_gen_mov_i32(ret, result); } -static inline void -gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) +static void gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 result = tcg_temp_new_i64(); @@ -429,65 +432,66 @@ gen_add64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) tcg_gen_andc_i64(t1, t1, t0); tcg_gen_extrh_i64_i32(cpu_PSW_V, t1); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* calc AV/SAV bits */ tcg_gen_extrh_i64_i32(temp, result); - tcg_gen_add_tl(cpu_PSW_AV, temp, temp); - tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, temp, temp); + tcg_gen_xor_i32(cpu_PSW_AV, temp, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(ret, result); } -static inline void -gen_addsub64_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, void(*op1)(TCGv, TCGv, TCGv), - void(*op2)(TCGv, TCGv, TCGv)) +static void gen_addsub64_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32), + void(*op2)(TCGv_i32, TCGv_i32, TCGv_i32)) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); - TCGv temp4 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); + TCGv_i32 temp4 = tcg_temp_new_i32(); (*op1)(temp, r1_low, r2); /* calc V0 bit */ - tcg_gen_xor_tl(temp2, temp, r1_low); - tcg_gen_xor_tl(temp3, r1_low, r2); + tcg_gen_xor_i32(temp2, temp, r1_low); + tcg_gen_xor_i32(temp3, r1_low, r2); if (op1 == tcg_gen_add_tl) { - 
tcg_gen_andc_tl(temp2, temp2, temp3); + tcg_gen_andc_i32(temp2, temp2, temp3); } else { - tcg_gen_and_tl(temp2, temp2, temp3); + tcg_gen_and_i32(temp2, temp2, temp3); } (*op2)(temp3, r1_high, r3); /* calc V1 bit */ - tcg_gen_xor_tl(cpu_PSW_V, temp3, r1_high); - tcg_gen_xor_tl(temp4, r1_high, r3); + tcg_gen_xor_i32(cpu_PSW_V, temp3, r1_high); + tcg_gen_xor_i32(temp4, r1_high, r3); if (op2 == tcg_gen_add_tl) { - tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, temp4); + tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, temp4); } else { - tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp4); + tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp4); } /* combine V0/V1 bits */ - tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp2); + tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp2); /* calc sv bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* write result */ - tcg_gen_mov_tl(ret_low, temp); - tcg_gen_mov_tl(ret_high, temp3); + tcg_gen_mov_i32(ret_low, temp); + tcg_gen_mov_i32(ret_high, temp3); /* calc AV bit */ - tcg_gen_add_tl(temp, ret_low, ret_low); - tcg_gen_xor_tl(temp, temp, ret_low); - tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); - tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, ret_high); - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); + tcg_gen_add_i32(temp, ret_low, ret_low); + tcg_gen_xor_i32(temp, temp, ret_low); + tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_i32(cpu_PSW_AV, cpu_PSW_AV, ret_high); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } /* ret = r2 + (r1 * r3); */ -static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) +static void gen_madd32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -508,53 +512,51 @@ static inline void gen_madd32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) tcg_gen_setcondi_i64(TCG_COND_LT, 
t2, t1, -0x80000000LL); tcg_gen_or_i64(t2, t2, t3); tcg_gen_extrl_i64_i32(cpu_PSW_V, t2); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void gen_maddi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) +static void gen_maddi32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_madd32_d(ret, r1, r2, temp); } -static inline void -gen_madd64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_madd64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1, + TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3) { - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - TCGv t3 = tcg_temp_new(); - TCGv t4 = tcg_temp_new(); + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + TCGv_i32 t4 = tcg_temp_new_i32(); - tcg_gen_muls2_tl(t1, t2, r1, r3); + tcg_gen_muls2_i32(t1, t2, r1, r3); /* only the add can overflow */ - tcg_gen_add2_tl(t3, t4, r2_low, r2_high, t1, t2); + tcg_gen_add2_i32(t3, t4, r2_low, r2_high, t1, t2); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high); - tcg_gen_xor_tl(t1, r2_high, t2); - tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t1); + tcg_gen_xor_i32(cpu_PSW_V, t4, r2_high); + tcg_gen_xor_i32(t1, r2_high, t2); + tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t1); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc 
AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, t4, t4); - tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, t4, t4); + tcg_gen_xor_i32(cpu_PSW_AV, t4, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back the result */ - tcg_gen_mov_tl(ret_low, t3); - tcg_gen_mov_tl(ret_high, t4); + tcg_gen_mov_i32(ret_low, t3); + tcg_gen_mov_i32(ret_high, t4); } -static inline void -gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_maddu64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1, + TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -572,39 +574,38 @@ gen_maddu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, calc V bit */ tcg_gen_setcond_i64(TCG_COND_LTU, t2, t2, t1); tcg_gen_extrl_i64_i32(cpu_PSW_V, t2); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); - tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void -gen_maddi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_maddi64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1, + TCGv_i32 r2_low, TCGv_i32 r2_high, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_madd64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void -gen_maddui64_d(TCGv ret_low, TCGv ret_high, 
TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_maddui64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1, + TCGv_i32 r2_low, TCGv_i32 r2_high, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_maddu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void -gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_madd_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -625,13 +626,14 @@ gen_madd_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_add_tl, tcg_gen_add_tl); } -static inline void -gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddsu_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -652,11 +654,12 @@ gen_maddsu_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_sub_tl, tcg_gen_add_tl); } -static inline void -gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddsum_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 
r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); @@ -685,16 +688,17 @@ gen_maddsum_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_2); } -static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2); +static void gen_adds(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2); -static inline void -gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_madds_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { @@ -713,25 +717,26 @@ gen_madds_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_adds(ret_low, r1_low, temp); - tcg_gen_mov_tl(temp, cpu_PSW_V); - tcg_gen_mov_tl(temp3, cpu_PSW_AV); + tcg_gen_mov_i32(temp, cpu_PSW_V); + tcg_gen_mov_i32(temp3, cpu_PSW_AV); gen_adds(ret_high, r1_high, temp2); /* combine v bits */ - tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3); } -static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2); +static void gen_subs(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2); -static inline void -gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void 
gen_maddsus_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { @@ -750,20 +755,21 @@ gen_maddsus_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_subs(ret_low, r1_low, temp); - tcg_gen_mov_tl(temp, cpu_PSW_V); - tcg_gen_mov_tl(temp3, cpu_PSW_AV); + tcg_gen_mov_i32(temp, cpu_PSW_V); + tcg_gen_mov_i32(temp3, cpu_PSW_AV); gen_adds(ret_high, r1_high, temp2); /* combine v bits */ - tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3); } -static inline void -gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddsums_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); @@ -792,11 +798,12 @@ gen_maddsums_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, } -static inline void -gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddm_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); 
TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); @@ -820,11 +827,12 @@ gen_maddm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); } -static inline void -gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddms_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); switch (mode) { @@ -846,11 +854,11 @@ gen_maddms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, - uint32_t mode) +static void gen_maddr64_h(TCGv_i32 ret, TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -869,23 +877,25 @@ gen_maddr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, gen_helper_addr_h(ret, tcg_env, temp64, r1_low, r1_high); } -static inline void -gen_maddr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddr32_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_maddr64_h(ret, temp, temp2, r2, r3, n, mode); } 
-static inline void -gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddsur32_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -901,17 +911,17 @@ gen_maddsur32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_helper_addsur_h(ret, tcg_env, temp64, temp, temp2); } -static inline void -gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, - uint32_t n, uint32_t mode) +static void gen_maddr64s_h(TCGv_i32 ret, TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -930,23 +940,25 @@ gen_maddr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, gen_helper_addr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high); } -static inline void -gen_maddr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddr32s_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_maddr64s_h(ret, temp, temp2, r2, r3, n, mode); } -static inline void 
-gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_maddsur32s_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -962,32 +974,32 @@ gen_maddsur32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_helper_addsur_h_ssov(ret, tcg_env, temp64, temp, temp2); } -static inline void -gen_maddr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +static void gen_maddr_q(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); gen_helper_maddr_q(ret, tcg_env, r1, r2, r3, t_n); } -static inline void -gen_maddrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +static void gen_maddrs_q(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); gen_helper_maddr_q_ssov(ret, tcg_env, r1, r2, r3, t_n); } -static inline void -gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, - uint32_t up_shift) +static void gen_madd32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n, uint32_t up_shift) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); @@ 
-1008,81 +1020,83 @@ gen_madd32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL); tcg_gen_or_i64(t1, t1, t2); tcg_gen_extrl_i64_i32(cpu_PSW_V, t1); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* We produce an overflow on the host if the mul before was (0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf. */ if (n == 1) { - tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000); - tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3); - tcg_gen_and_tl(temp, temp, temp2); - tcg_gen_shli_tl(temp, temp, 31); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_i32(temp, temp, temp2); + tcg_gen_shli_i32(temp, temp, 31); /* negate v bit, if special condition */ - tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp); } /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3); - tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, temp3, temp3); + tcg_gen_xor_i32(cpu_PSW_AV, temp3, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, temp3); + tcg_gen_mov_i32(ret, temp3); } -static inline void -gen_m16add32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +static void gen_m16add32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, 
arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } gen_add_d(ret, arg1, temp); } -static inline void -gen_m16adds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +static void gen_m16adds32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } gen_adds(ret, arg1, temp); } -static inline void -gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_m16add64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + 
tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } tcg_gen_ext_i32_i64(t2, temp); tcg_gen_shli_i64(t2, t2, 16); @@ -1092,23 +1106,23 @@ gen_m16add64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_extr_i64_i32(rl, rh, t3); } -static inline void -gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_m16adds64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } tcg_gen_ext_i32_i64(t2, temp); tcg_gen_shli_i64(t2, t2, 16); @@ -1118,15 +1132,15 @@ gen_m16adds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_extr_i64_i32(rl, rh, t1); } -static inline void -gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_madd64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = 
tcg_temp_new_i64(); TCGv_i64 t4 = tcg_temp_new_i64(); - TCGv temp, temp2; + TCGv_i32 temp, temp2; tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); tcg_gen_ext_i32_i64(t2, arg2); @@ -1146,29 +1160,29 @@ gen_madd64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, (0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf. */ if (n == 1) { - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); - tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000); - tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3); - tcg_gen_and_tl(temp, temp, temp2); - tcg_gen_shli_tl(temp, temp, 31); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_i32(temp, temp, temp2); + tcg_gen_shli_i32(temp, temp, 31); /* negate v bit, if special condition */ - tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp); } /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t4); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, rh, rh); - tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, rh, rh); + tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void -gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, - uint32_t up_shift) +static void gen_madds32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n, uint32_t up_shift) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -1184,12 +1198,12 @@ gen_madds32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, gen_helper_madd32_q_add_ssov(ret, tcg_env, t1, t2); } -static inline void -gen_madds64_q(TCGv rl, TCGv rh, TCGv 
arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_madds64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { TCGv_i64 r1 = tcg_temp_new_i64(); - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); gen_helper_madd64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n); @@ -1197,7 +1211,7 @@ gen_madds64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, } /* ret = r2 - (r1 * r3); */ -static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) +static void gen_msub32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -1218,62 +1232,61 @@ static inline void gen_msub32_d(TCGv ret, TCGv r1, TCGv r2, TCGv r3) tcg_gen_setcondi_i64(TCG_COND_LT, t2, t1, -0x80000000LL); tcg_gen_or_i64(t2, t2, t3); tcg_gen_extrl_i64_i32(cpu_PSW_V, t2); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void gen_msubi32_d(TCGv ret, TCGv r1, TCGv r2, int32_t con) +static void gen_msubi32_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_msub32_d(ret, r1, r2, temp); } -static inline void -gen_msub64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_msub64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, 
TCGv_i32 r2_high, + TCGv_i32 r3) { - TCGv t1 = tcg_temp_new(); - TCGv t2 = tcg_temp_new(); - TCGv t3 = tcg_temp_new(); - TCGv t4 = tcg_temp_new(); + TCGv_i32 t1 = tcg_temp_new_i32(); + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + TCGv_i32 t4 = tcg_temp_new_i32(); - tcg_gen_muls2_tl(t1, t2, r1, r3); + tcg_gen_muls2_i32(t1, t2, r1, r3); /* only the sub can overflow */ - tcg_gen_sub2_tl(t3, t4, r2_low, r2_high, t1, t2); + tcg_gen_sub2_i32(t3, t4, r2_low, r2_high, t1, t2); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, t4, r2_high); - tcg_gen_xor_tl(t1, r2_high, t2); - tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, t1); + tcg_gen_xor_i32(cpu_PSW_V, t4, r2_high); + tcg_gen_xor_i32(t1, r2_high, t2); + tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, t1); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, t4, t4); - tcg_gen_xor_tl(cpu_PSW_AV, t4, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, t4, t4); + tcg_gen_xor_i32(cpu_PSW_AV, t4, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back the result */ - tcg_gen_mov_tl(ret_low, t3); - tcg_gen_mov_tl(ret_high, t4); + tcg_gen_mov_i32(ret_low, t3); + tcg_gen_mov_i32(ret_high, t4); } -static inline void -gen_msubi64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_msubi64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_msub64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void -gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_msubu64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, TCGv_i32 r1, + TCGv_i32 r2_low, TCGv_i32 r2_high, TCGv_i32 r3) { TCGv_i64 
t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); @@ -1289,153 +1302,152 @@ gen_msubu64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, /* calc V bit, only the sub can overflow, if t1 > t2 */ tcg_gen_setcond_i64(TCG_COND_GTU, t1, t1, t2); tcg_gen_extrl_i64_i32(cpu_PSW_V, t1); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); - tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void -gen_msubui64_d(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_msubui64_d(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_msubu64_d(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void gen_addi_d(TCGv ret, TCGv r1, target_ulong r2) +static void gen_addi_d(TCGv_i32 ret, TCGv_i32 r1, int32_t r2) { - TCGv temp = tcg_constant_i32(r2); + TCGv_i32 temp = tcg_constant_i32(r2); gen_add_d(ret, r1, temp); } /* calculate the carry bit too */ -static inline void gen_add_CC(TCGv ret, TCGv r1, TCGv r2) +static void gen_add_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv t0 = tcg_temp_new_i32(); - TCGv result = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); - tcg_gen_movi_tl(t0, 0); + tcg_gen_movi_i32(t0, 0); /* Addition and set C/V/SV bits */ tcg_gen_add2_i32(result, cpu_PSW_C, r1, t0, r2, t0); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, result, 
r1); - tcg_gen_xor_tl(t0, r1, r2); - tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + tcg_gen_xor_i32(cpu_PSW_V, result, r1); + tcg_gen_xor_i32(t0, r1, r2); + tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, result, result); - tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, result, result); + tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, result); + tcg_gen_mov_i32(ret, result); } -static inline void gen_addi_CC(TCGv ret, TCGv r1, int32_t con) +static void gen_addi_CC(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_add_CC(ret, r1, temp); } -static inline void gen_addc_CC(TCGv ret, TCGv r1, TCGv r2) +static void gen_addc_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv t0 = tcg_temp_new_i32(); - TCGv result = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); /* Addition, carry and set C/V/SV bits */ tcg_gen_addcio_i32(result, cpu_PSW_C, r1, r2, cpu_PSW_C); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, result, r1); - tcg_gen_xor_tl(t0, r1, r2); - tcg_gen_andc_tl(cpu_PSW_V, cpu_PSW_V, t0); + tcg_gen_xor_i32(cpu_PSW_V, result, r1); + tcg_gen_xor_i32(t0, r1, r2); + tcg_gen_andc_i32(cpu_PSW_V, cpu_PSW_V, t0); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, result, result); - tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, result, result); + tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, 
cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, result); + tcg_gen_mov_i32(ret, result); } -static inline void gen_addci_CC(TCGv ret, TCGv r1, int32_t con) +static void gen_addci_CC(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_addc_CC(ret, r1, temp); } -static inline void gen_cond_add(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, - TCGv r4) +static void gen_cond_add(TCGCond cond, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, TCGv_i32 r4) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv result = tcg_temp_new(); - TCGv mask = tcg_temp_new(); - TCGv t0 = tcg_constant_i32(0); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); + TCGv_i32 mask = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_constant_i32(0); /* create mask for sticky bits */ - tcg_gen_setcond_tl(cond, mask, r4, t0); - tcg_gen_shli_tl(mask, mask, 31); + tcg_gen_setcond_i32(cond, mask, r4, t0); + tcg_gen_shli_i32(mask, mask, 31); - tcg_gen_add_tl(result, r1, r2); + tcg_gen_add_i32(result, r1, r2); /* Calc PSW_V */ - tcg_gen_xor_tl(temp, result, r1); - tcg_gen_xor_tl(temp2, r1, r2); - tcg_gen_andc_tl(temp, temp, temp2); - tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); + tcg_gen_xor_i32(temp, result, r1); + tcg_gen_xor_i32(temp2, r1, r2); + tcg_gen_andc_i32(temp, temp, temp2); + tcg_gen_movcond_i32(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); /* Set PSW_SV */ - tcg_gen_and_tl(temp, temp, mask); - tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV); + tcg_gen_and_i32(temp, temp, mask); + tcg_gen_or_i32(cpu_PSW_SV, temp, cpu_PSW_SV); /* calc AV bit */ - tcg_gen_add_tl(temp, result, result); - tcg_gen_xor_tl(temp, temp, result); - tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); + tcg_gen_add_i32(temp, result, result); + tcg_gen_xor_i32(temp, temp, result); + 
tcg_gen_movcond_i32(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_and_tl(temp, temp, mask); - tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); + tcg_gen_and_i32(temp, temp, mask); + tcg_gen_or_i32(cpu_PSW_SAV, temp, cpu_PSW_SAV); /* write back result */ - tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); + tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1); } -static inline void gen_condi_add(TCGCond cond, TCGv r1, int32_t r2, - TCGv r3, TCGv r4) +static void gen_condi_add(TCGCond cond, + TCGv_i32 r1, int32_t r2, TCGv_i32 r3, TCGv_i32 r4) { - TCGv temp = tcg_constant_i32(r2); + TCGv_i32 temp = tcg_constant_i32(r2); gen_cond_add(cond, r1, temp, r3, r4); } -static inline void gen_sub_d(TCGv ret, TCGv r1, TCGv r2) +static void gen_sub_d(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv temp = tcg_temp_new_i32(); - TCGv result = tcg_temp_new_i32(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); - tcg_gen_sub_tl(result, r1, r2); + tcg_gen_sub_i32(result, r1, r2); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, result, r1); - tcg_gen_xor_tl(temp, r1, r2); - tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_xor_i32(cpu_PSW_V, result, r1); + tcg_gen_xor_i32(temp, r1, r2); + tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, result, result); - tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, result, result); + tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, result); + tcg_gen_mov_i32(ret, result); } -static inline void -gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) +static void gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) { - TCGv temp = 
tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 result = tcg_temp_new_i64(); @@ -1447,87 +1459,88 @@ gen_sub64_d(TCGv_i64 ret, TCGv_i64 r1, TCGv_i64 r2) tcg_gen_and_i64(t1, t1, t0); tcg_gen_extrh_i64_i32(cpu_PSW_V, t1); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* calc AV/SAV bits */ tcg_gen_extrh_i64_i32(temp, result); - tcg_gen_add_tl(cpu_PSW_AV, temp, temp); - tcg_gen_xor_tl(cpu_PSW_AV, temp, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, temp, temp); + tcg_gen_xor_i32(cpu_PSW_AV, temp, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ tcg_gen_mov_i64(ret, result); } -static inline void gen_sub_CC(TCGv ret, TCGv r1, TCGv r2) +static void gen_sub_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv result = tcg_temp_new(); - TCGv temp = tcg_temp_new(); + TCGv_i32 result = tcg_temp_new_i32(); + TCGv_i32 temp = tcg_temp_new_i32(); - tcg_gen_sub_tl(result, r1, r2); + tcg_gen_sub_i32(result, r1, r2); /* calc C bit */ - tcg_gen_setcond_tl(TCG_COND_GEU, cpu_PSW_C, r1, r2); + tcg_gen_setcond_i32(TCG_COND_GEU, cpu_PSW_C, r1, r2); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, result, r1); - tcg_gen_xor_tl(temp, r1, r2); - tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_xor_i32(cpu_PSW_V, result, r1); + tcg_gen_xor_i32(temp, r1, r2); + tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, result, result); - tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, result, result); + tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + 
tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, result); + tcg_gen_mov_i32(ret, result); } -static inline void gen_subc_CC(TCGv ret, TCGv r1, TCGv r2) +static void gen_subc_CC(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv temp = tcg_temp_new(); - tcg_gen_not_tl(temp, r2); + TCGv_i32 temp = tcg_temp_new_i32(); + tcg_gen_not_i32(temp, r2); gen_addc_CC(ret, r1, temp); } -static inline void gen_cond_sub(TCGCond cond, TCGv r1, TCGv r2, TCGv r3, - TCGv r4) +static void gen_cond_sub(TCGCond cond, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, TCGv_i32 r4) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv result = tcg_temp_new(); - TCGv mask = tcg_temp_new(); - TCGv t0 = tcg_constant_i32(0); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); + TCGv_i32 mask = tcg_temp_new_i32(); + TCGv_i32 t0 = tcg_constant_i32(0); /* create mask for sticky bits */ - tcg_gen_setcond_tl(cond, mask, r4, t0); - tcg_gen_shli_tl(mask, mask, 31); + tcg_gen_setcond_i32(cond, mask, r4, t0); + tcg_gen_shli_i32(mask, mask, 31); - tcg_gen_sub_tl(result, r1, r2); + tcg_gen_sub_i32(result, r1, r2); /* Calc PSW_V */ - tcg_gen_xor_tl(temp, result, r1); - tcg_gen_xor_tl(temp2, r1, r2); - tcg_gen_and_tl(temp, temp, temp2); - tcg_gen_movcond_tl(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); + tcg_gen_xor_i32(temp, result, r1); + tcg_gen_xor_i32(temp2, r1, r2); + tcg_gen_and_i32(temp, temp, temp2); + tcg_gen_movcond_i32(cond, cpu_PSW_V, r4, t0, temp, cpu_PSW_V); /* Set PSW_SV */ - tcg_gen_and_tl(temp, temp, mask); - tcg_gen_or_tl(cpu_PSW_SV, temp, cpu_PSW_SV); + tcg_gen_and_i32(temp, temp, mask); + tcg_gen_or_i32(cpu_PSW_SV, temp, cpu_PSW_SV); /* calc AV bit */ - tcg_gen_add_tl(temp, result, result); - tcg_gen_xor_tl(temp, temp, result); - tcg_gen_movcond_tl(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); + tcg_gen_add_i32(temp, result, result); + tcg_gen_xor_i32(temp, temp, 
result); + tcg_gen_movcond_i32(cond, cpu_PSW_AV, r4, t0, temp, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_and_tl(temp, temp, mask); - tcg_gen_or_tl(cpu_PSW_SAV, temp, cpu_PSW_SAV); + tcg_gen_and_i32(temp, temp, mask); + tcg_gen_or_i32(cpu_PSW_SAV, temp, cpu_PSW_SAV); /* write back result */ - tcg_gen_movcond_tl(cond, r3, r4, t0, result, r1); + tcg_gen_movcond_i32(cond, r3, r4, t0, result, r1); } -static inline void -gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msub_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -1548,14 +1561,15 @@ gen_msub_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_sub_tl, tcg_gen_sub_tl); } -static inline void -gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubs_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { @@ -1574,20 +1588,21 @@ gen_msubs_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_subs(ret_low, r1_low, temp); - tcg_gen_mov_tl(temp, cpu_PSW_V); - tcg_gen_mov_tl(temp3, cpu_PSW_AV); + 
tcg_gen_mov_i32(temp, cpu_PSW_V); + tcg_gen_mov_i32(temp3, cpu_PSW_AV); gen_subs(ret_high, r1_high, temp2); /* combine v bits */ - tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits */ - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3); } -static inline void -gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubm_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); @@ -1611,11 +1626,12 @@ gen_msubm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64_3); } -static inline void -gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubms_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); switch (mode) { @@ -1637,11 +1653,12 @@ gen_msubms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, - uint32_t mode) +static void gen_msubr64_h(TCGv_i32 ret, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ 
-1660,22 +1677,24 @@ gen_msubr64_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, uint32_t n, gen_helper_subr_h(ret, tcg_env, temp64, r1_low, r1_high); } -static inline void -gen_msubr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubr32_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_msubr64_h(ret, temp, temp2, r2, r3, n, mode); } -static inline void -gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, - uint32_t n, uint32_t mode) +static void gen_msubr64s_h(TCGv_i32 ret, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -1694,36 +1713,37 @@ gen_msubr64s_h(TCGv ret, TCGv r1_low, TCGv r1_high, TCGv r2, TCGv r3, gen_helper_subr_h_ssov(ret, tcg_env, temp64, r1_low, r1_high); } -static inline void -gen_msubr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubr32s_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_msubr64s_h(ret, temp, temp2, r2, r3, n, mode); } -static inline void -gen_msubr_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +static void gen_msubr_q(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, 
uint32_t n) { - TCGv temp = tcg_constant_i32(n); + TCGv_i32 temp = tcg_constant_i32(n); gen_helper_msubr_q(ret, tcg_env, r1, r2, r3, temp); } -static inline void -gen_msubrs_q(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n) +static void gen_msubrs_q(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, uint32_t n) { - TCGv temp = tcg_constant_i32(n); + TCGv_i32 temp = tcg_constant_i32(n); gen_helper_msubr_q_ssov(ret, tcg_env, r1, r2, r3, temp); } -static inline void -gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, - uint32_t up_shift) +static void gen_msub32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n, uint32_t up_shift) { - TCGv temp3 = tcg_temp_new(); + TCGv_i32 temp3 = tcg_temp_new_i32(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); @@ -1748,70 +1768,72 @@ gen_msub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, tcg_gen_setcondi_i64(TCG_COND_LT, t2, t3, -0x80000000LL); tcg_gen_or_i64(t1, t1, t2); tcg_gen_extrl_i64_i32(cpu_PSW_V, t1); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, temp3, temp3); - tcg_gen_xor_tl(cpu_PSW_AV, temp3, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, temp3, temp3); + tcg_gen_xor_i32(cpu_PSW_AV, temp3, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, temp3); + tcg_gen_mov_i32(ret, temp3); } -static inline void -gen_m16sub32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +static void gen_m16sub32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = 
tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } gen_sub_d(ret, arg1, temp); } -static inline void -gen_m16subs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n) +static void gen_m16subs32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } gen_subs(ret, arg1, temp); } -static inline void -gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_m16sub64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); if (n == 0) { - 
tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } tcg_gen_ext_i32_i64(t2, temp); tcg_gen_shli_i64(t2, t2, 16); @@ -1821,23 +1843,23 @@ gen_m16sub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_extr_i64_i32(rl, rh, t3); } -static inline void -gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_m16subs64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); if (n == 0) { - tcg_gen_mul_tl(temp, arg2, arg3); + tcg_gen_mul_i32(temp, arg2, arg3); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(temp, arg2, arg3); - tcg_gen_shli_tl(temp, temp, 1); + tcg_gen_mul_i32(temp, arg2, arg3); + tcg_gen_shli_i32(temp, temp, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, temp, 0x80000000); - tcg_gen_sub_tl(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, temp, 0x80000000); + tcg_gen_sub_i32(temp, temp, temp2); } tcg_gen_ext_i32_i64(t2, temp); tcg_gen_shli_i64(t2, t2, 16); @@ -1847,15 +1869,15 @@ gen_m16subs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, tcg_gen_extr_i64_i32(rl, rh, t1); } -static inline void -gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void 
gen_msub64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); TCGv_i64 t3 = tcg_temp_new_i64(); TCGv_i64 t4 = tcg_temp_new_i64(); - TCGv temp, temp2; + TCGv_i32 temp, temp2; tcg_gen_concat_i32_i64(t1, arg1_low, arg1_high); tcg_gen_ext_i32_i64(t2, arg2); @@ -1875,29 +1897,29 @@ gen_msub64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, (0x80000000 * 0x80000000) << 1). If this is the case, we negate the ovf. */ if (n == 1) { - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); - tcg_gen_setcondi_tl(TCG_COND_EQ, temp, arg2, 0x80000000); - tcg_gen_setcond_tl(TCG_COND_EQ, temp2, arg2, arg3); - tcg_gen_and_tl(temp, temp, temp2); - tcg_gen_shli_tl(temp, temp, 31); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp, arg2, 0x80000000); + tcg_gen_setcond_i32(TCG_COND_EQ, temp2, arg2, arg3); + tcg_gen_and_i32(temp, temp, temp2); + tcg_gen_shli_i32(temp, temp, 31); /* negate v bit, if special condition */ - tcg_gen_xor_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_xor_i32(cpu_PSW_V, cpu_PSW_V, temp); } /* write back result */ tcg_gen_extr_i64_i32(rl, rh, t4); /* Calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV/SAV bits */ - tcg_gen_add_tl(cpu_PSW_AV, rh, rh); - tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, rh, rh); + tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV); /* calc SAV */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void -gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, - uint32_t up_shift) +static void gen_msubs32_q(TCGv_i32 ret, + TCGv_i32 arg1, TCGv_i32 arg2, TCGv_i32 arg3, + uint32_t n, uint32_t up_shift) { TCGv_i64 t1 = tcg_temp_new_i64(); TCGv_i64 t2 = tcg_temp_new_i64(); 
@@ -1918,25 +1940,26 @@ gen_msubs32_q(TCGv ret, TCGv arg1, TCGv arg2, TCGv arg3, uint32_t n, gen_helper_msub32_q_sub_ssov(ret, tcg_env, t1, t3); } -static inline void -gen_msubs64_q(TCGv rl, TCGv rh, TCGv arg1_low, TCGv arg1_high, TCGv arg2, - TCGv arg3, uint32_t n) +static void gen_msubs64_q(TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 arg1_low, TCGv_i32 arg1_high, + TCGv_i32 arg2, TCGv_i32 arg3, uint32_t n) { TCGv_i64 r1 = tcg_temp_new_i64(); - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); tcg_gen_concat_i32_i64(r1, arg1_low, arg1_high); gen_helper_msub64_q_ssov(r1, tcg_env, r1, arg2, arg3, t_n); tcg_gen_extr_i64_i32(rl, rh, r1); } -static inline void -gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubad_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -1957,11 +1980,12 @@ gen_msubad_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_add_tl, tcg_gen_sub_tl); } -static inline void -gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubadm_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); TCGv_i64 temp64_3 = tcg_temp_new_i64(); @@ -1990,12 +2014,13 @@ gen_msubadm_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, 
temp64_2); } -static inline void -gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubadr32_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -2011,19 +2036,20 @@ gen_msubadr32_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_helper_subadr_h(ret, tcg_env, temp64, temp, temp2); } -static inline void -gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubads_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); - TCGv temp3 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); + TCGv_i32 temp3 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { @@ -2042,20 +2068,21 @@ gen_msubads_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, } tcg_gen_extr_i64_i32(temp, temp2, temp64); gen_adds(ret_low, r1_low, temp); - tcg_gen_mov_tl(temp, cpu_PSW_V); - tcg_gen_mov_tl(temp3, cpu_PSW_AV); + tcg_gen_mov_i32(temp, cpu_PSW_V); + tcg_gen_mov_i32(temp3, cpu_PSW_AV); gen_subs(ret_high, r1_high, temp2); /* combine v bits */ - tcg_gen_or_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_or_i32(cpu_PSW_V, cpu_PSW_V, temp); /* combine av bits 
*/ - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp3); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp3); } -static inline void -gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, - TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubadms_h(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1_low, TCGv_i32 r1_high, + TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); + TCGv_i32 t_n = tcg_constant_i32(n); TCGv_i64 temp64 = tcg_temp_new_i64(); TCGv_i64 temp64_2 = tcg_temp_new_i64(); @@ -2083,12 +2110,13 @@ gen_msubadms_h(TCGv ret_low, TCGv ret_high, TCGv r1_low, TCGv r1_high, TCGv r2, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) +static void gen_msubadr32s_h(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 r3, + uint32_t n, uint32_t mode) { - TCGv t_n = tcg_constant_i32(n); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 t_n = tcg_constant_i32(n); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); TCGv_i64 temp64 = tcg_temp_new_i64(); switch (mode) { case MODE_LL: @@ -2104,166 +2132,168 @@ gen_msubadr32s_h(TCGv ret, TCGv r1, TCGv r2, TCGv r3, uint32_t n, uint32_t mode) GEN_HELPER_UU(mul_h, temp64, r2, r3, t_n); break; } - tcg_gen_andi_tl(temp2, r1, 0xffff0000); - tcg_gen_shli_tl(temp, r1, 16); + tcg_gen_andi_i32(temp2, r1, 0xffff0000); + tcg_gen_shli_i32(temp, r1, 16); gen_helper_subadr_h_ssov(ret, tcg_env, temp64, temp, temp2); } -static inline void gen_abs(TCGv ret, TCGv r1) +static void gen_abs(TCGv_i32 ret, TCGv_i32 r1) { - tcg_gen_abs_tl(ret, r1); + tcg_gen_abs_i32(ret, r1); /* overflow can only happen, if r1 = 0x80000000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, r1, 0x80000000); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); 
/* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void gen_absdif(TCGv ret, TCGv r1, TCGv r2) +static void gen_absdif(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv temp = tcg_temp_new_i32(); - TCGv result = tcg_temp_new_i32(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 result = tcg_temp_new_i32(); - tcg_gen_sub_tl(result, r1, r2); - tcg_gen_sub_tl(temp, r2, r1); - tcg_gen_movcond_tl(TCG_COND_GT, result, r1, r2, result, temp); + tcg_gen_sub_i32(result, r1, r2); + tcg_gen_sub_i32(temp, r2, r1); + tcg_gen_movcond_i32(TCG_COND_GT, result, r1, r2, result, temp); /* calc V bit */ - tcg_gen_xor_tl(cpu_PSW_V, result, r1); - tcg_gen_xor_tl(temp, result, r2); - tcg_gen_movcond_tl(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp); - tcg_gen_xor_tl(temp, r1, r2); - tcg_gen_and_tl(cpu_PSW_V, cpu_PSW_V, temp); + tcg_gen_xor_i32(cpu_PSW_V, result, r1); + tcg_gen_xor_i32(temp, result, r2); + tcg_gen_movcond_i32(TCG_COND_GT, cpu_PSW_V, r1, r2, cpu_PSW_V, temp); + tcg_gen_xor_i32(temp, r1, r2); + tcg_gen_and_i32(cpu_PSW_V, cpu_PSW_V, temp); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, result, result); - tcg_gen_xor_tl(cpu_PSW_AV, result, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, result, result); + tcg_gen_xor_i32(cpu_PSW_AV, result, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* write back result */ - tcg_gen_mov_tl(ret, result); + 
tcg_gen_mov_i32(ret, result); } -static inline void gen_absdifi(TCGv ret, TCGv r1, int32_t con) +static void gen_absdifi(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_absdif(ret, r1, temp); } -static inline void gen_absdifsi(TCGv ret, TCGv r1, int32_t con) +static void gen_absdifsi(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_absdif_ssov(ret, tcg_env, r1, temp); } -static inline void gen_mul_i32s(TCGv ret, TCGv r1, TCGv r2) +static void gen_mul_i32s(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv high = tcg_temp_new(); - TCGv low = tcg_temp_new(); + TCGv_i32 high = tcg_temp_new_i32(); + TCGv_i32 low = tcg_temp_new_i32(); - tcg_gen_muls2_tl(low, high, r1, r2); - tcg_gen_mov_tl(ret, low); + tcg_gen_muls2_i32(low, high, r1, r2); + tcg_gen_mov_i32(ret, low); /* calc V bit */ - tcg_gen_sari_tl(low, low, 31); - tcg_gen_setcond_tl(TCG_COND_NE, cpu_PSW_V, high, low); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_sari_i32(low, low, 31); + tcg_gen_setcond_i32(TCG_COND_NE, cpu_PSW_V, high, low); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void gen_muli_i32s(TCGv ret, TCGv r1, int32_t con) +static void gen_muli_i32s(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_mul_i32s(ret, r1, temp); } -static inline void gen_mul_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) 
+static void gen_mul_i64s(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2) { - tcg_gen_muls2_tl(ret_low, ret_high, r1, r2); + tcg_gen_muls2_i32(ret_low, ret_high, r1, r2); /* clear V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); - tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void gen_muli_i64s(TCGv ret_low, TCGv ret_high, TCGv r1, - int32_t con) +static void gen_muli_i64s(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_mul_i64s(ret_low, ret_high, r1, temp); } -static inline void gen_mul_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2) +static void gen_mul_i64u(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2) { - tcg_gen_mulu2_tl(ret_low, ret_high, r1, r2); + tcg_gen_mulu2_i32(ret_low, ret_high, r1, r2); /* clear V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* Calc AV bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret_high, ret_high); - tcg_gen_xor_tl(cpu_PSW_AV, ret_high, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret_high, ret_high); + tcg_gen_xor_i32(cpu_PSW_AV, ret_high, cpu_PSW_AV); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static inline void gen_muli_i64u(TCGv ret_low, TCGv ret_high, TCGv r1, - int32_t con) +static void 
gen_muli_i64u(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_mul_i64u(ret_low, ret_high, r1, temp); } -static inline void gen_mulsi_i32(TCGv ret, TCGv r1, int32_t con) +static void gen_mulsi_i32(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_mul_ssov(ret, tcg_env, r1, temp); } -static inline void gen_mulsui_i32(TCGv ret, TCGv r1, int32_t con) +static void gen_mulsui_i32(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_mul_suov(ret, tcg_env, r1, temp); } /* gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); */ -static inline void gen_maddsi_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +static void gen_maddsi_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_madd32_ssov(ret, tcg_env, r1, r2, temp); } -static inline void gen_maddsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +static void gen_maddsui_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_madd32_suov(ret, tcg_env, r1, r2, temp); } -static void -gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) +static void gen_mul_q(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2, + uint32_t n, uint32_t up_shift) { TCGv_i64 temp_64 = tcg_temp_new_i64(); TCGv_i64 temp2_64 = tcg_temp_new_i64(); if (n == 0) { if (up_shift == 32) { - tcg_gen_muls2_tl(rh, rl, arg1, arg2); + tcg_gen_muls2_i32(rh, rl, arg1, arg2); } else if (up_shift == 16) { tcg_gen_ext_i32_i64(temp_64, arg1); tcg_gen_ext_i32_i64(temp2_64, arg2); @@ -2272,10 +2302,10 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) 
tcg_gen_shri_i64(temp_64, temp_64, up_shift); tcg_gen_extr_i64_i32(rl, rh, temp_64); } else { - tcg_gen_muls2_tl(rl, rh, arg1, arg2); + tcg_gen_muls2_i32(rl, rh, arg1, arg2); } /* reset v bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); } else { /* n is expected to be 1 */ tcg_gen_ext_i32_i64(temp_64, arg1); tcg_gen_ext_i32_i64(temp2_64, arg2); @@ -2290,79 +2320,78 @@ gen_mul_q(TCGv rl, TCGv rh, TCGv arg1, TCGv arg2, uint32_t n, uint32_t up_shift) tcg_gen_extr_i64_i32(rl, rh, temp_64); /* overflow only occurs if r1 = r2 = 0x8000 */ if (up_shift == 0) {/* result is 64 bit */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rh, + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, rh, 0x80000000); } else { /* result is 32 bit */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, rl, + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, rl, 0x80000000); } - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* calc sv overflow bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); } /* calc av overflow bit */ if (up_shift == 0) { - tcg_gen_add_tl(cpu_PSW_AV, rh, rh); - tcg_gen_xor_tl(cpu_PSW_AV, rh, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, rh, rh); + tcg_gen_xor_i32(cpu_PSW_AV, rh, cpu_PSW_AV); } else { - tcg_gen_add_tl(cpu_PSW_AV, rl, rl); - tcg_gen_xor_tl(cpu_PSW_AV, rl, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, rl, rl); + tcg_gen_xor_i32(cpu_PSW_AV, rl, cpu_PSW_AV); } /* calc sav overflow bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static void -gen_mul_q_16(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) +static void gen_mul_q_16(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, uint32_t n) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); if (n == 0) { - tcg_gen_mul_tl(ret, arg1, arg2); + tcg_gen_mul_i32(ret, arg1, arg2); } else { /* n is expected to be 1 */ - tcg_gen_mul_tl(ret, 
arg1, arg2); - tcg_gen_shli_tl(ret, ret, 1); + tcg_gen_mul_i32(ret, arg1, arg2); + tcg_gen_shli_i32(ret, ret, 1); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80000000); - tcg_gen_sub_tl(ret, ret, temp); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp, ret, 0x80000000); + tcg_gen_sub_i32(ret, ret, temp); } /* reset v bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* calc av overflow bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static void gen_mulr_q(TCGv ret, TCGv arg1, TCGv arg2, uint32_t n) +static void gen_mulr_q(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, uint32_t n) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); if (n == 0) { - tcg_gen_mul_tl(ret, arg1, arg2); - tcg_gen_addi_tl(ret, ret, 0x8000); + tcg_gen_mul_i32(ret, arg1, arg2); + tcg_gen_addi_i32(ret, ret, 0x8000); } else { - tcg_gen_mul_tl(ret, arg1, arg2); - tcg_gen_shli_tl(ret, ret, 1); - tcg_gen_addi_tl(ret, ret, 0x8000); + tcg_gen_mul_i32(ret, arg1, arg2); + tcg_gen_shli_i32(ret, ret, 1); + tcg_gen_addi_i32(ret, ret, 0x8000); /* catch special case r1 = r2 = 0x8000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp, ret, 0x80008000); - tcg_gen_muli_tl(temp, temp, 0x8001); - tcg_gen_sub_tl(ret, ret, temp); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp, ret, 0x80008000); + tcg_gen_muli_i32(temp, temp, 0x8001); + tcg_gen_sub_i32(ret, ret, temp); } /* reset v bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* calc av overflow bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ 
- tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* cut halfword off */ - tcg_gen_andi_tl(ret, ret, 0xffff0000); + tcg_gen_andi_i32(ret, ret, 0xffff0000); } -static inline void -gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_madds_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + TCGv_i32 r3) { TCGv_i64 temp64 = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); @@ -2370,17 +2399,17 @@ gen_madds_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_maddsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_maddsi_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_madds_64(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void -gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_maddsu_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + TCGv_i32 r3) { TCGv_i64 temp64 = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); @@ -2388,29 +2417,29 @@ gen_maddsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_maddsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_maddsui_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, + TCGv_i32 r2_high, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_maddsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void gen_msubsi_32(TCGv 
ret, TCGv r1, TCGv r2, int32_t con) +static void gen_msubsi_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_msub32_ssov(ret, tcg_env, r1, r2, temp); } -static inline void gen_msubsui_32(TCGv ret, TCGv r1, TCGv r2, int32_t con) +static void gen_msubsui_32(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_msub32_suov(ret, tcg_env, r1, r2, temp); } -static inline void -gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_msubs_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + TCGv_i32 r3) { TCGv_i64 temp64 = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); @@ -2418,17 +2447,17 @@ gen_msubs_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_msubsi_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - int32_t con) +static void gen_msubsi_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_msubs_64(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static inline void -gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - TCGv r3) +static void gen_msubsu_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + TCGv_i32 r3) { TCGv_i64 temp64 = tcg_temp_new_i64(); tcg_gen_concat_i32_i64(temp64, r2_low, r2_high); @@ -2436,310 +2465,312 @@ gen_msubsu_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, tcg_gen_extr_i64_i32(ret_low, ret_high, temp64); } -static inline void -gen_msubsui_64(TCGv ret_low, TCGv ret_high, TCGv r1, TCGv r2_low, TCGv r2_high, - 
int32_t con) +static void gen_msubsui_64(TCGv_i32 ret_low, TCGv_i32 ret_high, + TCGv_i32 r1, TCGv_i32 r2_low, TCGv_i32 r2_high, + int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_msubsu_64(ret_low, ret_high, r1, r2_low, r2_high, temp); } -static void gen_saturate(TCGv ret, TCGv arg, int32_t up, int32_t low) +static void gen_saturate(TCGv_i32 ret, TCGv_i32 arg, int32_t up, int32_t low) { - tcg_gen_smax_tl(ret, arg, tcg_constant_i32(low)); - tcg_gen_smin_tl(ret, ret, tcg_constant_i32(up)); + tcg_gen_smax_i32(ret, arg, tcg_constant_i32(low)); + tcg_gen_smin_i32(ret, ret, tcg_constant_i32(up)); } -static void gen_saturate_u(TCGv ret, TCGv arg, int32_t up) +static void gen_saturate_u(TCGv_i32 ret, TCGv_i32 arg, int32_t up) { - tcg_gen_umin_tl(ret, arg, tcg_constant_i32(up)); + tcg_gen_umin_i32(ret, arg, tcg_constant_i32(up)); } -static void gen_shi(TCGv ret, TCGv r1, int32_t shift_count) +static void gen_shi(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count) { if (shift_count == -32) { - tcg_gen_movi_tl(ret, 0); + tcg_gen_movi_i32(ret, 0); } else if (shift_count >= 0) { - tcg_gen_shli_tl(ret, r1, shift_count); + tcg_gen_shli_i32(ret, r1, shift_count); } else { - tcg_gen_shri_tl(ret, r1, -shift_count); + tcg_gen_shri_i32(ret, r1, -shift_count); } } -static void gen_sh_hi(TCGv ret, TCGv r1, int32_t shiftcount) +static void gen_sh_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t shiftcount) { - TCGv temp_low, temp_high; + TCGv_i32 temp_low, temp_high; if (shiftcount == -16) { - tcg_gen_movi_tl(ret, 0); + tcg_gen_movi_i32(ret, 0); } else { - temp_high = tcg_temp_new(); - temp_low = tcg_temp_new(); + temp_high = tcg_temp_new_i32(); + temp_low = tcg_temp_new_i32(); - tcg_gen_andi_tl(temp_low, r1, 0xffff); - tcg_gen_andi_tl(temp_high, r1, 0xffff0000); + tcg_gen_andi_i32(temp_low, r1, 0xffff); + tcg_gen_andi_i32(temp_high, r1, 0xffff0000); gen_shi(temp_low, temp_low, shiftcount); gen_shi(ret, temp_high, shiftcount); - tcg_gen_deposit_tl(ret, 
ret, temp_low, 0, 16); + tcg_gen_deposit_i32(ret, ret, temp_low, 0, 16); } } -static void gen_shaci(TCGv ret, TCGv r1, int32_t shift_count) +static void gen_shaci(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count) { uint32_t msk, msk_start; - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); if (shift_count == 0) { /* Clear PSW.C and PSW.V */ - tcg_gen_movi_tl(cpu_PSW_C, 0); - tcg_gen_mov_tl(cpu_PSW_V, cpu_PSW_C); - tcg_gen_mov_tl(ret, r1); + tcg_gen_movi_i32(cpu_PSW_C, 0); + tcg_gen_mov_i32(cpu_PSW_V, cpu_PSW_C); + tcg_gen_mov_i32(ret, r1); } else if (shift_count == -32) { /* set PSW.C */ - tcg_gen_mov_tl(cpu_PSW_C, r1); + tcg_gen_mov_i32(cpu_PSW_C, r1); /* fill ret completely with sign bit */ - tcg_gen_sari_tl(ret, r1, 31); + tcg_gen_sari_i32(ret, r1, 31); /* clear PSW.V */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); } else if (shift_count > 0) { - TCGv t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count); - TCGv t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count); + TCGv_i32 t_max = tcg_constant_i32(0x7FFFFFFF >> shift_count); + TCGv_i32 t_min = tcg_constant_i32(((int32_t) -0x80000000) >> shift_count); /* calc carry */ msk_start = 32 - shift_count; msk = ((1 << shift_count) - 1) << msk_start; - tcg_gen_andi_tl(cpu_PSW_C, r1, msk); + tcg_gen_andi_i32(cpu_PSW_C, r1, msk); /* calc v/sv bits */ - tcg_gen_setcond_tl(TCG_COND_GT, temp, r1, t_max); - tcg_gen_setcond_tl(TCG_COND_LT, temp2, r1, t_min); - tcg_gen_or_tl(cpu_PSW_V, temp, temp2); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_setcond_i32(TCG_COND_GT, temp, r1, t_max); + tcg_gen_setcond_i32(TCG_COND_LT, temp2, r1, t_min); + tcg_gen_or_i32(cpu_PSW_V, temp, temp2); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* calc sv */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_V, cpu_PSW_SV); /* do shift */ - tcg_gen_shli_tl(ret, r1, shift_count); + 
tcg_gen_shli_i32(ret, r1, shift_count); } else { /* clear PSW.V */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* calc carry */ msk = (1 << -shift_count) - 1; - tcg_gen_andi_tl(cpu_PSW_C, r1, msk); + tcg_gen_andi_i32(cpu_PSW_C, r1, msk); /* do shift */ - tcg_gen_sari_tl(ret, r1, -shift_count); + tcg_gen_sari_i32(ret, r1, -shift_count); } /* calc av overflow bit */ - tcg_gen_add_tl(cpu_PSW_AV, ret, ret); - tcg_gen_xor_tl(cpu_PSW_AV, ret, cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, ret, ret); + tcg_gen_xor_i32(cpu_PSW_AV, ret, cpu_PSW_AV); /* calc sav overflow bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); } -static void gen_shas(TCGv ret, TCGv r1, TCGv r2) +static void gen_shas(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { gen_helper_sha_ssov(ret, tcg_env, r1, r2); } -static void gen_shasi(TCGv ret, TCGv r1, int32_t con) +static void gen_shasi(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_shas(ret, r1, temp); } -static void gen_sha_hi(TCGv ret, TCGv r1, int32_t shift_count) +static void gen_sha_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t shift_count) { - TCGv low, high; + TCGv_i32 low, high; if (shift_count == 0) { - tcg_gen_mov_tl(ret, r1); + tcg_gen_mov_i32(ret, r1); } else if (shift_count > 0) { - low = tcg_temp_new(); - high = tcg_temp_new(); + low = tcg_temp_new_i32(); + high = tcg_temp_new_i32(); - tcg_gen_andi_tl(high, r1, 0xffff0000); - tcg_gen_shli_tl(low, r1, shift_count); - tcg_gen_shli_tl(ret, high, shift_count); - tcg_gen_deposit_tl(ret, ret, low, 0, 16); + tcg_gen_andi_i32(high, r1, 0xffff0000); + tcg_gen_shli_i32(low, r1, shift_count); + tcg_gen_shli_i32(ret, high, shift_count); + tcg_gen_deposit_i32(ret, ret, low, 0, 16); } else { - low = tcg_temp_new(); - high = tcg_temp_new(); + low = tcg_temp_new_i32(); + high = tcg_temp_new_i32(); - tcg_gen_ext16s_tl(low, r1); - tcg_gen_sari_tl(low, low, 
-shift_count); - tcg_gen_sari_tl(ret, r1, -shift_count); - tcg_gen_deposit_tl(ret, ret, low, 0, 16); + tcg_gen_ext16s_i32(low, r1); + tcg_gen_sari_i32(low, low, -shift_count); + tcg_gen_sari_i32(ret, r1, -shift_count); + tcg_gen_deposit_i32(ret, ret, low, 0, 16); } } /* ret = {ret[30:0], (r1 cond r2)}; */ -static void gen_sh_cond(int cond, TCGv ret, TCGv r1, TCGv r2) +static void gen_sh_cond(int cond, TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); - tcg_gen_shli_tl(temp, ret, 1); - tcg_gen_setcond_tl(cond, temp2, r1, r2); - tcg_gen_or_tl(ret, temp, temp2); + tcg_gen_shli_i32(temp, ret, 1); + tcg_gen_setcond_i32(cond, temp2, r1, r2); + tcg_gen_or_i32(ret, temp, temp2); } -static void gen_sh_condi(int cond, TCGv ret, TCGv r1, int32_t con) +static void gen_sh_condi(int cond, TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_sh_cond(cond, ret, r1, temp); } -static inline void gen_adds(TCGv ret, TCGv r1, TCGv r2) +static void gen_adds(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { gen_helper_add_ssov(ret, tcg_env, r1, r2); } -static inline void gen_addsi(TCGv ret, TCGv r1, int32_t con) +static void gen_addsi(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_add_ssov(ret, tcg_env, r1, temp); } -static inline void gen_addsui(TCGv ret, TCGv r1, int32_t con) +static void gen_addsui(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_helper_add_suov(ret, tcg_env, r1, temp); } -static inline void gen_subs(TCGv ret, TCGv r1, TCGv r2) +static void gen_subs(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2) { gen_helper_sub_ssov(ret, tcg_env, r1, r2); } -static inline void gen_subsu(TCGv ret, TCGv r1, TCGv r2) +static void gen_subsu(TCGv_i32 
ret, TCGv_i32 r1, TCGv_i32 r2) { gen_helper_sub_suov(ret, tcg_env, r1, r2); } -static inline void gen_bit_2op(TCGv ret, TCGv r1, TCGv r2, - int pos1, int pos2, - void(*op1)(TCGv, TCGv, TCGv), - void(*op2)(TCGv, TCGv, TCGv)) +static void gen_bit_2op(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, + int pos1, int pos2, + void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32), + void(*op2)(TCGv_i32, TCGv_i32, TCGv_i32)) { - TCGv temp1, temp2; + TCGv_i32 temp1, temp2; - temp1 = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp1 = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); - tcg_gen_shri_tl(temp2, r2, pos2); - tcg_gen_shri_tl(temp1, r1, pos1); + tcg_gen_shri_i32(temp2, r2, pos2); + tcg_gen_shri_i32(temp1, r1, pos1); (*op1)(temp1, temp1, temp2); (*op2)(temp1 , ret, temp1); - tcg_gen_deposit_tl(ret, ret, temp1, 0, 1); + tcg_gen_deposit_i32(ret, ret, temp1, 0, 1); } /* ret = r1[pos1] op1 r2[pos2]; */ -static inline void gen_bit_1op(TCGv ret, TCGv r1, TCGv r2, - int pos1, int pos2, - void(*op1)(TCGv, TCGv, TCGv)) +static void gen_bit_1op(TCGv_i32 ret, TCGv_i32 r1, TCGv_i32 r2, + int pos1, int pos2, + void(*op1)(TCGv_i32, TCGv_i32, TCGv_i32)) { - TCGv temp1, temp2; + TCGv_i32 temp1, temp2; - temp1 = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp1 = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); - tcg_gen_shri_tl(temp2, r2, pos2); - tcg_gen_shri_tl(temp1, r1, pos1); + tcg_gen_shri_i32(temp2, r2, pos2); + tcg_gen_shri_i32(temp1, r1, pos1); (*op1)(ret, temp1, temp2); - tcg_gen_andi_tl(ret, ret, 0x1); + tcg_gen_andi_i32(ret, ret, 0x1); } -static inline void gen_accumulating_cond(int cond, TCGv ret, TCGv r1, TCGv r2, - void(*op)(TCGv, TCGv, TCGv)) +static void gen_accumulating_cond(int cond, TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, + void(*op)(TCGv_i32, TCGv_i32, TCGv_i32)) { - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); /* temp = (arg1 cond arg2 )*/ - tcg_gen_setcond_tl(cond, temp, r1, r2); + 
tcg_gen_setcond_i32(cond, temp, r1, r2); /* temp2 = ret[0]*/ - tcg_gen_andi_tl(temp2, ret, 0x1); + tcg_gen_andi_i32(temp2, ret, 0x1); /* temp = temp insn temp2 */ (*op)(temp, temp, temp2); /* ret = {ret[31:1], temp} */ - tcg_gen_deposit_tl(ret, ret, temp, 0, 1); + tcg_gen_deposit_i32(ret, ret, temp, 0, 1); } -static inline void -gen_accumulating_condi(int cond, TCGv ret, TCGv r1, int32_t con, - void(*op)(TCGv, TCGv, TCGv)) +static void gen_accumulating_condi(int cond, TCGv_i32 ret, TCGv_i32 r1, + int32_t con, + void(*op)(TCGv_i32, TCGv_i32, TCGv_i32)) { - TCGv temp = tcg_constant_i32(con); + TCGv_i32 temp = tcg_constant_i32(con); gen_accumulating_cond(cond, ret, r1, temp, op); } -static inline void gen_eqany_bi(TCGv ret, TCGv r1, int32_t con) +static void gen_eqany_bi(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv b0 = tcg_temp_new(); - TCGv b1 = tcg_temp_new(); - TCGv b2 = tcg_temp_new(); - TCGv b3 = tcg_temp_new(); + TCGv_i32 b0 = tcg_temp_new_i32(); + TCGv_i32 b1 = tcg_temp_new_i32(); + TCGv_i32 b2 = tcg_temp_new_i32(); + TCGv_i32 b3 = tcg_temp_new_i32(); /* byte 0 */ - tcg_gen_andi_tl(b0, r1, 0xff); - tcg_gen_setcondi_tl(TCG_COND_EQ, b0, b0, con & 0xff); + tcg_gen_andi_i32(b0, r1, 0xff); + tcg_gen_setcondi_i32(TCG_COND_EQ, b0, b0, con & 0xff); /* byte 1 */ - tcg_gen_andi_tl(b1, r1, 0xff00); - tcg_gen_setcondi_tl(TCG_COND_EQ, b1, b1, con & 0xff00); + tcg_gen_andi_i32(b1, r1, 0xff00); + tcg_gen_setcondi_i32(TCG_COND_EQ, b1, b1, con & 0xff00); /* byte 2 */ - tcg_gen_andi_tl(b2, r1, 0xff0000); - tcg_gen_setcondi_tl(TCG_COND_EQ, b2, b2, con & 0xff0000); + tcg_gen_andi_i32(b2, r1, 0xff0000); + tcg_gen_setcondi_i32(TCG_COND_EQ, b2, b2, con & 0xff0000); /* byte 3 */ - tcg_gen_andi_tl(b3, r1, 0xff000000); - tcg_gen_setcondi_tl(TCG_COND_EQ, b3, b3, con & 0xff000000); + tcg_gen_andi_i32(b3, r1, 0xff000000); + tcg_gen_setcondi_i32(TCG_COND_EQ, b3, b3, con & 0xff000000); /* combine them */ - tcg_gen_or_tl(ret, b0, b1); - tcg_gen_or_tl(ret, ret, b2); - tcg_gen_or_tl(ret, 
ret, b3); + tcg_gen_or_i32(ret, b0, b1); + tcg_gen_or_i32(ret, ret, b2); + tcg_gen_or_i32(ret, ret, b3); } -static inline void gen_eqany_hi(TCGv ret, TCGv r1, int32_t con) +static void gen_eqany_hi(TCGv_i32 ret, TCGv_i32 r1, int32_t con) { - TCGv h0 = tcg_temp_new(); - TCGv h1 = tcg_temp_new(); + TCGv_i32 h0 = tcg_temp_new_i32(); + TCGv_i32 h1 = tcg_temp_new_i32(); /* halfword 0 */ - tcg_gen_andi_tl(h0, r1, 0xffff); - tcg_gen_setcondi_tl(TCG_COND_EQ, h0, h0, con & 0xffff); + tcg_gen_andi_i32(h0, r1, 0xffff); + tcg_gen_setcondi_i32(TCG_COND_EQ, h0, h0, con & 0xffff); /* halfword 1 */ - tcg_gen_andi_tl(h1, r1, 0xffff0000); - tcg_gen_setcondi_tl(TCG_COND_EQ, h1, h1, con & 0xffff0000); + tcg_gen_andi_i32(h1, r1, 0xffff0000); + tcg_gen_setcondi_i32(TCG_COND_EQ, h1, h1, con & 0xffff0000); /* combine them */ - tcg_gen_or_tl(ret, h0, h1); + tcg_gen_or_i32(ret, h0, h1); } /* mask = ((1 << width) -1) << pos; ret = (r1 & ~mask) | (r2 << pos) & mask); */ -static inline void gen_insert(TCGv ret, TCGv r1, TCGv r2, TCGv width, TCGv pos) +static void gen_insert(TCGv_i32 ret, + TCGv_i32 r1, TCGv_i32 r2, TCGv_i32 width, TCGv_i32 pos) { - TCGv mask = tcg_temp_new(); - TCGv temp = tcg_temp_new(); - TCGv temp2 = tcg_temp_new(); + TCGv_i32 mask = tcg_temp_new_i32(); + TCGv_i32 temp = tcg_temp_new_i32(); + TCGv_i32 temp2 = tcg_temp_new_i32(); - tcg_gen_shl_tl(mask, tcg_constant_tl(1), width); - tcg_gen_subi_tl(mask, mask, 1); - tcg_gen_shl_tl(mask, mask, pos); + tcg_gen_shl_i32(mask, tcg_constant_i32(1), width); + tcg_gen_subi_i32(mask, mask, 1); + tcg_gen_shl_i32(mask, mask, pos); - tcg_gen_shl_tl(temp, r2, pos); - tcg_gen_and_tl(temp, temp, mask); - tcg_gen_andc_tl(temp2, r1, mask); - tcg_gen_or_tl(ret, temp, temp2); + tcg_gen_shl_i32(temp, r2, pos); + tcg_gen_and_i32(temp, temp, mask); + tcg_gen_andc_i32(temp2, r1, mask); + tcg_gen_or_i32(ret, temp, temp2); } -static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) +static void gen_bsplit(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 r1) { 
TCGv_i64 temp = tcg_temp_new_i64(); @@ -2747,7 +2778,7 @@ static inline void gen_bsplit(TCGv rl, TCGv rh, TCGv r1) tcg_gen_extr_i64_i32(rl, rh, temp); } -static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) +static void gen_unpack(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 r1) { TCGv_i64 temp = tcg_temp_new_i64(); @@ -2755,8 +2786,9 @@ static inline void gen_unpack(TCGv rl, TCGv rh, TCGv r1) tcg_gen_extr_i64_i32(rl, rh, temp); } -static inline void -gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) +static void gen_dvinit_b(DisasContext *ctx, + TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 r1, TCGv_i32 r2) { TCGv_i64 ret = tcg_temp_new_i64(); @@ -2768,8 +2800,9 @@ gen_dvinit_b(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) tcg_gen_extr_i64_i32(rl, rh, ret); } -static inline void -gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) +static void gen_dvinit_h(DisasContext *ctx, + TCGv_i32 rl, TCGv_i32 rh, + TCGv_i32 r1, TCGv_i32 r2) { TCGv_i64 ret = tcg_temp_new_i64(); @@ -2781,47 +2814,47 @@ gen_dvinit_h(DisasContext *ctx, TCGv rl, TCGv rh, TCGv r1, TCGv r2) tcg_gen_extr_i64_i32(rl, rh, ret); } -static void gen_calc_usb_mul_h(TCGv arg_low, TCGv arg_high) +static void gen_calc_usb_mul_h(TCGv_i32 arg_low, TCGv_i32 arg_high) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); /* calc AV bit */ - tcg_gen_add_tl(temp, arg_low, arg_low); - tcg_gen_xor_tl(temp, temp, arg_low); - tcg_gen_add_tl(cpu_PSW_AV, arg_high, arg_high); - tcg_gen_xor_tl(cpu_PSW_AV, cpu_PSW_AV, arg_high); - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); + tcg_gen_add_i32(temp, arg_low, arg_low); + tcg_gen_xor_i32(temp, temp, arg_low); + tcg_gen_add_i32(cpu_PSW_AV, arg_high, arg_high); + tcg_gen_xor_i32(cpu_PSW_AV, cpu_PSW_AV, arg_high); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + 
tcg_gen_movi_i32(cpu_PSW_V, 0); } -static void gen_calc_usb_mulr_h(TCGv arg) +static void gen_calc_usb_mulr_h(TCGv_i32 arg) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); /* calc AV bit */ - tcg_gen_add_tl(temp, arg, arg); - tcg_gen_xor_tl(temp, temp, arg); - tcg_gen_shli_tl(cpu_PSW_AV, temp, 16); - tcg_gen_or_tl(cpu_PSW_AV, cpu_PSW_AV, temp); + tcg_gen_add_i32(temp, arg, arg); + tcg_gen_xor_i32(temp, temp, arg); + tcg_gen_shli_i32(cpu_PSW_AV, temp, 16); + tcg_gen_or_i32(cpu_PSW_AV, cpu_PSW_AV, temp); /* calc SAV bit */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); /* clear V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); } /* helpers for generating program flow micro-ops */ -static inline void gen_save_pc(target_ulong pc) +static void gen_save_pc(vaddr pc) { - tcg_gen_movi_tl(cpu_PC, pc); + tcg_gen_movi_i32(cpu_PC, pc); } -static void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +static void gen_goto_tb(DisasContext *ctx, unsigned tb_slot_index, vaddr dest) { if (translator_use_goto_tb(&ctx->base, dest)) { - tcg_gen_goto_tb(n); + tcg_gen_goto_tb(tb_slot_index); gen_save_pc(dest); - tcg_gen_exit_tb(ctx->base.tb, n); + tcg_gen_exit_tb(ctx->base.tb, tb_slot_index); } else { gen_save_pc(dest); tcg_gen_lookup_and_goto_ptr(); @@ -2839,11 +2872,11 @@ static void generate_trap(DisasContext *ctx, int class, int tin) ctx->base.is_jmp = DISAS_NORETURN; } -static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, - TCGv r2, int16_t address) +static void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv_i32 r1, + TCGv_i32 r2, int16_t address) { TCGLabel *jumpLabel = gen_new_label(); - tcg_gen_brcond_tl(cond, r1, r2, jumpLabel); + tcg_gen_brcond_i32(cond, r1, r2, jumpLabel); gen_goto_tb(ctx, 1, ctx->pc_succ_insn); @@ -2851,10 +2884,10 @@ static inline void gen_branch_cond(DisasContext *ctx, TCGCond cond, TCGv r1, gen_goto_tb(ctx, 
0, ctx->base.pc_next + address * 2); } -static inline void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv r1, - int r2, int16_t address) +static void gen_branch_condi(DisasContext *ctx, TCGCond cond, TCGv_i32 r1, + int r2, int16_t address) { - TCGv temp = tcg_constant_i32(r2); + TCGv_i32 temp = tcg_constant_i32(r2); gen_branch_cond(ctx, cond, r1, temp, address); } @@ -2862,8 +2895,8 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset) { TCGLabel *l1 = gen_new_label(); - tcg_gen_subi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], 1); - tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1); + tcg_gen_subi_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], 1); + tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_gpr_a[r1], -1, l1); gen_goto_tb(ctx, 1, ctx->base.pc_next + offset); gen_set_label(l1); gen_goto_tb(ctx, 0, ctx->pc_succ_insn); @@ -2871,29 +2904,29 @@ static void gen_loop(DisasContext *ctx, int r1, int32_t offset) static void gen_fcall_save_ctx(DisasContext *ctx) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); - tcg_gen_addi_tl(temp, cpu_gpr_a[10], -4); - tcg_gen_qemu_st_tl(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); - tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn); - tcg_gen_mov_tl(cpu_gpr_a[10], temp); + tcg_gen_addi_i32(temp, cpu_gpr_a[10], -4); + tcg_gen_qemu_st_i32(cpu_gpr_a[11], temp, ctx->mem_idx, MO_LESL); + tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn); + tcg_gen_mov_i32(cpu_gpr_a[10], temp); } static void gen_fret(DisasContext *ctx) { - TCGv temp = tcg_temp_new(); + TCGv_i32 temp = tcg_temp_new_i32(); - tcg_gen_andi_tl(temp, cpu_gpr_a[11], ~0x1); - tcg_gen_qemu_ld_tl(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL); - tcg_gen_addi_tl(cpu_gpr_a[10], cpu_gpr_a[10], 4); - tcg_gen_mov_tl(cpu_PC, temp); + tcg_gen_andi_i32(temp, cpu_gpr_a[11], ~0x1); + tcg_gen_qemu_ld_i32(cpu_gpr_a[11], cpu_gpr_a[10], ctx->mem_idx, MO_LESL); + tcg_gen_addi_i32(cpu_gpr_a[10], cpu_gpr_a[10], 4); + tcg_gen_mov_i32(cpu_PC, temp); ctx->base.is_jmp = DISAS_EXIT; } 
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, int r2 , int32_t constant , int32_t offset) { - TCGv temp, temp2; + TCGv_i32 temp, temp2; int n; switch (opc) { @@ -2930,13 +2963,13 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, break; /* SBRN-format jumps */ case OPC1_16_SBRN_JZ_T: - temp = tcg_temp_new(); - tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); + temp = tcg_temp_new_i32(); + tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_EQ, temp, 0, offset); break; case OPC1_16_SBRN_JNZ_T: - temp = tcg_temp_new(); - tcg_gen_andi_tl(temp, cpu_gpr_d[15], 0x1u << constant); + temp = tcg_temp_new_i32(); + tcg_gen_andi_i32(temp, cpu_gpr_d[15], 0x1u << constant); gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); break; /* SBR-format jumps */ @@ -2985,7 +3018,7 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, break; /* SR-format jumps */ case OPC1_16_SR_JI: - tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], 0xfffffffe); + tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], 0xfffffffe); ctx->base.is_jmp = DISAS_EXIT; break; case OPC2_32_SYS_RET: @@ -3007,13 +3040,13 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_JLA: - tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn); + tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn); /* fall through */ case OPC1_32_B_JA: gen_goto_tb(ctx, 0, EA_B_ABSOLUT(offset)); break; case OPC1_32_B_JL: - tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn); + tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn); gen_goto_tb(ctx, 0, ctx->base.pc_next + offset * 2); break; /* BOL format */ @@ -3043,16 +3076,16 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, } break; case OPCM_32_BRC_JNE: - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRC_JNED) { - tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + 
tcg_gen_mov_i32(temp, cpu_gpr_d[r1]); /* subi is unconditional */ - tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + tcg_gen_subi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } else { - tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + tcg_gen_mov_i32(temp, cpu_gpr_d[r1]); /* addi is unconditional */ - tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + tcg_gen_addi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_condi(ctx, TCG_COND_NE, temp, constant, offset); } break; @@ -3060,8 +3093,8 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, case OPCM_32_BRN_JTT: n = MASK_OP_BRN_N(ctx->opcode); - temp = tcg_temp_new(); - tcg_gen_andi_tl(temp, cpu_gpr_d[r1], (1 << n)); + temp = tcg_temp_new_i32(); + tcg_gen_andi_i32(temp, cpu_gpr_d[r1], (1 << n)); if (MASK_OP_BRN_OP2(ctx->opcode) == OPC2_32_BRN_JNZ_T) { gen_branch_condi(ctx, TCG_COND_NE, temp, 0, offset); @@ -3115,21 +3148,21 @@ static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int r1, } break; case OPCM_32_BRR_JNE: - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); if (MASK_OP_BRC_OP2(ctx->opcode) == OPC2_32_BRR_JNED) { - tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + tcg_gen_mov_i32(temp, cpu_gpr_d[r1]); /* also save r2, in case of r1 == r2, so r2 is not decremented */ - tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_mov_i32(temp2, cpu_gpr_d[r2]); /* subi is unconditional */ - tcg_gen_subi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + tcg_gen_subi_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } else { - tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); + tcg_gen_mov_i32(temp, cpu_gpr_d[r1]); /* also save r2, in case of r1 == r2, so r2 is not decremented */ - tcg_gen_mov_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_mov_i32(temp2, cpu_gpr_d[r2]); /* addi is unconditional */ - tcg_gen_addi_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 1); + tcg_gen_addi_i32(cpu_gpr_d[r1], 
cpu_gpr_d[r1], 1); gen_branch_cond(ctx, TCG_COND_NE, temp, temp2, offset); } break; @@ -3154,7 +3187,7 @@ static void decode_src_opc(DisasContext *ctx, int op1) { int r1; int32_t const4; - TCGv temp, temp2; + TCGv_i32 temp, temp2; r1 = MASK_OP_SRC_S1D(ctx->opcode); const4 = MASK_OP_SRC_CONST4_SEXT(ctx->opcode); @@ -3170,7 +3203,7 @@ static void decode_src_opc(DisasContext *ctx, int op1) gen_addi_d(cpu_gpr_d[15], cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_ADD_A: - tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], const4); + tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], const4); break; case OPC1_16_SRC_CADD: gen_condi_add(TCG_COND_NE, cpu_gpr_d[r1], const4, cpu_gpr_d[r1], @@ -3181,37 +3214,37 @@ static void decode_src_opc(DisasContext *ctx, int op1) cpu_gpr_d[15]); break; case OPC1_16_SRC_CMOV: - temp = tcg_constant_tl(0); - temp2 = tcg_constant_tl(const4); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp = tcg_constant_i32(0); + temp2 = tcg_constant_i32(const4); + tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, temp2, cpu_gpr_d[r1]); break; case OPC1_16_SRC_CMOVN: - temp = tcg_constant_tl(0); - temp2 = tcg_constant_tl(const4); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp = tcg_constant_i32(0); + temp2 = tcg_constant_i32(const4); + tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, temp2, cpu_gpr_d[r1]); break; case OPC1_16_SRC_EQ: - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_LT: - tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + tcg_gen_setcondi_i32(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_MOV: - tcg_gen_movi_tl(cpu_gpr_d[r1], const4); + tcg_gen_movi_i32(cpu_gpr_d[r1], const4); break; case OPC1_16_SRC_MOV_A: const4 = MASK_OP_SRC_CONST4(ctx->opcode); - tcg_gen_movi_tl(cpu_gpr_a[r1], const4); + 
tcg_gen_movi_i32(cpu_gpr_a[r1], const4); break; case OPC1_16_SRC_MOV_E: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r1); - tcg_gen_movi_tl(cpu_gpr_d[r1], const4); - tcg_gen_sari_tl(cpu_gpr_d[r1+1], cpu_gpr_d[r1], 31); + tcg_gen_movi_i32(cpu_gpr_d[r1], const4); + tcg_gen_sari_i32(cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], 31); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -3230,7 +3263,7 @@ static void decode_src_opc(DisasContext *ctx, int op1) static void decode_srr_opc(DisasContext *ctx, int op1) { int r1, r2; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_SRR_S1D(ctx->opcode); r2 = MASK_OP_SRR_S2(ctx->opcode); @@ -3246,49 +3279,49 @@ static void decode_srr_opc(DisasContext *ctx, int op1) gen_add_d(cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_ADD_A: - tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]); + tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case OPC1_16_SRR_ADDS: gen_adds(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_AND: - tcg_gen_and_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_and_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_CMOV: - temp = tcg_constant_tl(0); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp = tcg_constant_i32(0); + tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r1], cpu_gpr_d[15], temp, cpu_gpr_d[r2], cpu_gpr_d[r1]); break; case OPC1_16_SRR_CMOVN: - temp = tcg_constant_tl(0); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, + temp = tcg_constant_i32(0); + tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r1], cpu_gpr_d[15], temp, cpu_gpr_d[r2], cpu_gpr_d[r1]); break; case OPC1_16_SRR_EQ: - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_LT: - tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[15], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_LT, 
cpu_gpr_d[15], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_MOV: - tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_mov_i32(cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_MOV_A: - tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_d[r2]); + tcg_gen_mov_i32(cpu_gpr_a[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_MOV_AA: - tcg_gen_mov_tl(cpu_gpr_a[r1], cpu_gpr_a[r2]); + tcg_gen_mov_i32(cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case OPC1_16_SRR_MOV_D: - tcg_gen_mov_tl(cpu_gpr_d[r1], cpu_gpr_a[r2]); + tcg_gen_mov_i32(cpu_gpr_d[r1], cpu_gpr_a[r2]); break; case OPC1_16_SRR_MUL: gen_mul_i32s(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_OR: - tcg_gen_or_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_or_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_SUB: gen_sub_d(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); @@ -3303,7 +3336,7 @@ static void decode_srr_opc(DisasContext *ctx, int op1) gen_subs(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC1_16_SRR_XOR: - tcg_gen_xor_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_xor_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3319,32 +3352,32 @@ static void decode_ssr_opc(DisasContext *ctx, int op1) switch (op1) { case OPC1_16_SSR_ST_A: - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); break; case OPC1_16_SSR_ST_A_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); break; case OPC1_16_SSR_ST_B: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); break; case 
OPC1_16_SSR_ST_B_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); break; case OPC1_16_SSR_ST_H: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); break; case OPC1_16_SSR_ST_H_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); break; case OPC1_16_SSR_ST_W: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); break; case OPC1_16_SSR_ST_W_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3359,7 +3392,7 @@ static void decode_sc_opc(DisasContext *ctx, int op1) switch (op1) { case OPC1_16_SC_AND: - tcg_gen_andi_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16); + tcg_gen_andi_i32(cpu_gpr_d[15], cpu_gpr_d[15], const16); break; case OPC1_16_SC_BISR: if (ctx->priv == TRICORE_PRIV_SM) { @@ -3375,10 +3408,10 @@ static void decode_sc_opc(DisasContext *ctx, int op1) gen_offset_ld(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL); break; case OPC1_16_SC_MOV: - tcg_gen_movi_tl(cpu_gpr_d[15], const16); + tcg_gen_movi_i32(cpu_gpr_d[15], const16); break; case OPC1_16_SC_OR: - tcg_gen_ori_tl(cpu_gpr_d[15], cpu_gpr_d[15], const16); + tcg_gen_ori_i32(cpu_gpr_d[15], cpu_gpr_d[15], const16); break; 
case OPC1_16_SC_ST_A: gen_offset_st(ctx, cpu_gpr_a[15], cpu_gpr_a[10], const16 * 4, MO_LESL); @@ -3387,7 +3420,7 @@ static void decode_sc_opc(DisasContext *ctx, int op1) gen_offset_st(ctx, cpu_gpr_d[15], cpu_gpr_a[10], const16 * 4, MO_LESL); break; case OPC1_16_SC_SUB_A: - tcg_gen_subi_tl(cpu_gpr_a[10], cpu_gpr_a[10], const16); + tcg_gen_subi_i32(cpu_gpr_a[10], cpu_gpr_a[10], const16); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3404,32 +3437,32 @@ static void decode_slr_opc(DisasContext *ctx, int op1) switch (op1) { /* SLR-format */ case OPC1_16_SLR_LD_A: - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); break; case OPC1_16_SLR_LD_A_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); break; case OPC1_16_SLR_LD_BU: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); break; case OPC1_16_SLR_LD_BU_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 1); break; case OPC1_16_SLR_LD_H: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); break; case OPC1_16_SLR_LD_H_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 2); break; case 
OPC1_16_SLR_LD_W: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); break; case OPC1_16_SLR_LD_W_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], 4); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3512,17 +3545,18 @@ static void decode_sr_accu(DisasContext *ctx) switch (op2) { case OPC2_16_SR_RSUB: /* calc V bit -- overflow only if r1 = -0x80000000 */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r1], -0x80000000); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, + cpu_gpr_d[r1], -0x80000000); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* calc SV bit */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* sub */ - tcg_gen_neg_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]); + tcg_gen_neg_i32(cpu_gpr_d[r1], cpu_gpr_d[r1]); /* calc av */ - tcg_gen_add_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]); - tcg_gen_xor_tl(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV); + tcg_gen_add_i32(cpu_PSW_AV, cpu_gpr_d[r1], cpu_gpr_d[r1]); + tcg_gen_xor_i32(cpu_PSW_AV, cpu_gpr_d[r1], cpu_PSW_AV); /* calc sav */ - tcg_gen_or_tl(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); + tcg_gen_or_i32(cpu_PSW_SAV, cpu_PSW_SAV, cpu_PSW_AV); break; case OPC2_16_SR_SAT_B: gen_saturate(cpu_gpr_d[r1], cpu_gpr_d[r1], 0x7f, -0x80); @@ -3547,7 +3581,7 @@ static void decode_16Bit_opc(DisasContext *ctx) int r1, r2; int32_t const16; int32_t address; - TCGv temp; + TCGv_i32 temp; op1 = MASK_OP_MAJOR(ctx->opcode); @@ -3614,9 +3648,9 @@ static void decode_16Bit_opc(DisasContext *ctx) r2 = MASK_OP_SRRS_S2(ctx->opcode); r1 = MASK_OP_SRRS_S1D(ctx->opcode); const16 = MASK_OP_SRRS_N(ctx->opcode); - temp = 
tcg_temp_new(); - tcg_gen_shli_tl(temp, cpu_gpr_d[15], const16); - tcg_gen_add_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], temp); + temp = tcg_temp_new_i32(); + tcg_gen_shli_i32(temp, cpu_gpr_d[15], const16); + tcg_gen_add_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], temp); break; /* SLRO-format */ case OPC1_16_SLRO_LD_A: @@ -3765,7 +3799,7 @@ static void decode_16Bit_opc(DisasContext *ctx) break; case OPC1_16_SR_NOT: r1 = MASK_OP_SR_S1D(ctx->opcode); - tcg_gen_not_tl(cpu_gpr_d[r1], cpu_gpr_d[r1]); + tcg_gen_not_i32(cpu_gpr_d[r1], cpu_gpr_d[r1]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3782,7 +3816,7 @@ static void decode_abs_ldw(DisasContext *ctx) int32_t op2; int32_t r1; uint32_t address; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -3792,18 +3826,18 @@ static void decode_abs_ldw(DisasContext *ctx) switch (op2) { case OPC2_32_ABS_LD_A: - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); break; case OPC2_32_ABS_LD_D: CHECK_REG_PAIR(r1); - gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); + gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp); break; case OPC2_32_ABS_LD_DA: CHECK_REG_PAIR(r1); - gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); + gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp); break; case OPC2_32_ABS_LD_W: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3815,7 +3849,7 @@ static void decode_abs_ldb(DisasContext *ctx) int32_t op2; int32_t r1; uint32_t address; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -3825,16 +3859,16 @@ static void decode_abs_ldb(DisasContext *ctx) switch (op2) { case OPC2_32_ABS_LD_B: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, 
ctx->mem_idx, MO_SB); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_SB); break; case OPC2_32_ABS_LD_BU: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); break; case OPC2_32_ABS_LD_H: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESW); break; case OPC2_32_ABS_LD_HU: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3846,7 +3880,7 @@ static void decode_abs_ldst_swap(DisasContext *ctx) int32_t op2; int32_t r1; uint32_t address; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -3897,7 +3931,7 @@ static void decode_abs_store(DisasContext *ctx) int32_t op2; int32_t r1; uint32_t address; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -3907,18 +3941,18 @@ static void decode_abs_store(DisasContext *ctx) switch (op2) { case OPC2_32_ABS_ST_A: - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LESL); break; case OPC2_32_ABS_ST_D: CHECK_REG_PAIR(r1); - gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); + gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp); break; case OPC2_32_ABS_ST_DA: CHECK_REG_PAIR(r1); - gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); + gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp); break; case OPC2_32_ABS_ST_W: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LESL); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -3930,7 +3964,7 @@ static void decode_abs_storeb_h(DisasContext *ctx) int32_t op2; int32_t 
r1; uint32_t address; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_ABS_S1D(ctx->opcode); address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -3940,10 +3974,10 @@ static void decode_abs_storeb_h(DisasContext *ctx) switch (op2) { case OPC2_32_ABS_ST_B: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_UB); break; case OPC2_32_ABS_ST_H: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -4032,7 +4066,7 @@ static void decode_bit_insert(DisasContext *ctx) uint32_t op2; int r1, r2, r3; int pos1, pos2; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); r2 = MASK_OP_BIT_S2(ctx->opcode); @@ -4040,13 +4074,13 @@ static void decode_bit_insert(DisasContext *ctx) pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); - tcg_gen_shri_tl(temp, cpu_gpr_d[r2], pos2); + tcg_gen_shri_i32(temp, cpu_gpr_d[r2], pos2); if (op2 == OPC2_32_BIT_INSN_T) { - tcg_gen_not_tl(temp, temp); + tcg_gen_not_i32(temp, temp); } - tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1); + tcg_gen_deposit_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, pos1, 1); } static void decode_bit_logical_t2(DisasContext *ctx) @@ -4131,7 +4165,7 @@ static void decode_bit_sh_logic1(DisasContext *ctx) uint32_t op2; int r1, r2, r3; int pos1, pos2; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); @@ -4140,7 +4174,7 @@ static void decode_bit_sh_logic1(DisasContext *ctx) pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); switch (op2) { case OPC2_32_BIT_SH_AND_T: @@ -4162,8 +4196,8 @@ static void decode_bit_sh_logic1(DisasContext *ctx) default: generate_trap(ctx, 
TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); - tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); + tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); + tcg_gen_add_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); } static void decode_bit_sh_logic2(DisasContext *ctx) @@ -4171,7 +4205,7 @@ static void decode_bit_sh_logic2(DisasContext *ctx) uint32_t op2; int r1, r2, r3; int pos1, pos2; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_BIT_OP2(ctx->opcode); r1 = MASK_OP_BIT_S1(ctx->opcode); @@ -4180,7 +4214,7 @@ static void decode_bit_sh_logic2(DisasContext *ctx) pos1 = MASK_OP_BIT_POS1(ctx->opcode); pos2 = MASK_OP_BIT_POS2(ctx->opcode); - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); switch (op2) { case OPC2_32_BIT_SH_NAND_T: @@ -4202,8 +4236,8 @@ static void decode_bit_sh_logic2(DisasContext *ctx) default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } - tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); - tcg_gen_add_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); + tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], 1); + tcg_gen_add_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], temp); } /* BO-format */ @@ -4214,7 +4248,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) uint32_t op2; uint32_t off10; int32_t r1, r2; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -4232,14 +4266,14 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) case OPC2_32_BO_CACHEA_I_POSTINC: /* instruction to access the cache, but we still need to handle the addressing mode */ - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_CACHEA_WI_PREINC: case OPC2_32_BO_CACHEA_W_PREINC: case OPC2_32_BO_CACHEA_I_PREINC: /* instruction to access the cache, but we still need to handle the addressing mode */ - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case 
OPC2_32_BO_CACHEI_WI_SHORTOFF: case OPC2_32_BO_CACHEI_W_SHORTOFF: @@ -4250,7 +4284,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) case OPC2_32_BO_CACHEI_W_POSTINC: case OPC2_32_BO_CACHEI_WI_POSTINC: if (has_feature(ctx, TRICORE_FEATURE_131)) { - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -4258,7 +4292,7 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) case OPC2_32_BO_CACHEI_W_PREINC: case OPC2_32_BO_CACHEI_WI_PREINC: if (has_feature(ctx, TRICORE_FEATURE_131)) { - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -4267,9 +4301,9 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) gen_offset_st(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL); break; case OPC2_32_BO_ST_A_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_A_PREINC: gen_st_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LESL); @@ -4278,82 +4312,84 @@ static void decode_bo_addrmode_post_pre_base(DisasContext *ctx) gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_ST_B_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_B_PREINC: gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_ST_D_SHORTOFF: CHECK_REG_PAIR(r1); - gen_offset_st_2regs(cpu_gpr_d[r1+1], 
cpu_gpr_d[r1], cpu_gpr_a[r2], - off10, ctx); + gen_offset_st_2regs(ctx, + cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2], + off10); break; case OPC2_32_BO_ST_D_POSTINC: CHECK_REG_PAIR(r1); - gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2]); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_D_PREINC: CHECK_REG_PAIR(r1); - temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); - gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); - tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); + gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp); + tcg_gen_mov_i32(cpu_gpr_a[r2], temp); break; case OPC2_32_BO_ST_DA_SHORTOFF: CHECK_REG_PAIR(r1); - gen_offset_st_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], - off10, ctx); + gen_offset_st_2regs(ctx, + cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2], + off10); break; case OPC2_32_BO_ST_DA_POSTINC: CHECK_REG_PAIR(r1); - gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2]); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_DA_PREINC: CHECK_REG_PAIR(r1); - temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); - gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); - tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); + gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp); + tcg_gen_mov_i32(cpu_gpr_a[r2], temp); break; case OPC2_32_BO_ST_H_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_H_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], 
cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_H_PREINC: gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_Q_SHORTOFF: - temp = tcg_temp_new(); - tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + temp = tcg_temp_new_i32(); + tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16); gen_offset_st(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_Q_POSTINC: - temp = tcg_temp_new(); - tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_qemu_st_tl(temp, cpu_gpr_a[r2], ctx->mem_idx, + temp = tcg_temp_new_i32(); + tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_i32(temp, cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_Q_PREINC: - temp = tcg_temp_new(); - tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); + temp = tcg_temp_new_i32(); + tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16); gen_st_preincr(ctx, temp, cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_ST_W_SHORTOFF: gen_offset_st(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); break; case OPC2_32_BO_ST_W_POSTINC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_ST_W_PREINC: gen_st_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); @@ -4368,102 +4404,102 @@ static void decode_bo_addrmode_bitreverse_circular(DisasContext *ctx) uint32_t op2; uint32_t off10; int32_t r1, r2; - TCGv temp, temp2, t_off10; + TCGv_i32 temp, temp2, t_off10; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = 
MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); t_off10 = tcg_constant_i32(off10); CHECK_REG_PAIR(r2); - tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); switch (op2) { case OPC2_32_BO_CACHEA_WI_BR: case OPC2_32_BO_CACHEA_W_BR: case OPC2_32_BO_CACHEA_I_BR: - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_CACHEA_WI_CIRC: case OPC2_32_BO_CACHEA_W_CIRC: case OPC2_32_BO_CACHEA_I_CIRC: - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_A_BR: - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_A_CIRC: - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_B_BR: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_B_CIRC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, 
MO_UB); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_D_BR: CHECK_REG_PAIR(r1); - gen_st_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_st_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_D_CIRC: CHECK_REG_PAIR(r1); - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); - tcg_gen_addi_tl(temp, temp, 4); - tcg_gen_rem_tl(temp, temp, temp2); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); - tcg_gen_qemu_st_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16); + tcg_gen_addi_i32(temp, temp, 4); + tcg_gen_rem_i32(temp, temp, temp2); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1 + 1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_DA_BR: CHECK_REG_PAIR(r1); - gen_st_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_st_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_DA_CIRC: CHECK_REG_PAIR(r1); - tcg_gen_qemu_st_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); - tcg_gen_addi_tl(temp, temp, 4); - tcg_gen_rem_tl(temp, temp, temp2); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); - tcg_gen_qemu_st_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + 
tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16); + tcg_gen_addi_i32(temp, temp, 4); + tcg_gen_rem_i32(temp, temp, temp2); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_st_i32(cpu_gpr_a[r1 + 1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_H_BR: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_H_CIRC: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_Q_BR: - tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_i32(temp, temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_Q_CIRC: - tcg_gen_shri_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_qemu_st_tl(temp, temp2, ctx->mem_idx, MO_LEUW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_shri_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_i32(temp, temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_ST_W_BR: - tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_ST_W_CIRC: - 
tcg_gen_qemu_st_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_st_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -4475,7 +4511,7 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) uint32_t op2; uint32_t off10; int32_t r1, r2; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -4487,9 +4523,9 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) gen_offset_ld(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL); break; case OPC2_32_BO_LD_A_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_A_PREINC: gen_ld_preincr(ctx, cpu_gpr_a[r1], cpu_gpr_a[r2], off10, MO_LEUL); @@ -4498,9 +4534,9 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB); break; case OPC2_32_BO_LD_B_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_SB); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_B_PREINC: gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_SB); @@ -4509,54 +4545,56 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_LD_BU_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_UB); - 
tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_BU_PREINC: gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_UB); break; case OPC2_32_BO_LD_D_SHORTOFF: CHECK_REG_PAIR(r1); - gen_offset_ld_2regs(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], - off10, ctx); + gen_offset_ld_2regs(ctx, + cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2], + off10); break; case OPC2_32_BO_LD_D_POSTINC: CHECK_REG_PAIR(r1); - gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], cpu_gpr_a[r2], ctx); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], cpu_gpr_a[r2]); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_D_PREINC: CHECK_REG_PAIR(r1); - temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); - gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp, ctx); - tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); + gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp); + tcg_gen_mov_i32(cpu_gpr_a[r2], temp); break; case OPC2_32_BO_LD_DA_SHORTOFF: CHECK_REG_PAIR(r1); - gen_offset_ld_2regs(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], - off10, ctx); + gen_offset_ld_2regs(ctx, + cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2], + off10); break; case OPC2_32_BO_LD_DA_POSTINC: CHECK_REG_PAIR(r1); - gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], cpu_gpr_a[r2], ctx); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], cpu_gpr_a[r2]); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_DA_PREINC: CHECK_REG_PAIR(r1); - temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); - gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp, ctx); - tcg_gen_mov_tl(cpu_gpr_a[r2], temp); + temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, 
cpu_gpr_a[r2], off10); + gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp); + tcg_gen_mov_i32(cpu_gpr_a[r2], temp); break; case OPC2_32_BO_LD_H_SHORTOFF: gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW); break; case OPC2_32_BO_LD_H_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LESW); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_H_PREINC: gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LESW); @@ -4565,34 +4603,34 @@ static void decode_bo_addrmode_ld_post_pre_base(DisasContext *ctx) gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_LD_HU_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_HU_PREINC: gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); break; case OPC2_32_BO_LD_Q_SHORTOFF: gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); - tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); break; case OPC2_32_BO_LD_Q_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUW); - tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_Q_PREINC: gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUW); - tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); 
break; case OPC2_32_BO_LD_W_SHORTOFF: gen_offset_ld(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); break; case OPC2_32_BO_LD_W_POSTINC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], cpu_gpr_a[r2], ctx->mem_idx, MO_LEUL); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LD_W_PREINC: gen_ld_preincr(ctx, cpu_gpr_d[r1], cpu_gpr_a[r2], off10, MO_LEUL); @@ -4607,109 +4645,109 @@ static void decode_bo_addrmode_ld_bitreverse_circular(DisasContext *ctx) uint32_t op2; uint32_t off10; int r1, r2; - TCGv temp, temp2, t_off10; + TCGv_i32 temp, temp2, t_off10; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); t_off10 = tcg_constant_i32(off10); CHECK_REG_PAIR(r2); - tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); switch (op2) { case OPC2_32_BO_LD_A_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_A_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_B_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], 
temp2, ctx->mem_idx, MO_SB); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_B_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_SB); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_BU_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_BU_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_UB); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_D_BR: CHECK_REG_PAIR(r1); - gen_ld_2regs_64(cpu_gpr_d[r1+1], cpu_gpr_d[r1], temp2, ctx); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_ld_2regs_64(ctx, cpu_gpr_d[r1 + 1], cpu_gpr_d[r1], temp2); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_D_CIRC: CHECK_REG_PAIR(r1); - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); - tcg_gen_addi_tl(temp, temp, 4); - tcg_gen_rem_tl(temp, temp, temp2); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16); + tcg_gen_addi_i32(temp, temp, 4); + tcg_gen_rem_i32(temp, temp, temp2); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1 + 1], temp2, 
ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_DA_BR: CHECK_REG_PAIR(r1); - gen_ld_2regs_64(cpu_gpr_a[r1+1], cpu_gpr_a[r1], temp2, ctx); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_ld_2regs_64(ctx, cpu_gpr_a[r1 + 1], cpu_gpr_a[r1], temp2); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_DA_CIRC: CHECK_REG_PAIR(r1); - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); - tcg_gen_shri_tl(temp2, cpu_gpr_a[r2+1], 16); - tcg_gen_addi_tl(temp, temp, 4); - tcg_gen_rem_tl(temp, temp, temp2); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1+1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp2, ctx->mem_idx, MO_LEUL); + tcg_gen_shri_i32(temp2, cpu_gpr_a[r2 + 1], 16); + tcg_gen_addi_i32(temp, temp, 4); + tcg_gen_rem_i32(temp, temp, temp2); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1 + 1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_H_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_H_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LESW); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_HU_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], 
temp2, ctx->mem_idx, MO_LEUW); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_HU_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_Q_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_Q_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); - tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUW); + tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_LD_W_BR: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LD_W_CIRC: - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp2, ctx->mem_idx, MO_LEUL); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -4722,7 +4760,7 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) uint32_t off10; int r1, r2; - 
TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); @@ -4730,74 +4768,74 @@ static void decode_bo_addrmode_stctx_post_pre_base(DisasContext *ctx) op2 = MASK_OP_BO_OP2(ctx->opcode); - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); switch (op2) { case OPC2_32_BO_LDLCX_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_helper_ldlcx(tcg_env, temp); break; case OPC2_32_BO_LDMST_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_ldmst(ctx, r1, temp); break; case OPC2_32_BO_LDMST_POSTINC: gen_ldmst(ctx, r1, cpu_gpr_a[r2]); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_LDMST_PREINC: - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); gen_ldmst(ctx, r1, cpu_gpr_a[r2]); break; case OPC2_32_BO_LDUCX_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_helper_lducx(tcg_env, temp); break; case OPC2_32_BO_LEA_SHORTOFF: - tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_STLCX_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_helper_stlcx(tcg_env, temp); break; case OPC2_32_BO_STUCX_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_helper_stucx(tcg_env, temp); break; case OPC2_32_BO_SWAP_W_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_swap(ctx, r1, temp); break; case OPC2_32_BO_SWAP_W_POSTINC: gen_swap(ctx, r1, cpu_gpr_a[r2]); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case 
OPC2_32_BO_SWAP_W_PREINC: - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); gen_swap(ctx, r1, cpu_gpr_a[r2]); break; case OPC2_32_BO_CMPSWAP_W_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_cmpswap(ctx, r1, temp); break; case OPC2_32_BO_CMPSWAP_W_POSTINC: gen_cmpswap(ctx, r1, cpu_gpr_a[r2]); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_CMPSWAP_W_PREINC: - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); gen_cmpswap(ctx, r1, cpu_gpr_a[r2]); break; case OPC2_32_BO_SWAPMSK_W_SHORTOFF: - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], off10); gen_swapmsk(ctx, r1, temp); break; case OPC2_32_BO_SWAPMSK_W_POSTINC: gen_swapmsk(ctx, r1, cpu_gpr_a[r2]); - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); break; case OPC2_32_BO_SWAPMSK_W_PREINC: - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r2], off10); gen_swapmsk(ctx, r1, cpu_gpr_a[r2]); break; default: @@ -4810,52 +4848,52 @@ static void decode_bo_addrmode_ldmst_bitreverse_circular(DisasContext *ctx) uint32_t op2; uint32_t off10; int r1, r2; - TCGv temp, temp2, t_off10; + TCGv_i32 temp, temp2, t_off10; r1 = MASK_OP_BO_S1D(ctx->opcode); r2 = MASK_OP_BO_S2(ctx->opcode); off10 = MASK_OP_BO_OFF10_SEXT(ctx->opcode); op2 = MASK_OP_BO_OP2(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); t_off10 = tcg_constant_i32(off10); CHECK_REG_PAIR(r2); - tcg_gen_ext16u_tl(temp, cpu_gpr_a[r2+1]); - tcg_gen_add_tl(temp2, cpu_gpr_a[r2], temp); + tcg_gen_ext16u_i32(temp, cpu_gpr_a[r2 + 1]); + tcg_gen_add_i32(temp2, cpu_gpr_a[r2], temp); switch (op2) 
{ case OPC2_32_BO_LDMST_BR: gen_ldmst(ctx, r1, temp2); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_LDMST_CIRC: gen_ldmst(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_SWAP_W_BR: gen_swap(ctx, r1, temp2); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_SWAP_W_CIRC: gen_swap(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_CMPSWAP_W_BR: gen_cmpswap(ctx, r1, temp2); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_CMPSWAP_W_CIRC: gen_cmpswap(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; case OPC2_32_BO_SWAPMSK_W_BR: gen_swapmsk(ctx, r1, temp2); - gen_helper_br_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1]); + gen_helper_br_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1]); break; case OPC2_32_BO_SWAPMSK_W_CIRC: gen_swapmsk(ctx, r1, temp2); - gen_helper_circ_update(cpu_gpr_a[r2+1], cpu_gpr_a[r2+1], t_off10); + gen_helper_circ_update(cpu_gpr_a[r2 + 1], cpu_gpr_a[r2 + 1], t_off10); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -4866,7 +4904,7 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1) { int r1, r2; int32_t address; - TCGv temp; + TCGv_i32 temp; r1 = MASK_OP_BOL_S1D(ctx->opcode); r2 = MASK_OP_BOL_S2(ctx->opcode); @@ -4874,17 +4912,17 @@ static void decode_bol_opc(DisasContext *ctx, int32_t op1) switch (op1) { case OPC1_32_BOL_LD_A_LONGOFF: - temp = tcg_temp_new(); - 
tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); - tcg_gen_qemu_ld_tl(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); + temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address); + tcg_gen_qemu_ld_i32(cpu_gpr_a[r1], temp, ctx->mem_idx, MO_LEUL); break; case OPC1_32_BOL_LD_W_LONGOFF: - temp = tcg_temp_new(); - tcg_gen_addi_tl(temp, cpu_gpr_a[r2], address); - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); + temp = tcg_temp_new_i32(); + tcg_gen_addi_i32(temp, cpu_gpr_a[r2], address); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUL); break; case OPC1_32_BOL_LEA_LONGOFF: - tcg_gen_addi_tl(cpu_gpr_a[r1], cpu_gpr_a[r2], address); + tcg_gen_addi_i32(cpu_gpr_a[r1], cpu_gpr_a[r2], address); break; case OPC1_32_BOL_ST_A_LONGOFF: if (has_feature(ctx, TRICORE_FEATURE_16)) { @@ -4949,7 +4987,7 @@ static void decode_rc_logical_shift(DisasContext *ctx) uint32_t op2; int r1, r2; int32_t const9; - TCGv temp; + TCGv_i32 temp; r2 = MASK_OP_RC_D(ctx->opcode); r1 = MASK_OP_RC_S1(ctx->opcode); @@ -4958,26 +4996,26 @@ static void decode_rc_logical_shift(DisasContext *ctx) switch (op2) { case OPC2_32_RC_AND: - tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ANDN: - tcg_gen_andi_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9); + tcg_gen_andi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9); break; case OPC2_32_RC_NAND: - temp = tcg_temp_new(); - tcg_gen_movi_tl(temp, const9); - tcg_gen_nand_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp); + temp = tcg_temp_new_i32(); + tcg_gen_movi_i32(temp, const9); + tcg_gen_nand_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp); break; case OPC2_32_RC_NOR: - temp = tcg_temp_new(); - tcg_gen_movi_tl(temp, const9); - tcg_gen_nor_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp); + temp = tcg_temp_new_i32(); + tcg_gen_movi_i32(temp, const9); + tcg_gen_nor_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp); break; case OPC2_32_RC_OR: - tcg_gen_ori_tl(cpu_gpr_d[r2], 
cpu_gpr_d[r1], const9); + tcg_gen_ori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_ORN: - tcg_gen_ori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9); + tcg_gen_ori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], ~const9); break; case OPC2_32_RC_SH: const9 = sextract32(const9, 0, 6); @@ -4999,11 +5037,11 @@ static void decode_rc_logical_shift(DisasContext *ctx) gen_shasi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_XNOR: - tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); - tcg_gen_not_tl(cpu_gpr_d[r2], cpu_gpr_d[r2]); + tcg_gen_xori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_not_i32(cpu_gpr_d[r2], cpu_gpr_d[r2]); break; case OPC2_32_RC_XOR: - tcg_gen_xori_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_xori_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_SHUFFLE: if (has_feature(ctx, TRICORE_FEATURE_162)) { @@ -5024,7 +5062,7 @@ static void decode_rc_accumulator(DisasContext *ctx) int r1, r2; int16_t const9; - TCGv temp; + TCGv_i32 temp; r2 = MASK_OP_RC_D(ctx->opcode); r1 = MASK_OP_RC_S1(ctx->opcode); @@ -5032,7 +5070,7 @@ static void decode_rc_accumulator(DisasContext *ctx) op2 = MASK_OP_RC_OP2(ctx->opcode); - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RC_ABSDIF: @@ -5083,7 +5121,7 @@ static void decode_rc_accumulator(DisasContext *ctx) const9, &tcg_gen_and_tl); break; case OPC2_32_RC_EQ: - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_EQANY_B: gen_eqany_bi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); @@ -5092,41 +5130,41 @@ static void decode_rc_accumulator(DisasContext *ctx) gen_eqany_hi(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_GE: - tcg_gen_setcondi_tl(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_setcondi_i32(TCG_COND_GE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_GE_U: const9 = 
MASK_OP_RC_CONST9(ctx->opcode); - tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_setcondi_i32(TCG_COND_GEU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_LT: - tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_setcondi_i32(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_LT_U: const9 = MASK_OP_RC_CONST9(ctx->opcode); - tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_setcondi_i32(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MAX: - tcg_gen_movi_tl(temp, const9); - tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + tcg_gen_movi_i32(temp, const9); + tcg_gen_movcond_i32(TCG_COND_GT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, cpu_gpr_d[r1], temp); break; case OPC2_32_RC_MAX_U: - tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode)); - tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + tcg_gen_movi_i32(temp, MASK_OP_RC_CONST9(ctx->opcode)); + tcg_gen_movcond_i32(TCG_COND_GTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, cpu_gpr_d[r1], temp); break; case OPC2_32_RC_MIN: - tcg_gen_movi_tl(temp, const9); - tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + tcg_gen_movi_i32(temp, const9); + tcg_gen_movcond_i32(TCG_COND_LT, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, cpu_gpr_d[r1], temp); break; case OPC2_32_RC_MIN_U: - tcg_gen_movi_tl(temp, MASK_OP_RC_CONST9(ctx->opcode)); - tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, + tcg_gen_movi_i32(temp, MASK_OP_RC_CONST9(ctx->opcode)); + tcg_gen_movcond_i32(TCG_COND_LTU, cpu_gpr_d[r2], cpu_gpr_d[r1], temp, cpu_gpr_d[r1], temp); break; case OPC2_32_RC_NE: - tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_gpr_d[r2], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_OR_EQ: gen_accumulating_condi(TCG_COND_EQ, cpu_gpr_d[r2], cpu_gpr_d[r1], @@ -5155,15 
+5193,15 @@ static void decode_rc_accumulator(DisasContext *ctx) const9, &tcg_gen_or_tl); break; case OPC2_32_RC_RSUB: - tcg_gen_movi_tl(temp, const9); + tcg_gen_movi_i32(temp, const9); gen_sub_d(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]); break; case OPC2_32_RC_RSUBS: - tcg_gen_movi_tl(temp, const9); + tcg_gen_movi_i32(temp, const9); gen_subs(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]); break; case OPC2_32_RC_RSUBS_U: - tcg_gen_movi_tl(temp, const9); + tcg_gen_movi_i32(temp, const9); gen_subsu(cpu_gpr_d[r2], temp, cpu_gpr_d[r1]); break; case OPC2_32_RC_SH_EQ: @@ -5259,7 +5297,7 @@ static void decode_rc_mul(DisasContext *ctx) break; case OPC2_32_RC_MUL_64: CHECK_REG_PAIR(r2); - gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9); + gen_muli_i64s(cpu_gpr_d[r2], cpu_gpr_d[r2 + 1], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MULS_32: gen_mulsi_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], const9); @@ -5267,7 +5305,7 @@ static void decode_rc_mul(DisasContext *ctx) case OPC2_32_RC_MUL_U_64: const9 = MASK_OP_RC_CONST9(ctx->opcode); CHECK_REG_PAIR(r2); - gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2+1], cpu_gpr_d[r1], const9); + gen_muli_i64u(cpu_gpr_d[r2], cpu_gpr_d[r2 + 1], cpu_gpr_d[r1], const9); break; case OPC2_32_RC_MULS_U_32: const9 = MASK_OP_RC_CONST9(ctx->opcode); @@ -5285,7 +5323,7 @@ static void decode_rcpw_insert(DisasContext *ctx) int r1, r2; int32_t pos, width, const4; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_RCPW_OP2(ctx->opcode); r1 = MASK_OP_RCPW_S1(ctx->opcode); @@ -5299,18 +5337,18 @@ static void decode_rcpw_insert(DisasContext *ctx) CHECK_REG_PAIR(r2); /* if pos + width > 32 undefined result */ if (pos + width <= 32) { - tcg_gen_movi_tl(cpu_gpr_d[r2+1], ((1u << width) - 1) << pos); - tcg_gen_movi_tl(cpu_gpr_d[r2], (const4 << pos)); + tcg_gen_movi_i32(cpu_gpr_d[r2 + 1], ((1u << width) - 1) << pos); + tcg_gen_movi_i32(cpu_gpr_d[r2], (const4 << pos)); } break; case OPC2_32_RCPW_INSERT: - /* tcg_gen_deposit_tl() does not handle the case of width = 0 */ + /* 
tcg_gen_deposit_i32() does not handle the case of width = 0 */ if (width == 0) { - tcg_gen_mov_tl(cpu_gpr_d[r2], cpu_gpr_d[r1]); + tcg_gen_mov_i32(cpu_gpr_d[r2], cpu_gpr_d[r1]); /* if pos + width > 32 undefined result */ } else if (pos + width <= 32) { temp = tcg_constant_i32(const4); - tcg_gen_deposit_tl(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width); + tcg_gen_deposit_i32(cpu_gpr_d[r2], cpu_gpr_d[r1], temp, pos, width); } break; default: @@ -5326,7 +5364,7 @@ static void decode_rcrw_insert(DisasContext *ctx) int r1, r3, r4; int32_t width, const4; - TCGv temp, temp2, temp3; + TCGv_i32 temp, temp2, temp3; op2 = MASK_OP_RCRW_OP2(ctx->opcode); r1 = MASK_OP_RCRW_S1(ctx->opcode); @@ -5335,24 +5373,24 @@ static void decode_rcrw_insert(DisasContext *ctx) width = MASK_OP_RCRW_WIDTH(ctx->opcode); const4 = MASK_OP_RCRW_CONST4(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RCRW_IMASK: CHECK_REG_PAIR(r4); - tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); - tcg_gen_movi_tl(temp2, (1 << width) - 1); - tcg_gen_shl_tl(cpu_gpr_d[r4 + 1], temp2, temp); - tcg_gen_movi_tl(temp2, const4); - tcg_gen_shl_tl(cpu_gpr_d[r4], temp2, temp); + tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_movi_i32(temp2, (1 << width) - 1); + tcg_gen_shl_i32(cpu_gpr_d[r4 + 1], temp2, temp); + tcg_gen_movi_i32(temp2, const4); + tcg_gen_shl_i32(cpu_gpr_d[r4], temp2, temp); break; case OPC2_32_RCRW_INSERT: - temp3 = tcg_temp_new(); + temp3 = tcg_temp_new_i32(); - tcg_gen_movi_tl(temp, width); - tcg_gen_movi_tl(temp2, const4); - tcg_gen_andi_tl(temp3, cpu_gpr_d[r3], 0x1f); + tcg_gen_movi_i32(temp, width); + tcg_gen_movi_i32(temp2, const4); + tcg_gen_andi_i32(temp3, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], temp2, temp, temp3); break; default: @@ -5368,7 +5406,7 @@ static void decode_rcr_cond_select(DisasContext *ctx) int r1, r3, r4; int32_t const9; - TCGv temp, temp2; + TCGv_i32 temp, 
temp2; op2 = MASK_OP_RCR_OP2(ctx->opcode); r1 = MASK_OP_RCR_S1(ctx->opcode); @@ -5388,13 +5426,13 @@ static void decode_rcr_cond_select(DisasContext *ctx) case OPC2_32_RCR_SEL: temp = tcg_constant_i32(0); temp2 = tcg_constant_i32(const9); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp2); break; case OPC2_32_RCR_SELN: temp = tcg_constant_i32(0); temp2 = tcg_constant_i32(const9); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp2); break; default: @@ -5422,8 +5460,8 @@ static void decode_rcr_madd(DisasContext *ctx) case OPC2_32_RCR_MADD_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_maddi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; case OPC2_32_RCR_MADDS_32: gen_maddsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); @@ -5431,15 +5469,15 @@ static void decode_rcr_madd(DisasContext *ctx) case OPC2_32_RCR_MADDS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_maddsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; case OPC2_32_RCR_MADD_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); - gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_maddui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; case OPC2_32_RCR_MADDS_U_32: const9 = MASK_OP_RCR_CONST9(ctx->opcode); @@ -5449,8 +5487,8 @@ static void decode_rcr_madd(DisasContext *ctx) CHECK_REG_PAIR(r4); 
CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); - gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_maddsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -5477,8 +5515,8 @@ static void decode_rcr_msub(DisasContext *ctx) case OPC2_32_RCR_MSUB_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_msubi64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; case OPC2_32_RCR_MSUBS_32: gen_msubsi_32(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r3], const9); @@ -5486,15 +5524,15 @@ static void decode_rcr_msub(DisasContext *ctx) case OPC2_32_RCR_MSUBS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_msubsi_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; case OPC2_32_RCR_MSUB_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); - gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_msubui64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; case OPC2_32_RCR_MSUBS_U_32: const9 = MASK_OP_RCR_CONST9(ctx->opcode); @@ -5504,8 +5542,8 @@ static void decode_rcr_msub(DisasContext *ctx) CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); const9 = MASK_OP_RCR_CONST9(ctx->opcode); - gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], const9); + gen_msubsui_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], const9); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ 
-5532,33 +5570,33 @@ static void decode_rlc_opc(DisasContext *ctx, gen_addi_d(cpu_gpr_d[r2], cpu_gpr_d[r1], const16 << 16); break; case OPC1_32_RLC_ADDIH_A: - tcg_gen_addi_tl(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16); + tcg_gen_addi_i32(cpu_gpr_a[r2], cpu_gpr_a[r1], const16 << 16); break; case OPC1_32_RLC_MFCR: const16 = MASK_OP_RLC_CONST16(ctx->opcode); gen_mfcr(ctx, cpu_gpr_d[r2], const16); break; case OPC1_32_RLC_MOV: - tcg_gen_movi_tl(cpu_gpr_d[r2], const16); + tcg_gen_movi_i32(cpu_gpr_d[r2], const16); break; case OPC1_32_RLC_MOV_64: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r2); - tcg_gen_movi_tl(cpu_gpr_d[r2], const16); - tcg_gen_movi_tl(cpu_gpr_d[r2+1], const16 >> 15); + tcg_gen_movi_i32(cpu_gpr_d[r2], const16); + tcg_gen_movi_i32(cpu_gpr_d[r2 + 1], const16 >> 15); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC1_32_RLC_MOV_U: const16 = MASK_OP_RLC_CONST16(ctx->opcode); - tcg_gen_movi_tl(cpu_gpr_d[r2], const16); + tcg_gen_movi_i32(cpu_gpr_d[r2], const16); break; case OPC1_32_RLC_MOV_H: - tcg_gen_movi_tl(cpu_gpr_d[r2], const16 << 16); + tcg_gen_movi_i32(cpu_gpr_d[r2], const16 << 16); break; case OPC1_32_RLC_MOVH_A: - tcg_gen_movi_tl(cpu_gpr_a[r2], const16 << 16); + tcg_gen_movi_i32(cpu_gpr_a[r2], const16 << 16); break; case OPC1_32_RLC_MTCR: const16 = MASK_OP_RLC_CONST16(ctx->opcode); @@ -5575,7 +5613,7 @@ static void decode_rr_accumulator(DisasContext *ctx) uint32_t op2; int r3, r2, r1; - TCGv temp; + TCGv_i32 temp; r3 = MASK_OP_RR_D(ctx->opcode); r2 = MASK_OP_RR_S2(ctx->opcode); @@ -5672,7 +5710,7 @@ static void decode_rr_accumulator(DisasContext *ctx) cpu_gpr_d[r2], &tcg_gen_and_tl); break; case OPC2_32_RR_EQ: - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_EQ_B: @@ -5682,7 +5720,7 @@ static void decode_rr_accumulator(DisasContext *ctx) gen_helper_eq_h(cpu_gpr_d[r3], cpu_gpr_d[r1], 
cpu_gpr_d[r2]); break; case OPC2_32_RR_EQ_W: - tcg_gen_negsetcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], + tcg_gen_negsetcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_EQANY_B: @@ -5692,19 +5730,19 @@ static void decode_rr_accumulator(DisasContext *ctx) gen_helper_eqany_h(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_GE: - tcg_gen_setcond_tl(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_GE, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_GE_U: - tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_LT: - tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_U: - tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_B: @@ -5720,19 +5758,19 @@ static void decode_rr_accumulator(DisasContext *ctx) gen_helper_lt_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_W: - tcg_gen_negsetcond_tl(TCG_COND_LT, cpu_gpr_d[r3], + tcg_gen_negsetcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_LT_WU: - tcg_gen_negsetcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], + tcg_gen_negsetcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX: - tcg_gen_movcond_tl(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_movcond_i32(TCG_COND_GT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_U: - tcg_gen_movcond_tl(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_movcond_i32(TCG_COND_GTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MAX_B: @@ 
-5748,11 +5786,11 @@ static void decode_rr_accumulator(DisasContext *ctx) gen_helper_max_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN: - tcg_gen_movcond_tl(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_movcond_i32(TCG_COND_LT, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_U: - tcg_gen_movcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_movcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MIN_B: @@ -5768,16 +5806,16 @@ static void decode_rr_accumulator(DisasContext *ctx) gen_helper_min_hu(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_MOV: - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]); break; case OPC2_32_RR_MOV_64: if (has_feature(ctx, TRICORE_FEATURE_16)) { - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); CHECK_REG_PAIR(r3); - tcg_gen_mov_tl(temp, cpu_gpr_d[r1]); - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]); - tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); + tcg_gen_mov_i32(temp, cpu_gpr_d[r1]); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]); + tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -5785,14 +5823,14 @@ static void decode_rr_accumulator(DisasContext *ctx) case OPC2_32_RR_MOVS_64: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r3); - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r2]); - tcg_gen_sari_tl(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r2]); + tcg_gen_sari_i32(cpu_gpr_d[r3 + 1], cpu_gpr_d[r2], 31); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } break; case OPC2_32_RR_NE: - tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], + tcg_gen_setcond_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_OR_EQ: @@ -5925,41 +5963,41 @@ static void 
decode_rr_logical_shift(DisasContext *ctx) switch (op2) { case OPC2_32_RR_AND: - tcg_gen_and_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_and_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_ANDN: - tcg_gen_andc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_andc_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_CLO: - tcg_gen_not_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); - tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS); + tcg_gen_not_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]); + tcg_gen_clzi_i32(cpu_gpr_d[r3], cpu_gpr_d[r3], TARGET_LONG_BITS); break; case OPC2_32_RR_CLO_H: gen_helper_clo_h(cpu_gpr_d[r3], cpu_gpr_d[r1]); break; case OPC2_32_RR_CLS: - tcg_gen_clrsb_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + tcg_gen_clrsb_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]); break; case OPC2_32_RR_CLS_H: gen_helper_cls_h(cpu_gpr_d[r3], cpu_gpr_d[r1]); break; case OPC2_32_RR_CLZ: - tcg_gen_clzi_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS); + tcg_gen_clzi_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], TARGET_LONG_BITS); break; case OPC2_32_RR_CLZ_H: gen_helper_clz_h(cpu_gpr_d[r3], cpu_gpr_d[r1]); break; case OPC2_32_RR_NAND: - tcg_gen_nand_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_nand_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_NOR: - tcg_gen_nor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_nor_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_OR: - tcg_gen_or_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_or_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_ORN: - tcg_gen_orc_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_orc_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_SH: gen_helper_sh(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); @@ -5977,10 +6015,10 @@ static void decode_rr_logical_shift(DisasContext *ctx) gen_shas(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; 
case OPC2_32_RR_XNOR: - tcg_gen_eqv_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_eqv_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_XOR: - tcg_gen_xor_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); + tcg_gen_xor_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -5991,7 +6029,7 @@ static void decode_rr_address(DisasContext *ctx) { uint32_t op2, n; int r1, r2, r3; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_RR_OP2(ctx->opcode); r3 = MASK_OP_RR_D(ctx->opcode); @@ -6001,52 +6039,52 @@ static void decode_rr_address(DisasContext *ctx) switch (op2) { case OPC2_32_RR_ADD_A: - tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); + tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case OPC2_32_RR_ADDSC_A: - temp = tcg_temp_new(); - tcg_gen_shli_tl(temp, cpu_gpr_d[r1], n); - tcg_gen_add_tl(cpu_gpr_a[r3], cpu_gpr_a[r2], temp); + temp = tcg_temp_new_i32(); + tcg_gen_shli_i32(temp, cpu_gpr_d[r1], n); + tcg_gen_add_i32(cpu_gpr_a[r3], cpu_gpr_a[r2], temp); break; case OPC2_32_RR_ADDSC_AT: - temp = tcg_temp_new(); - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 3); - tcg_gen_add_tl(temp, cpu_gpr_a[r2], temp); - tcg_gen_andi_tl(cpu_gpr_a[r3], temp, 0xFFFFFFFC); + temp = tcg_temp_new_i32(); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 3); + tcg_gen_add_i32(temp, cpu_gpr_a[r2], temp); + tcg_gen_andi_i32(cpu_gpr_a[r3], temp, 0xFFFFFFFC); break; case OPC2_32_RR_EQ_A: - tcg_gen_setcond_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], + tcg_gen_setcond_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case OPC2_32_RR_EQZ: - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_gpr_d[r3], cpu_gpr_a[r1], 0); break; case OPC2_32_RR_GE_A: - tcg_gen_setcond_tl(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1], + tcg_gen_setcond_i32(TCG_COND_GEU, cpu_gpr_d[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case 
OPC2_32_RR_LT_A: - tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1], + tcg_gen_setcond_i32(TCG_COND_LTU, cpu_gpr_d[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case OPC2_32_RR_MOV_A: - tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_d[r2]); + tcg_gen_mov_i32(cpu_gpr_a[r3], cpu_gpr_d[r2]); break; case OPC2_32_RR_MOV_AA: - tcg_gen_mov_tl(cpu_gpr_a[r3], cpu_gpr_a[r2]); + tcg_gen_mov_i32(cpu_gpr_a[r3], cpu_gpr_a[r2]); break; case OPC2_32_RR_MOV_D: - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_a[r2]); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_a[r2]); break; case OPC2_32_RR_NE_A: - tcg_gen_setcond_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], + tcg_gen_setcond_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; case OPC2_32_RR_NEZ_A: - tcg_gen_setcondi_tl(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0); + tcg_gen_setcondi_i32(TCG_COND_NE, cpu_gpr_d[r3], cpu_gpr_a[r1], 0); break; case OPC2_32_RR_SUB_A: - tcg_gen_sub_tl(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); + tcg_gen_sub_i32(cpu_gpr_a[r3], cpu_gpr_a[r1], cpu_gpr_a[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6063,19 +6101,19 @@ static void decode_rr_idirect(DisasContext *ctx) switch (op2) { case OPC2_32_RR_JI: - tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1); break; case OPC2_32_RR_JLI: - tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); - tcg_gen_movi_tl(cpu_gpr_a[11], ctx->pc_succ_insn); + tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1); + tcg_gen_movi_i32(cpu_gpr_a[11], ctx->pc_succ_insn); break; case OPC2_32_RR_CALLI: gen_helper_1arg(call, ctx->pc_succ_insn); - tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1); break; case OPC2_32_RR_FCALLI: gen_fcall_save_ctx(ctx); - tcg_gen_andi_tl(cpu_PC, cpu_gpr_a[r1], ~0x1); + tcg_gen_andi_i32(cpu_PC, cpu_gpr_a[r1], ~0x1); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6089,7 +6127,7 @@ static void decode_rr_divide(DisasContext 
*ctx) uint32_t op2; int r1, r2, r3; - TCGv temp, temp2, temp3; + TCGv_i32 temp, temp2, temp3; op2 = MASK_OP_RR_OP2(ctx->opcode); r3 = MASK_OP_RR_D(ctx->opcode); @@ -6102,107 +6140,107 @@ static void decode_rr_divide(DisasContext *ctx) break; case OPC2_32_RR_BSPLIT: CHECK_REG_PAIR(r3); - gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]); + gen_bsplit(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]); break; case OPC2_32_RR_DVINIT_B: CHECK_REG_PAIR(r3); - gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + gen_dvinit_b(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_DVINIT_BU: - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); - temp3 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); + temp3 = tcg_temp_new_i32(); CHECK_REG_PAIR(r3); - tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 8); + tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 8); /* reset av */ - tcg_gen_movi_tl(cpu_PSW_AV, 0); + tcg_gen_movi_i32(cpu_PSW_AV, 0); if (!has_feature(ctx, TRICORE_FEATURE_131)) { - /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ - tcg_gen_abs_tl(temp, temp3); - tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]); - tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2); + /* overflow = (abs(D[r3 + 1]) >= abs(D[r2])) */ + tcg_gen_abs_i32(temp, temp3); + tcg_gen_abs_i32(temp2, cpu_gpr_d[r2]); + tcg_gen_setcond_i32(TCG_COND_GE, cpu_PSW_V, temp, temp2); } else { /* overflow = (D[b] == 0) */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); } - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* write result */ - tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 24); - tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); + tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], 24); + tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], 
temp3); break; case OPC2_32_RR_DVINIT_H: CHECK_REG_PAIR(r3); - gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + gen_dvinit_h(ctx, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR_DVINIT_HU: - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); - temp3 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); + temp3 = tcg_temp_new_i32(); CHECK_REG_PAIR(r3); - tcg_gen_shri_tl(temp3, cpu_gpr_d[r1], 16); + tcg_gen_shri_i32(temp3, cpu_gpr_d[r1], 16); /* reset av */ - tcg_gen_movi_tl(cpu_PSW_AV, 0); + tcg_gen_movi_i32(cpu_PSW_AV, 0); if (!has_feature(ctx, TRICORE_FEATURE_131)) { - /* overflow = (abs(D[r3+1]) >= abs(D[r2])) */ - tcg_gen_abs_tl(temp, temp3); - tcg_gen_abs_tl(temp2, cpu_gpr_d[r2]); - tcg_gen_setcond_tl(TCG_COND_GE, cpu_PSW_V, temp, temp2); + /* overflow = (abs(D[r3 + 1]) >= abs(D[r2])) */ + tcg_gen_abs_i32(temp, temp3); + tcg_gen_abs_i32(temp2, cpu_gpr_d[r2]); + tcg_gen_setcond_i32(TCG_COND_GE, cpu_PSW_V, temp, temp2); } else { /* overflow = (D[b] == 0) */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); } - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* write result */ - tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], 16); - tcg_gen_mov_tl(cpu_gpr_d[r3+1], temp3); + tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], 16); + tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp3); break; case OPC2_32_RR_DVINIT: - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); CHECK_REG_PAIR(r3); /* overflow = ((D[b] == 0) || ((D[b] == 0xFFFFFFFF) && (D[a] == 0x80000000))) */ - tcg_gen_setcondi_tl(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff); - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000); - 
tcg_gen_and_tl(temp, temp, temp2); - tcg_gen_setcondi_tl(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0); - tcg_gen_or_tl(cpu_PSW_V, temp, temp2); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp, cpu_gpr_d[r2], 0xffffffff); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, cpu_gpr_d[r1], 0x80000000); + tcg_gen_and_i32(temp, temp, temp2); + tcg_gen_setcondi_i32(TCG_COND_EQ, temp2, cpu_gpr_d[r2], 0); + tcg_gen_or_i32(cpu_PSW_V, temp, temp2); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* reset av */ - tcg_gen_movi_tl(cpu_PSW_AV, 0); + tcg_gen_movi_i32(cpu_PSW_AV, 0); /* write result */ - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]); /* sign extend to high reg */ - tcg_gen_sari_tl(cpu_gpr_d[r3+1], cpu_gpr_d[r1], 31); + tcg_gen_sari_i32(cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], 31); break; case OPC2_32_RR_DVINIT_U: CHECK_REG_PAIR(r3); /* overflow = (D[b] == 0) */ - tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); - tcg_gen_shli_tl(cpu_PSW_V, cpu_PSW_V, 31); + tcg_gen_setcondi_i32(TCG_COND_EQ, cpu_PSW_V, cpu_gpr_d[r2], 0); + tcg_gen_shli_i32(cpu_PSW_V, cpu_PSW_V, 31); /* sv */ - tcg_gen_or_tl(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); + tcg_gen_or_i32(cpu_PSW_SV, cpu_PSW_SV, cpu_PSW_V); /* reset av */ - tcg_gen_movi_tl(cpu_PSW_AV, 0); + tcg_gen_movi_i32(cpu_PSW_AV, 0); /* write result */ - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]); /* zero extend to high reg*/ - tcg_gen_movi_tl(cpu_gpr_d[r3+1], 0); + tcg_gen_movi_i32(cpu_gpr_d[r3 + 1], 0); break; case OPC2_32_RR_PARITY: gen_helper_parity(cpu_gpr_d[r3], cpu_gpr_d[r1]); break; case OPC2_32_RR_UNPACK: CHECK_REG_PAIR(r3); - gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1]); + gen_unpack(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]); break; case OPC2_32_RR_CRC32_B: if 
(has_feature(ctx, TRICORE_FEATURE_162)) { @@ -6228,7 +6266,7 @@ static void decode_rr_divide(DisasContext *ctx) case OPC2_32_RR_POPCNT_W: if (has_feature(ctx, TRICORE_FEATURE_162)) { - tcg_gen_ctpop_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + tcg_gen_ctpop_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); } @@ -6236,7 +6274,7 @@ static void decode_rr_divide(DisasContext *ctx) case OPC2_32_RR_DIV: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r3); - GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + GEN_HELPER_RR(divide, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2]); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6245,7 +6283,7 @@ static void decode_rr_divide(DisasContext *ctx) case OPC2_32_RR_DIV_U: if (has_feature(ctx, TRICORE_FEATURE_16)) { CHECK_REG_PAIR(r3); - GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3+1], + GEN_HELPER_RR(divide_u, cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2]); } else { generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6313,7 +6351,7 @@ static void decode_rr1_mul(DisasContext *ctx) uint32_t op2; int r1, r2, r3; - TCGv n; + TCGv_i32 n; TCGv_i64 temp64; r1 = MASK_OP_RR1_S1(ctx->opcode); @@ -6327,69 +6365,69 @@ static void decode_rr1_mul(DisasContext *ctx) temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_LL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); - gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]); break; case OPC2_32_RR1_MUL_H_32_LU: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_LU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); - gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], 
temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]); break; case OPC2_32_RR1_MUL_H_32_UL: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_UL(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); - gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]); break; case OPC2_32_RR1_MUL_H_32_UU: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_UU(mul_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); - gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3+1]); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); + gen_calc_usb_mul_h(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1]); break; case OPC2_32_RR1_MULM_H_64_LL: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_LL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); /* reset V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* reset AV bit */ - tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V); break; case OPC2_32_RR1_MULM_H_64_LU: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_LU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); /* reset V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* reset AV bit */ - tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V); break; case OPC2_32_RR1_MULM_H_64_UL: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_UL(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + 
tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); /* reset V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* reset AV bit */ - tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V); break; case OPC2_32_RR1_MULM_H_64_UU: temp64 = tcg_temp_new_i64(); CHECK_REG_PAIR(r3); GEN_HELPER_UU(mulm_h, temp64, cpu_gpr_d[r1], cpu_gpr_d[r2], n); - tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3+1], temp64); + tcg_gen_extr_i64_i32(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], temp64); /* reset V bit */ - tcg_gen_movi_tl(cpu_PSW_V, 0); + tcg_gen_movi_i32(cpu_PSW_V, 0); /* reset AV bit */ - tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); + tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V); break; case OPC2_32_RR1_MULR_H_16_LL: GEN_HELPER_LL(mulr_h, cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], n); @@ -6418,7 +6456,7 @@ static void decode_rr1_mulq(DisasContext *ctx) int r1, r2, r3; uint32_t n; - TCGv temp, temp2; + TCGv_i32 temp, temp2; r1 = MASK_OP_RR1_S1(ctx->opcode); r2 = MASK_OP_RR1_S2(ctx->opcode); @@ -6426,8 +6464,8 @@ static void decode_rr1_mulq(DisasContext *ctx) n = MASK_OP_RR1_N(ctx->opcode); op2 = MASK_OP_RR1_OP2(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RR1_MUL_Q_32: @@ -6435,45 +6473,45 @@ static void decode_rr1_mulq(DisasContext *ctx) break; case OPC2_32_RR1_MUL_Q_64: CHECK_REG_PAIR(r3); - gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, 0); break; case OPC2_32_RR1_MUL_Q_32_L: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RR1_MUL_Q_64_L: CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); - gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0); + tcg_gen_ext16s_i32(temp, 
cpu_gpr_d[r2]); + gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n, 0); break; case OPC2_32_RR1_MUL_Q_32_U: - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); gen_mul_q(cpu_gpr_d[r3], temp, cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RR1_MUL_Q_64_U: CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); - gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, n, 0); + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); + gen_mul_q(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n, 0); break; case OPC2_32_RR1_MUL_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RR1_MUL_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_mul_q_16(cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RR1_MULR_Q_32_L: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RR1_MULR_Q_32_U: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_mulr_q(cpu_gpr_d[r3], temp, temp2, n); break; default: @@ -6497,7 +6535,7 @@ static void decode_rr2_mul(DisasContext *ctx) break; case OPC2_32_RR2_MUL_64: CHECK_REG_PAIR(r3); - gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + gen_mul_i64s(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR2_MULS_32: @@ -6506,7 +6544,7 @@ static void decode_rr2_mul(DisasContext *ctx) break; case 
OPC2_32_RR2_MUL_U_64: CHECK_REG_PAIR(r3); - gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r1], + gen_mul_i64u(cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RR2_MULS_U_32: @@ -6524,7 +6562,7 @@ static void decode_rrpw_extract_insert(DisasContext *ctx) uint32_t op2; int r1, r2, r3; int32_t pos, width; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_RRPW_OP2(ctx->opcode); r1 = MASK_OP_RRPW_S1(ctx->opcode); @@ -6536,35 +6574,35 @@ static void decode_rrpw_extract_insert(DisasContext *ctx) switch (op2) { case OPC2_32_RRPW_EXTR: if (width == 0) { - tcg_gen_movi_tl(cpu_gpr_d[r3], 0); + tcg_gen_movi_i32(cpu_gpr_d[r3], 0); } else if (pos + width <= 32) { - tcg_gen_sextract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width); + tcg_gen_sextract_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width); } break; case OPC2_32_RRPW_EXTR_U: if (width == 0) { - tcg_gen_movi_tl(cpu_gpr_d[r3], 0); + tcg_gen_movi_i32(cpu_gpr_d[r3], 0); } else { - tcg_gen_extract_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width); + tcg_gen_extract_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], pos, width); } break; case OPC2_32_RRPW_IMASK: CHECK_REG_PAIR(r3); if (pos + width <= 32) { - temp = tcg_temp_new(); - tcg_gen_movi_tl(temp, ((1u << width) - 1) << pos); - tcg_gen_shli_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); - tcg_gen_mov_tl(cpu_gpr_d[r3 + 1], temp); + temp = tcg_temp_new_i32(); + tcg_gen_movi_i32(temp, ((1u << width) - 1) << pos); + tcg_gen_shli_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], pos); + tcg_gen_mov_i32(cpu_gpr_d[r3 + 1], temp); } break; case OPC2_32_RRPW_INSERT: - /* tcg_gen_deposit_tl() does not handle the case of width = 0 */ + /* tcg_gen_deposit_i32() does not handle the case of width = 0 */ if (width == 0) { - tcg_gen_mov_tl(cpu_gpr_d[r3], cpu_gpr_d[r1]); + tcg_gen_mov_i32(cpu_gpr_d[r3], cpu_gpr_d[r1]); } else if (pos + width <= 32) { - tcg_gen_deposit_tl(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], + tcg_gen_deposit_i32(cpu_gpr_d[r3], cpu_gpr_d[r1], cpu_gpr_d[r2], pos, width); } 
break; @@ -6578,7 +6616,7 @@ static void decode_rrr_cond_select(DisasContext *ctx) { uint32_t op2; int r1, r2, r3, r4; - TCGv temp; + TCGv_i32 temp; op2 = MASK_OP_RRR_OP2(ctx->opcode); r1 = MASK_OP_RRR_S1(ctx->opcode); @@ -6605,12 +6643,12 @@ static void decode_rrr_cond_select(DisasContext *ctx) break; case OPC2_32_RRR_SEL: temp = tcg_constant_i32(0); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + tcg_gen_movcond_i32(TCG_COND_NE, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_SELN: temp = tcg_constant_i32(0); - tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, + tcg_gen_movcond_i32(TCG_COND_EQ, cpu_gpr_d[r4], cpu_gpr_d[r3], temp, cpu_gpr_d[r1], cpu_gpr_d[r2]); break; default: @@ -6634,49 +6672,49 @@ static void decode_rrr_divide(DisasContext *ctx) case OPC2_32_RRR_DVADJ: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(dvadj, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_DVSTEP: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(dvstep, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_DVSTEP_U: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(dvstep_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMAX: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(ixmax, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); 
break; case OPC2_32_RRR_IXMAX_U: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(ixmax_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMIN: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(ixmin, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_IXMIN_U: CHECK_REG_PAIR(r3); CHECK_REG_PAIR(r4); - GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + GEN_HELPER_RRR(ixmin_u, cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR_PACK: CHECK_REG_PAIR(r3); gen_helper_pack(cpu_gpr_d[r4], cpu_PSW_C, cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1]); + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1]); break; case OPC2_32_RRR_CRCN: if (has_feature(ctx, TRICORE_FEATURE_162)) { @@ -6724,8 +6762,8 @@ static void decode_rrr2_madd(DisasContext *ctx) case OPC2_32_RRR2_MADD_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_madd64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADDS_32: gen_helper_madd32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], @@ -6734,14 +6772,14 @@ static void decode_rrr2_madd(DisasContext *ctx) case OPC2_32_RRR2_MADDS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_madds_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADD_U_64: 
CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_maddu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MADDS_U_32: gen_helper_madd32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], @@ -6750,8 +6788,8 @@ static void decode_rrr2_madd(DisasContext *ctx) case OPC2_32_RRR2_MADDS_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_maddsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6777,8 +6815,8 @@ static void decode_rrr2_msub(DisasContext *ctx) case OPC2_32_RRR2_MSUB_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_msub64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUBS_32: gen_helper_msub32_ssov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], @@ -6787,14 +6825,14 @@ static void decode_rrr2_msub(DisasContext *ctx) case OPC2_32_RRR2_MSUBS_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_msubs_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUB_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_msubu64_d(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; case OPC2_32_RRR2_MSUBS_U_32: 
gen_helper_msub32_suov(cpu_gpr_d[r4], tcg_env, cpu_gpr_d[r1], @@ -6803,8 +6841,8 @@ static void decode_rrr2_msub(DisasContext *ctx) case OPC2_32_RRR2_MSUBS_U_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r1], - cpu_gpr_d[r3], cpu_gpr_d[r3+1], cpu_gpr_d[r2]); + gen_msubsu_64(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r1], + cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r2]); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -6828,98 +6866,98 @@ static void decode_rrr1_madd(DisasContext *ctx) case OPC2_32_RRR1_MADD_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADD_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADD_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADD_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_madd_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - 
cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_madds_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDM_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDM_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDM_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], 
cpu_gpr_d[r2], n, MODE_UL); + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDM_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_maddm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDMS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDMS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDMS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDMS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_maddms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDR_H_LL: gen_maddr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], @@ -6962,7 +7000,7 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) { uint32_t op2; uint32_t r1, 
r2, r3, r4, n; - TCGv temp, temp2; + TCGv_i32 temp, temp2; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); @@ -6971,8 +7009,8 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RRR1_MADD_Q_32: @@ -6982,61 +7020,61 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) case OPC2_32_RRR1_MADD_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MADD_Q_32_L: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADD_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); - gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); + gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADD_Q_32_U: - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); gen_madd32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADD_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); - gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); + gen_madd64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADD_Q_32_LL: - 
tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADD_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); - gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); + gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MADD_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_m16add32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADD_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); - gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); + gen_m16add64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_32: gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], @@ -7045,90 +7083,90 @@ static void decode_rrr1_maddq_h(DisasContext *ctx) case OPC2_32_RRR1_MADDS_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MADDS_Q_32_L: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + 
tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADDS_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); - gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); + gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADDS_Q_32_U: - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); gen_madds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MADDS_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); - gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); + gen_madds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MADDS_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); - gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); + gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + 
tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_m16adds32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDS_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); - gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); + gen_m16adds64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MADDR_H_64_UL: CHECK_REG_PAIR(r3); - gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + gen_maddr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MADDRS_H_64_UL: CHECK_REG_PAIR(r3); - gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + gen_maddr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MADDR_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDR_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_maddr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDRS_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MADDRS_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + 
tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_maddrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; default: @@ -7152,109 +7190,109 @@ static void decode_rrr1_maddsu_h(DisasContext *ctx) case OPC2_32_RRR1_MADDSU_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSU_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSU_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSU_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_maddsu_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUS_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUS_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - 
gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUS_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUS_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsus_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUM_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUM_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUM_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUM_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], 
- cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsum_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUMS_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MADDSUMS_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MADDSUMS_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MADDSUMS_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_maddsums_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MADDSUR_H_16_LL: @@ -7310,98 +7348,98 @@ static void decode_rrr1_msub(DisasContext *ctx) case OPC2_32_RRR1_MSUB_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUB_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - 
gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUB_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUB_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_msub_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], 
cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_msubs_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBM_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBM_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBM_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBM_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_msubm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBMS_H_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBMS_H_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - 
cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBMS_H_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBMS_H_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_msubms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBR_H_LL: gen_msubr32_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], @@ -7444,7 +7482,7 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) { uint32_t op2; uint32_t r1, r2, r3, r4, n; - TCGv temp, temp2; + TCGv_i32 temp, temp2; op2 = MASK_OP_RRR1_OP2(ctx->opcode); r1 = MASK_OP_RRR1_S1(ctx->opcode); @@ -7453,8 +7491,8 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) r4 = MASK_OP_RRR1_D(ctx->opcode); n = MASK_OP_RRR1_N(ctx->opcode); - temp = tcg_temp_new(); - temp2 = tcg_temp_new(); + temp = tcg_temp_new_i32(); + temp2 = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RRR1_MSUB_Q_32: @@ -7464,61 +7502,61 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) case OPC2_32_RRR1_MSUB_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MSUB_Q_32_L: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); gen_msub32_q(cpu_gpr_d[r4], 
cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUB_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); - gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); + gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUB_Q_32_U: - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); gen_msub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUB_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); - gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); + gen_msub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUB_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUB_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); - gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); + gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MSUB_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_m16sub32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, 
temp2, n); break; case OPC2_32_RRR1_MSUB_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); - gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); + gen_m16sub64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_32: gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], @@ -7527,90 +7565,90 @@ static void decode_rrr1_msubq_h(DisasContext *ctx) case OPC2_32_RRR1_MSUBS_Q_64: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n); break; case OPC2_32_RRR1_MSUBS_Q_32_L: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUBS_Q_64_L: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r2]); - gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r2]); + gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUBS_Q_32_U: - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); gen_msubs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r1], temp, n, 16); break; case OPC2_32_RRR1_MSUBS_Q_64_U: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r2], 16); - gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], temp, + tcg_gen_sari_i32(temp, cpu_gpr_d[r2], 16); + 
gen_msubs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], temp, n); break; case OPC2_32_RRR1_MSUBS_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); - gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); + gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_m16subs32_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBS_Q_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); - gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], temp, temp2, n); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); + gen_m16subs64_q(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], temp, temp2, n); break; case OPC2_32_RRR1_MSUBR_H_64_UL: CHECK_REG_PAIR(r3); - gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + gen_msubr64_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MSUBRS_H_64_UL: CHECK_REG_PAIR(r3); - gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], cpu_gpr_d[r3+1], + gen_msubr64s_h(cpu_gpr_d[r4], cpu_gpr_d[r3], 
cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, 2); break; case OPC2_32_RRR1_MSUBR_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBR_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_msubr_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBRS_Q_32_LL: - tcg_gen_ext16s_tl(temp, cpu_gpr_d[r1]); - tcg_gen_ext16s_tl(temp2, cpu_gpr_d[r2]); + tcg_gen_ext16s_i32(temp, cpu_gpr_d[r1]); + tcg_gen_ext16s_i32(temp2, cpu_gpr_d[r2]); gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; case OPC2_32_RRR1_MSUBRS_Q_32_UU: - tcg_gen_sari_tl(temp, cpu_gpr_d[r1], 16); - tcg_gen_sari_tl(temp2, cpu_gpr_d[r2], 16); + tcg_gen_sari_i32(temp, cpu_gpr_d[r1], 16); + tcg_gen_sari_i32(temp2, cpu_gpr_d[r2], 16); gen_msubrs_q(cpu_gpr_d[r4], cpu_gpr_d[r3], temp, temp2, n); break; default: @@ -7634,109 +7672,109 @@ static void decode_rrr1_msubad_h(DisasContext *ctx) case OPC2_32_RRR1_MSUBAD_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBAD_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBAD_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - 
gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBAD_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); + gen_msubad_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADS_H_32_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADS_H_32_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADS_H_32_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADS_H_32_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubads_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADM_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadm_h(cpu_gpr_d[r4], 
cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADM_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADM_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADM_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadm_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADMS_H_64_LL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LL); break; case OPC2_32_RRR1_MSUBADMS_H_64_LU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_LU); break; case OPC2_32_RRR1_MSUBADMS_H_64_UL: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], 
cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UL); break; case OPC2_32_RRR1_MSUBADMS_H_64_UU: CHECK_REG_PAIR(r4); CHECK_REG_PAIR(r3); - gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4+1], cpu_gpr_d[r3], - cpu_gpr_d[r3+1], cpu_gpr_d[r1], cpu_gpr_d[r2], + gen_msubadms_h(cpu_gpr_d[r4], cpu_gpr_d[r4 + 1], cpu_gpr_d[r3], + cpu_gpr_d[r3 + 1], cpu_gpr_d[r1], cpu_gpr_d[r2], n, MODE_UU); break; case OPC2_32_RRR1_MSUBADR_H_16_LL: @@ -7781,7 +7819,7 @@ static void decode_rrrr_extract_insert(DisasContext *ctx) { uint32_t op2; int r1, r2, r3, r4; - TCGv tmp_width, tmp_pos; + TCGv_i32 tmp_width, tmp_pos; r1 = MASK_OP_RRRR_S1(ctx->opcode); r2 = MASK_OP_RRRR_S2(ctx->opcode); @@ -7789,48 +7827,48 @@ static void decode_rrrr_extract_insert(DisasContext *ctx) r4 = MASK_OP_RRRR_D(ctx->opcode); op2 = MASK_OP_RRRR_OP2(ctx->opcode); - tmp_pos = tcg_temp_new(); - tmp_width = tcg_temp_new(); + tmp_pos = tcg_temp_new_i32(); + tmp_width = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RRRR_DEXTR: - tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f); + tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f); if (r1 == r2) { - tcg_gen_rotl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos); + tcg_gen_rotl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos); } else { - TCGv msw = tcg_temp_new(); - TCGv zero = tcg_constant_tl(0); - tcg_gen_shl_tl(tmp_width, cpu_gpr_d[r1], tmp_pos); - tcg_gen_subfi_tl(msw, 32, tmp_pos); - tcg_gen_shr_tl(msw, cpu_gpr_d[r2], msw); + TCGv_i32 msw = tcg_temp_new_i32(); + TCGv_i32 zero = tcg_constant_i32(0); + tcg_gen_shl_i32(tmp_width, cpu_gpr_d[r1], tmp_pos); + tcg_gen_subfi_i32(msw, 32, tmp_pos); + tcg_gen_shr_i32(msw, cpu_gpr_d[r2], msw); /* * if pos == 0, then we do cpu_gpr_d[r2] << 32, which is undefined * behaviour. 
So check that case here and set the low bits to zero * which effectivly returns cpu_gpr_d[r1] */ - tcg_gen_movcond_tl(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw); - tcg_gen_or_tl(cpu_gpr_d[r4], tmp_width, msw); + tcg_gen_movcond_i32(TCG_COND_EQ, msw, tmp_pos, zero, zero, msw); + tcg_gen_or_i32(cpu_gpr_d[r4], tmp_width, msw); } break; case OPC2_32_RRRR_EXTR: case OPC2_32_RRRR_EXTR_U: CHECK_REG_PAIR(r3); - tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f); - tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f); - tcg_gen_add_tl(tmp_pos, tmp_pos, tmp_width); - tcg_gen_subfi_tl(tmp_pos, 32, tmp_pos); - tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos); - tcg_gen_subfi_tl(tmp_width, 32, tmp_width); + tcg_gen_andi_i32(tmp_width, cpu_gpr_d[r3 + 1], 0x1f); + tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f); + tcg_gen_add_i32(tmp_pos, tmp_pos, tmp_width); + tcg_gen_subfi_i32(tmp_pos, 32, tmp_pos); + tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], tmp_pos); + tcg_gen_subfi_i32(tmp_width, 32, tmp_width); if (op2 == OPC2_32_RRRR_EXTR) { - tcg_gen_sar_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width); + tcg_gen_sar_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width); } else { - tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width); + tcg_gen_shr_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], tmp_width); } break; case OPC2_32_RRRR_INSERT: CHECK_REG_PAIR(r3); - tcg_gen_andi_tl(tmp_width, cpu_gpr_d[r3+1], 0x1f); - tcg_gen_andi_tl(tmp_pos, cpu_gpr_d[r3], 0x1f); + tcg_gen_andi_i32(tmp_width, cpu_gpr_d[r3 + 1], 0x1f); + tcg_gen_andi_i32(tmp_pos, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], tmp_width, tmp_pos); break; @@ -7846,7 +7884,7 @@ static void decode_rrrw_extract_insert(DisasContext *ctx) int r1, r2, r3, r4; int32_t width; - TCGv temp, temp2; + TCGv_i32 temp, temp2; op2 = MASK_OP_RRRW_OP2(ctx->opcode); r1 = MASK_OP_RRRW_S1(ctx->opcode); @@ -7855,39 +7893,39 @@ static void decode_rrrw_extract_insert(DisasContext *ctx) r4 = MASK_OP_RRRW_D(ctx->opcode); width = 
MASK_OP_RRRW_WIDTH(ctx->opcode); - temp = tcg_temp_new(); + temp = tcg_temp_new_i32(); switch (op2) { case OPC2_32_RRRW_EXTR: - tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); - tcg_gen_addi_tl(temp, temp, width); - tcg_gen_subfi_tl(temp, 32, temp); - tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp); - tcg_gen_sari_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width); + tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_addi_i32(temp, temp, width); + tcg_gen_subfi_i32(temp, 32, temp); + tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], temp); + tcg_gen_sari_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], 32 - width); break; case OPC2_32_RRRW_EXTR_U: if (width == 0) { - tcg_gen_movi_tl(cpu_gpr_d[r4], 0); + tcg_gen_movi_i32(cpu_gpr_d[r4], 0); } else { - tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); - tcg_gen_shr_tl(cpu_gpr_d[r4], cpu_gpr_d[r1], temp); - tcg_gen_andi_tl(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32-width)); + tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_shr_i32(cpu_gpr_d[r4], cpu_gpr_d[r1], temp); + tcg_gen_andi_i32(cpu_gpr_d[r4], cpu_gpr_d[r4], ~0u >> (32 - width)); } break; case OPC2_32_RRRW_IMASK: - temp2 = tcg_temp_new(); + temp2 = tcg_temp_new_i32(); CHECK_REG_PAIR(r4); - tcg_gen_andi_tl(temp, cpu_gpr_d[r3], 0x1f); - tcg_gen_movi_tl(temp2, (1 << width) - 1); - tcg_gen_shl_tl(temp2, temp2, temp); - tcg_gen_shl_tl(cpu_gpr_d[r4], cpu_gpr_d[r2], temp); - tcg_gen_mov_tl(cpu_gpr_d[r4+1], temp2); + tcg_gen_andi_i32(temp, cpu_gpr_d[r3], 0x1f); + tcg_gen_movi_i32(temp2, (1 << width) - 1); + tcg_gen_shl_i32(temp2, temp2, temp); + tcg_gen_shl_i32(cpu_gpr_d[r4], cpu_gpr_d[r2], temp); + tcg_gen_mov_i32(cpu_gpr_d[r4 + 1], temp2); break; case OPC2_32_RRRW_INSERT: - temp2 = tcg_temp_new(); + temp2 = tcg_temp_new_i32(); - tcg_gen_movi_tl(temp, width); - tcg_gen_andi_tl(temp2, cpu_gpr_d[r3], 0x1f); + tcg_gen_movi_i32(temp, width); + tcg_gen_andi_i32(temp2, cpu_gpr_d[r3], 0x1f); gen_insert(cpu_gpr_d[r4], cpu_gpr_d[r1], cpu_gpr_d[r2], temp, temp2); break; default: @@ -7901,7 +7939,7 @@ 
static void decode_sys_interrupts(DisasContext *ctx) uint32_t op2; uint32_t r1; TCGLabel *l1; - TCGv tmp; + TCGv_i32 tmp; op2 = MASK_OP_SYS_OP2(ctx->opcode); r1 = MASK_OP_SYS_S1D(ctx->opcode); @@ -7912,7 +7950,7 @@ static void decode_sys_interrupts(DisasContext *ctx) break; case OPC2_32_SYS_DISABLE: if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) { - tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask); + tcg_gen_andi_i32(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask); } else { generate_trap(ctx, TRAPC_PROT, TIN1_PRIV); } @@ -7920,9 +7958,9 @@ static void decode_sys_interrupts(DisasContext *ctx) case OPC2_32_SYS_DISABLE_D: if (has_feature(ctx, TRICORE_FEATURE_16)) { if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) { - tcg_gen_extract_tl(cpu_gpr_d[r1], cpu_ICR, + tcg_gen_extract_i32(cpu_gpr_d[r1], cpu_ICR, ctx->icr_ie_offset, 1); - tcg_gen_andi_tl(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask); + tcg_gen_andi_i32(cpu_ICR, cpu_ICR, ~ctx->icr_ie_mask); } else { generate_trap(ctx, TRAPC_PROT, TIN1_PRIV); } @@ -7933,7 +7971,7 @@ static void decode_sys_interrupts(DisasContext *ctx) break; case OPC2_32_SYS_ENABLE: if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) { - tcg_gen_ori_tl(cpu_ICR, cpu_ICR, ctx->icr_ie_mask); + tcg_gen_ori_i32(cpu_ICR, cpu_ICR, ctx->icr_ie_mask); ctx->base.is_jmp = DISAS_EXIT_UPDATE; } else { generate_trap(ctx, TRAPC_PROT, TIN1_PRIV); @@ -7955,12 +7993,12 @@ static void decode_sys_interrupts(DisasContext *ctx) break; case OPC2_32_SYS_RFM: if (ctx->priv == TRICORE_PRIV_SM) { - tmp = tcg_temp_new(); + tmp = tcg_temp_new_i32(); l1 = gen_new_label(); - tcg_gen_ld32u_tl(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR)); - tcg_gen_andi_tl(tmp, tmp, MASK_DBGSR_DE); - tcg_gen_brcondi_tl(TCG_COND_NE, tmp, 1, l1); + tcg_gen_ld_i32(tmp, tcg_env, offsetof(CPUTriCoreState, DBGSR)); + tcg_gen_andi_i32(tmp, tmp, MASK_DBGSR_DE); + tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 1, l1); gen_helper_rfm(tcg_env); gen_set_label(l1); 
ctx->base.is_jmp = DISAS_EXIT; @@ -7977,7 +8015,7 @@ static void decode_sys_interrupts(DisasContext *ctx) case OPC2_32_SYS_RESTORE: if (has_feature(ctx, TRICORE_FEATURE_16)) { if (ctx->priv == TRICORE_PRIV_SM || ctx->priv == TRICORE_PRIV_UM1) { - tcg_gen_deposit_tl(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], + tcg_gen_deposit_i32(cpu_ICR, cpu_ICR, cpu_gpr_d[r1], ctx->icr_ie_offset, 1); ctx->base.is_jmp = DISAS_EXIT_UPDATE; } else { @@ -7989,13 +8027,13 @@ static void decode_sys_interrupts(DisasContext *ctx) break; case OPC2_32_SYS_TRAPSV: l1 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_SV, 0, l1); + tcg_gen_brcondi_i32(TCG_COND_GE, cpu_PSW_SV, 0, l1); generate_trap(ctx, TRAPC_ASSERT, TIN5_SOVF); gen_set_label(l1); break; case OPC2_32_SYS_TRAPV: l1 = gen_new_label(); - tcg_gen_brcondi_tl(TCG_COND_GE, cpu_PSW_V, 0, l1); + tcg_gen_brcondi_i32(TCG_COND_GE, cpu_PSW_V, 0, l1); generate_trap(ctx, TRAPC_ASSERT, TIN5_OVF); gen_set_label(l1); break; @@ -8011,7 +8049,7 @@ static void decode_32Bit_opc(DisasContext *ctx) int32_t address, const16; int8_t b, const4; int32_t bpos; - TCGv temp, temp2, temp3; + TCGv_i32 temp, temp2, temp3; op1 = MASK_OP_MAJOR(ctx->opcode); @@ -8044,18 +8082,18 @@ static void decode_32Bit_opc(DisasContext *ctx) address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); temp = tcg_constant_i32(EA_ABS_FORMAT(address)); - temp2 = tcg_temp_new(); + temp2 = tcg_temp_new_i32(); - tcg_gen_shri_tl(temp2, cpu_gpr_d[r1], 16); - tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_LEUW); + tcg_gen_shri_i32(temp2, cpu_gpr_d[r1], 16); + tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_LEUW); break; case OPC1_32_ABS_LD_Q: address = MASK_OP_ABS_OFF18(ctx->opcode); r1 = MASK_OP_ABS_S1D(ctx->opcode); temp = tcg_constant_i32(EA_ABS_FORMAT(address)); - tcg_gen_qemu_ld_tl(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); - tcg_gen_shli_tl(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); + tcg_gen_qemu_ld_i32(cpu_gpr_d[r1], temp, ctx->mem_idx, MO_LEUW); + 
tcg_gen_shli_i32(cpu_gpr_d[r1], cpu_gpr_d[r1], 16); break; case OPCM_32_ABS_LEA_LHA: address = MASK_OP_ABS_OFF18(ctx->opcode); @@ -8064,13 +8102,13 @@ static void decode_32Bit_opc(DisasContext *ctx) if (has_feature(ctx, TRICORE_FEATURE_162)) { op2 = MASK_OP_ABS_OP2(ctx->opcode); if (op2 == OPC2_32_ABS_LHA) { - tcg_gen_movi_tl(cpu_gpr_a[r1], address << 14); + tcg_gen_movi_i32(cpu_gpr_a[r1], address << 14); break; } /* otherwise translate regular LEA */ } - tcg_gen_movi_tl(cpu_gpr_a[r1], EA_ABS_FORMAT(address)); + tcg_gen_movi_i32(cpu_gpr_a[r1], EA_ABS_FORMAT(address)); break; /* ABSB-format */ case OPC1_32_ABSB_ST_T: @@ -8079,12 +8117,12 @@ static void decode_32Bit_opc(DisasContext *ctx) bpos = MASK_OP_ABSB_BPOS(ctx->opcode); temp = tcg_constant_i32(EA_ABS_FORMAT(address)); - temp2 = tcg_temp_new(); + temp2 = tcg_temp_new_i32(); - tcg_gen_qemu_ld_tl(temp2, temp, ctx->mem_idx, MO_UB); - tcg_gen_andi_tl(temp2, temp2, ~(0x1u << bpos)); - tcg_gen_ori_tl(temp2, temp2, (b << bpos)); - tcg_gen_qemu_st_tl(temp2, temp, ctx->mem_idx, MO_UB); + tcg_gen_qemu_ld_i32(temp2, temp, ctx->mem_idx, MO_UB); + tcg_gen_andi_i32(temp2, temp2, ~(0x1u << bpos)); + tcg_gen_ori_i32(temp2, temp2, (b << bpos)); + tcg_gen_qemu_st_i32(temp2, temp, ctx->mem_idx, MO_UB); break; /* B-format */ case OPC1_32_B_CALL: @@ -8206,13 +8244,13 @@ static void decode_32Bit_opc(DisasContext *ctx) r3 = MASK_OP_RCRR_D(ctx->opcode); const16 = MASK_OP_RCRR_CONST4(ctx->opcode); temp = tcg_constant_i32(const16); - temp2 = tcg_temp_new(); /* width*/ - temp3 = tcg_temp_new(); /* pos */ + temp2 = tcg_temp_new_i32(); /* width*/ + temp3 = tcg_temp_new_i32(); /* pos */ CHECK_REG_PAIR(r2); - tcg_gen_andi_tl(temp2, cpu_gpr_d[r2 + 1], 0x1f); - tcg_gen_andi_tl(temp3, cpu_gpr_d[r2], 0x1f); + tcg_gen_andi_i32(temp2, cpu_gpr_d[r2 + 1], 0x1f); + tcg_gen_andi_i32(temp3, cpu_gpr_d[r2], 0x1f); gen_insert(cpu_gpr_d[r3], cpu_gpr_d[r1], temp, temp2, temp3); break; @@ -8280,7 +8318,7 @@ static void decode_32Bit_opc(DisasContext *ctx) r3 
= MASK_OP_RRPW_D(ctx->opcode); const16 = MASK_OP_RRPW_POS(ctx->opcode); - tcg_gen_extract2_tl(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1], + tcg_gen_extract2_i32(cpu_gpr_d[r3], cpu_gpr_d[r2], cpu_gpr_d[r1], 32 - const16); break; /* RRR Format */ @@ -8329,10 +8367,10 @@ static void decode_32Bit_opc(DisasContext *ctx) decode_sys_interrupts(ctx); break; case OPC1_32_SYS_RSTV: - tcg_gen_movi_tl(cpu_PSW_V, 0); - tcg_gen_mov_tl(cpu_PSW_SV, cpu_PSW_V); - tcg_gen_mov_tl(cpu_PSW_AV, cpu_PSW_V); - tcg_gen_mov_tl(cpu_PSW_SAV, cpu_PSW_V); + tcg_gen_movi_i32(cpu_PSW_V, 0); + tcg_gen_mov_i32(cpu_PSW_SV, cpu_PSW_V); + tcg_gen_mov_i32(cpu_PSW_AV, cpu_PSW_V); + tcg_gen_mov_i32(cpu_PSW_SAV, cpu_PSW_V); break; default: generate_trap(ctx, TRAPC_INSN_ERR, TIN2_IOPC); @@ -8375,7 +8413,7 @@ static void tricore_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) tcg_gen_insn_start(ctx->base.pc_next); } -static bool insn_crosses_page(CPUTriCoreState *env, DisasContext *ctx) +static bool insn_crosses_page(DisasContext *ctx, CPUTriCoreState *env) { /* * Return true if the insn at ctx->base.pc_next might cross a page boundary. 
@@ -8413,12 +8451,12 @@ static void tricore_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) ctx->base.pc_next = ctx->pc_succ_insn; if (ctx->base.is_jmp == DISAS_NEXT) { - target_ulong page_start; + vaddr page_start; page_start = ctx->base.pc_first & TARGET_PAGE_MASK; if (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE || (ctx->base.pc_next - page_start >= TARGET_PAGE_SIZE - 3 - && insn_crosses_page(env, ctx))) { + && insn_crosses_page(ctx, env))) { ctx->base.is_jmp = DISAS_TOO_MANY; } } @@ -8479,14 +8517,14 @@ void cpu_state_reset(CPUTriCoreState *env) static void tricore_tcg_init_csfr(void) { - cpu_PCXI = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PCXI), "PCXI"); - cpu_PSW = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PSW), "PSW"); - cpu_PC = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PC), "PC"); - cpu_ICR = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, ICR), "ICR"); + cpu_PCXI = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PCXI), "PCXI"); + cpu_PSW = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PSW), "PSW"); + cpu_PC = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PC), "PC"); + cpu_ICR = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, ICR), "ICR"); } void tricore_tcg_init(void) @@ -8495,30 +8533,32 @@ void tricore_tcg_init(void) /* reg init */ for (i = 0 ; i < 16 ; i++) { - cpu_gpr_a[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, gpr_a[i]), - regnames_a[i]); + cpu_gpr_a[i] = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, + gpr_a[i]), + regnames_a[i]); } for (i = 0 ; i < 16 ; i++) { - cpu_gpr_d[i] = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, gpr_d[i]), - regnames_d[i]); + cpu_gpr_d[i] = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, + gpr_d[i]), + regnames_d[i]); } tricore_tcg_init_csfr(); /* init PSW flag cache */ - cpu_PSW_C = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PSW_USB_C), 
- "PSW_C"); - cpu_PSW_V = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PSW_USB_V), - "PSW_V"); - cpu_PSW_SV = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PSW_USB_SV), - "PSW_SV"); - cpu_PSW_AV = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PSW_USB_AV), - "PSW_AV"); - cpu_PSW_SAV = tcg_global_mem_new(tcg_env, - offsetof(CPUTriCoreState, PSW_USB_SAV), - "PSW_SAV"); + cpu_PSW_C = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PSW_USB_C), + "PSW_C"); + cpu_PSW_V = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PSW_USB_V), + "PSW_V"); + cpu_PSW_SV = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PSW_USB_SV), + "PSW_SV"); + cpu_PSW_AV = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PSW_USB_AV), + "PSW_AV"); + cpu_PSW_SAV = tcg_global_mem_new_i32(tcg_env, + offsetof(CPUTriCoreState, PSW_USB_SAV), + "PSW_SAV"); } diff --git a/target/xtensa/cpu.c b/target/xtensa/cpu.c index ea9b6df3aa241..1eeed44e33663 100644 --- a/target/xtensa/cpu.c +++ b/target/xtensa/cpu.c @@ -59,13 +59,13 @@ static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs) { CPUXtensaState *env = cpu_env(cs); uint32_t flags = 0; - target_ulong cs_base = 0; + uint64_t cs_base = 0; flags |= xtensa_get_ring(env); if (env->sregs[PS] & PS_EXCM) { flags |= XTENSA_TBFLAG_EXCM; } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_LOOP)) { - target_ulong lend_dist = + uint64_t lend_dist = env->sregs[LEND] - (env->pc & -(1u << TARGET_PAGE_BITS)); /* @@ -83,7 +83,7 @@ static TCGTBCPUState xtensa_get_tb_cpu_state(CPUState *cs) * for the TB that contains this instruction. 
*/ if (lend_dist < (1u << TARGET_PAGE_BITS) + env->config->max_insn_size) { - target_ulong lbeg_off = env->sregs[LEND] - env->sregs[LBEG]; + uint64_t lbeg_off = env->sregs[LEND] - env->sregs[LBEG]; cs_base = lend_dist; if (lbeg_off < 256) { diff --git a/target/xtensa/translate.c b/target/xtensa/translate.c index 34ae2f4e16241..bb8d2ed86cf5e 100644 --- a/target/xtensa/translate.c +++ b/target/xtensa/translate.c @@ -1166,7 +1166,7 @@ static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUXtensaState *env = cpu_env(cpu); - target_ulong page_start; + vaddr page_start; /* These two conditions only apply to the first insn in the TB, but this is the first TranslateOps hook that allows exiting. */ diff --git a/target/xtensa/xtensa-semi.c b/target/xtensa/xtensa-semi.c index 636f421da2b45..431c263dc57db 100644 --- a/target/xtensa/xtensa-semi.c +++ b/target/xtensa/xtensa-semi.c @@ -32,6 +32,7 @@ #include "exec/target_page.h" #include "semihosting/semihost.h" #include "semihosting/uaccess.h" +#include "system/memory.h" #include "qapi/error.h" #include "qemu/log.h" @@ -192,7 +193,9 @@ void xtensa_sim_open_console(Chardev *chr) void HELPER(simcall)(CPUXtensaState *env) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; CPUState *cs = env_cpu(env); + AddressSpace *as = cs->as; uint32_t *regs = env->regs; switch (regs[2]) { @@ -215,7 +218,7 @@ void HELPER(simcall)(CPUXtensaState *env) TARGET_PAGE_SIZE - (vaddr & (TARGET_PAGE_SIZE - 1)); uint32_t io_sz = page_left < len ? 
page_left : len; hwaddr sz = io_sz; - void *buf = cpu_physical_memory_map(paddr, &sz, !is_write); + void *buf = address_space_map(as, paddr, &sz, !is_write, attrs); uint32_t io_done; bool error = false; @@ -261,7 +264,7 @@ void HELPER(simcall)(CPUXtensaState *env) error = true; io_done = 0; } - cpu_physical_memory_unmap(buf, sz, !is_write, io_done); + address_space_unmap(as, buf, sz, !is_write, io_done); } else { error = true; regs[3] = TARGET_EINVAL; @@ -408,11 +411,11 @@ void HELPER(simcall)(CPUXtensaState *env) while (sz) { hwaddr len = sz; - void *buf = cpu_physical_memory_map(base, &len, 1); + void *buf = address_space_map(as, base, &len, true, attrs); if (buf && len) { memset(buf, regs[4], len); - cpu_physical_memory_unmap(buf, len, 1, len); + address_space_unmap(as, buf, len, true, len); } else { len = 1; } diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc index 3b088b7bd9727..caf79c742d910 100644 --- a/tcg/aarch64/tcg-target.c.inc +++ b/tcg/aarch64/tcg-target.c.inc @@ -1668,7 +1668,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->oi = oi; ldst->addr_reg = addr_reg; - /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {tmp0,tmp1}. */ + /* Load CPUTLBDescFast.{mask,table} into {tmp0,tmp1}. 
*/ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); tcg_out_insn(s, 3314, LDP, TCG_REG_TMP0, TCG_REG_TMP1, TCG_AREG0, diff --git a/tcg/arm/tcg-target.c.inc b/tcg/arm/tcg-target.c.inc index 836894b16ade7..87ca66bb0287f 100644 --- a/tcg/arm/tcg-target.c.inc +++ b/tcg/arm/tcg-target.c.inc @@ -975,7 +975,8 @@ static void tgen_deposit(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, TCGReg a2, unsigned ofs, unsigned len) { /* bfi/bfc */ - tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a1 + tcg_debug_assert(a0 == a1); + tcg_out32(s, 0x07c00010 | (COND_AL << 28) | (a0 << 12) | a2 | (ofs << 7) | ((ofs + len - 1) << 16)); } @@ -1420,7 +1421,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, ldst->oi = oi; ldst->addr_reg = addr; - /* Load cpu->neg.tlb.f[mmu_idx].{mask,table} into {r0,r1}. */ + /* Load CPUTLBDescFast.{mask,table} into {r0,r1}. */ QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 4); tcg_out_ldrd_8(s, COND_AL, TCG_REG_R0, TCG_AREG0, fast_off); diff --git a/tcg/i386/tcg-target-opc.h.inc b/tcg/i386/tcg-target-opc.h.inc index 8cc0dbaeafee2..8a5cb34dbe79d 100644 --- a/tcg/i386/tcg-target-opc.h.inc +++ b/tcg/i386/tcg-target-opc.h.inc @@ -35,3 +35,4 @@ DEF(x86_punpckh_vec, 1, 2, 0, TCG_OPF_VECTOR) DEF(x86_vpshldi_vec, 1, 2, 1, TCG_OPF_VECTOR) DEF(x86_vpshldv_vec, 1, 3, 0, TCG_OPF_VECTOR) DEF(x86_vpshrdv_vec, 1, 3, 0, TCG_OPF_VECTOR) +DEF(x86_vgf2p8affineqb_vec, 1, 2, 1, TCG_OPF_VECTOR) diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc index 088c6c9264b01..ee272668619a7 100644 --- a/tcg/i386/tcg-target.c.inc +++ b/tcg/i386/tcg-target.c.inc @@ -451,6 +451,7 @@ static bool tcg_target_const_match(int64_t val, int ct, #define OPC_VPBROADCASTW (0x79 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTD (0x58 | P_EXT38 | P_DATA16) #define OPC_VPBROADCASTQ (0x59 | P_EXT38 | P_DATA16) +#define 
OPC_VGF2P8AFFINEQB (0xce | P_EXT3A | P_DATA16 | P_VEXW) #define OPC_VPMOVM2B (0x28 | P_EXT38 | P_SIMDF3 | P_EVEX) #define OPC_VPMOVM2W (0x28 | P_EXT38 | P_SIMDF3 | P_VEXW | P_EVEX) #define OPC_VPMOVM2D (0x38 | P_EXT38 | P_SIMDF3 | P_EVEX) @@ -4084,6 +4085,10 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, insn = vpshldi_insn[vece]; sub = args[3]; goto gen_simd_imm8; + case INDEX_op_x86_vgf2p8affineqb_vec: + insn = OPC_VGF2P8AFFINEQB; + sub = args[3]; + goto gen_simd_imm8; case INDEX_op_not_vec: insn = OPC_VPTERNLOGQ; @@ -4188,6 +4193,7 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) case INDEX_op_x86_punpckl_vec: case INDEX_op_x86_punpckh_vec: case INDEX_op_x86_vpshldi_vec: + case INDEX_op_x86_vgf2p8affineqb_vec: #if TCG_TARGET_REG_BITS == 32 case INDEX_op_dup2_vec: #endif @@ -4336,12 +4342,46 @@ int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece) } } +static void gen_vgf2p8affineqb0(TCGType type, TCGv_vec v0, + TCGv_vec v1, uint64_t matrix) +{ + vec_gen_4(INDEX_op_x86_vgf2p8affineqb_vec, type, MO_8, + tcgv_vec_arg(v0), tcgv_vec_arg(v1), + tcgv_vec_arg(tcg_constant_vec(type, MO_64, matrix)), 0); +} + static void expand_vec_shi(TCGType type, unsigned vece, bool right, TCGv_vec v0, TCGv_vec v1, TCGArg imm) { + static const uint64_t gf2_shi[2][8] = { + /* left shift */ + { 0, + 0x0001020408102040ull, + 0x0000010204081020ull, + 0x0000000102040810ull, + 0x0000000001020408ull, + 0x0000000000010204ull, + 0x0000000000000102ull, + 0x0000000000000001ull }, + /* right shift */ + { 0, + 0x0204081020408000ull, + 0x0408102040800000ull, + 0x0810204080000000ull, + 0x1020408000000000ull, + 0x2040800000000000ull, + 0x4080000000000000ull, + 0x8000000000000000ull } + }; uint8_t mask; tcg_debug_assert(vece == MO_8); + + if (cpuinfo & CPUINFO_GFNI) { + gen_vgf2p8affineqb0(type, v0, v1, gf2_shi[right][imm]); + return; + } + if (right) { mask = 0xff >> imm; tcg_gen_shri_vec(MO_16, v0, v1, imm); @@ -4355,10 +4395,31 @@ static void 
expand_vec_shi(TCGType type, unsigned vece, bool right, static void expand_vec_sari(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGArg imm) { + static const uint64_t gf2_sar[8] = { + 0, + 0x0204081020408080ull, + 0x0408102040808080ull, + 0x0810204080808080ull, + 0x1020408080808080ull, + 0x2040808080808080ull, + 0x4080808080808080ull, + 0x8080808080808080ull, + }; TCGv_vec t1, t2; + if (imm >= (8 << vece) - 1) { + tcg_gen_cmp_vec(TCG_COND_LT, vece, v0, v1, + tcg_constant_vec(type, MO_64, 0)); + return; + } + switch (vece) { case MO_8: + if (cpuinfo & CPUINFO_GFNI) { + gen_vgf2p8affineqb0(type, v0, v1, gf2_sar[imm]); + break; + } + /* Unpack to 16-bit, shift, and repack. */ t1 = tcg_temp_new_vec(type); t2 = tcg_temp_new_vec(type); @@ -4393,8 +4454,8 @@ static void expand_vec_sari(TCGType type, unsigned vece, /* Otherwise we will need to use a compare vs 0 to produce * the sign-extend, shift and merge. */ - tcg_gen_cmp_vec(TCG_COND_GT, MO_64, t1, - tcg_constant_vec(type, MO_64, 0), v1); + tcg_gen_cmp_vec(TCG_COND_LT, MO_64, t1, v1, + tcg_constant_vec(type, MO_64, 0)); tcg_gen_shri_vec(MO_64, v0, v1, imm); tcg_gen_shli_vec(MO_64, t1, t1, 64 - imm); tcg_gen_or_vec(MO_64, v0, v0, t1); @@ -4410,12 +4471,30 @@ static void expand_vec_sari(TCGType type, unsigned vece, static void expand_vec_rotli(TCGType type, unsigned vece, TCGv_vec v0, TCGv_vec v1, TCGArg imm) { + static const uint64_t gf2_rol[8] = { + 0, + 0x8001020408102040ull, + 0x4080010204081020ull, + 0x2040800102040810ull, + 0x1020408001020408ull, + 0x0810204080010204ull, + 0x0408102040800102ull, + 0x0204081020408001ull, + }; TCGv_vec t; - if (vece != MO_8 && have_avx512vbmi2) { - vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece, - tcgv_vec_arg(v0), tcgv_vec_arg(v1), tcgv_vec_arg(v1), imm); - return; + if (vece == MO_8) { + if (cpuinfo & CPUINFO_GFNI) { + gen_vgf2p8affineqb0(type, v0, v1, gf2_rol[imm]); + return; + } + } else { + if (have_avx512vbmi2) { + vec_gen_4(INDEX_op_x86_vpshldi_vec, type, vece, + 
tcgv_vec_arg(v0), tcgv_vec_arg(v1), + tcgv_vec_arg(v1), imm); + return; + } } t = tcg_temp_new_vec(type); diff --git a/tcg/mips/tcg-target-has.h b/tcg/mips/tcg-target-has.h index b9eb338528869..88f0145efba88 100644 --- a/tcg/mips/tcg-target-has.h +++ b/tcg/mips/tcg-target-has.h @@ -39,11 +39,9 @@ extern bool use_mips32r2_instructions; #endif /* optional instructions */ -#if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_extr_i64_i32 1 #define TCG_TARGET_HAS_ext32s_i64 1 #define TCG_TARGET_HAS_ext32u_i64 1 -#endif /* optional instructions detected at runtime */ #define TCG_TARGET_HAS_qemu_ldst_i128 0 diff --git a/tcg/mips/tcg-target-reg-bits.h b/tcg/mips/tcg-target-reg-bits.h index 56fe0a725e9dd..ee346a3f25608 100644 --- a/tcg/mips/tcg-target-reg-bits.h +++ b/tcg/mips/tcg-target-reg-bits.h @@ -7,12 +7,10 @@ #ifndef TCG_TARGET_REG_BITS_H #define TCG_TARGET_REG_BITS_H -#if _MIPS_SIM == _ABIO32 -# define TCG_TARGET_REG_BITS 32 -#elif _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64 -# define TCG_TARGET_REG_BITS 64 -#else +#if !defined(_MIPS_SIM) || _MIPS_SIM != _ABI64 # error "Unknown ABI" #endif +#define TCG_TARGET_REG_BITS 64 + #endif diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc index 400eafbab4b6f..60c703a093990 100644 --- a/tcg/mips/tcg-target.c.inc +++ b/tcg/mips/tcg-target.c.inc @@ -26,27 +26,12 @@ /* used for function call generation */ #define TCG_TARGET_STACK_ALIGN 16 -#if _MIPS_SIM == _ABIO32 -# define TCG_TARGET_CALL_STACK_OFFSET 16 -# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF -#else -# define TCG_TARGET_CALL_STACK_OFFSET 0 -# define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL -#endif +#define TCG_TARGET_CALL_STACK_OFFSET 0 #define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL #define TCG_TARGET_CALL_ARG_I128 
TCG_CALL_ARG_EVEN -#if TCG_TARGET_REG_BITS == 32 -# define LO_OFF (HOST_BIG_ENDIAN * 4) -# define HI_OFF (4 - LO_OFF) -#else -/* Assert at compile-time that these values are never used for 64-bit. */ -# define LO_OFF ({ qemu_build_not_reached(); 0; }) -# define HI_OFF ({ qemu_build_not_reached(); 0; }) -#endif - #ifdef CONFIG_DEBUG_TCG static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { "zero", @@ -90,11 +75,7 @@ static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { #define TCG_TMP3 TCG_REG_T7 #define TCG_GUEST_BASE_REG TCG_REG_S7 -#if TCG_TARGET_REG_BITS == 64 #define TCG_REG_TB TCG_REG_S6 -#else -#define TCG_REG_TB ({ qemu_build_not_reached(); TCG_REG_ZERO; }) -#endif /* check if we really need so many registers :P */ static const int tcg_target_reg_alloc_order[] = { @@ -135,12 +116,10 @@ static const TCGReg tcg_target_call_iarg_regs[] = { TCG_REG_A1, TCG_REG_A2, TCG_REG_A3, -#if _MIPS_SIM == _ABIN32 || _MIPS_SIM == _ABI64 TCG_REG_T0, TCG_REG_T1, TCG_REG_T2, TCG_REG_T3, -#endif }; static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot) @@ -361,10 +340,6 @@ typedef enum { OPC_SYNC_ACQUIRE = OPC_SYNC | 0x11 << 6, OPC_SYNC_RELEASE = OPC_SYNC | 0x12 << 6, OPC_SYNC_RMB = OPC_SYNC | 0x13 << 6, - - /* Aliases for convenience. */ - ALIAS_PADD = sizeof(void *) == 4 ? OPC_ADDU : OPC_DADDU, - ALIAS_PADDI = sizeof(void *) == 4 ? OPC_ADDIU : OPC_DADDIU, } MIPSInsn; /* @@ -567,7 +542,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long tmp; int sh, lo; - if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + if (type == TCG_TYPE_I32) { arg = (int32_t)arg; } @@ -575,7 +550,6 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, if (tcg_out_movi_two(s, ret, arg)) { return; } - assert(TCG_TARGET_REG_BITS == 64); /* Load addresses within 2GB of TB with 1 or 3 insns. 
*/ tmp = tcg_tbrel_diff(s, (void *)arg); @@ -638,8 +612,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long arg) { - TCGReg tbreg = TCG_TARGET_REG_BITS == 64 ? TCG_REG_TB : 0; - tcg_out_movi_int(s, type, ret, arg, tbreg); + tcg_out_movi_int(s, type, ret, arg, TCG_REG_TB); } static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs) @@ -666,7 +639,6 @@ static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs) static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out_opc_sa(s, OPC_SLL, rd, rs, 0); } @@ -709,7 +681,6 @@ static void tcg_out_bswap_subr(TCGContext *s, const tcg_insn_unit *sub) static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); if (use_mips32r2_instructions) { tcg_out_opc_bf(s, OPC_DEXT, ret, arg, 31, 0); } else { @@ -725,7 +696,7 @@ static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, if (ofs != lo) { tcg_out_movi(s, TCG_TYPE_PTR, TCG_TMP0, ofs - lo); if (addr != TCG_REG_ZERO) { - tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP0, TCG_TMP0, addr); + tcg_out_opc_reg(s, OPC_DADDU, TCG_TMP0, TCG_TMP0, addr); } addr = TCG_TMP0; } @@ -735,20 +706,14 @@ static void tcg_out_ldst(TCGContext *s, MIPSInsn opc, TCGReg data, static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - MIPSInsn opc = OPC_LD; - if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { - opc = OPC_LW; - } + MIPSInsn opc = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD; tcg_out_ldst(s, opc, arg, arg1, arg2); } static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, intptr_t arg2) { - MIPSInsn opc = OPC_SD; - if (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32) { - opc = OPC_SW; - } + MIPSInsn opc = type == TCG_TYPE_I32 ? 
OPC_SW : OPC_SD; tcg_out_ldst(s, opc, arg, arg1, arg2); } @@ -926,72 +891,6 @@ void tcg_out_br(TCGContext *s, TCGLabel *l) tgen_brcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO, l); } -static int tcg_out_setcond2_int(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) -{ - int flags = 0; - - switch (cond) { - case TCG_COND_EQ: - flags |= SETCOND_INV; - /* fall through */ - case TCG_COND_NE: - flags |= SETCOND_NEZ; - tcg_out_opc_reg(s, OPC_XOR, TCG_TMP0, al, bl); - tcg_out_opc_reg(s, OPC_XOR, TCG_TMP1, ah, bh); - tcg_out_opc_reg(s, OPC_OR, ret, TCG_TMP0, TCG_TMP1); - break; - - default: - tgen_setcond(s, TCG_TYPE_I32, TCG_COND_EQ, TCG_TMP0, ah, bh); - tgen_setcond(s, TCG_TYPE_I32, tcg_unsigned_cond(cond), - TCG_TMP1, al, bl); - tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP0); - tgen_setcond(s, TCG_TYPE_I32, tcg_high_cond(cond), TCG_TMP0, ah, bh); - tcg_out_opc_reg(s, OPC_OR, ret, TCG_TMP0, TCG_TMP1); - break; - } - return ret | flags; -} - -static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg al, TCGReg ah, - TCGArg bl, bool const_bl, - TCGArg bh, bool const_bh) -{ - int tmpflags = tcg_out_setcond2_int(s, cond, ret, al, ah, bl, bh); - tcg_out_setcond_end(s, ret, tmpflags); -} - -#if TCG_TARGET_REG_BITS != 32 -__attribute__((unused)) -#endif -static const TCGOutOpSetcond2 outop_setcond2 = { - .base.static_constraint = C_O1_I4(r, r, r, rz, rz), - .out = tgen_setcond2, -}; - -static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, - TCGArg bl, bool const_bl, - TCGArg bh, bool const_bh, TCGLabel *l) -{ - int tmpflags = tcg_out_setcond2_int(s, cond, TCG_TMP0, al, ah, bl, bh); - TCGReg tmp = tmpflags & ~SETCOND_FLAGS; - MIPSInsn b_opc = tmpflags & SETCOND_INV ? 
OPC_BEQ : OPC_BNE; - - tcg_out_reloc(s, s->code_ptr, R_MIPS_PC16, l, 0); - tcg_out_opc_br(s, b_opc, tmp, TCG_REG_ZERO); - tcg_out_nop(s); -} - -#if TCG_TARGET_REG_BITS != 32 -__attribute__((unused)) -#endif -static const TCGOutOpBrcond2 outop_brcond2 = { - .base.static_constraint = C_O0_I4(r, r, rz, rz), - .out = tgen_brcond2, -}; - static void tgen_movcond(TCGContext *s, TCGType type, TCGCond cond, TCGReg ret, TCGReg c1, TCGArg c2, bool const_c2, TCGArg v1, bool const_v1, TCGArg v2, bool const_v2) @@ -1053,17 +952,11 @@ static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail) * Note that __mips_abicalls requires the called function's address * to be loaded into $25 (t9), even if a direct branch is in range. * - * For n64, always drop the pointer into the constant pool. - * We can re-use helper addresses often and do not want any - * of the longer sequences tcg_out_movi may try. + * We can re-use helper addresses often; always drop the pointer + * into the constant pool. */ - if (sizeof(uintptr_t) == 8) { - tcg_out_movi_pool(s, TCG_REG_T9, (uintptr_t)arg, TCG_REG_TB); - } else { - tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T9, (uintptr_t)arg); - } + tcg_out_movi_pool(s, TCG_REG_T9, (uintptr_t)arg, TCG_REG_TB); - /* But do try a direct branch, allowing the cpu better insn prefetch. */ if (tail) { if (!tcg_out_opc_jmp(s, OPC_J, arg)) { tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_T9, 0); @@ -1197,7 +1090,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP1, TCG_AREG0, table_off); /* Extract the TLB index from the address into TMP3. 
*/ - if (TCG_TARGET_REG_BITS == 32 || addr_type == TCG_TYPE_I32) { + if (addr_type == TCG_TYPE_I32) { tcg_out_opc_sa(s, OPC_SRL, TCG_TMP3, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); } else { @@ -1206,10 +1099,10 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_opc_reg(s, OPC_AND, TCG_TMP3, TCG_TMP3, TCG_TMP0); /* Add the tlb_table pointer, creating the CPUTLBEntry address. */ - tcg_out_opc_reg(s, ALIAS_PADD, TCG_TMP3, TCG_TMP3, TCG_TMP1); + tcg_out_opc_reg(s, OPC_DADDU, TCG_TMP3, TCG_TMP3, TCG_TMP1); /* Load the tlb comparator. */ - if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { + if (addr_type == TCG_TYPE_I32) { tcg_out_ld(s, TCG_TYPE_I32, TCG_TMP0, TCG_TMP3, cmp_off + HOST_BIG_ENDIAN * 4); } else { @@ -1226,8 +1119,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, */ tcg_out_movi(s, addr_type, TCG_TMP1, TARGET_PAGE_MASK | a_mask); if (a_mask < s_mask) { - tcg_out_opc_imm(s, (TCG_TARGET_REG_BITS == 32 - || addr_type == TCG_TYPE_I32 + tcg_out_opc_imm(s, (addr_type == TCG_TYPE_I32 ? OPC_ADDIU : OPC_DADDIU), TCG_TMP2, addr, s_mask - a_mask); tcg_out_opc_reg(s, OPC_AND, TCG_TMP1, TCG_TMP1, TCG_TMP2); @@ -1236,7 +1128,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } /* Zero extend a 32-bit guest address for a 64-bit host. 
*/ - if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { + if (addr_type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_TMP2, addr); addr = TCG_TMP2; } @@ -1246,7 +1138,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, /* delay slot */ base = TCG_TMP3; - tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_TMP3, addr); + tcg_out_opc_reg(s, OPC_DADDU, base, TCG_TMP3, addr); } else { if (a_mask && (use_mips32r6_instructions || a_bits != s_bits)) { ldst = new_ldst_label(s); @@ -1269,15 +1161,15 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, } base = addr; - if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { + if (addr_type == TCG_TYPE_I32) { tcg_out_ext32u(s, TCG_REG_A0, base); base = TCG_REG_A0; } if (guest_base) { if (guest_base == (int16_t)guest_base) { - tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_A0, base, guest_base); + tcg_out_opc_imm(s, OPC_DADDIU, TCG_REG_A0, base, guest_base); } else { - tcg_out_opc_reg(s, ALIAS_PADD, TCG_REG_A0, base, + tcg_out_opc_reg(s, OPC_DADDU, TCG_REG_A0, base, TCG_GUEST_BASE_REG); } base = TCG_REG_A0; @@ -1305,7 +1197,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, tcg_out_opc_imm(s, OPC_LH, lo, base, 0); break; case MO_UL: - if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64) { + if (type == TCG_TYPE_I64) { tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); break; } @@ -1314,16 +1206,7 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; case MO_UQ: - /* Prefer to load from offset 0 first, but allow for overlap. */ - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_opc_imm(s, OPC_LD, lo, base, 0); - } else if (HOST_BIG_ENDIAN ? 
hi != base : lo == base) { - tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); - tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); - } else { - tcg_out_opc_imm(s, OPC_LW, lo, base, LO_OFF); - tcg_out_opc_imm(s, OPC_LW, hi, base, HI_OFF); - } + tcg_out_opc_imm(s, OPC_LD, lo, base, 0); break; default: g_assert_not_reached(); @@ -1365,21 +1248,14 @@ static void tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi, case MO_32: tcg_out_opc_imm(s, lw1, lo, base, 0); tcg_out_opc_imm(s, lw2, lo, base, 3); - if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I64 && !sgn) { + if (type == TCG_TYPE_I64 && !sgn) { tcg_out_ext32u(s, lo, lo); } break; case MO_64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_opc_imm(s, ld1, lo, base, 0); - tcg_out_opc_imm(s, ld2, lo, base, 7); - } else { - tcg_out_opc_imm(s, lw1, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 0); - tcg_out_opc_imm(s, lw2, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 3); - tcg_out_opc_imm(s, lw1, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 0); - tcg_out_opc_imm(s, lw2, HOST_BIG_ENDIAN ? 
lo : hi, base, 4 + 3); - } + tcg_out_opc_imm(s, ld1, lo, base, 0); + tcg_out_opc_imm(s, ld2, lo, base, 7); break; default: @@ -1415,36 +1291,8 @@ static const TCGOutOpQemuLdSt outop_qemu_ld = { .out = tgen_qemu_ld, }; -static void tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, - TCGReg datahi, TCGReg addr, MemOpIdx oi) -{ - MemOp opc = get_memop(oi); - TCGLabelQemuLdst *ldst; - HostAddress h; - - tcg_debug_assert(TCG_TARGET_REG_BITS == 32); - ldst = prepare_host_addr(s, &h, addr, oi, true); - - if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { - tcg_out_qemu_ld_direct(s, datalo, datahi, h.base, opc, type); - } else { - tcg_out_qemu_ld_unalign(s, datalo, datahi, h.base, opc, type); - } - - if (ldst) { - ldst->type = type; - ldst->datalo_reg = datalo; - ldst->datahi_reg = datahi; - ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); - } -} - static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { - /* Ensure that the mips32 code is compiled but discarded for mips64. */ - .base.static_constraint = - TCG_TARGET_REG_BITS == 32 ? C_O2_I1(r, r, r) : C_NotImplemented, - .out = - TCG_TARGET_REG_BITS == 32 ? tgen_qemu_ld2 : NULL, + .base.static_constraint = C_NotImplemented, }; static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, @@ -1461,12 +1309,7 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi, tcg_out_opc_imm(s, OPC_SW, lo, base, 0); break; case MO_64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_opc_imm(s, OPC_SD, lo, base, 0); - } else { - tcg_out_opc_imm(s, OPC_SW, HOST_BIG_ENDIAN ? hi : lo, base, 0); - tcg_out_opc_imm(s, OPC_SW, HOST_BIG_ENDIAN ? 
lo : hi, base, 4); - } + tcg_out_opc_imm(s, OPC_SD, lo, base, 0); break; default: g_assert_not_reached(); @@ -1494,15 +1337,8 @@ static void tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi, break; case MO_64: - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_opc_imm(s, sd1, lo, base, 0); - tcg_out_opc_imm(s, sd2, lo, base, 7); - } else { - tcg_out_opc_imm(s, sw1, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 0); - tcg_out_opc_imm(s, sw2, HOST_BIG_ENDIAN ? hi : lo, base, 0 + 3); - tcg_out_opc_imm(s, sw1, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 0); - tcg_out_opc_imm(s, sw2, HOST_BIG_ENDIAN ? lo : hi, base, 4 + 3); - } + tcg_out_opc_imm(s, sd1, lo, base, 0); + tcg_out_opc_imm(s, sd2, lo, base, 7); break; default: @@ -1538,36 +1374,8 @@ static const TCGOutOpQemuLdSt outop_qemu_st = { .out = tgen_qemu_st, }; -static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, - TCGReg datahi, TCGReg addr, MemOpIdx oi) -{ - MemOp opc = get_memop(oi); - TCGLabelQemuLdst *ldst; - HostAddress h; - - tcg_debug_assert(TCG_TARGET_REG_BITS == 32); - ldst = prepare_host_addr(s, &h, addr, oi, false); - - if (use_mips32r6_instructions || h.aa.align >= (opc & MO_SIZE)) { - tcg_out_qemu_st_direct(s, datalo, datahi, h.base, opc); - } else { - tcg_out_qemu_st_unalign(s, datalo, datahi, h.base, opc); - } - - if (ldst) { - ldst->type = type; - ldst->datalo_reg = datalo; - ldst->datahi_reg = datahi; - ldst->raddr = tcg_splitwx_to_rx(s->code_ptr); - } -} - static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { - /* Ensure that the mips32 code is compiled but discarded for mips64. */ - .base.static_constraint = - TCG_TARGET_REG_BITS == 32 ? C_O0_I3(rz, rz, r) : C_NotImplemented, - .out = - TCG_TARGET_REG_BITS == 32 ? 
tgen_qemu_st2 : NULL, + .base.static_constraint = C_NotImplemented, }; static void tcg_out_mb(TCGContext *s, unsigned a0) @@ -1592,22 +1400,14 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) int16_t lo = 0; if (a0) { - intptr_t ofs; - if (TCG_TARGET_REG_BITS == 64) { - ofs = tcg_tbrel_diff(s, (void *)a0); - lo = ofs; - if (ofs == lo) { - base = TCG_REG_TB; - } else { - base = TCG_REG_V0; - tcg_out_movi(s, TCG_TYPE_PTR, base, ofs - lo); - tcg_out_opc_reg(s, ALIAS_PADD, base, base, TCG_REG_TB); - } + intptr_t ofs = tcg_tbrel_diff(s, (void *)a0); + lo = ofs; + if (ofs == lo) { + base = TCG_REG_TB; } else { - ofs = a0; - lo = ofs; base = TCG_REG_V0; tcg_out_movi(s, TCG_TYPE_PTR, base, ofs - lo); + tcg_out_opc_reg(s, OPC_DADDU, base, base, TCG_REG_TB); } } if (!tcg_out_opc_jmp(s, OPC_J, tb_ret_addr)) { @@ -1615,7 +1415,7 @@ static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0) tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0); } /* delay slot */ - tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_V0, base, lo); + tcg_out_opc_imm(s, OPC_DADDIU, TCG_REG_V0, base, lo); } static void tcg_out_goto_tb(TCGContext *s, int which) @@ -1624,35 +1424,24 @@ static void tcg_out_goto_tb(TCGContext *s, int which) TCGReg base, dest; /* indirect jump method */ - if (TCG_TARGET_REG_BITS == 64) { - dest = TCG_REG_TB; - base = TCG_REG_TB; - ofs = tcg_tbrel_diff(s, (void *)ofs); - } else { - dest = TCG_TMP0; - base = TCG_REG_ZERO; - } + dest = TCG_REG_TB; + base = TCG_REG_TB; + ofs = tcg_tbrel_diff(s, (void *)ofs); tcg_out_ld(s, TCG_TYPE_PTR, dest, base, ofs); tcg_out_opc_reg(s, OPC_JR, 0, dest, 0); /* delay slot */ tcg_out_nop(s); set_jmp_reset_offset(s, which); - if (TCG_TARGET_REG_BITS == 64) { - /* For the unlinked case, need to reset TCG_REG_TB. */ - tcg_out_ldst(s, ALIAS_PADDI, TCG_REG_TB, TCG_REG_TB, - -tcg_current_code_size(s)); - } + /* For the unlinked case, need to reset TCG_REG_TB. 
*/ + tcg_out_ldst(s, OPC_DADDIU, TCG_REG_TB, TCG_REG_TB, + -tcg_current_code_size(s)); } static void tcg_out_goto_ptr(TCGContext *s, TCGReg a0) { tcg_out_opc_reg(s, OPC_JR, 0, a0, 0); - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); - } else { - tcg_out_nop(s); - } + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, a0); } void tb_target_set_jmp_target(const TranslationBlock *tb, int n, @@ -1847,7 +1636,6 @@ static const TCGOutOpBinary outop_eqv = { .base.static_constraint = C_NotImplemented, }; -#if TCG_TARGET_REG_BITS == 64 static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) { tcg_out_dsra(s, a0, a1, 32); @@ -1857,7 +1645,6 @@ static const TCGOutOpUnary outop_extrh_i64_i32 = { .base.static_constraint = C_O1_I1(r, r), .out_rr = tgen_extrh_i64_i32, }; -#endif static void tgen_mul(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, TCGReg a2) @@ -2246,7 +2033,6 @@ static const TCGOutOpBswap outop_bswap32 = { .out_rr = tgen_bswap32, }; -#if TCG_TARGET_REG_BITS == 64 static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) { if (use_mips32r2_instructions) { @@ -2264,7 +2050,6 @@ static const TCGOutOpUnary outop_bswap64 = { .base.static_constraint = C_O1_I1(r, r), .out_rr = tgen_bswap64, }; -#endif /* TCG_TARGET_REG_BITS == 64 */ static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) { @@ -2392,7 +2177,6 @@ static const TCGOutOpLoad outop_ld16s = { .out = tgen_ld16s, }; -#if TCG_TARGET_REG_BITS == 64 static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, TCGReg base, ptrdiff_t offset) { @@ -2414,7 +2198,6 @@ static const TCGOutOpLoad outop_ld32s = { .base.static_constraint = C_O1_I1(r, r), .out = tgen_ld32s, }; -#endif static void tgen_st8_r(TCGContext *s, TCGType type, TCGReg data, TCGReg base, ptrdiff_t offset) @@ -2553,7 +2336,7 @@ static tcg_insn_unit *align_code_ptr(TCGContext *s) } /* Stack frame parameters. 
*/ -#define REG_SIZE (TCG_TARGET_REG_BITS / 8) +#define REG_SIZE 8 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) @@ -2573,7 +2356,7 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); /* TB prologue */ - tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); + tcg_out_opc_imm(s, OPC_DADDIU, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, SAVE_OFS + i * REG_SIZE); @@ -2585,17 +2368,15 @@ static void tcg_target_qemu_prologue(TCGContext *s) * with the address of the prologue, so we can use that instead * of TCG_REG_TB. */ -#if TCG_TARGET_REG_BITS == 64 && !defined(__mips_abicalls) +#if !defined(__mips_abicalls) # error "Unknown mips abi" #endif tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, - TCG_TARGET_REG_BITS == 64 ? TCG_REG_T9 : 0); + TCG_REG_T9); tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); } - if (TCG_TARGET_REG_BITS == 64) { - tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]); - } + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]); /* Call generated code */ tcg_out_opc_reg(s, OPC_JR, 0, tcg_target_call_iarg_regs[1], 0); @@ -2618,7 +2399,7 @@ static void tcg_target_qemu_prologue(TCGContext *s) tcg_out_opc_reg(s, OPC_JR, 0, TCG_REG_RA, 0); /* delay slot */ - tcg_out_opc_imm(s, ALIAS_PADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); + tcg_out_opc_imm(s, OPC_DADDIU, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); if (use_mips32r2_instructions) { return; @@ -2651,10 +2432,6 @@ static void tcg_target_qemu_prologue(TCGContext *s) /* t3 = dcba -- delay slot */ tcg_out_opc_reg(s, OPC_OR, TCG_TMP3, TCG_TMP3, TCG_TMP1); - if (TCG_TARGET_REG_BITS == 32) { - return; - } - /* * bswap32u -- unsigned 32-bit swap. 
a0 = ....abcd. */ @@ -2749,9 +2526,7 @@ static void tcg_target_init(TCGContext *s) { tcg_target_detect_isa(); tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff; - if (TCG_TARGET_REG_BITS == 64) { - tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; - } + tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff; tcg_target_call_clobber_regs = 0; tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0); @@ -2782,9 +2557,7 @@ static void tcg_target_init(TCGContext *s) tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); /* return address */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); /* stack pointer */ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP); /* global pointer */ - if (TCG_TARGET_REG_BITS == 64) { - tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tc->tc_ptr */ - } + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tc->tc_ptr */ } typedef struct { @@ -2802,7 +2575,7 @@ static const DebugFrame debug_frame = { .h.cie.id = -1, .h.cie.version = 1, .h.cie.code_align = 1, - .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ + .h.cie.data_align = -REG_SIZE & 0x7f, /* sleb128 */ .h.cie.return_column = TCG_REG_RA, /* Total FDE size does not include the "len" member. */ diff --git a/tcg/optimize.c b/tcg/optimize.c index 62a128bc9b794..f69702b26e82f 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -1454,7 +1454,7 @@ static bool fold_and(OptContext *ctx, TCGOp *op) a_mask = t1->z_mask & ~t2->o_mask; if (!fold_masks_zosa_int(ctx, op, z_mask, o_mask, s_mask, a_mask)) { - if (ti_is_const(t2)) { + if (op->opc == INDEX_op_and && ti_is_const(t2)) { /* * Canonicalize on extract, if valid. 
This aids x86 with its * 2 operand MOVZBL and 2 operand AND, selecting the TCGOpcode @@ -1568,9 +1568,10 @@ static bool fold_bitsel_vec(OptContext *ctx, TCGOp *op) return fold_and(ctx, op); } if (fv == -1 && TCG_TARGET_HAS_orc_vec) { + TCGArg ta = op->args[2]; op->opc = INDEX_op_orc_vec; op->args[2] = op->args[1]; - op->args[1] = op->args[3]; + op->args[1] = ta; return fold_orc(ctx, op); } } diff --git a/tcg/ppc/tcg-target-has.h b/tcg/ppc/tcg-target-has.h index 81ec5aece7adf..a3711feeae2b6 100644 --- a/tcg/ppc/tcg-target-has.h +++ b/tcg/ppc/tcg-target-has.h @@ -17,12 +17,9 @@ #define have_vsx (cpuinfo & CPUINFO_VSX) /* optional instructions */ -#if TCG_TARGET_REG_BITS == 64 #define TCG_TARGET_HAS_extr_i64_i32 0 -#endif -#define TCG_TARGET_HAS_qemu_ldst_i128 \ - (TCG_TARGET_REG_BITS == 64 && have_isa_2_07) +#define TCG_TARGET_HAS_qemu_ldst_i128 have_isa_2_07 #define TCG_TARGET_HAS_tst 1 diff --git a/tcg/ppc/tcg-target-reg-bits.h b/tcg/ppc/tcg-target-reg-bits.h index 0efa80e7e07f3..3a15d7bee41ec 100644 --- a/tcg/ppc/tcg-target-reg-bits.h +++ b/tcg/ppc/tcg-target-reg-bits.h @@ -7,10 +7,10 @@ #ifndef TCG_TARGET_REG_BITS_H #define TCG_TARGET_REG_BITS_H -#ifdef _ARCH_PPC64 -# define TCG_TARGET_REG_BITS 64 -#else -# define TCG_TARGET_REG_BITS 32 +#ifndef _ARCH_PPC64 +# error Expecting 64-bit host architecture #endif +#define TCG_TARGET_REG_BITS 64 + #endif diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc index b8b23d44d5e2d..3c36b26f25c5c 100644 --- a/tcg/ppc/tcg-target.c.inc +++ b/tcg/ppc/tcg-target.c.inc @@ -29,35 +29,18 @@ * Apple XCode does not define _CALL_DARWIN. * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV or _CALL_AIX. 
*/ -#if TCG_TARGET_REG_BITS == 64 -# ifdef _CALL_AIX +#ifdef _CALL_AIX /* ok */ -# elif defined(_CALL_ELF) && _CALL_ELF == 1 +#elif defined(_CALL_ELF) && _CALL_ELF == 1 # define _CALL_AIX -# elif defined(_CALL_ELF) && _CALL_ELF == 2 +#elif defined(_CALL_ELF) && _CALL_ELF == 2 /* ok */ -# else -# error "Unknown ABI" -# endif #else -# if defined(_CALL_SYSV) || defined(_CALL_DARWIN) - /* ok */ -# elif defined(__APPLE__) -# define _CALL_DARWIN -# elif defined(__ELF__) -# define _CALL_SYSV -# else # error "Unknown ABI" -# endif #endif -#if TCG_TARGET_REG_BITS == 64 -# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL -#else -# define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL -# define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF -#endif +#define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND +#define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL #ifdef _CALL_SYSV # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN # define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF @@ -81,7 +64,7 @@ #define TCG_VEC_TMP2 TCG_REG_V1 #define TCG_REG_TB TCG_REG_R31 -#define USE_REG_TB (TCG_TARGET_REG_BITS == 64 && !have_isa_3_00) +#define USE_REG_TB !have_isa_3_00 /* Shorthand for size of a pointer. Avoid promotion to unsigned. 
*/ #define SZP ((int)sizeof(void *)) @@ -327,8 +310,7 @@ static bool tcg_target_const_match(int64_t sval, int ct, if (uval == (uint32_t)uval && mask_operand(uval, &mb, &me)) { return 1; } - if (TCG_TARGET_REG_BITS == 64 && - mask64_operand(uval << clz64(uval), &mb, &me)) { + if (mask64_operand(uval << clz64(uval), &mb, &me)) { return 1; } return 0; @@ -857,10 +839,8 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) return true; } switch (type) { - case TCG_TYPE_I64: - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); - /* fallthru */ case TCG_TYPE_I32: + case TCG_TYPE_I64: if (ret < TCG_REG_V0) { if (arg < TCG_REG_V0) { tcg_out32(s, OR | SAB(arg, ret, arg)); @@ -898,7 +878,6 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) static void tcg_out_rld_rc(TCGContext *s, int op, TCGReg ra, TCGReg rs, int sh, int mb, bool rc) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1); mb = MB64((mb >> 5) | ((mb << 1) & 0x3f)); tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb | rc); @@ -946,13 +925,11 @@ static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src) static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out32(s, EXTSW | RA(dst) | RS(src)); } static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out_rld(s, RLDICL, dst, src, 0, 32); } @@ -968,7 +945,6 @@ static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src) static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out_mov(s, TCG_TYPE_I32, rd, rn); } @@ -1037,9 +1013,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, tcg_target_long tmp; int shift; - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - - if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + if (type == 
TCG_TYPE_I32) { arg = (int32_t)arg; } @@ -1076,7 +1050,7 @@ static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret, /* Load 32-bit immediates with two insns. Note that we've already eliminated bare ADDIS, so we know both insns are required. */ - if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) { + if (arg == (int32_t)arg) { tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16)); tcg_out32(s, ORI | SAI(ret, ret, arg)); return; @@ -1227,19 +1201,10 @@ static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece, if (have_vsx) { load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX; load_insn |= VRT(ret) | RB(TCG_REG_TMP1); - if (TCG_TARGET_REG_BITS == 64) { - new_pool_label(s, val, rel, s->code_ptr, add); - } else { - new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val); - } + new_pool_label(s, val, rel, s->code_ptr, add); } else { load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1); - if (TCG_TARGET_REG_BITS == 64) { - new_pool_l2(s, rel, s->code_ptr, add, val, val); - } else { - new_pool_l4(s, rel, s->code_ptr, add, - val >> 32, val, val >> 32, val); - } + new_pool_l2(s, rel, s->code_ptr, add, val, val); } if (USE_REG_TB) { @@ -1351,7 +1316,6 @@ static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c) { int mb, me; - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); if (mask64_operand(c, &mb, &me)) { if (mb == 0) { tcg_out_rld(s, RLDICR, dst, src, 0, me); @@ -1543,7 +1507,6 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, break; case TCG_TYPE_I64: if (ret < TCG_REG_V0) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out_mem_long(s, LD, LDX, ret, base, offset); break; } @@ -1598,7 +1561,6 @@ static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, break; case TCG_TYPE_I64: if (arg < TCG_REG_V0) { - tcg_debug_assert(TCG_TARGET_REG_BITS == 64); tcg_out_mem_long(s, STD, STDX, arg, base, offset); break; } @@ -1641,7 +1603,7 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, static void 
tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2, bool const_arg2, TCGType type, bool rc) { - int mb, me; + int mb, me, sh; if (!const_arg2) { tcg_out32(s, AND | SAB(arg1, dest, arg2) | rc); @@ -1664,12 +1626,10 @@ static void tcg_out_test(TCGContext *s, TCGReg dest, TCGReg arg1, TCGArg arg2, tcg_out_rlw_rc(s, RLWINM, dest, arg1, 0, mb, me, rc); return; } - if (TCG_TARGET_REG_BITS == 64) { - int sh = clz64(arg2); - if (mask64_operand(arg2 << sh, &mb, &me)) { - tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc); - return; - } + sh = clz64(arg2); + if (mask64_operand(arg2 << sh, &mb, &me)) { + tcg_out_rld_rc(s, RLDICR, dest, arg1, sh, me, rc); + return; } /* Constraints should satisfy this. */ g_assert_not_reached(); @@ -1680,8 +1640,6 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, { uint32_t op; - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - /* * Simplify the comparisons below wrt CMPI. * All of the tests are 16-bit, so a 32-bit sign extend always works. @@ -1747,7 +1705,7 @@ static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2, static void tcg_out_setcond_eq0(TCGContext *s, TCGType type, TCGReg dst, TCGReg src, bool neg) { - if (neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) { + if (neg && type == TCG_TYPE_I64) { /* * X != 0 implies X + -1 generates a carry. * RT = (~X + X) + CA @@ -1774,7 +1732,7 @@ static void tcg_out_setcond_eq0(TCGContext *s, TCGType type, static void tcg_out_setcond_ne0(TCGContext *s, TCGType type, TCGReg dst, TCGReg src, bool neg) { - if (!neg && (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I64)) { + if (!neg && type == TCG_TYPE_I64) { /* * X != 0 implies X + -1 generates a carry. Extra addition * trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. 
@@ -1814,8 +1772,6 @@ static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond, int sh; bool inv; - tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32); - /* Ignore high bits of a potential constant arg2. */ if (type == TCG_TYPE_I32) { arg2 = (uint32_t)arg2; @@ -2117,109 +2073,6 @@ static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc, } } -static void tcg_out_cmp2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, - TCGArg bl, bool blconst, TCGArg bh, bool bhconst) -{ - static const struct { uint8_t bit1, bit2; } bits[] = { - [TCG_COND_LT ] = { CR_LT, CR_LT }, - [TCG_COND_LE ] = { CR_LT, CR_GT }, - [TCG_COND_GT ] = { CR_GT, CR_GT }, - [TCG_COND_GE ] = { CR_GT, CR_LT }, - [TCG_COND_LTU] = { CR_LT, CR_LT }, - [TCG_COND_LEU] = { CR_LT, CR_GT }, - [TCG_COND_GTU] = { CR_GT, CR_GT }, - [TCG_COND_GEU] = { CR_GT, CR_LT }, - }; - - TCGCond cond2; - int op, bit1, bit2; - - switch (cond) { - case TCG_COND_EQ: - op = CRAND; - goto do_equality; - case TCG_COND_NE: - op = CRNAND; - do_equality: - tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32); - tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32); - tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ)); - break; - - case TCG_COND_TSTEQ: - case TCG_COND_TSTNE: - if (blconst) { - tcg_out_andi32(s, TCG_REG_R0, al, bl); - } else { - tcg_out32(s, AND | SAB(al, TCG_REG_R0, bl)); - } - if (bhconst) { - tcg_out_andi32(s, TCG_REG_TMP1, ah, bh); - } else { - tcg_out32(s, AND | SAB(ah, TCG_REG_TMP1, bh)); - } - tcg_out32(s, OR | SAB(TCG_REG_R0, TCG_REG_R0, TCG_REG_TMP1) | 1); - break; - - case TCG_COND_LT: - case TCG_COND_LE: - case TCG_COND_GT: - case TCG_COND_GE: - case TCG_COND_LTU: - case TCG_COND_LEU: - case TCG_COND_GTU: - case TCG_COND_GEU: - bit1 = bits[cond].bit1; - bit2 = bits[cond].bit2; - op = (bit1 != bit2 ? 
CRANDC : CRAND); - cond2 = tcg_unsigned_cond(cond); - - tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32); - tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32); - tcg_out32(s, op | BT(0, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2)); - tcg_out32(s, CROR | BT(0, CR_EQ) | BA(6, bit1) | BB(0, CR_EQ)); - break; - - default: - g_assert_not_reached(); - } -} - -static void tgen_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, - TCGReg al, TCGReg ah, - TCGArg bl, bool const_bl, - TCGArg bh, bool const_bh) -{ - tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); - tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(0)); - tcg_out_rlw(s, RLWINM, ret, TCG_REG_R0, CR_EQ + 0*4 + 1, 31, 31); -} - -#if TCG_TARGET_REG_BITS != 32 -__attribute__((unused)) -#endif -static const TCGOutOpSetcond2 outop_setcond2 = { - .base.static_constraint = C_O1_I4(r, r, r, rU, rC), - .out = tgen_setcond2, -}; - -static void tgen_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, - TCGArg bl, bool const_bl, - TCGArg bh, bool const_bh, TCGLabel *l) -{ - assert(TCG_TARGET_REG_BITS == 32); - tcg_out_cmp2(s, cond, al, ah, bl, const_bl, bh, const_bh); - tcg_out_bc_lab(s, TCG_COND_EQ, l); -} - -#if TCG_TARGET_REG_BITS != 32 -__attribute__((unused)) -#endif -static const TCGOutOpBrcond2 outop_brcond2 = { - .base.static_constraint = C_O0_I4(r, r, rU, rC), - .out = tgen_brcond2, -}; - static void tcg_out_mb(TCGContext *s, unsigned a0) { uint32_t insn; @@ -2438,13 +2291,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off); /* Extract the page index, shifted into place for tlb index. 
*/ - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_shri32(s, TCG_REG_R0, addr, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - } else { - tcg_out_shri64(s, TCG_REG_R0, addr, - TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); - } + tcg_out_shri64(s, TCG_REG_R0, addr, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0)); /* @@ -2453,8 +2301,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * We will ignore the high bits with tcg_out_cmp(..., addr_type). */ if (cmp_off == 0) { - tcg_out32(s, (TCG_TARGET_REG_BITS == 64 ? LDUX : LWZUX) - | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2)); + tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2)); } else { tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2)); tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off); @@ -2464,51 +2311,36 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, * Load the TLB addend for use on the fast path. * Do this asap to minimize any load use delay. */ - if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) { - tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, - offsetof(CPUTLBEntry, addend)); - } + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, + offsetof(CPUTLBEntry, addend)); /* Clear the non-page, non-alignment bits from the address in R0. */ - if (TCG_TARGET_REG_BITS == 32) { - /* - * We don't support unaligned accesses on 32-bits. - * Preserve the bottom bits and thus trigger a comparison - * failure on unaligned accesses. - */ - if (a_bits < s_bits) { - a_bits = s_bits; - } - tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr, 0, + TCGReg t = addr; + + /* + * If the access is unaligned, we need to make sure we fail if we + * cross a page boundary. The trick is to add the access size-1 + * to the address before masking the low bits. 
That will make the + * address overflow to the next page if we cross a page boundary, + * which will then force a mismatch of the TLB compare. + */ + if (a_bits < s_bits) { + unsigned a_mask = (1 << a_bits) - 1; + unsigned s_mask = (1 << s_bits) - 1; + tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask)); + t = TCG_REG_R0; + } + + /* Mask the address for the requested alignment. */ + if (addr_type == TCG_TYPE_I32) { + tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); + } else if (a_bits == 0) { + tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS); } else { - TCGReg t = addr; - - /* - * If the access is unaligned, we need to make sure we fail if we - * cross a page boundary. The trick is to add the access size-1 - * to the address before masking the low bits. That will make the - * address overflow to the next page if we cross a page boundary, - * which will then force a mismatch of the TLB compare. - */ - if (a_bits < s_bits) { - unsigned a_mask = (1 << a_bits) - 1; - unsigned s_mask = (1 << s_bits) - 1; - tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask)); - t = TCG_REG_R0; - } - - /* Mask the address for the requested alignment. */ - if (addr_type == TCG_TYPE_I32) { - tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0, - (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS); - } else if (a_bits == 0) { - tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS); - } else { - tcg_out_rld(s, RLDICL, TCG_REG_R0, t, - 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits); - tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); - } + tcg_out_rld(s, RLDICL, TCG_REG_R0, t, + 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits); + tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0); } /* Full comparison into cr0. */ @@ -2537,7 +2369,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h, h->base = guest_base ? 
TCG_GUEST_BASE_REG : 0; } - if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) { + if (addr_type == TCG_TYPE_I32) { /* Zero-extend the guest address for use in the host address. */ tcg_out_ext32u(s, TCG_REG_TMP2, addr); h->index = TCG_REG_TMP2; @@ -2554,40 +2386,22 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi, MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; + uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; ldst = prepare_host_addr(s, &h, addr, oi, true); - if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { - if (opc & MO_BSWAP) { - tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); - tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index)); - tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0)); - } else if (h.base != 0) { - tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); - tcg_out32(s, LWZX | TAB(datahi, h.base, h.index)); - tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0)); - } else if (h.index == datahi) { - tcg_out32(s, LWZ | TAI(datalo, h.index, 4)); - tcg_out32(s, LWZ | TAI(datahi, h.index, 0)); - } else { - tcg_out32(s, LWZ | TAI(datahi, h.index, 0)); - tcg_out32(s, LWZ | TAI(datalo, h.index, 4)); - } + if (!have_isa_2_06 && insn == LDBRX) { + tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); + tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index)); + tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0)); + tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0); + } else if (insn) { + tcg_out32(s, insn | TAB(datalo, h.base, h.index)); } else { - uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; - if (!have_isa_2_06 && insn == LDBRX) { - tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); - tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index)); - tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0)); - tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0); - } else if (insn) { - tcg_out32(s, insn | TAB(datalo, h.base, h.index)); - } else { - insn = qemu_ldx_opc[opc & (MO_SIZE | 
MO_BSWAP)]; - tcg_out32(s, insn | TAB(datalo, h.base, h.index)); - tcg_out_movext(s, TCG_TYPE_REG, datalo, - TCG_TYPE_REG, opc & MO_SSIZE, datalo); - } + insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)]; + tcg_out32(s, insn | TAB(datalo, h.base, h.index)); + tcg_out_movext(s, TCG_TYPE_REG, datalo, + TCG_TYPE_REG, opc & MO_SSIZE, datalo); } if (ldst) { @@ -2604,32 +2418,17 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi, MemOp opc = get_memop(oi); TCGLabelQemuLdst *ldst; HostAddress h; + uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; ldst = prepare_host_addr(s, &h, addr, oi, false); - if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) { - if (opc & MO_BSWAP) { - tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); - tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index)); - tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0)); - } else if (h.base != 0) { - tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4)); - tcg_out32(s, STWX | SAB(datahi, h.base, h.index)); - tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0)); - } else { - tcg_out32(s, STW | TAI(datahi, h.index, 0)); - tcg_out32(s, STW | TAI(datalo, h.index, 4)); - } + if (!have_isa_2_06 && insn == STDBRX) { + tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index)); + tcg_out32(s, ADDI | TAI(TCG_REG_TMP2, h.index, 4)); + tcg_out_shri64(s, TCG_REG_R0, datalo, 32); + tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP2)); } else { - uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; - if (!have_isa_2_06 && insn == STDBRX) { - tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index)); - tcg_out32(s, ADDI | TAI(TCG_REG_TMP2, h.index, 4)); - tcg_out_shri64(s, TCG_REG_R0, datalo, 32); - tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP2)); - } else { - tcg_out32(s, insn | SAB(datalo, h.base, h.index)); - } + tcg_out32(s, insn | SAB(datalo, h.base, h.index)); } if (ldst) { @@ -2709,16 +2508,11 @@ static const TCGOutOpQemuLdSt outop_qemu_ld = { static void 
tgen_qemu_ld2(TCGContext *s, TCGType type, TCGReg datalo, TCGReg datahi, TCGReg addr, MemOpIdx oi) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_ld(s, datalo, datahi, addr, oi, type); - } else { - tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, true); - } + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, true); } static const TCGOutOpQemuLdSt2 outop_qemu_ld2 = { - .base.static_constraint = - TCG_TARGET_REG_BITS == 64 ? C_N1O1_I1(o, m, r) : C_O2_I1(r, r, r), + .base.static_constraint = C_N1O1_I1(o, m, r), .out = tgen_qemu_ld2, }; @@ -2736,16 +2530,11 @@ static const TCGOutOpQemuLdSt outop_qemu_st = { static void tgen_qemu_st2(TCGContext *s, TCGType type, TCGReg datalo, TCGReg datahi, TCGReg addr, MemOpIdx oi) { - if (TCG_TARGET_REG_BITS == 32) { - tcg_out_qemu_st(s, datalo, datahi, addr, oi, type); - } else { - tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, false); - } + tcg_out_qemu_ldst_i128(s, datalo, datahi, addr, oi, false); } static const TCGOutOpQemuLdSt2 outop_qemu_st2 = { - .base.static_constraint = - TCG_TARGET_REG_BITS == 64 ? 
C_O0_I3(o, m, r) : C_O0_I3(r, r, r), + .base.static_constraint = C_O0_I3(o, m, r), .out = tgen_qemu_st2, }; @@ -2767,16 +2556,11 @@ static void tcg_out_nop_fill(tcg_insn_unit *p, int count) #elif defined(_CALL_DARWIN) # define LINK_AREA_SIZE (6 * SZR) # define LR_OFFSET (2 * SZR) -#elif TCG_TARGET_REG_BITS == 64 +#else # if defined(_CALL_ELF) && _CALL_ELF == 2 # define LINK_AREA_SIZE (4 * SZR) # define LR_OFFSET (1 * SZR) # endif -#else /* TCG_TARGET_REG_BITS == 32 */ -# if defined(_CALL_SYSV) -# define LINK_AREA_SIZE (2 * SZR) -# define LR_OFFSET (1 * SZR) -# endif #endif #ifndef LR_OFFSET # error "Unhandled abi" @@ -3107,7 +2891,6 @@ static void tgen_eqv(TCGContext *s, TCGType type, tcg_out32(s, EQV | SAB(a1, a0, a2)); } -#if TCG_TARGET_REG_BITS == 64 static void tgen_extrh_i64_i32(TCGContext *s, TCGType t, TCGReg a0, TCGReg a1) { tcg_out_shri64(s, a0, a1, 32); @@ -3117,7 +2900,6 @@ static const TCGOutOpUnary outop_extrh_i64_i32 = { .base.static_constraint = C_O1_I1(r, r), .out_rr = tgen_extrh_i64_i32, }; -#endif static void tgen_divs(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1, TCGReg a2) @@ -3596,7 +3378,6 @@ static const TCGOutOpBswap outop_bswap32 = { .out_rr = tgen_bswap32, }; -#if TCG_TARGET_REG_BITS == 64 static void tgen_bswap64(TCGContext *s, TCGType type, TCGReg dst, TCGReg src) { TCGReg t0 = dst == src ? 
TCG_REG_R0 : dst; @@ -3639,7 +3420,6 @@ static const TCGOutOpUnary outop_bswap64 = { .base.static_constraint = C_O1_I1(r, r), .out_rr = tgen_bswap64, }; -#endif /* TCG_TARGET_REG_BITS == 64 */ static void tgen_neg(TCGContext *s, TCGType type, TCGReg a0, TCGReg a1) { @@ -3776,7 +3556,6 @@ static const TCGOutOpLoad outop_ld16s = { .out = tgen_ld16s, }; -#if TCG_TARGET_REG_BITS == 64 static void tgen_ld32u(TCGContext *s, TCGType type, TCGReg dest, TCGReg base, ptrdiff_t offset) { @@ -3798,7 +3577,6 @@ static const TCGOutOpLoad outop_ld32s = { .base.static_constraint = C_O1_I1(r, r), .out = tgen_ld32s, }; -#endif static void tgen_st8(TCGContext *s, TCGType type, TCGReg data, TCGReg base, ptrdiff_t offset) @@ -4278,14 +4056,6 @@ static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc, tcg_out_bitsel_vec(s, a0, a1, a2, args[3]); return; - case INDEX_op_dup2_vec: - assert(TCG_TARGET_REG_BITS == 32); - /* With inputs a1 = xLxx, a2 = xHxx */ - tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */ - tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */ - tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */ - return; - case INDEX_op_ppc_mrgh_vec: insn = mrgh_op[vece]; break; @@ -4435,12 +4205,6 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) { switch (op) { - case INDEX_op_qemu_st: - return C_O0_I2(r, r); - case INDEX_op_qemu_st2: - return TCG_TARGET_REG_BITS == 64 - ? 
C_O0_I3(o, m, r) : C_O0_I3(r, r, r); - case INDEX_op_add_vec: case INDEX_op_sub_vec: case INDEX_op_mul_vec: @@ -4471,7 +4235,6 @@ tcg_target_op_def(TCGOpcode op, TCGType type, unsigned flags) case INDEX_op_ppc_muleu_vec: case INDEX_op_ppc_mulou_vec: case INDEX_op_ppc_pkum_vec: - case INDEX_op_dup2_vec: return C_O1_I2(v, v, v); case INDEX_op_not_vec: @@ -4549,9 +4312,7 @@ static void tcg_target_init(TCGContext *s) #if defined(_CALL_SYSV) tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */ #endif -#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */ -#endif tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1); @@ -4572,11 +4333,7 @@ typedef struct { /* We're expecting a 2 byte uleb128 encoded value. */ QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); -#if TCG_TARGET_REG_BITS == 64 -# define ELF_HOST_MACHINE EM_PPC64 -#else -# define ELF_HOST_MACHINE EM_PPC -#endif +#define ELF_HOST_MACHINE EM_PPC64 static DebugFrame debug_frame = { .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ diff --git a/tcg/region.c b/tcg/region.c index 7ea0b37a84c33..2181267e48f7b 100644 --- a/tcg/region.c +++ b/tcg/region.c @@ -832,13 +832,16 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_threads) } else { #ifdef CONFIG_POSIX rc = mprotect(start, end - start, need_prot); + if (rc) { + error_report("mprotect of jit buffer: %s", + strerror(errno)); + } #else g_assert_not_reached(); #endif } if (rc) { - error_setg_errno(&error_fatal, errno, - "mprotect of jit buffer"); + exit(1); } } if (have_prot != 0) { diff --git a/tcg/tcg-op-ldst.c b/tcg/tcg-op-ldst.c index 548496002d765..67c15fd4d0dc0 100644 --- a/tcg/tcg-op-ldst.c +++ b/tcg/tcg-op-ldst.c @@ -801,6 +801,8 @@ typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv_i64, TCGv_i32, TCGv_i32); typedef void 
(*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64, TCGv_i32); +typedef void (*gen_atomic_op_i128)(TCGv_i128, TCGv_env, TCGv_i64, + TCGv_i128, TCGv_i32); #ifdef CONFIG_ATOMIC64 # define WITH_ATOMIC64(X) X, @@ -1201,6 +1203,94 @@ static void do_atomic_op_i64(TCGv_i64 ret, TCGTemp *addr, TCGv_i64 val, } } +static void do_nonatomic_op_i128(TCGv_i128 ret, TCGTemp *addr, TCGv_i128 val, + TCGArg idx, MemOp memop, bool new_val, + void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) +{ + TCGv_i128 t = tcg_temp_ebb_new_i128(); + TCGv_i128 r = tcg_temp_ebb_new_i128(); + + tcg_gen_qemu_ld_i128_int(r, addr, idx, memop); + gen(TCGV128_LOW(t), TCGV128_LOW(r), TCGV128_LOW(val)); + gen(TCGV128_HIGH(t), TCGV128_HIGH(r), TCGV128_HIGH(val)); + tcg_gen_qemu_st_i128_int(t, addr, idx, memop); + + tcg_gen_mov_i128(ret, r); + tcg_temp_free_i128(t); + tcg_temp_free_i128(r); +} + +static void do_atomic_op_i128(TCGv_i128 ret, TCGTemp *addr, TCGv_i128 val, + TCGArg idx, MemOp memop, void * const table[]) +{ + gen_atomic_op_i128 gen = table[memop & (MO_SIZE | MO_BSWAP)]; + + if (gen) { + MemOpIdx oi = make_memop_idx(memop & ~MO_SIGN, idx); + TCGv_i64 a64 = maybe_extend_addr64(addr); + gen(ret, tcg_env, a64, val, tcg_constant_i32(oi)); + maybe_free_addr64(a64); + return; + } + + gen_helper_exit_atomic(tcg_env); + /* Produce a result */ + tcg_gen_movi_i64(TCGV128_LOW(ret), 0); + tcg_gen_movi_i64(TCGV128_HIGH(ret), 0); +} + +#define GEN_ATOMIC_HELPER128(NAME, OP, NEW) \ +static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ + [MO_8] = gen_helper_atomic_##NAME##b, \ + [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ + [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ + [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ + [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ + WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ + WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ + WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_##NAME##o_le) \ + 
WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_##NAME##o_be) \ +}; \ +void tcg_gen_atomic_##NAME##_i32_chk(TCGv_i32 ret, TCGTemp *addr, \ + TCGv_i32 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ +{ \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) <= MO_32); \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i32); \ + } \ +} \ +void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr, \ + TCGv_i64 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ +{ \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) <= MO_64); \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i64); \ + } \ +} \ +void tcg_gen_atomic_##NAME##_i128_chk(TCGv_i128 ret, TCGTemp *addr, \ + TCGv_i128 val, TCGArg idx, \ + MemOp memop, TCGType addr_type) \ +{ \ + tcg_debug_assert(addr_type == tcg_ctx->addr_type); \ + tcg_debug_assert((memop & MO_SIZE) == MO_128); \ + if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ + do_atomic_op_i128(ret, addr, val, idx, memop, table_##NAME); \ + } else { \ + do_nonatomic_op_i128(ret, addr, val, idx, memop, NEW, \ + tcg_gen_##OP##_i64); \ + } \ +} + #define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ [MO_8] = gen_helper_atomic_##NAME##b, \ @@ -1239,8 +1329,8 @@ void tcg_gen_atomic_##NAME##_i64_chk(TCGv_i64 ret, TCGTemp *addr, \ } GEN_ATOMIC_HELPER(fetch_add, add, 0) -GEN_ATOMIC_HELPER(fetch_and, and, 0) -GEN_ATOMIC_HELPER(fetch_or, or, 0) +GEN_ATOMIC_HELPER128(fetch_and, and, 0) +GEN_ATOMIC_HELPER128(fetch_or, or, 0) GEN_ATOMIC_HELPER(fetch_xor, xor, 0) GEN_ATOMIC_HELPER(fetch_smin, smin, 0) 
GEN_ATOMIC_HELPER(fetch_umin, umin, 0) @@ -1266,6 +1356,7 @@ static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) tcg_gen_mov_i64(r, b); } -GEN_ATOMIC_HELPER(xchg, mov2, 0) +GEN_ATOMIC_HELPER128(xchg, mov2, 0) #undef GEN_ATOMIC_HELPER +#undef GEN_ATOMIC_HELPER128 diff --git a/tcg/tcg.c b/tcg/tcg.c index afac55a203ab0..294762c283b74 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -425,7 +425,8 @@ static uintptr_t G_GNUC_UNUSED get_jmp_target_addr(TCGContext *s, int which) static int __attribute__((unused)) tlb_mask_table_ofs(TCGContext *s, int which) { - return (offsetof(CPUNegativeOffsetState, tlb.f[which]) - + int fi = mmuidx_to_fast_index(which); + return (offsetof(CPUNegativeOffsetState, tlb.f[fi]) - sizeof(CPUNegativeOffsetState)); } diff --git a/tests/Makefile.include b/tests/Makefile.include index 23fb722d4260f..d4dfbf3716d84 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -13,6 +13,7 @@ check-help: @echo " $(MAKE) check-functional-TARGET Run functional tests for a given target" @echo " $(MAKE) check-unit Run qobject tests" @echo " $(MAKE) check-qapi-schema Run QAPI schema tests" + @echo " $(MAKE) check-tracetool Run tracetool generator tests" @echo " $(MAKE) check-block Run block tests" ifneq ($(filter $(all-check-targets), check-softfloat),) @echo " $(MAKE) check-tcg Run TCG tests" @@ -104,14 +105,18 @@ check-venv: $(TESTS_VENV_TOKEN) FUNCTIONAL_TARGETS=$(patsubst %-softmmu,check-functional-%, $(filter %-softmmu,$(TARGETS))) .PHONY: $(FUNCTIONAL_TARGETS) -$(FUNCTIONAL_TARGETS): +$(FUNCTIONAL_TARGETS): check-venv @$(MAKE) SPEED=thorough $(subst -functional,-func,$@) .PHONY: check-functional -check-functional: +check-functional: check-venv @$(NINJA) precache-functional + @$(PYTHON) $(SRC_PATH)/scripts/clean_functional_cache.py @QEMU_TEST_NO_DOWNLOAD=1 $(MAKE) SPEED=thorough check-func check-func-quick +.PHONY: check-func check-func-quick +check-func check-func-quick: + # Consolidated targets .PHONY: check check-clean diff --git 
a/tests/data/acpi/aarch64/virt/DSDT b/tests/data/acpi/aarch64/virt/DSDT index 18d97e8f22979..38f01adb61e6e 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT and b/tests/data/acpi/aarch64/virt/DSDT differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt b/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt index 2cef095bcc1bb..37a9af713b94a 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt and b/tests/data/acpi/aarch64/virt/DSDT.acpihmatvirt differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.acpipcihp b/tests/data/acpi/aarch64/virt/DSDT.acpipcihp index 8d55a877a40cb..04427e2d8eb8d 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.acpipcihp and b/tests/data/acpi/aarch64/virt/DSDT.acpipcihp differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex b/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex index 970d43f68bca0..43ab60496e5a0 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex and b/tests/data/acpi/aarch64/virt/DSDT.hpoffacpiindex differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.memhp b/tests/data/acpi/aarch64/virt/DSDT.memhp index 372ca3d7fb1e2..3c39167444616 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.memhp and b/tests/data/acpi/aarch64/virt/DSDT.memhp differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.pxb b/tests/data/acpi/aarch64/virt/DSDT.pxb index c2779882494e1..71c632cedcca6 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.pxb and b/tests/data/acpi/aarch64/virt/DSDT.pxb differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.smmuv3-dev b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-dev new file mode 100644 index 0000000000000..e8c2b376df7bd Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-dev differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.smmuv3-legacy b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-legacy new file mode 100644 index 0000000000000..e8c2b376df7bd Binary files /dev/null and b/tests/data/acpi/aarch64/virt/DSDT.smmuv3-legacy differ 
diff --git a/tests/data/acpi/aarch64/virt/DSDT.topology b/tests/data/acpi/aarch64/virt/DSDT.topology index ebbeedc1ed30d..9f22cd3dc81ef 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.topology and b/tests/data/acpi/aarch64/virt/DSDT.topology differ diff --git a/tests/data/acpi/aarch64/virt/DSDT.viot b/tests/data/acpi/aarch64/virt/DSDT.viot index b897d66797150..dd3775a0762ae 100644 Binary files a/tests/data/acpi/aarch64/virt/DSDT.viot and b/tests/data/acpi/aarch64/virt/DSDT.viot differ diff --git a/tests/data/acpi/aarch64/virt/HEST b/tests/data/acpi/aarch64/virt/HEST new file mode 100644 index 0000000000000..674272922db7d Binary files /dev/null and b/tests/data/acpi/aarch64/virt/HEST differ diff --git a/tests/data/acpi/aarch64/virt/IORT.smmuv3-dev b/tests/data/acpi/aarch64/virt/IORT.smmuv3-dev new file mode 100644 index 0000000000000..67be268f62afb Binary files /dev/null and b/tests/data/acpi/aarch64/virt/IORT.smmuv3-dev differ diff --git a/tests/data/acpi/aarch64/virt/IORT.smmuv3-legacy b/tests/data/acpi/aarch64/virt/IORT.smmuv3-legacy new file mode 100644 index 0000000000000..41981a449fc30 Binary files /dev/null and b/tests/data/acpi/aarch64/virt/IORT.smmuv3-legacy differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT b/tests/data/acpi/loongarch64/virt/DSDT index b31841aec6ed2..55aa34f988d6e 100644 Binary files a/tests/data/acpi/loongarch64/virt/DSDT and b/tests/data/acpi/loongarch64/virt/DSDT differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT.memhp b/tests/data/acpi/loongarch64/virt/DSDT.memhp index e291200fc91ca..c0955eb60448c 100644 Binary files a/tests/data/acpi/loongarch64/virt/DSDT.memhp and b/tests/data/acpi/loongarch64/virt/DSDT.memhp differ diff --git a/tests/data/acpi/loongarch64/virt/DSDT.numamem b/tests/data/acpi/loongarch64/virt/DSDT.numamem index 07923ac39584c..61e47e7252155 100644 Binary files a/tests/data/acpi/loongarch64/virt/DSDT.numamem and b/tests/data/acpi/loongarch64/virt/DSDT.numamem differ diff --git 
a/tests/data/acpi/loongarch64/virt/DSDT.topology b/tests/data/acpi/loongarch64/virt/DSDT.topology index 6dfbb495f88b7..b2afebc938ce4 100644 Binary files a/tests/data/acpi/loongarch64/virt/DSDT.topology and b/tests/data/acpi/loongarch64/virt/DSDT.topology differ diff --git a/tests/data/acpi/riscv64/virt/APIC b/tests/data/acpi/riscv64/virt/APIC index 66a25dfd2d6ea..3fb5b753596fc 100644 Binary files a/tests/data/acpi/riscv64/virt/APIC and b/tests/data/acpi/riscv64/virt/APIC differ diff --git a/tests/data/acpi/riscv64/virt/FACP b/tests/data/acpi/riscv64/virt/FACP index a5276b65ea8ce..78e1b14b1d4ff 100644 Binary files a/tests/data/acpi/riscv64/virt/FACP and b/tests/data/acpi/riscv64/virt/FACP differ diff --git a/tests/data/vmstate-static-checker/aarch64/virt-7.2.json b/tests/data/vmstate-static-checker/aarch64/virt-7.2.json new file mode 100644 index 0000000000000..d7491be99cb35 --- /dev/null +++ b/tests/data/vmstate-static-checker/aarch64/virt-7.2.json @@ -0,0 +1,2571 @@ +{ + "vmschkmachine": { + "Name": "virt-7.2" + }, + "gpex-pcihost": { + "Name": "gpex-pcihost", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "arm_gic": { + "Name": "arm_gic", + "version_id": 12, + "minimum_version_id": 12, + "Description": { + "name": "arm_gic", + "version_id": 12, + "minimum_version_id": 12, + "Fields": [ + { + "field": "ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cpu_ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq_state", + "version_id": 1, + "field_exists": false, + "size": 7, + "Description": { + "name": "arm_gic_irq_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pending", + 
"version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "active", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "level", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "model", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "edge_trigger", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "group", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "irq_target", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority1", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority2", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sgi_pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority_mask", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "running_priority", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "current_pending", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "bpr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "abpr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "apr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "nsapr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "arm_gic_virt_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "h_hcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "h_misr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "h_lr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "h_apr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cpu_ctlr", + "version_id": 0, + 
"field_exists": false, + "size": 4 + }, + { + "field": "priority_mask", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "running_priority", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "current_pending", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "bpr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "abpr", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + "arm-gicv3-its-common": { + "Name": "arm-gicv3-its-common", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "arm_gicv3_its", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "iidr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cbaser", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "cwriter", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "creadr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "baser", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "arm-gicv3-common": { + "Name": "arm-gicv3-common", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "arm_gicv3", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "gicd_ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicd_statusr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "group", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "grpmod", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "pending", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": 
"active", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "level", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "edge_trigger", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicd_ipriority", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "gicd_irouter", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "gicd_nsacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cpu", + "version_id": 0, + "field_exists": false, + "size": 664, + "Description": { + "name": "arm_gicv3_cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "level", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_statusr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_waker", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_propbaser", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "gicr_pendbaser", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "gicr_igroupr0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_ienabler0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_ipendr0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_iactiver0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "edge_trigger", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_igrpmodr0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_nsacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gicr_ipriorityr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, 
+ { + "field": "icc_ctlr_el1", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "icc_pmr_el1", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "icc_bpr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "icc_apr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "icc_igrpen", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "icc_ctlr_el3", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "arm_gicv3_cpu/virt", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "ich_apr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "ich_hcr_el2", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "ich_lr_el2", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "ich_vmcr_el2", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "arm_gicv3_cpu/sre_el1", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "icc_sre_el1", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "arm_gicv3_cpu/gicv4", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "gicr_vpropbaser", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "gicr_vpendbaser", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + } + ], + "Subsections": [ + { + "name": "arm_gicv3/gicd_no_migration_shift_bug", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "gicd_no_migration_shift_bug", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + "fw_cfg": { + "Name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": 
"cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "cfi.pflash01": { + "Name": "cfi.pflash01", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "pflash_cfi01", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "wcycle", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cmd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "counter", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "pflash_cfi01_blk_write", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "blk_bytes", + "version_id": 0, + "field_exists": false, + "size": 0 + }, + { + "field": "blk_offset", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "pl061_luminary": { + "Name": "pl061_luminary", + "version_id": 4, + "minimum_version_id": 4, + "Description": { + "name": "pl061", + "version_id": 4, + "minimum_version_id": 4, + "Fields": [ + { + "field": "locked", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + 
"field": "data", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "old_out_data", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "old_in_data", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dir", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "isense", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ibe", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "iev", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "im", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "istate", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "afsel", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dr2r", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dr4r", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dr8r", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "odr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "pur", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "pdr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "slr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "den", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "amsel", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ] + } + }, + "pl011_luminary": { + "Name": "pl011_luminary", + "version_id": 2, + "minimum_version_id": 2, + "Description": { + "name": "pl011", + "version_id": 2, + "minimum_version_id": 2, + "Fields": [ + { + "field": "readbuff", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": 
"flags", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "lcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "rsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dmacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "int_enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "int_level", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_fifo", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ilpr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ibrd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "fbrd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ifl", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_pos", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_trigger", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "pl011/clock", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "clk", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "clock", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "clock/muldiv", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "multiplier", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "divider", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + } + ] + } + ] + } + }, + "pl061": { + 
"Name": "pl061", + "version_id": 4, + "minimum_version_id": 4, + "Description": { + "name": "pl061", + "version_id": 4, + "minimum_version_id": 4, + "Fields": [ + { + "field": "locked", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "data", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "old_out_data", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "old_in_data", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dir", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "isense", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ibe", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "iev", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "im", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "istate", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "afsel", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dr2r", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dr4r", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dr8r", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "odr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "pur", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "pdr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "slr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "den", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "amsel", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ] + } + }, + "vmcoreinfo": { + "Name": "vmcoreinfo", + 
"version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "vmcoreinfo", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "has_vmcoreinfo", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "vmcoreinfo.host_format", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "vmcoreinfo.guest_format", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "vmcoreinfo.size", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "vmcoreinfo.paddr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "arm_gic_common": { + "Name": "arm_gic_common", + "version_id": 12, + "minimum_version_id": 12, + "Description": { + "name": "arm_gic", + "version_id": 12, + "minimum_version_id": 12, + "Fields": [ + { + "field": "ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cpu_ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq_state", + "version_id": 1, + "field_exists": false, + "size": 7, + "Description": { + "name": "arm_gic_irq_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "active", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "level", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "model", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "edge_trigger", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "group", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "irq_target", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority1", + "version_id": 0, + "field_exists": false, + 
"size": 1 + }, + { + "field": "priority2", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sgi_pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority_mask", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "running_priority", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "current_pending", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "bpr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "abpr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "apr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "nsapr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "arm_gic_virt_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "h_hcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "h_misr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "h_lr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "h_apr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cpu_ctlr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "priority_mask", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "running_priority", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "current_pending", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "bpr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "abpr", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + "arm-smmuv3": { + "Name": "arm-smmuv3", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "smmuv3", + "version_id": 1, + 
"minimum_version_id": 1, + "Fields": [ + { + "field": "features", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "sid_size", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sid_split", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cr0ack", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "statusr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq_ctrl", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gerror", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gerrorn", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gerror_irq_cfg0", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "gerror_irq_cfg1", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "gerror_irq_cfg2", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "strtab_base", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "strtab_base_cfg", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "eventq_irq_cfg0", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "eventq_irq_cfg1", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "eventq_irq_cfg2", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cmdq", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "smmuv3_queue", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "base", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "prod", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cons", + "version_id": 0, + "field_exists": false, + 
"size": 4 + }, + { + "field": "log2size", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "eventq", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "smmuv3_queue", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "base", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "prod", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cons", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "log2size", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + } + }, + "pl011": { + "Name": "pl011", + "version_id": 2, + "minimum_version_id": 2, + "Description": { + "name": "pl011", + "version_id": 2, + "minimum_version_id": 2, + "Fields": [ + { + "field": "readbuff", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "flags", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "lcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "rsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "dmacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "int_enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "int_level", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_fifo", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ilpr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ibrd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "fbrd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ifl", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_pos", + "version_id": 0, 
+ "field_exists": false, + "size": 4 + }, + { + "field": "read_count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "read_trigger", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "pl011/clock", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "clk", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "clock", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "clock/muldiv", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "multiplier", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "divider", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + } + ] + } + ] + } + }, + "armv7m": { + "Name": "armv7m", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "armv7m", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "refclk", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "clock", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "clock/muldiv", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "multiplier", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "divider", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + { + "field": "cpuclk", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "clock", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": 
"clock/muldiv", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "multiplier", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "divider", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + } + ] + } + }, + "fw_cfg_mem": { + "Name": "fw_cfg_mem", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "pl031": { + "Name": "pl031", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "pl031", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "tick_offset_vmstate", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "mr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "lr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "im", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "is", + "version_id": 0, + "field_exists": 
false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "pl031/tick-offset", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "tick_offset", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "fw_cfg_io": { + "Name": "fw_cfg_io", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "virtio-serial-device": { + "Name": "virtio-serial-device", + "version_id": 3, + "minimum_version_id": 3, + "Description": { + "name": "virtio-console", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "virtio", + "version_id": 0, + "field_exists": false, + "size": 0 + } + ] + } + }, + "acpi-ged": { + "Name": "acpi-ged", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "acpi-ged", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "ged_state", + "version_id": 1, + "field_exists": false, + "size": 560, + "Description": { + "name": "acpi-ged-state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { 
+ "field": "sel", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ], + "Subsections": [ + { + "name": "acpi-ged/memhp", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "memhp_state", + "version_id": 1, + "field_exists": false, + "size": 304, + "Description": { + "name": "memory hotplug state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "selector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "devs", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "memory hotplug device state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "is_enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "is_inserting", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ost_event", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ost_status", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "acpi-ged/ghes", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "ghes_state", + "version_id": 1, + "field_exists": false, + "size": 16, + "Description": { + "name": "acpi-ghes", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "ghes_addr_le", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + } + ] + } + ] + } + }, + "pcie-host-bridge": { + "Name": "pcie-host-bridge", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "gpio-key": { + "Name": "gpio-key", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "gpio-key", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": 
"timer", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "armv7m_nvic": { + "Name": "armv7m_nvic", + "version_id": 4, + "minimum_version_id": 4, + "Description": { + "name": "armv7m_nvic", + "version_id": 4, + "minimum_version_id": 4, + "Fields": [ + { + "field": "vectors", + "version_id": 1, + "field_exists": false, + "size": 6, + "Description": { + "name": "armv7m_nvic_info", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "prio", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "active", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "level", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "prigroup[M_REG_NS]", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "armv7m_nvic/m-security", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "sec_vectors", + "version_id": 1, + "field_exists": false, + "size": 6, + "Description": { + "name": "armv7m_nvic_info", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "prio", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "active", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "level", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "prigroup[M_REG_S]", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "itns", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + "pci-host-bridge": { + 
"Name": "pci-host-bridge", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "gpex-root": { + "Name": "gpex-root", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "gpex_root", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "parent_obj", + "version_id": 0, + "field_exists": false, + "size": 2608, + "Description": { + "name": "PCIDevice", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "version_id", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 256 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 4096 + }, + { + "field": "irq_state", + "version_id": 2, + "field_exists": false, + "size": 16 + } + ] + } + } + ] + } + }, + "armv7m_systick": { + "Name": "armv7m_systick", + "version_id": 3, + "minimum_version_id": 3, + "Description": { + "name": "armv7m_systick", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "refclk", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "clock", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "clock/muldiv", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "multiplier", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "divider", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + { + "field": "cpuclk", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "clock", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + 
"field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "clock/muldiv", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "multiplier", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "divider", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + { + "field": "control", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "tick", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "ptimer", + "version_id": 1, + "field_exists": false, + "size": 8, + "Description": { + "name": "ptimer", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "limit", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "delta", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "period_frac", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "period", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "last_event", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "next_event", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "timer", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + } + ] + } + } +} diff --git a/tests/vmstate-static-checker-data/dump1.json b/tests/data/vmstate-static-checker/dump1.json similarity index 100% rename from tests/vmstate-static-checker-data/dump1.json rename to tests/data/vmstate-static-checker/dump1.json diff --git a/tests/vmstate-static-checker-data/dump2.json b/tests/data/vmstate-static-checker/dump2.json similarity index 100% rename from tests/vmstate-static-checker-data/dump2.json rename to tests/data/vmstate-static-checker/dump2.json diff --git 
a/tests/data/vmstate-static-checker/m68k/virt-7.2.json b/tests/data/vmstate-static-checker/m68k/virt-7.2.json new file mode 100644 index 0000000000000..16bee8cdad019 --- /dev/null +++ b/tests/data/vmstate-static-checker/m68k/virt-7.2.json @@ -0,0 +1,2936 @@ +{ + "vmschkmachine": { + "Name": "virt-7.2" + }, + "m68020-m68k-cpu": { + "Name": "m68020-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + 
"field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + 
{ + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "m68030-m68k-cpu": { + "Name": "m68030-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + 
"size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + 
"field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "fw_cfg": { + "Name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + 
"size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "m68k-irq-controller": { + "Name": "m68k-irq-controller", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "m68k-irqc", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "ipr", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "virt-ctrl": { + "Name": "virt-ctrl", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "virt-ctrl", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "irq_enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "m68040-m68k-cpu": { + "Name": "m68040-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": 
false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + 
"version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + 
{ + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "goldfish_pic": { + "Name": "goldfish_pic", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "goldfish_pic", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "pending", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "goldfish_tty": { + "Name": "goldfish_tty", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "goldfish_tty", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "data_len", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "data_ptr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "int_enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "rx_fifo", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "Fifo8", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "data", + "version_id": 1, + "field_exists": false, + "size": 0 + }, + { + "field": "head", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "num", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + }, + "m68000-m68k-cpu": { + "Name": "m68000-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + 
"version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": 
{ + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": 
false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "m68010-m68k-cpu": { + "Name": "m68010-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + 
"version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + 
"size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "m68060-m68k-cpu": { + "Name": "m68060-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": 
false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + 
{ + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "fw_cfg_mem": { + "Name": "fw_cfg_mem", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + 
"field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "any-m68k-cpu": { + "Name": "any-m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": 
"env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 
0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "fw_cfg_io": { + "Name": "fw_cfg_io", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + 
] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "virtio-serial-device": { + "Name": "virtio-serial-device", + "version_id": 3, + "minimum_version_id": 3, + "Description": { + "name": "virtio-console", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "virtio", + "version_id": 0, + "field_exists": false, + "size": 0 + } + ] + } + }, + "goldfish_rtc": { + "Name": "goldfish_rtc", + "version_id": 3, + "minimum_version_id": 0, + "Description": { + "name": "goldfish_rtc", + "version_id": 3, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tick_offset_vmstate", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "alarm_next", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "alarm_running", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq_pending", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq_enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "time_high", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "tick_offset", + "version_id": 3, + "field_exists": false, + "size": 8 + } + ] + } + }, + "m68k-cpu": { + "Name": "m68k-cpu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "cpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.dregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.aregs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pc", + "version_id": 0, + "field_exists": false, + 
"size": 4 + }, + { + "field": "env.sr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.current_sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_op", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_x", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_n", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_v", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_c", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.cc_z", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_vector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.pending_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "cpu/fpu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.fpcr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fpsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.fregs", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + }, + { + "field": "env.fp_result", + "version_id": 0, + "field_exists": false, + "size": 16, + "Description": { + "name": "freg", + "version_id": 0, + 
"minimum_version_id": 0, + "Fields": [ + { + "field": "tmp", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "freg_tmp", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "tmp_mant", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tmp_exp", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "cpu/cf_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.macc", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "env.macsr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mac_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.rambar0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mbar", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_mmu", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.mmu.ar", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.ssw", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.tcr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "env.mmu.urp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.srp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.fault", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "env.mmu.ttr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.mmu.mmusr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "cpu/68040_spregs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "env.vbr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": 
"env.cacr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.sfc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "env.dfc", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + } +} diff --git a/tests/data/vmstate-static-checker/ppc64/pseries-7.2.json b/tests/data/vmstate-static-checker/ppc64/pseries-7.2.json new file mode 100644 index 0000000000000..330bbbefedfad --- /dev/null +++ b/tests/data/vmstate-static-checker/ppc64/pseries-7.2.json @@ -0,0 +1,1068 @@ +{ + "vmschkmachine": { + "Name": "pseries-7.2" + }, + "spapr-nvram": { + "Name": "spapr-nvram", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "spapr_nvram", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "size", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "buf", + "version_id": 1, + "field_exists": false, + "size": 0 + } + ] + } + }, + "xive-source": { + "Name": "xive-source", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "xive-source", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "nr_irqs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "status", + "version_id": 1, + "field_exists": false, + "size": 0 + } + ] + } + }, + "xive-tctx": { + "Name": "xive-tctx", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "xive-tctx", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "regs", + "version_id": 0, + "field_exists": false, + "size": 64 + } + ] + } + }, + "pci-host-bridge": { + "Name": "pci-host-bridge", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "ics-spapr": { + "Name": "ics-spapr", + "version_id": 1, 
+ "minimum_version_id": 1, + "Description": { + "name": "ics", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "nr_irqs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irqs", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "ics/irq", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "server", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "priority", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "saved_priority", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "flags", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + } + }, + "scsi-cd": { + "Name": "scsi-cd", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "scsi-disk", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "qdev", + "version_id": 0, + "field_exists": false, + "size": 608, + "Description": { + "name": "SCSIDevice", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "unit_attention.key", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "unit_attention.asc", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "unit_attention.ascq", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense_is_ua", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense_len", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "requests", + "version_id": 0, + "field_exists": false, + "size": 0 + } + ], + "Subsections": [ + { + "name": "SCSIDevice/sense", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + 
"field": "sense", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + { + "field": "media_changed", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "media_event", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "eject_request", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tray_open", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tray_locked", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "spapr-xive": { + "Name": "spapr-xive", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "spapr-xive", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "nr_irqs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "eat", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "spapr-xive/eas", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "w", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + { + "field": "endt", + "version_id": 0, + "field_exists": false, + "size": 32, + "Description": { + "name": "spapr-xive/end", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "w0", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w1", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w2", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w3", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w4", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w5", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w6", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "w7", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + }, + 
"scsi-hd": { + "Name": "scsi-hd", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "scsi-disk", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "qdev", + "version_id": 0, + "field_exists": false, + "size": 608, + "Description": { + "name": "SCSIDevice", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "unit_attention.key", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "unit_attention.asc", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "unit_attention.ascq", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense_is_ua", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense_len", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "requests", + "version_id": 0, + "field_exists": false, + "size": 0 + } + ], + "Subsections": [ + { + "name": "SCSIDevice/sense", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "sense", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + { + "field": "media_changed", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "media_event", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "eject_request", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tray_open", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tray_locked", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "fw_cfg_mem": { + "Name": "fw_cfg_mem", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": 
"cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "spapr-vty": { + "Name": "spapr-vty", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "spapr_vty", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "sdev", + "version_id": 0, + "field_exists": false, + "size": 848, + "Description": { + "name": "spapr_vio", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "reg", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "signal_state", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "crq.qladdr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "crq.qsize", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "crq.qnext", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + { + "field": "in", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "out", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "buf", + "version_id": 0, + "field_exists": false, + "size": 16 + } + ] + } + }, + "spapr-wdt": { + "Name": "spapr-wdt", + "version_id": 1, + 
"minimum_version_id": 1, + "Description": { + "name": "spapr_watchdog", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "timer", + "version_id": 0, + "field_exists": false, + "size": 48 + }, + { + "field": "action", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "leave_others", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "fw_cfg_io": { + "Name": "fw_cfg_io", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "spapr-vscsi": { + "Name": "spapr-vscsi", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "spapr_vscsi", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "vdev", + "version_id": 0, + "field_exists": false, + "size": 848, + "Description": { + "name": "spapr_vio", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "reg", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq", + "version_id": 0, + "field_exists": false, + "size": 4 + 
}, + { + "field": "signal_state", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "crq.qladdr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "crq.qsize", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "crq.qnext", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + }, + "spapr-rtc": { + "Name": "spapr-rtc", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "spapr/rtc", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "ns_offset", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "spapr-vlan": { + "Name": "spapr-vlan", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "spapr_llan", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "sdev", + "version_id": 0, + "field_exists": false, + "size": 848, + "Description": { + "name": "spapr_vio", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "reg", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "irq", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "signal_state", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "crq.qladdr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "crq.qsize", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "crq.qnext", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + { + "field": "isopen", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "buf_list", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "add_buf_ptr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "use_buf_ptr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "rx_bufs", + "version_id": 0, + 
"field_exists": false, + "size": 4 + }, + { + "field": "rxq_ptr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "spapr_llan/rx_pools", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "rx_pool", + "version_id": 1, + "field_exists": false, + "size": 8, + "Description": { + "name": "spapr_llan/rx_buffer_pool", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "bufsize", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "bds", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + } + ] + } + ] + } + }, + "fw_cfg": { + "Name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "ics": { + "Name": "ics", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "ics", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "nr_irqs", + "version_id": 0, + 
"field_exists": false, + "size": 4 + }, + { + "field": "irqs", + "version_id": 0, + "field_exists": false, + "size": 8, + "Description": { + "name": "ics/irq", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "server", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "priority", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "saved_priority", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "flags", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + } + } +} diff --git a/tests/data/vmstate-static-checker/s390x/s390-ccw-virtio-7.2.json b/tests/data/vmstate-static-checker/s390x/s390-ccw-virtio-7.2.json new file mode 100644 index 0000000000000..9698852bd3870 --- /dev/null +++ b/tests/data/vmstate-static-checker/s390x/s390-ccw-virtio-7.2.json @@ -0,0 +1,475 @@ +{ + "vmschkmachine": { + "Name": "s390-ccw-virtio-7.2" + }, + "sclpquiesce": { + "Name": "sclpquiesce", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "sclpquiesce", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "event_pending", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "s390-sclp-event-facility": { + "Name": "s390-sclp-event-facility", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "vmstate-event-facility", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "receive_mask_pieces[1]", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "vmstate-event-facility/mask64", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "receive_mask_pieces[0]", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "vmstate-event-facility/mask_length", + "version_id": 0, + 
"minimum_version_id": 0, + "Fields": [ + { + "field": "mask_length", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + ] + } + }, + "zpci": { + "Name": "zpci", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "zpci", + "version_id": 0, + "minimum_version_id": 0 + } + }, + "sclpconsole": { + "Name": "sclpconsole", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "sclpconsole", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "event.event_pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "iov", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "iov_sclp", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "iov_bs", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "iov_data_len", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "iov_sclp_rest", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "virtio-serial-device": { + "Name": "virtio-serial-device", + "version_id": 3, + "minimum_version_id": 3, + "Description": { + "name": "virtio-console", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "virtio", + "version_id": 0, + "field_exists": false, + "size": 0 + } + ] + } + }, + "sclplmconsole": { + "Name": "sclplmconsole", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "sclplmconsole", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "event.event_pending", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "write_errors", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "length", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "buf", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "s390-pcihost": { + "Name": "s390-pcihost", + 
"version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "s390-flic-qemu": { + "Name": "s390-flic-qemu", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "qemu-s390-flic", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "simm", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "nimm", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "fw_cfg_io": { + "Name": "fw_cfg_io", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "s390-ipl": { + "Name": "s390-ipl", + "version_id": 0, + "minimum_version_id": 0, + "Description": { + "name": "ipl", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "compat_start_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "compat_bios_start_addr", + 
"version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "iplb", + "version_id": 0, + "field_exists": false, + "size": 4096, + "Description": { + "name": "ipl/iplb", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "reserved1", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "devno", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "reserved2", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ], + "Subsections": [ + { + "name": "ipl/iplb_extended", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "reserved_ext", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + { + "field": "iplb_valid", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cssid", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ssid", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "devno", + "version_id": 0, + "field_exists": false, + "size": 2 + } + ] + } + }, + "pci-host-bridge": { + "Name": "pci-host-bridge", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "fw_cfg_mem": { + "Name": "fw_cfg_mem", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + 
"version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + } +} diff --git a/tests/data/vmstate-static-checker/x86_64/pc-q35-7.2.json b/tests/data/vmstate-static-checker/x86_64/pc-q35-7.2.json new file mode 100644 index 0000000000000..e527dffaccace --- /dev/null +++ b/tests/data/vmstate-static-checker/x86_64/pc-q35-7.2.json @@ -0,0 +1,3297 @@ +{ + "vmschkmachine": { + "Name": "pc-q35-7.2" + }, + "fw_cfg": { + "Name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "pcie-host-bridge": { + "Name": "pcie-host-bridge", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + 
"minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "isa-pit": { + "Name": "isa-pit", + "version_id": 3, + "minimum_version_id": 2, + "Description": { + "name": "i8254", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "channels[0].irq_disabled", + "version_id": 3, + "field_exists": false, + "size": 4 + }, + { + "field": "channels", + "version_id": 2, + "field_exists": false, + "size": 56, + "Description": { + "name": "pit channel", + "version_id": 2, + "minimum_version_id": 2, + "Fields": [ + { + "field": "count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "latched_count", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "count_latched", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status_latched", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "read_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "write_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "write_latch", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "rw_mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "bcd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "gate", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "count_load_time", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "next_transition_time", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + { + "field": "channels[0].next_transition_time", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + 
} + }, + "mch": { + "Name": "mch", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "mch", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "parent_obj", + "version_id": 0, + "field_exists": false, + "size": 2608, + "Description": { + "name": "PCIDevice", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "version_id", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 256 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 4096 + }, + { + "field": "irq_state", + "version_id": 2, + "field_exists": false, + "size": 16 + } + ] + } + }, + { + "field": "unused", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "pic-common": { + "Name": "pic-common", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "i8259", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "last_irr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "irr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "imr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "isr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority_add", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "irq_base", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "read_reg_select", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "poll", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "special_mask", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "init_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "auto_eoi", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": 
"rotate_on_auto_eoi", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "special_fully_nested_mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "init4", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "single_mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "elcr", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "ICH9-LPC": { + "Name": "ICH9-LPC", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "ICH9LPC", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "d", + "version_id": 0, + "field_exists": false, + "size": 2608, + "Description": { + "name": "PCIDevice", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "version_id", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 256 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 4096 + }, + { + "field": "irq_state", + "version_id": 2, + "field_exists": false, + "size": 16 + } + ] + } + }, + { + "field": "apm", + "version_id": 0, + "field_exists": false, + "size": 304, + "Description": { + "name": "APM State", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "apmc", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "apms", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "pm", + "version_id": 0, + "field_exists": false, + "size": 6480, + "Description": { + "name": "ich9_pm", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "acpi_regs.pm1.evt.sts", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "acpi_regs.pm1.evt.en", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "acpi_regs.pm1.cnt.cnt", + "version_id": 0, + 
"field_exists": false, + "size": 2 + }, + { + "field": "acpi_regs.tmr.timer", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "acpi_regs.tmr.overflow_time", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "acpi_regs.gpe.sts", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "acpi_regs.gpe.en", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smi_en", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "smi_sts", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "ich9_pm/memhp", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "acpi_memory_hotplug", + "version_id": 1, + "field_exists": false, + "size": 304, + "Description": { + "name": "memory hotplug state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "selector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "devs", + "version_id": 0, + "field_exists": false, + "size": 24, + "Description": { + "name": "memory hotplug device state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "is_enabled", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "is_inserting", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ost_event", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ost_status", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "ich9_pm/tco", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "tco_regs", + "version_id": 1, + "field_exists": false, + "size": 320, + "Description": { + "name": "tco io device status", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "tco.rld", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + 
"field": "tco.din", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tco.dout", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tco.sts1", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "tco.sts2", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "tco.cnt1", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "tco.cnt2", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "tco.msg1", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tco.msg2", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tco.wdcnt", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tco.tmr", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "sw_irq_gen", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tco_timer", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "expire_time", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "timeouts_no", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + }, + { + "name": "ich9_pm/cpuhp", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cpuhp_state", + "version_id": 1, + "field_exists": false, + "size": 304, + "Description": { + "name": "CPU hotplug state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "selector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "command", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "devs", + "version_id": 0, + "field_exists": false, + "size": 32, + "Description": { + "name": "CPU hotplug device state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "is_inserting", + "version_id": 0, + "field_exists": false, + 
"size": 1 + }, + { + "field": "is_removing", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ost_event", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ost_status", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + } + ] + }, + { + "name": "ich9_pm/pcihp", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "acpi_pci_hotplug.hotplug_select", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "acpi_pci_hotplug.acpi_pcihp_pci_status", + "version_id": 1, + "field_exists": false, + "size": 12, + "Description": { + "name": "acpi_pcihp_pci_status", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "up", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "down", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + { + "field": "acpi_pci_hotplug.acpi_index", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + { + "field": "chip_config", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sci_level", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "ICH9LPC/rst_cnt", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "rst_cnt", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + }, + { + "name": "ICH9LPC/smi_feat", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "smi_guest_features_le", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smi_features_ok", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smi_negotiated_features", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "pit-common": { + "Name": "pit-common", + "version_id": 3, + "minimum_version_id": 2, + "Description": { + "name": "i8254", + "version_id": 
3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "channels[0].irq_disabled", + "version_id": 3, + "field_exists": false, + "size": 4 + }, + { + "field": "channels", + "version_id": 2, + "field_exists": false, + "size": 56, + "Description": { + "name": "pit channel", + "version_id": 2, + "minimum_version_id": 2, + "Fields": [ + { + "field": "count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "latched_count", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "count_latched", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status_latched", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "read_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "write_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "write_latch", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "rw_mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "bcd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "gate", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "count_load_time", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "next_transition_time", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + { + "field": "channels[0].next_transition_time", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "vmgenid": { + "Name": "vmgenid", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "vmgenid", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "vmgenid_addr_le", + "version_id": 0, + "field_exists": false, + "size": 
1 + } + ] + } + }, + "ioapic-common": { + "Name": "ioapic-common", + "version_id": 3, + "minimum_version_id": 1, + "Description": { + "name": "ioapic", + "version_id": 3, + "minimum_version_id": 1, + "Fields": [ + { + "field": "id", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ioregsel", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "unused", + "version_id": 2, + "field_exists": false, + "size": 8 + }, + { + "field": "irr", + "version_id": 2, + "field_exists": false, + "size": 4 + }, + { + "field": "ioredtbl", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "isa-i8259": { + "Name": "isa-i8259", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "i8259", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "last_irr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "irr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "imr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "isr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "priority_add", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "irq_base", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "read_reg_select", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "poll", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "special_mask", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "init_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "auto_eoi", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "rotate_on_auto_eoi", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "special_fully_nested_mode", + "version_id": 0, + "field_exists": false, + "size": 1 
+ }, + { + "field": "init4", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "single_mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "elcr", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "cfi.pflash01": { + "Name": "cfi.pflash01", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "pflash_cfi01", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "wcycle", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cmd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "counter", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ], + "Subsections": [ + { + "name": "pflash_cfi01_blk_write", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "blk_bytes", + "version_id": 0, + "field_exists": false, + "size": 0 + }, + { + "field": "blk_offset", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "i8042-mmio": { + "Name": "i8042-mmio", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "pckbd-mmio", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "kbd", + "version_id": 0, + "field_exists": false, + "size": 2328, + "Description": { + "name": "pckbd", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "write_cmd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pending_tmp", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ], + "Subsections": [ + { + "name": "pckbd_outport", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "outport", 
+ "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + }, + { + "name": "pckbd/extended_state", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "migration_flags", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "obsrc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "obdata", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cbdata", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + } + ] + } + }, + "isa-serial": { + "Name": "isa-serial", + "version_id": 3, + "minimum_version_id": 2, + "Description": { + "name": "serial", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "state", + "version_id": 0, + "field_exists": false, + "size": 656, + "Description": { + "name": "serial", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "divider", + "version_id": 2, + "field_exists": false, + "size": 2 + }, + { + "field": "rbr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ier", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "iir", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "lcr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mcr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "lsr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "msr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "scr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "fcr_vmstate", + "version_id": 3, + "field_exists": false, + "size": 1 + } + ], + "Subsections": [ + { + "name": "serial/thr_ipending", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "thr_ipending", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": 
"serial/tsr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "tsr_retry", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "thr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tsr", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + }, + { + "name": "serial/recv_fifo", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "recv_fifo", + "version_id": 1, + "field_exists": false, + "size": 24, + "Description": { + "name": "Fifo8", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "data", + "version_id": 1, + "field_exists": false, + "size": 0 + }, + { + "field": "head", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "num", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + }, + { + "name": "serial/xmit_fifo", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "xmit_fifo", + "version_id": 1, + "field_exists": false, + "size": 24, + "Description": { + "name": "Fifo8", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "data", + "version_id": 1, + "field_exists": false, + "size": 0 + }, + { + "field": "head", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "num", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + }, + { + "name": "serial/fifo_timeout_timer", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "fifo_timeout_timer", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "serial/timeout_ipending", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "timeout_ipending", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "serial/poll", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "poll_msl", + "version_id": 0, + 
"field_exists": false, + "size": 4 + }, + { + "field": "modem_status_poll", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + } + ] + } + }, + "ps2-mouse": { + "Name": "ps2-mouse", + "version_id": 2, + "minimum_version_id": 2, + "Description": { + "name": "ps2mouse", + "version_id": 2, + "minimum_version_id": 2, + "Fields": [ + { + "field": "parent_obj", + "version_id": 0, + "field_exists": false, + "size": 1104, + "Description": { + "name": "PS2 Common State", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "write_cmd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.rptr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.wptr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.data", + "version_id": 0, + "field_exists": false, + "size": 256 + } + ] + } + }, + { + "field": "mouse_status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mouse_resolution", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mouse_sample_rate", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mouse_wrap", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mouse_type", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mouse_detect_state", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mouse_dx", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "mouse_dy", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "mouse_dz", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "mouse_buttons", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "kvmvapic": { + "Name": "kvmvapic", + 
"version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "kvm-tpr-opt", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "rom_state", + "version_id": 0, + "field_exists": false, + "size": 124, + "Description": { + "name": "kvmvapic-guest-rom", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "unused", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "vaddr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "fixup_start", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "fixup_end", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "vapic_vaddr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "vapic_size", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "vcpu_shift", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "real_tpr_addr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "up", + "version_id": 0, + "field_exists": false, + "size": 44, + "Description": { + "name": "kvmvapic-handlers", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "set_tpr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "set_tpr_eax", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "get_tpr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "get_tpr_stack", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + { + "field": "mp", + "version_id": 0, + "field_exists": false, + "size": 44, + "Description": { + "name": "kvmvapic-handlers", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "set_tpr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "set_tpr_eax", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + 
"field": "get_tpr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "get_tpr_stack", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + }, + { + "field": "state", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "real_tpr_addr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "rom_state_vaddr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "vapic_paddr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "rom_state_paddr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "ich9-ahci": { + "Name": "ich9-ahci", + "version_id": 1, + "minimum_version_id": 0, + "Description": { + "name": "ich9_ahci", + "version_id": 1, + "minimum_version_id": 0, + "Fields": [ + { + "field": "parent_obj", + "version_id": 0, + "field_exists": false, + "size": 2608, + "Description": { + "name": "PCIDevice", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "version_id", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 256 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 4096 + }, + { + "field": "irq_state", + "version_id": 2, + "field_exists": false, + "size": 16 + } + ] + } + }, + { + "field": "ahci", + "version_id": 0, + "field_exists": false, + "size": 624, + "Description": { + "name": "ahci", + "version_id": 1, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dev", + "version_id": 0, + "field_exists": false, + "size": 6088, + "Description": { + "name": "ahci port", + "version_id": 1, + "minimum_version_id": 0, + "Fields": [ + { + "field": "port", + "version_id": 1, + "field_exists": false, + "size": 2304, + "Description": { + "name": "ide_bus", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cmd", + "version_id": 
0, + "field_exists": false, + "size": 1 + }, + { + "field": "unit", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ], + "Subsections": [ + { + "name": "ide_bus/error", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "error_status", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "retry_sector_num", + "version_id": 2, + "field_exists": false, + "size": 8 + }, + { + "field": "retry_nsector", + "version_id": 2, + "field_exists": false, + "size": 4 + }, + { + "field": "retry_unit", + "version_id": 2, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + }, + { + "field": "port.ifs[0]", + "version_id": 1, + "field_exists": false, + "size": 984, + "Description": { + "name": "ide_drive", + "version_id": 3, + "minimum_version_id": 0, + "Fields": [ + { + "field": "mult_sectors", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "identify_set", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "identify_data", + "version_id": 0, + "field_exists": true, + "size": 512 + }, + { + "field": "feature", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "error", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "nsector", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "sector", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "lcyl", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "hcyl", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "hob_feature", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "hob_sector", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "hob_nsector", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "hob_lcyl", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": 
"hob_hcyl", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "select", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "lba48", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "sense_key", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "asc", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cdrom_changed", + "version_id": 3, + "field_exists": false, + "size": 1 + } + ], + "Subsections": [ + { + "name": "ide_drive/pio_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "req_nb_sectors", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "io_buffer", + "version_id": 1, + "field_exists": false, + "size": 1 + }, + { + "field": "cur_io_buffer_offset", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "cur_io_buffer_len", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "end_transfer_fn_idx", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "elementary_transfer_size", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "packet_transfer_size", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "ide_drive/tray_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "tray_open", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "tray_locked", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + }, + { + "name": "ide_drive/atapi/gesn_state", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "events.new_media", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "events.eject_request", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } 
+ ] + } + }, + { + "field": "port_state", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "finished", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.lst_addr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.lst_addr_hi", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.fis_addr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.fis_addr_hi", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.irq_stat", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.irq_mask", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.cmd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.tfdata", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.sig", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.scr_stat", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.scr_ctl", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.scr_err", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.scr_act", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "port_regs.cmd_issue", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "done_first_drq", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "busy_slot", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "init_d2h_sent", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "ncq_tfs", + "version_id": 1, + "field_exists": false, + "size": 112, + "Description": { + "name": "ncq state", + "version_id": 1, + 
"minimum_version_id": 0, + "Fields": [ + { + "field": "sector_count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "lba", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "tag", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cmd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "slot", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "used", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "halt", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + } + }, + { + "field": "control_regs.cap", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "control_regs.ghc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "control_regs.irqstatus", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "control_regs.impl", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "control_regs.version", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "idp_index", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "ports", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + } + ] + } + }, + "i8042": { + "Name": "i8042", + "version_id": 3, + "minimum_version_id": 3, + "Description": { + "name": "pckbd", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "kbd", + "version_id": 0, + "field_exists": false, + "size": 2328, + "Description": { + "name": "pckbd", + "version_id": 3, + "minimum_version_id": 3, + "Fields": [ + { + "field": "write_cmd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pending_tmp", + 
"version_id": 0, + "field_exists": false, + "size": 1 + } + ], + "Subsections": [ + { + "name": "pckbd_outport", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "outport", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + }, + { + "name": "pckbd/extended_state", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "migration_flags", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "obsrc", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "obdata", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "cbdata", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + ] + } + } + ] + } + }, + "fw_cfg_io": { + "Name": "fw_cfg_io", + "version_id": 2, + "minimum_version_id": 1, + "Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + }, + "vmcoreinfo": { + "Name": "vmcoreinfo", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "vmcoreinfo", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + 
"field": "has_vmcoreinfo", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "vmcoreinfo.host_format", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "vmcoreinfo.guest_format", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "vmcoreinfo.size", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "vmcoreinfo.paddr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "ps2-kbd": { + "Name": "ps2-kbd", + "version_id": 3, + "minimum_version_id": 2, + "Description": { + "name": "ps2kbd", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "parent_obj", + "version_id": 0, + "field_exists": false, + "size": 1104, + "Description": { + "name": "PS2 Common State", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "write_cmd", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.rptr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.wptr", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.count", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "queue.data", + "version_id": 0, + "field_exists": false, + "size": 256 + } + ] + } + }, + { + "field": "scan_enabled", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "translate", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "scancode_set", + "version_id": 3, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "ps2kbd/ledstate", + "version_id": 3, + "minimum_version_id": 2, + "Fields": [ + { + "field": "ledstate", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + }, + { + "name": "ps2kbd/need_high_bit", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "need_high_bit", + "version_id": 0, + "field_exists": false, + 
"size": 1 + } + ] + }, + { + "name": "ps2kbd/command_reply_queue", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "parent_obj.queue.cwptr", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + ] + } + }, + "ICH9-SMB": { + "Name": "ICH9-SMB", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "ich9_smb", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "dev", + "version_id": 0, + "field_exists": false, + "size": 2608, + "Description": { + "name": "PCIDevice", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "version_id", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 256 + }, + { + "field": "config", + "version_id": 0, + "field_exists": true, + "size": 4096 + }, + { + "field": "irq_state", + "version_id": 2, + "field_exists": false, + "size": 16 + } + ] + } + }, + { + "field": "irq_enabled", + "version_id": 0, + "field_exists": true, + "size": 1 + }, + { + "field": "smb", + "version_id": 1, + "field_exists": true, + "size": 384, + "Description": { + "name": "pmsmb", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "smb_stat", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_ctl", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_cmd", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_addr", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_data0", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_data1", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_index", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "smb_data", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_auxctl", + 
"version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "smb_blkdata", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "i2c_enable", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "op_done", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "in_i2c_block_read", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "start_transaction_on_status_read", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + } + }, + "isa-pcspk": { + "Name": "isa-pcspk", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "pcspk", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "data_on", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "dummy_refresh_clock", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "acpi-erst": { + "Name": "acpi-erst", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "acpi-erst", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "operation", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "busy_status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "command_status", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "record_offset", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "reg_action", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "reg_value", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "record_identifier", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "next_record_index", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "i8257": { + "Name": "i8257", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "dma", + 
"version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "command", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "mask", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "flip_flop", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "dshift", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "regs", + "version_id": 1, + "field_exists": false, + "size": 40, + "Description": { + "name": "dma_regs", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "now", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "base", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "mode", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "page", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "pageh", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "dack", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "eop", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + } + ] + } + }, + "port92": { + "Name": "port92", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "port92", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "outport", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "q35-pcihost": { + "Name": "q35-pcihost", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "ioapic": { + "Name": "ioapic", + "version_id": 3, + "minimum_version_id": 1, + "Description": { + "name": "ioapic", + "version_id": 3, + "minimum_version_id": 1, + "Fields": [ + { + "field": "id", + "version_id": 0, + 
"field_exists": false, + "size": 1 + }, + { + "field": "ioregsel", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "unused", + "version_id": 2, + "field_exists": false, + "size": 8 + }, + { + "field": "irr", + "version_id": 2, + "field_exists": false, + "size": 4 + }, + { + "field": "ioredtbl", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + }, + "smbus-eeprom": { + "Name": "smbus-eeprom", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "smbus-eeprom", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "smbusdev", + "version_id": 0, + "field_exists": false, + "size": 216, + "Description": { + "name": "smbus-device", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "i2c", + "version_id": 0, + "field_exists": false, + "size": 168, + "Description": { + "name": "I2CSlave", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "address", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "mode", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "data_len", + "version_id": 0, + "field_exists": false, + "size": 4 + }, + { + "field": "data_buf", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + { + "field": "data", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "offset", + "version_id": 0, + "field_exists": false, + "size": 1 + }, + { + "field": "accessed", + "version_id": 0, + "field_exists": false, + "size": 1 + } + ] + } + }, + "pci-host-bridge": { + "Name": "pci-host-bridge", + "version_id": 1, + "minimum_version_id": 1, + "Description": { + "name": "PCIHost", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "config_reg", + "version_id": 0, + "field_exists": false, + "size": 4 + } + ] + } + }, + "fw_cfg_mem": { + "Name": "fw_cfg_mem", + "version_id": 2, + "minimum_version_id": 1, + 
"Description": { + "name": "fw_cfg", + "version_id": 2, + "minimum_version_id": 1, + "Fields": [ + { + "field": "cur_entry", + "version_id": 0, + "field_exists": false, + "size": 2 + }, + { + "field": "cur_offset", + "version_id": 0, + "field_exists": true, + "size": 4 + }, + { + "field": "cur_offset", + "version_id": 2, + "field_exists": false, + "size": 4 + } + ], + "Subsections": [ + { + "name": "fw_cfg/dma", + "version_id": 0, + "minimum_version_id": 0, + "Fields": [ + { + "field": "dma_addr", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + }, + { + "name": "fw_cfg/acpi_mr", + "version_id": 1, + "minimum_version_id": 1, + "Fields": [ + { + "field": "table_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "linker_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + }, + { + "field": "rsdp_mr_size", + "version_id": 0, + "field_exists": false, + "size": 8 + } + ] + } + ] + } + } +} diff --git a/tests/decode/meson.build b/tests/decode/meson.build index b13fada9800e6..63405ca08fdf6 100644 --- a/tests/decode/meson.build +++ b/tests/decode/meson.build @@ -41,6 +41,7 @@ succ_tests = [ 'succ_argset_type1.decode', 'succ_function.decode', 'succ_ident1.decode', + 'succ_infer1.decode', 'succ_named_field.decode', 'succ_pattern_group_nest1.decode', 'succ_pattern_group_nest2.decode', diff --git a/tests/decode/succ_infer1.decode b/tests/decode/succ_infer1.decode new file mode 100644 index 0000000000000..6fa40bada5c90 --- /dev/null +++ b/tests/decode/succ_infer1.decode @@ -0,0 +1,4 @@ +&rprr_load rd pg rn rm dtype nreg +@rprr_load .... .... ... rm:5 ... 
pg:3 rn:5 rd:5 &rprr_load + +LD1Q 1100 0100 000 rm:5 101 pg:3 rn:5 rd:5 diff --git a/tests/docker/common.rc b/tests/docker/common.rc index a611e6adf970c..79d533ab2e500 100755 --- a/tests/docker/common.rc +++ b/tests/docker/common.rc @@ -21,6 +21,14 @@ else DEF_TARGET_LIST=${DEF_TARGET_LIST:-"x86_64-softmmu,aarch64-softmmu"} fi +enable_rust="" +if [ "$ENABLE_RUST" = "1" ]; then + enable_rust="--enable-rust" + if [ -n "$RUST_TARGET" ]; then + enable_rust="$enable_rust --rust-target-triple=$RUST_TARGET" + fi +fi + requires_binary() { found=0 @@ -45,12 +53,13 @@ configure_qemu() config_opts="--enable-werror \ ${TARGET_LIST:+--target-list=${TARGET_LIST}} \ --prefix=$INSTALL_DIR \ + $enable_rust \ $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS \ $@" echo "Configure options:" echo $config_opts $QEMU_SRC/configure $config_opts || \ - { cat config.log && test_fail "Failed to run 'configure'"; } + { cat config.log >&2 ; cat meson-logs/meson-log.txt >&2 ; test_fail "Failed to run 'configure'"; } } build_qemu() @@ -73,7 +82,7 @@ check_qemu() test_fail() { - echo "$@" + echo "$@" >&2 exit 1 } diff --git a/tests/docker/dockerfiles/alpine.docker b/tests/docker/dockerfiles/alpine.docker index bf3bd5a30dd95..52adf9ccbb712 100644 --- a/tests/docker/dockerfiles/alpine.docker +++ b/tests/docker/dockerfiles/alpine.docker @@ -78,7 +78,7 @@ RUN apk update && \ nmap-ncat \ numactl-dev \ openssh-client \ - pcre-dev \ + pcre2-dev \ pipewire-dev \ pixman-dev \ pkgconf \ @@ -131,8 +131,12 @@ ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" ENV NINJA "/usr/bin/ninja" ENV PYTHON "/usr/bin/python3" +# https://gitlab.alpinelinux.org/alpine/aports/-/issues/17463 +RUN apk add clang19-libclang # As a final step configure the user (if env is defined) ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/centos9.docker b/tests/docker/dockerfiles/centos9.docker index a942835a1d280..0674d778262f1 
100644 --- a/tests/docker/dockerfiles/centos9.docker +++ b/tests/docker/dockerfiles/centos9.docker @@ -25,6 +25,7 @@ RUN dnf distro-sync -y && \ capstone-devel \ ccache \ clang \ + compiler-rt \ ctags \ cyrus-sasl-devel \ daxctl-devel \ @@ -104,6 +105,7 @@ RUN dnf distro-sync -y && \ python3-tomli \ rdma-core-devel \ rust \ + rust-std-static \ sed \ snappy-devel \ socat \ @@ -140,3 +142,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-all-test-cross.docker b/tests/docker/dockerfiles/debian-all-test-cross.docker index 8ab244e018ab8..420a4e33e6087 100644 --- a/tests/docker/dockerfiles/debian-all-test-cross.docker +++ b/tests/docker/dockerfiles/debian-all-test-cross.docker @@ -15,7 +15,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ apt-get install -y eatmydata && \ eatmydata apt-get dist-upgrade -y && \ - apt build-dep -yy qemu + apt build-dep -yy --arch-only qemu # Add extra build tools and as many cross compilers as we can for testing RUN DEBIAN_FRONTEND=noninteractive eatmydata \ @@ -23,7 +23,9 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ bison \ ccache \ clang \ + dpkg-dev \ flex \ + gcc \ git \ libclang-rt-dev \ ninja-build \ @@ -33,16 +35,11 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ python3-venv \ python3-wheel -RUN DEBIAN_FRONTEND=noninteractive eatmydata \ - apt install -y --no-install-recommends \ - gcc-aarch64-linux-gnu \ +# All the generally available compilers +ENV AVAILABLE_COMPILERS gcc-aarch64-linux-gnu \ libc6-dev-arm64-cross \ gcc-arm-linux-gnueabihf \ libc6-dev-armhf-cross \ - gcc-hppa-linux-gnu \ - libc6-dev-hppa-cross \ - gcc-m68k-linux-gnu \ - libc6-dev-m68k-cross \ gcc-mips-linux-gnu \ libc6-dev-mips-cross \ gcc-mips64-linux-gnuabi64 \ @@ -51,18 +48,23 @@ RUN DEBIAN_FRONTEND=noninteractive eatmydata \ libc6-dev-mips64el-cross \ gcc-mipsel-linux-gnu \ libc6-dev-mipsel-cross \ - 
gcc-powerpc-linux-gnu \ - libc6-dev-powerpc-cross \ - gcc-powerpc64-linux-gnu \ - libc6-dev-ppc64-cross \ gcc-powerpc64le-linux-gnu \ libc6-dev-ppc64el-cross \ gcc-riscv64-linux-gnu \ libc6-dev-riscv64-cross \ gcc-s390x-linux-gnu \ - libc6-dev-s390x-cross \ - gcc-sparc64-linux-gnu \ - libc6-dev-sparc64-cross && \ + libc6-dev-s390x-cross + +RUN if dpkg-architecture -e amd64; then \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-hppa-linux-gnu libc6-dev-hppa-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-m68k-linux-gnu libc6-dev-m68k-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-powerpc-linux-gnu libc6-dev-powerpc-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-powerpc64-linux-gnu libc6-dev-ppc64-cross"; \ + export AVAILABLE_COMPILERS="${AVAILABLE_COMPILERS} gcc-sparc64-linux-gnu libc6-dev-sparc64-cross"; \ + fi && \ + DEBIAN_FRONTEND=noninteractive eatmydata \ + apt install -y --no-install-recommends \ + ${AVAILABLE_COMPILERS} && \ dpkg-query --showformat '${Package}_${Version}_${Architecture}\n' --show > /packages.txt diff --git a/tests/docker/dockerfiles/debian-amd64-cross.docker b/tests/docker/dockerfiles/debian-amd64-cross.docker index 081f3e00f7bc6..7f4674400df57 100644 --- a/tests/docker/dockerfiles/debian-amd64-cross.docker +++ b/tests/docker/dockerfiles/debian-amd64-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch x86_64 debian-12 qemu +# $ lcitool dockerfile --layers all --cross-arch x86_64 debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,12 +45,15 @@ RUN export 
DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -67,6 +70,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -81,7 +86,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-x86-64-linux-gnu \ libaio-dev:amd64 \ - libasan6:amd64 \ + libasan8:amd64 \ libasound2-dev:amd64 \ libattr1-dev:amd64 \ libbpf-dev:amd64 \ @@ -137,6 +142,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:amd64 \ libspice-server-dev:amd64 \ libssh-dev:amd64 \ + libstd-rust-dev:amd64 \ libsystemd-dev:amd64 \ libtasn1-6-dev:amd64 \ libubsan1:amd64 \ @@ -182,3 +188,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-arm64-cross.docker b/tests/docker/dockerfiles/debian-arm64-cross.docker index 91c555a36e977..c7cd54ee5caa3 100644 --- a/tests/docker/dockerfiles/debian-arm64-cross.docker +++ b/tests/docker/dockerfiles/debian-arm64-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch aarch64 debian-12 qemu +# $ lcitool dockerfile --layers all --cross-arch aarch64 debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ 
locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,12 +45,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -67,6 +70,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -81,7 +86,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-aarch64-linux-gnu \ libaio-dev:arm64 \ - libasan6:arm64 \ + libasan8:arm64 \ libasound2-dev:arm64 \ libattr1-dev:arm64 \ libbpf-dev:arm64 \ @@ -136,6 +141,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:arm64 \ libspice-server-dev:arm64 \ libssh-dev:arm64 \ + libstd-rust-dev:arm64 \ libsystemd-dev:arm64 \ libtasn1-6-dev:arm64 \ libubsan1:arm64 \ @@ -181,3 +187,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-armhf-cross.docker b/tests/docker/dockerfiles/debian-armhf-cross.docker index f0e2efcda099f..627d41c6dee3c 100644 --- a/tests/docker/dockerfiles/debian-armhf-cross.docker +++ b/tests/docker/dockerfiles/debian-armhf-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch armv7l debian-12 qemu +# $ lcitool dockerfile --layers all --cross-arch armv7l debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -30,11 +30,11 @@ RUN export 
DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,12 +45,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -67,6 +70,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -81,7 +86,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-arm-linux-gnueabihf \ libaio-dev:armhf \ - libasan6:armhf \ + libasan8:armhf \ libasound2-dev:armhf \ libattr1-dev:armhf \ libbpf-dev:armhf \ @@ -103,7 +108,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libgbm-dev:armhf \ libgcrypt20-dev:armhf \ libglib2.0-dev:armhf \ - libglusterfs-dev:armhf \ libgnutls28-dev:armhf \ libgtk-3-dev:armhf \ libgtk-vnc-2.0-dev:armhf \ @@ -123,7 +127,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libpixman-1-dev:armhf \ libpng-dev:armhf \ libpulse-dev:armhf \ - librbd-dev:armhf \ librdmacm-dev:armhf \ libsasl2-dev:armhf \ libsdl2-dev:armhf \ @@ -136,6 +139,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:armhf \ libspice-server-dev:armhf \ libssh-dev:armhf \ + libstd-rust-dev:armhf \ libsystemd-dev:armhf \ libtasn1-6-dev:armhf \ libubsan1:armhf \ @@ -147,7 +151,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libvirglrenderer-dev:armhf \ libvte-2.91-dev:armhf \ libxdp-dev:armhf \ - libxen-dev:armhf \ libzstd-dev:armhf \ nettle-dev:armhf \ systemtap-sdt-dev:armhf \ @@ -181,3 +184,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id 
${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-i686-cross.docker b/tests/docker/dockerfiles/debian-i686-cross.docker index 025beb1ce25b7..2998764065f6c 100644 --- a/tests/docker/dockerfiles/debian-i686-cross.docker +++ b/tests/docker/dockerfiles/debian-i686-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch i686 debian-12 qemu +# $ lcitool dockerfile --layers all --cross-arch i686 debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,12 +45,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -67,6 +70,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -81,7 +86,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-i686-linux-gnu \ libaio-dev:i386 \ - libasan6:i386 \ + libasan8:i386 \ libasound2-dev:i386 \ libattr1-dev:i386 \ libbpf-dev:i386 \ @@ -103,7 +108,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libgbm-dev:i386 \ libgcrypt20-dev:i386 \ libglib2.0-dev:i386 \ - libglusterfs-dev:i386 \ libgnutls28-dev:i386 \ libgtk-3-dev:i386 \ libgtk-vnc-2.0-dev:i386 \ @@ -123,7 
+127,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libpixman-1-dev:i386 \ libpng-dev:i386 \ libpulse-dev:i386 \ - librbd-dev:i386 \ librdmacm-dev:i386 \ libsasl2-dev:i386 \ libsdl2-dev:i386 \ @@ -136,6 +139,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:i386 \ libspice-server-dev:i386 \ libssh-dev:i386 \ + libstd-rust-dev:i386 \ libsystemd-dev:i386 \ libtasn1-6-dev:i386 \ libubsan1:i386 \ @@ -174,9 +178,11 @@ ENV ABI "i686-linux-gnu" ENV MESON_OPTS "--cross-file=i686-linux-gnu" ENV RUST_TARGET "i686-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=i686-linux-gnu- -ENV DEF_TARGET_LIST x86_64-softmmu,x86_64-linux-user,i386-softmmu,i386-linux-user +ENV DEF_TARGET_LIST i386-softmmu,i386-linux-user # As a final step configure the user (if env is defined) ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-mips64el-cross.docker b/tests/docker/dockerfiles/debian-mips64el-cross.docker index 4a941dd870eec..6e88777f76d01 100644 --- a/tests/docker/dockerfiles/debian-mips64el-cross.docker +++ b/tests/docker/dockerfiles/debian-mips64el-cross.docker @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,9 +45,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ rustc-web \ @@ -67,6 +69,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -135,6 +139,7 @@ RUN export 
DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:mips64el \ libspice-server-dev:mips64el \ libssh-dev:mips64el \ + libstd-rust-dev:mips64el \ libsystemd-dev:mips64el \ libtasn1-6-dev:mips64el \ libudev-dev:mips64el \ @@ -178,3 +183,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-mipsel-cross.docker b/tests/docker/dockerfiles/debian-mipsel-cross.docker index 4d3e5d711bdc5..5f4e3fa963620 100644 --- a/tests/docker/dockerfiles/debian-mipsel-cross.docker +++ b/tests/docker/dockerfiles/debian-mipsel-cross.docker @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,9 +45,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ rustc-web \ @@ -67,6 +69,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -135,6 +139,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:mipsel \ libspice-server-dev:mipsel \ libssh-dev:mipsel \ + libstd-rust-dev:mipsel \ libsystemd-dev:mipsel \ libtasn1-6-dev:mipsel \ libudev-dev:mipsel \ @@ -178,3 +183,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-ppc64el-cross.docker b/tests/docker/dockerfiles/debian-ppc64el-cross.docker index 22b4457ba996a..dfa690616d1f9 100644 --- a/tests/docker/dockerfiles/debian-ppc64el-cross.docker +++ 
b/tests/docker/dockerfiles/debian-ppc64el-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch ppc64le debian-12 qemu +# $ lcitool dockerfile --layers all --cross-arch ppc64le debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,12 +45,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -67,6 +70,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -81,7 +86,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-powerpc64le-linux-gnu \ libaio-dev:ppc64el \ - libasan6:ppc64el \ + libasan8:ppc64el \ libasound2-dev:ppc64el \ libattr1-dev:ppc64el \ libbpf-dev:ppc64el \ @@ -136,6 +141,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev:ppc64el \ libspice-server-dev:ppc64el \ libssh-dev:ppc64el \ + libstd-rust-dev:ppc64el \ libsystemd-dev:ppc64el \ libtasn1-6-dev:ppc64el \ libubsan1:ppc64el \ @@ -180,3 +186,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-riscv64-cross.docker 
b/tests/docker/dockerfiles/debian-riscv64-cross.docker index b0386cd3a1fc9..09b2953f32ce0 100644 --- a/tests/docker/dockerfiles/debian-riscv64-cross.docker +++ b/tests/docker/dockerfiles/debian-riscv64-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch riscv64 debian-13 qemu-minimal +# $ lcitool dockerfile --layers all --cross-arch riscv64 debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:trixie-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -13,29 +13,65 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ bash \ bc \ + bindgen \ bison \ + bsdextrautils \ + bzip2 \ ca-certificates \ ccache \ + dbus \ + debianutils \ + diffutils \ + exuberant-ctags \ findutils \ flex \ gcc \ + gcovr \ + gettext \ git \ + hostname \ + libclang-rt-dev \ libglib2.0-dev \ + llvm \ locales \ make \ - meson \ + mtools \ + ncat \ ninja-build \ + openssh-client \ pkgconf \ python3 \ + python3-numpy \ + python3-opencv \ + python3-pillow \ + python3-pip \ + python3-setuptools \ + python3-sphinx \ + python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ + python3-yaml \ + rpm2cpio \ + rustc \ sed \ - tar && \ + socat \ + sparse \ + swtpm \ + tar \ + tesseract-ocr \ + tesseract-ocr-eng \ + vulkan-tools \ + xorriso \ + zstd && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get autoclean -y && \ sed -Ei 's,^# (en_US\.UTF-8 .*)$,\1,' /etc/locale.gen && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -52,11 +88,78 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y dpkg-dev && \ eatmydata apt-get install --no-install-recommends -y 
\ gcc-riscv64-linux-gnu \ + libaio-dev:riscv64 \ + libasan8:riscv64 \ + libasound2-dev:riscv64 \ + libattr1-dev:riscv64 \ + libbpf-dev:riscv64 \ + libbrlapi-dev:riscv64 \ + libbz2-dev:riscv64 \ libc6-dev:riscv64 \ + libcacard-dev:riscv64 \ + libcap-ng-dev:riscv64 \ + libcapstone-dev:riscv64 \ + libcbor-dev:riscv64 \ + libcmocka-dev:riscv64 \ + libcurl4-gnutls-dev:riscv64 \ + libdaxctl-dev:riscv64 \ + libdrm-dev:riscv64 \ + libepoxy-dev:riscv64 \ libfdt-dev:riscv64 \ libffi-dev:riscv64 \ + libfuse3-dev:riscv64 \ + libgbm-dev:riscv64 \ + libgcrypt20-dev:riscv64 \ libglib2.0-dev:riscv64 \ - libpixman-1-dev:riscv64 && \ + libglusterfs-dev:riscv64 \ + libgnutls28-dev:riscv64 \ + libgtk-3-dev:riscv64 \ + libgtk-vnc-2.0-dev:riscv64 \ + libibverbs-dev:riscv64 \ + libiscsi-dev:riscv64 \ + libjemalloc-dev:riscv64 \ + libjpeg62-turbo-dev:riscv64 \ + libjson-c-dev:riscv64 \ + liblttng-ust-dev:riscv64 \ + liblzo2-dev:riscv64 \ + libncursesw5-dev:riscv64 \ + libnfs-dev:riscv64 \ + libnuma-dev:riscv64 \ + libpam0g-dev:riscv64 \ + libpcre2-dev:riscv64 \ + libpipewire-0.3-dev:riscv64 \ + libpixman-1-dev:riscv64 \ + libpng-dev:riscv64 \ + libpulse-dev:riscv64 \ + librbd-dev:riscv64 \ + librdmacm-dev:riscv64 \ + libsasl2-dev:riscv64 \ + libsdl2-dev:riscv64 \ + libsdl2-image-dev:riscv64 \ + libseccomp-dev:riscv64 \ + libselinux1-dev:riscv64 \ + libslirp-dev:riscv64 \ + libsnappy-dev:riscv64 \ + libsndio-dev:riscv64 \ + libspice-protocol-dev:riscv64 \ + libspice-server-dev:riscv64 \ + libssh-dev:riscv64 \ + libstd-rust-dev:riscv64 \ + libsystemd-dev:riscv64 \ + libtasn1-6-dev:riscv64 \ + libubsan1:riscv64 \ + libudev-dev:riscv64 \ + liburing-dev:riscv64 \ + libusb-1.0-0-dev:riscv64 \ + libusbredirhost-dev:riscv64 \ + libvdeplug-dev:riscv64 \ + libvirglrenderer-dev:riscv64 \ + libvte-2.91-dev:riscv64 \ + libxdp-dev:riscv64 \ + libzstd-dev:riscv64 \ + nettle-dev:riscv64 \ + systemtap-sdt-dev:riscv64 \ + zlib1g-dev:riscv64 && \ eatmydata apt-get autoremove -y && \ eatmydata apt-get 
autoclean -y && \ mkdir -p /usr/local/share/meson/cross && \ @@ -78,6 +181,7 @@ endian = 'little'\n" > /usr/local/share/meson/cross/riscv64-linux-gnu && \ ENV ABI "riscv64-linux-gnu" ENV MESON_OPTS "--cross-file=riscv64-linux-gnu" +ENV RUST_TARGET "riscv64gc-unknown-linux-gnu" ENV QEMU_CONFIGURE_OPTS --cross-prefix=riscv64-linux-gnu- ENV DEF_TARGET_LIST riscv64-softmmu,riscv64-linux-user # As a final step configure the user (if env is defined) @@ -85,3 +189,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian-s390x-cross.docker b/tests/docker/dockerfiles/debian-s390x-cross.docker index 13ec52c8ad0b7..09a78c15baf99 100644 --- a/tests/docker/dockerfiles/debian-s390x-cross.docker +++ b/tests/docker/dockerfiles/debian-s390x-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch s390x debian-12 qemu +# $ lcitool dockerfile --layers all --cross-arch s390x debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -30,11 +30,11 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ gettext \ git \ hostname \ + libclang-rt-dev \ libglib2.0-dev \ llvm \ locales \ make \ - meson \ mtools \ ncat \ ninja-build \ @@ -45,12 +45,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -67,6 +70,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ dpkg-reconfigure locales && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG 
"en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -81,7 +86,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ eatmydata apt-get install --no-install-recommends -y \ gcc-s390x-linux-gnu \ libaio-dev:s390x \ - libasan6:s390x \ + libasan8:s390x \ libasound2-dev:s390x \ libattr1-dev:s390x \ libbpf-dev:s390x \ @@ -135,6 +140,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libsndio-dev:s390x \ libspice-protocol-dev:s390x \ libssh-dev:s390x \ + libstd-rust-dev:s390x \ libsystemd-dev:s390x \ libtasn1-6-dev:s390x \ libubsan1:s390x \ @@ -179,3 +185,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/debian.docker b/tests/docker/dockerfiles/debian.docker index 0a57c1a1d3753..8dd893be4b0c4 100644 --- a/tests/docker/dockerfiles/debian.docker +++ b/tests/docker/dockerfiles/debian.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all debian-12 qemu +# $ lcitool dockerfile --layers all debian-13 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM docker.io/library/debian:12-slim +FROM docker.io/library/debian:13-slim RUN export DEBIAN_FRONTEND=noninteractive && \ apt-get update && \ @@ -32,7 +32,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ git \ hostname \ libaio-dev \ - libasan6 \ + libasan8 \ libasound2-dev \ libattr1-dev \ libbpf-dev \ @@ -43,6 +43,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcap-ng-dev \ libcapstone-dev \ libcbor-dev \ + libclang-rt-dev \ libcmocka-dev \ libcurl4-gnutls-dev \ libdaxctl-dev \ @@ -88,6 +89,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev \ libspice-server-dev \ libssh-dev \ + libstd-rust-dev \ libsystemd-dev \ libtasn1-6-dev \ libubsan1 \ @@ -104,7 +106,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ llvm \ locales \ make \ - meson \ mtools \ multipath-tools \ ncat \ @@ -117,12 +118,15 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ 
python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ + python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ - rustc-web \ + rustc \ sed \ socat \ sparse \ @@ -146,6 +150,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -169,3 +175,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/emsdk-wasm32-cross.docker b/tests/docker/dockerfiles/emsdk-wasm32-cross.docker index 60a7d02f5613e..6b1642a207c74 100644 --- a/tests/docker/dockerfiles/emsdk-wasm32-cross.docker +++ b/tests/docker/dockerfiles/emsdk-wasm32-cross.docker @@ -8,7 +8,7 @@ ARG PIXMAN_VERSION=0.44.2 ARG FFI_VERSION=v3.4.7 ARG MESON_VERSION=1.5.0 -FROM emscripten/emsdk:$EMSDK_VERSION_QEMU AS build-base +FROM docker.io/emscripten/emsdk:$EMSDK_VERSION_QEMU AS build-base ARG MESON_VERSION ENV TARGET=/builddeps/target ENV CPATH="$TARGET/include" diff --git a/tests/docker/dockerfiles/fedora-rust-nightly.docker b/tests/docker/dockerfiles/fedora-rust-nightly.docker index 4a033309b38f5..7d31c9f406062 100644 --- a/tests/docker/dockerfiles/fedora-rust-nightly.docker +++ b/tests/docker/dockerfiles/fedora-rust-nightly.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all fedora-40 qemu +# $ lcitool dockerfile --layers all fedora-41 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM registry.fedoraproject.org/fedora:40 +FROM registry.fedoraproject.org/fedora:41 RUN dnf install -y nosync && \ printf '#!/bin/sh\n\ @@ -32,6 +32,7 @@ exec "$@"\n' > /usr/bin/nosync && \ capstone-devel \ ccache \ clang \ + compiler-rt \ ctags \ 
cyrus-sasl-devel \ daxctl-devel \ @@ -91,7 +92,6 @@ exec "$@"\n' > /usr/bin/nosync && \ lzo-devel \ make \ mesa-libgbm-devel \ - meson \ mtools \ ncurses-devel \ nettle-devel \ @@ -100,7 +100,7 @@ exec "$@"\n' > /usr/bin/nosync && \ numactl-devel \ openssh-clients \ pam-devel \ - pcre-static \ + pcre2-static \ pipewire-devel \ pixman-devel \ pkgconfig \ @@ -111,11 +111,13 @@ exec "$@"\n' > /usr/bin/nosync && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx_rtd_theme \ - python3-zombie-imp \ + python3-wheel \ rdma-core-devel \ rust \ + rust-std-static \ sed \ snappy-devel \ socat \ @@ -124,7 +126,7 @@ exec "$@"\n' > /usr/bin/nosync && \ spice-server-devel \ swtpm \ systemd-devel \ - systemtap-sdt-devel \ + systemtap-sdt-dtrace \ tar \ tesseract \ tesseract-langpack-eng \ @@ -148,6 +150,8 @@ exec "$@"\n' > /usr/bin/nosync && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -181,3 +185,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/fedora-win64-cross.docker b/tests/docker/dockerfiles/fedora-win64-cross.docker index a950344402704..c76a70c368412 100644 --- a/tests/docker/dockerfiles/fedora-win64-cross.docker +++ b/tests/docker/dockerfiles/fedora-win64-cross.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all --cross-arch mingw64 fedora-40 qemu,qemu-win-installer +# $ lcitool dockerfile --layers all --cross-arch mingw64 fedora-41 qemu,qemu-win-installer # # https://gitlab.com/libvirt/libvirt-ci -FROM registry.fedoraproject.org/fedora:40 +FROM registry.fedoraproject.org/fedora:41 RUN dnf install -y nosync && \ printf '#!/bin/sh\n\ @@ -25,6 
+25,7 @@ exec "$@"\n' > /usr/bin/nosync && \ bzip2 \ ca-certificates \ ccache \ + compiler-rt \ ctags \ dbus-daemon \ diffutils \ @@ -38,7 +39,6 @@ exec "$@"\n' > /usr/bin/nosync && \ hostname \ llvm \ make \ - meson \ mtools \ ninja-build \ nmap-ncat \ @@ -49,9 +49,10 @@ exec "$@"\n' > /usr/bin/nosync && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx_rtd_theme \ - python3-zombie-imp \ + python3-wheel \ rust \ sed \ socat \ @@ -69,6 +70,8 @@ exec "$@"\n' > /usr/bin/nosync && \ nosync dnf clean all -y && \ rm -f /usr/lib*/python3*/EXTERNALLY-MANAGED +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -90,13 +93,15 @@ RUN nosync dnf install -y \ mingw64-gtk-vnc2 \ mingw64-gtk3 \ mingw64-libepoxy \ + mingw64-libfdt \ mingw64-libgcrypt \ mingw64-libjpeg-turbo \ mingw64-libpng \ mingw64-libtasn1 \ mingw64-nettle \ mingw64-pixman \ - mingw64-pkg-config && \ + mingw64-pkg-config \ + rust-std-static-x86_64-pc-windows-gnu && \ nosync dnf clean all -y && \ rpm -qa | sort > /packages.txt && \ mkdir -p /usr/libexec/ccache-wrappers && \ diff --git a/tests/docker/dockerfiles/fedora.docker b/tests/docker/dockerfiles/fedora.docker index 014e3ccf17dde..891a740fcbc4d 100644 --- a/tests/docker/dockerfiles/fedora.docker +++ b/tests/docker/dockerfiles/fedora.docker @@ -1,10 +1,10 @@ # THIS FILE WAS AUTO-GENERATED # -# $ lcitool dockerfile --layers all fedora-40 qemu +# $ lcitool dockerfile --layers all fedora-41 qemu # # https://gitlab.com/libvirt/libvirt-ci -FROM registry.fedoraproject.org/fedora:40 +FROM registry.fedoraproject.org/fedora:41 RUN dnf install -y nosync && \ printf '#!/bin/sh\n\ @@ -32,6 +32,7 @@ exec "$@"\n' > /usr/bin/nosync && \ capstone-devel \ ccache \ clang \ + compiler-rt \ ctags \ cyrus-sasl-devel \ daxctl-devel \ @@ -91,7 +92,6 @@ exec "$@"\n' > /usr/bin/nosync && \ lzo-devel \ make \ mesa-libgbm-devel \ - meson 
\ mtools \ ncurses-devel \ nettle-devel \ @@ -100,7 +100,7 @@ exec "$@"\n' > /usr/bin/nosync && \ numactl-devel \ openssh-clients \ pam-devel \ - pcre-static \ + pcre2-static \ pipewire-devel \ pixman-devel \ pkgconfig \ @@ -111,11 +111,13 @@ exec "$@"\n' > /usr/bin/nosync && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx_rtd_theme \ - python3-zombie-imp \ + python3-wheel \ rdma-core-devel \ rust \ + rust-std-static \ sed \ snappy-devel \ socat \ @@ -124,7 +126,7 @@ exec "$@"\n' > /usr/bin/nosync && \ spice-server-devel \ swtpm \ systemd-devel \ - systemtap-sdt-devel \ + systemtap-sdt-dtrace \ tar \ tesseract \ tesseract-langpack-eng \ @@ -148,6 +150,8 @@ exec "$@"\n' > /usr/bin/nosync && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" @@ -158,3 +162,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/opensuse-leap.docker b/tests/docker/dockerfiles/opensuse-leap.docker index e90225dc23518..75e1747780733 100644 --- a/tests/docker/dockerfiles/opensuse-leap.docker +++ b/tests/docker/dockerfiles/opensuse-leap.docker @@ -19,6 +19,7 @@ RUN zypper update -y && \ ca-certificates \ ccache \ clang \ + clang-devel \ ctags \ cyrus-sasl-devel \ dbus-1 \ @@ -89,7 +90,7 @@ RUN zypper update -y && \ ninja \ openssh \ pam-devel \ - pcre-devel-static \ + pcre2-devel-static \ pipewire-devel \ pkgconfig \ python311 \ @@ -132,7 +133,7 @@ RUN zypper update -y && \ RUN /usr/bin/pip3.11 install \ PyYAML \ - meson==1.5.0 \ + meson==1.8.1 \ pillow \ sphinx \ sphinx-rtd-theme @@ -147,3 +148,5 @@ ARG USER ARG UID RUN if [ "${USER}" ]; then \ id ${USER} 2>/dev/null || useradd -u ${UID} -U ${USER}; fi + +ENV 
ENABLE_RUST 1 diff --git a/tests/docker/dockerfiles/ubuntu2204.docker b/tests/docker/dockerfiles/ubuntu2204.docker index 28a6f932430cd..b393db55a8cd0 100644 --- a/tests/docker/dockerfiles/ubuntu2204.docker +++ b/tests/docker/dockerfiles/ubuntu2204.docker @@ -42,6 +42,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libcap-ng-dev \ libcapstone-dev \ libcbor-dev \ + libclang-dev \ libcmocka-dev \ libcurl4-gnutls-dev \ libdaxctl-dev \ @@ -87,6 +88,7 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ libspice-protocol-dev \ libspice-server-dev \ libssh-dev \ + libstd-rust-dev \ libsystemd-dev \ libtasn1-6-dev \ libubsan1 \ @@ -102,7 +104,6 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ llvm \ locales \ make \ - meson \ mtools \ multipath-tools \ ncat \ @@ -115,10 +116,12 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ python3-opencv \ python3-pillow \ python3-pip \ + python3-setuptools \ python3-sphinx \ python3-sphinx-rtd-theme \ python3-tomli \ python3-venv \ + python3-wheel \ python3-yaml \ rpm2cpio \ rustc-1.77 \ @@ -145,6 +148,8 @@ RUN export DEBIAN_FRONTEND=noninteractive && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/clang && \ ln -s /usr/bin/ccache /usr/libexec/ccache-wrappers/gcc +RUN /usr/bin/pip3 install meson==1.8.1 + ENV CCACHE_WRAPPERSDIR "/usr/libexec/ccache-wrappers" ENV LANG "en_US.UTF-8" ENV MAKE "/usr/bin/make" diff --git a/tests/functional/aarch64/meson.build b/tests/functional/aarch64/meson.build new file mode 100644 index 0000000000000..5ad52f93e1d74 --- /dev/null +++ b/tests/functional/aarch64/meson.build @@ -0,0 +1,49 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_aarch64_timeouts = { + 'aspeed_ast2700' : 600, + 'aspeed_ast2700fc' : 600, + 'device_passthrough' : 720, + 'imx8mp_evk' : 240, + 'raspi4' : 480, + 'reverse_debug' : 180, + 'rme_virt' : 1200, + 'rme_sbsaref' : 1200, + 'sbsaref_alpine' : 1200, + 'sbsaref_freebsd' : 720, + 'smmu' : 720, + 'tuxrun' : 240, + 'virt' : 360, + 'virt_gpu' : 480, +} + 
+tests_aarch64_system_quick = [ + 'migration', + 'vmstate', +] + +tests_aarch64_system_thorough = [ + 'aspeed_ast2700', + 'aspeed_ast2700fc', + 'device_passthrough', + 'hotplug_pci', + 'imx8mp_evk', + 'kvm', + 'multiprocess', + 'raspi3', + 'raspi4', + 'replay', + 'reverse_debug', + 'rme_virt', + 'rme_sbsaref', + 'sbsaref', + 'sbsaref_alpine', + 'sbsaref_freebsd', + 'smmu', + 'tcg_plugins', + 'tuxrun', + 'virt', + 'virt_gpu', + 'xen', + 'xlnx_versal', +] diff --git a/tests/functional/test_aarch64_aspeed_ast2700.py b/tests/functional/aarch64/test_aspeed_ast2700.py similarity index 75% rename from tests/functional/test_aarch64_aspeed_ast2700.py rename to tests/functional/aarch64/test_aspeed_ast2700.py index d02dc7991c1a2..0ced1a25021dc 100755 --- a/tests/functional/test_aarch64_aspeed_ast2700.py +++ b/tests/functional/aarch64/test_aspeed_ast2700.py @@ -37,22 +37,21 @@ def verify_vbootrom_firmware_flow(self): wait_for_console_pattern(self, 'done') wait_for_console_pattern(self, 'Jumping to BL31 (Trusted Firmware-A)') - def verify_openbmc_boot_and_login(self, name): + def verify_openbmc_boot_start(self): wait_for_console_pattern(self, 'U-Boot 2023.10') wait_for_console_pattern(self, '## Loading kernel from FIT Image') - wait_for_console_pattern(self, 'Starting kernel ...') + wait_for_console_pattern(self, 'Linux version ') + + def verify_openbmc_boot_and_login(self, name): + self.verify_openbmc_boot_start() wait_for_console_pattern(self, f'{name} login:') exec_command_and_wait_for_pattern(self, 'root', 'Password:') exec_command_and_wait_for_pattern(self, '0penBmc', f'root@{name}:~#') - ASSET_SDK_V906_AST2700 = Asset( - 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-a0-default-obmc.tar.gz', - '7247b6f19dbfb700686f8d9f723ac23f3eb229226c0589cb9b06b80d1b61f3cb') - - ASSET_SDK_V906_AST2700A1 = Asset( - 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-default-obmc.tar.gz', - 
'f1d53e0be8a404ecce3e105f72bc50fa4e090ad13160ffa91b10a6e0233a9dc6') + ASSET_SDK_V908_AST2700A1 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.08/ast2700-default-obmc.tar.gz', + 'eac3dc409b7ea3cd4b03d4792d3cebd469792ad893cb51e1d15f0fc20bd1e2cd') def do_ast2700_i2c_test(self): exec_command_and_wait_for_pattern(self, @@ -65,6 +64,19 @@ def do_ast2700_i2c_test(self): exec_command_and_wait_for_pattern(self, 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '18000') + def do_ast2700_pcie_test(self): + exec_command_and_wait_for_pattern(self, + 'lspci -s 0002:00:00.0', + '0002:00:00.0 PCI bridge: ' + 'ASPEED Technology, Inc. AST1150 PCI-to-PCI Bridge') + exec_command_and_wait_for_pattern(self, + 'lspci -s 0002:01:00.0', + '0002:01:00.0 Ethernet controller: ' + 'Intel Corporation 82574L Gigabit Network Connection') + exec_command_and_wait_for_pattern(self, + 'ip addr show dev eth2', + 'inet 10.0.2.15/24') + def start_ast2700_test(self, name): num_cpu = 4 uboot_size = os.path.getsize(self.scratch_file(name, @@ -111,30 +123,28 @@ def start_ast2700_test_vbootrom(self, name): self.do_test_aarch64_aspeed_sdk_start( self.scratch_file(name, 'image-bmc')) - def test_aarch64_ast2700_evb_sdk_v09_06(self): - self.set_machine('ast2700-evb') - - self.archive_extract(self.ASSET_SDK_V906_AST2700) - self.start_ast2700_test('ast2700-a0-default') - self.verify_openbmc_boot_and_login('ast2700-a0-default') - self.do_ast2700_i2c_test() - - def test_aarch64_ast2700a1_evb_sdk_v09_06(self): + def test_aarch64_ast2700a1_evb_sdk_v09_08(self): self.set_machine('ast2700a1-evb') + self.require_netdev('user') - self.archive_extract(self.ASSET_SDK_V906_AST2700A1) + self.archive_extract(self.ASSET_SDK_V908_AST2700A1) + self.vm.add_args('-device', 'e1000e,netdev=net1,bus=pcie.2') + self.vm.add_args('-netdev', 'user,id=net1') self.start_ast2700_test('ast2700-default') self.verify_openbmc_boot_and_login('ast2700-default') self.do_ast2700_i2c_test() + 
self.do_ast2700_pcie_test() - def test_aarch64_ast2700a1_evb_sdk_vbootrom_v09_06(self): + def test_aarch64_ast2700a1_evb_sdk_vbootrom_v09_08(self): self.set_machine('ast2700a1-evb') + self.require_netdev('user') - self.archive_extract(self.ASSET_SDK_V906_AST2700A1) + self.archive_extract(self.ASSET_SDK_V908_AST2700A1) + self.vm.add_args('-device', 'e1000e,netdev=net1,bus=pcie.2') + self.vm.add_args('-netdev', 'user,id=net1') self.start_ast2700_test_vbootrom('ast2700-default') self.verify_vbootrom_firmware_flow() - self.verify_openbmc_boot_and_login('ast2700-default') - self.do_ast2700_i2c_test() + self.verify_openbmc_boot_start() if __name__ == '__main__': QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_aspeed_ast2700fc.py b/tests/functional/aarch64/test_aspeed_ast2700fc.py similarity index 73% rename from tests/functional/test_aarch64_aspeed_ast2700fc.py rename to tests/functional/aarch64/test_aspeed_ast2700fc.py index b85370e182ea0..8dbc8f234f9c5 100755 --- a/tests/functional/test_aarch64_aspeed_ast2700fc.py +++ b/tests/functional/aarch64/test_aspeed_ast2700fc.py @@ -20,6 +20,8 @@ def do_test_aarch64_aspeed_sdk_start(self, image): self.vm.set_console() self.vm.add_args('-device', 'tmp105,bus=aspeed.i2c.bus.1,address=0x4d,id=tmp-test') + self.vm.add_args('-device', 'e1000e,netdev=net1,bus=pcie.2') + self.vm.add_args('-netdev', 'user,id=net1') self.vm.add_args('-drive', 'file=' + image + ',if=mtd,format=raw', '-net', 'nic', '-net', 'user', '-snapshot') @@ -34,9 +36,20 @@ def verify_openbmc_boot_and_login(self, name): exec_command_and_wait_for_pattern(self, 'root', 'Password:') exec_command_and_wait_for_pattern(self, '0penBmc', f'root@{name}:~#') - ASSET_SDK_V906_AST2700 = Asset( - 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2700-default-obmc.tar.gz', - 'f1d53e0be8a404ecce3e105f72bc50fa4e090ad13160ffa91b10a6e0233a9dc6') + def load_ast2700fc_coprocessor(self, name): + load_elf_list = { + 'ssp': self.scratch_file(name, 
'zephyr-aspeed-ssp.elf'), + 'tsp': self.scratch_file(name, 'zephyr-aspeed-tsp.elf') + } + + for cpu_num, key in enumerate(load_elf_list, start=4): + file = load_elf_list[key] + self.vm.add_args('-device', + f'loader,file={file},cpu-num={cpu_num}') + + ASSET_SDK_V908_AST2700 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.08/ast2700-default-obmc.tar.gz', + 'eac3dc409b7ea3cd4b03d4792d3cebd469792ad893cb51e1d15f0fc20bd1e2cd') def do_ast2700_i2c_test(self): exec_command_and_wait_for_pattern(self, @@ -49,12 +62,25 @@ def do_ast2700_i2c_test(self): exec_command_and_wait_for_pattern(self, 'cat /sys/bus/i2c/devices/1-004d/hwmon/hwmon*/temp1_input', '18000') + def do_ast2700_pcie_test(self): + exec_command_and_wait_for_pattern(self, + 'lspci -s 0002:00:00.0', + '0002:00:00.0 PCI bridge: ' + 'ASPEED Technology, Inc. AST1150 PCI-to-PCI Bridge') + exec_command_and_wait_for_pattern(self, + 'lspci -s 0002:01:00.0', + '0002:01:00.0 Ethernet controller: ' + 'Intel Corporation 82574L Gigabit Network Connection') + exec_command_and_wait_for_pattern(self, + 'ip addr show dev eth2', + 'inet 10.0.2.15/24') + def do_ast2700fc_ssp_test(self): self.vm.shutdown() self.vm.set_console(console_index=1) self.vm.launch() - exec_command_and_wait_for_pattern(self, '\012', 'ssp:~$') + exec_command_and_wait_for_pattern(self, '\012', 'ssp_tsp:~$') exec_command_and_wait_for_pattern(self, 'version', 'Zephyr version 3.7.1') exec_command_and_wait_for_pattern(self, 'md 72c02000 1', @@ -108,26 +134,34 @@ def start_ast2700fc_test(self, name): self.vm.add_args('-device', f'loader,addr=0x430000000,cpu-num={i}') - load_elf_list = { - 'ssp': self.scratch_file(name, 'zephyr-aspeed-ssp.elf'), - 'tsp': self.scratch_file(name, 'zephyr-aspeed-tsp.elf') - } - - for cpu_num, key in enumerate(load_elf_list, start=4): - file = load_elf_list[key] - self.vm.add_args('-device', - f'loader,file={file},cpu-num={cpu_num}') + self.load_ast2700fc_coprocessor(name) + 
self.do_test_aarch64_aspeed_sdk_start( + self.scratch_file(name, 'image-bmc')) + def start_ast2700fc_test_vbootrom(self, name): + self.vm.add_args('-bios', 'ast27x0_bootrom.bin') + self.load_ast2700fc_coprocessor(name) self.do_test_aarch64_aspeed_sdk_start( self.scratch_file(name, 'image-bmc')) - def test_aarch64_ast2700fc_sdk_v09_06(self): + def test_aarch64_ast2700fc_sdk_v09_08(self): self.set_machine('ast2700fc') + self.require_netdev('user') - self.archive_extract(self.ASSET_SDK_V906_AST2700) + self.archive_extract(self.ASSET_SDK_V908_AST2700) self.start_ast2700fc_test('ast2700-default') self.verify_openbmc_boot_and_login('ast2700-default') self.do_ast2700_i2c_test() + self.do_ast2700_pcie_test() + self.do_ast2700fc_ssp_test() + self.do_ast2700fc_tsp_test() + + def test_aarch64_ast2700fc_sdk_vbootrom_v09_08(self): + self.set_machine('ast2700fc') + + self.archive_extract(self.ASSET_SDK_V908_AST2700) + self.start_ast2700fc_test_vbootrom('ast2700-default') + self.verify_openbmc_boot_and_login('ast2700-default') self.do_ast2700fc_ssp_test() self.do_ast2700fc_tsp_test() diff --git a/tests/functional/test_aarch64_device_passthrough.py b/tests/functional/aarch64/test_device_passthrough.py similarity index 85% rename from tests/functional/test_aarch64_device_passthrough.py rename to tests/functional/aarch64/test_device_passthrough.py index 1f3f158a9fff4..05a3f52d5e217 100755 --- a/tests/functional/test_aarch64_device_passthrough.py +++ b/tests/functional/aarch64/test_device_passthrough.py @@ -9,7 +9,7 @@ # # SPDX-License-Identifier: GPL-2.0-or-later -import os +from os.path import join from qemu_test import QemuSystemTest, Asset from qemu_test import exec_command, wait_for_console_pattern @@ -77,15 +77,16 @@ class Aarch64DevicePassthrough(QemuSystemTest): - # https://github.com/pbo-linaro/qemu-linux-stack + # https://github.com/pbo-linaro/qemu-linux-stack/tree/device_passthrough + # $ ./build.sh && ./archive_artifacts.sh out.tar.xz # # Linux kernel is compiled with 
defconfig + # IOMMUFD + VFIO_DEVICE_CDEV + ARM_SMMU_V3_IOMMUFD # https://docs.kernel.org/driver-api/vfio.html#vfio-device-cde ASSET_DEVICE_PASSTHROUGH_STACK = Asset( - ('https://fileserver.linaro.org/s/fx5DXxBYme8dw2G/' - 'download/device_passthrough.tar.xz'), - '812750b664d61c2986f2b149939ae28cafbd60d53e9c7e4b16e97143845e196d') + ('https://github.com/pbo-linaro/qemu-linux-stack/' + 'releases/download/build/device_passthrough-a9612a2.tar.xz'), + 'f7d2f70912e7231986e6e293e1a2c4786dd02bec113a7acb6bfc619e96155455') # This tests the device passthrough implementation, by booting a VM # supporting it with two nvme disks attached, and launching a nested VM @@ -96,16 +97,16 @@ def test_aarch64_device_passthrough(self): self.vm.set_console() - stack_path_tar_gz = self.ASSET_DEVICE_PASSTHROUGH_STACK.fetch() - self.archive_extract(stack_path_tar_gz, format="tar") + stack_path_tar = self.ASSET_DEVICE_PASSTHROUGH_STACK.fetch() + self.archive_extract(stack_path_tar, format="tar") stack = self.scratch_file('out') - kernel = os.path.join(stack, 'Image.gz') - rootfs_host = os.path.join(stack, 'host.ext4') - disk_vfio = os.path.join(stack, 'disk_vfio') - disk_iommufd = os.path.join(stack, 'disk_iommufd') - guest_cmd = os.path.join(stack, 'guest.sh') - nested_guest_cmd = os.path.join(stack, 'nested_guest.sh') + kernel = join(stack, 'Image.gz') + rootfs_host = join(stack, 'host.ext4') + disk_vfio = join(stack, 'disk_vfio') + disk_iommufd = join(stack, 'disk_iommufd') + guest_cmd = join(stack, 'guest.sh') + nested_guest_cmd = join(stack, 'nested_guest.sh') # we generate two random disks with open(disk_vfio, "wb") as d: d.write(randbytes(512)) with open(disk_iommufd, "wb") as d: d.write(randbytes(1024)) diff --git a/tests/functional/test_aarch64_hotplug_pci.py b/tests/functional/aarch64/test_hotplug_pci.py similarity index 83% rename from tests/functional/test_aarch64_hotplug_pci.py rename to tests/functional/aarch64/test_hotplug_pci.py index c9bb7f1d9753f..bf677204319e5 100755 --- 
a/tests/functional/test_aarch64_hotplug_pci.py +++ b/tests/functional/aarch64/test_hotplug_pci.py @@ -15,14 +15,14 @@ class HotplugPCI(LinuxKernelTest): ASSET_KERNEL = Asset( - ('https://ftp.debian.org/debian/dists/stable/main/installer-arm64/' - '20230607+deb12u11/images/netboot/debian-installer/arm64/linux'), - 'd92a60392ce1e379ca198a1a820899f8f0d39a62d047c41ab79492f81541a9d9') + ('https://ftp.debian.org/debian/dists/trixie/main/installer-arm64/' + '20250803/images/netboot/debian-installer/arm64/linux'), + '93a6e4f9627d759375d28f863437a86a0659e125792a435f8e526dda006b7d5e') ASSET_INITRD = Asset( - ('https://ftp.debian.org/debian/dists/stable/main/installer-arm64/' - '20230607+deb12u11/images/netboot/debian-installer/arm64/initrd.gz'), - '9f817f76951f3237bca8216bee35267bfb826815687f4b2fcdd5e6c2a917790c') + ('https://ftp.debian.org/debian/dists/trixie/main/installer-arm64/' + '20250803/images/netboot/debian-installer/arm64/initrd.gz'), + 'f6c78af7078ca67638ef3a50c926cd3c1485673243f8b37952e6bd854d6ba007') def test_hotplug_pci(self): diff --git a/tests/functional/test_aarch64_imx8mp_evk.py b/tests/functional/aarch64/test_imx8mp_evk.py similarity index 100% rename from tests/functional/test_aarch64_imx8mp_evk.py rename to tests/functional/aarch64/test_imx8mp_evk.py diff --git a/tests/functional/aarch64/test_kvm.py b/tests/functional/aarch64/test_kvm.py new file mode 100755 index 0000000000000..9fb9286139f44 --- /dev/null +++ b/tests/functional/aarch64/test_kvm.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python3 +# +# Functional test that runs subsets of kvm-unit-tests on Aarch64. +# These can run on TCG and any accelerator supporting nested +# virtualisation. 
+# +# Copyright (c) 2025 Linaro +# +# Author: +# Alex Bennée +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import Asset +from qemu_test import exec_command_and_wait_for_pattern as ec_and_wait +from qemu_test.linuxkernel import LinuxKernelTest + + +class Aarch64VirtKVMTests(LinuxKernelTest): + + ASSET_KVM_TEST_KERNEL = Asset( + 'https://fileserver.linaro.org/s/HmjaxXXYHYSqbes/' + 'download?path=%2F&files=' + 'image-with-kvm-tool-and-unit-tests.gz', + '34de4aaea90db5da42729e7d28b77f392c37a2f4da859f889a5234aaf0970696') + + # make it easier to detect successful return to shell + PS1 = 'RES=[$?] # ' + OK_CMD = 'RES=[0] # ' + + # base of tests + KUT_BASE = "/usr/share/kvm-unit-tests/" + + def _launch_guest(self, kvm_mode="nvhe"): + + self.set_machine('virt') + kernel_path = self.ASSET_KVM_TEST_KERNEL.fetch() + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + f"console=ttyAMA0 kvm-arm.mode={kvm_mode}") + + self.vm.add_args("-cpu", "cortex-a72") + self.vm.add_args("-machine", "virt,gic-version=3,virtualization=on", + '-kernel', kernel_path, + '-append', kernel_command_line) + self.vm.add_args("-smp", "2", "-m", "320") + + self.vm.launch() + + self.wait_for_console_pattern('buildroot login:') + ec_and_wait(self, 'root', '#') + ec_and_wait(self, f"export PS1='{self.PS1}'", self.OK_CMD) + + # this is just a smoketest, we don't run all the tests in the image + def _smoketest_kvm(self): + ec_and_wait(self, f"{self.KUT_BASE}/selftest-setup", self.OK_CMD) + ec_and_wait(self, f"{self.KUT_BASE}/selftest-smp", self.OK_CMD) + ec_and_wait(self, f"{self.KUT_BASE}/selftest-vectors-kernel", self.OK_CMD) + ec_and_wait(self, f"{self.KUT_BASE}/selftest-vectors-user", self.OK_CMD) + + def test_aarch64_nvhe_selftest(self): + self._launch_guest("nvhe") + self._smoketest_kvm() + + def test_aarch64_vhe_selftest(self): + self._launch_guest("vhe") + self._smoketest_kvm() + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git 
a/tests/functional/aarch64/test_migration.py b/tests/functional/aarch64/test_migration.py new file mode 100755 index 0000000000000..70267e756d9e9 --- /dev/null +++ b/tests/functional/aarch64/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# aarch64 migration test + +from migration import MigrationTest + + +class Aarch64MigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('quanta-gsj') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('quanta-gsj') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('quanta-gsj') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/aarch64/test_multiprocess.py b/tests/functional/aarch64/test_multiprocess.py new file mode 100755 index 0000000000000..1c6e45ecb6764 --- /dev/null +++ b/tests/functional/aarch64/test_multiprocess.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Test for multiprocess qemu on aarch64 + +from multiprocess import Multiprocess +from qemu_test import Asset + + +class Aarch64Multiprocess(Multiprocess): + + ASSET_KERNEL_AARCH64 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/aarch64/os/images/pxeboot/vmlinuz'), + '3ae07fcafbfc8e4abeb693035a74fe10698faae15e9ccd48882a9167800c1527') + + ASSET_INITRD_AARCH64 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/aarch64/os/images/pxeboot/initrd.img'), + '9fd230cab10b1dafea41cf00150e6669d37051fad133bd618d2130284e16d526') + + def test_multiprocess(self): + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'rdinit=/bin/bash console=ttyAMA0') + self.do_test(self.ASSET_KERNEL_AARCH64, self.ASSET_INITRD_AARCH64, + kernel_command_line, 'virt,gic-version=3') + + +if 
__name__ == '__main__': + Multiprocess.main() diff --git a/tests/functional/test_aarch64_raspi3.py b/tests/functional/aarch64/test_raspi3.py similarity index 100% rename from tests/functional/test_aarch64_raspi3.py rename to tests/functional/aarch64/test_raspi3.py diff --git a/tests/functional/test_aarch64_raspi4.py b/tests/functional/aarch64/test_raspi4.py similarity index 100% rename from tests/functional/test_aarch64_raspi4.py rename to tests/functional/aarch64/test_raspi4.py diff --git a/tests/functional/test_aarch64_replay.py b/tests/functional/aarch64/test_replay.py similarity index 100% rename from tests/functional/test_aarch64_replay.py rename to tests/functional/aarch64/test_replay.py diff --git a/tests/functional/test_aarch64_reverse_debug.py b/tests/functional/aarch64/test_reverse_debug.py similarity index 68% rename from tests/functional/test_aarch64_reverse_debug.py rename to tests/functional/aarch64/test_reverse_debug.py index 58d45328350f5..ec3348c96d88a 100755 --- a/tests/functional/test_aarch64_reverse_debug.py +++ b/tests/functional/aarch64/test_reverse_debug.py @@ -2,36 +2,34 @@ # # SPDX-License-Identifier: GPL-2.0-or-later # -# Reverse debugging test +# Reverse debugging test for aarch64 # # Copyright (c) 2020 ISP RAS +# Copyright (c) 2025 Linaro Limited # # Author: # Pavel Dovgalyuk +# Gustavo Romero (Run without Avocado) # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from qemu_test import Asset, skipIfMissingImports, skipFlakyTest +from qemu_test import Asset, skipFlakyTest from reverse_debugging import ReverseDebugging -@skipIfMissingImports('avocado.utils') class ReverseDebugging_AArch64(ReverseDebugging): - REG_PC = 32 - - KERNEL_ASSET = Asset( + ASSET_KERNEL = Asset( ('https://archives.fedoraproject.org/pub/archive/fedora/linux/' 'releases/29/Everything/aarch64/os/images/pxeboot/vmlinuz'), '7e1430b81c26bdd0da025eeb8fbd77b5dc961da4364af26e771bd39f379cbbf7') - @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/2921") def test_aarch64_virt(self): self.set_machine('virt') self.cpu = 'cortex-a53' - kernel_path = self.KERNEL_ASSET.fetch() - self.reverse_debugging(args=('-kernel', kernel_path)) + kernel_path = self.ASSET_KERNEL.fetch() + self.reverse_debugging(gdb_arch='aarch64', args=('-kernel', kernel_path)) if __name__ == '__main__': diff --git a/tests/functional/aarch64/test_rme_sbsaref.py b/tests/functional/aarch64/test_rme_sbsaref.py new file mode 100755 index 0000000000000..6f92858397ad8 --- /dev/null +++ b/tests/functional/aarch64/test_rme_sbsaref.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Realms environment on sbsa-ref machine and a +# nested guest VM using it. +# +# Copyright (c) 2024 Linaro Ltd. 
+# +# Author: Pierrick Bouvier +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +from os.path import join +import shutil + +from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + + +class Aarch64RMESbsaRefMachine(QemuSystemTest): + + # Stack is inspired from: + # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/ + # https://github.com/pbo-linaro/qemu-linux-stack/tree/rme_sbsa_release + # ./build.sh && ./archive_artifacts.sh out.tar.xz + ASSET_RME_STACK_SBSA = Asset( + ('https://github.com/pbo-linaro/qemu-linux-stack/' + 'releases/download/build/rme_sbsa_release-6a2dfc5.tar.xz'), + '5adba482aa069912292a8da746c6b21268224d9d81c97fe7c0bed690579ebdcb') + + # This tests the FEAT_RME cpu implementation, by booting a VM supporting it, + # and launching a nested VM using it. + def test_aarch64_rme_sbsaref(self): + self.set_machine('sbsa-ref') + self.require_accelerator('tcg') + self.require_netdev('user') + + self.vm.set_console() + + stack_path_tar = self.ASSET_RME_STACK_SBSA.fetch() + self.archive_extract(stack_path_tar, format="tar") + + rme_stack = self.scratch_file('.') + pflash0 = join(rme_stack, 'out', 'SBSA_FLASH0.fd') + pflash1 = join(rme_stack, 'out', 'SBSA_FLASH1.fd') + rootfs = join(rme_stack, 'out', 'host.ext4') + + efi = join(rme_stack, 'out', 'EFI') + os.makedirs(efi, exist_ok=True) + shutil.copyfile(join(rme_stack, 'out', 'Image'), join(efi, 'Image')) + with open(join(efi, 'startup.nsh'), 'w') as startup: + startup.write('fs0:Image nokaslr root=/dev/vda rw init=/init --' + ' /host/out/lkvm run --realm' + ' -m 256m' + ' --restricted_mem' + ' --kernel /host/out/Image' + ' --disk /host/out/guest.ext4' + ' --params "root=/dev/vda rw init=/init"') + + self.vm.add_args('-cpu', 'max,x-rme=on') + self.vm.add_args('-smp', '2') + self.vm.add_args('-m', '2G') + self.vm.add_args('-M', 'sbsa-ref') + self.vm.add_args('-drive', f'file={pflash0},format=raw,if=pflash') + 
self.vm.add_args('-drive', f'file={pflash1},format=raw,if=pflash') + self.vm.add_args('-drive', f'file=fat:rw:{efi},format=raw') + self.vm.add_args('-drive', f'format=raw,file={rootfs},if=virtio') + self.vm.add_args('-virtfs', + f'local,path={rme_stack}/,mount_tag=host,' + 'security_model=mapped,readonly=off') + self.vm.launch() + # Wait for host and guest VM boot to complete. + wait_for_console_pattern(self, 'root@guest', + failure_message='Kernel panic') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/aarch64/test_rme_virt.py b/tests/functional/aarch64/test_rme_virt.py new file mode 100755 index 0000000000000..5e23773f93d77 --- /dev/null +++ b/tests/functional/aarch64/test_rme_virt.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Realms environment on virt machine and a nested +# guest VM using it. +# +# Copyright (c) 2024 Linaro Ltd. +# +# Author: Pierrick Bouvier +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from os.path import join + +from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command, wait_for_console_pattern +from qemu_test import exec_command_and_wait_for_pattern + +class Aarch64RMEVirtMachine(QemuSystemTest): + + # Stack is inspired from: + # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/ + # https://github.com/pbo-linaro/qemu-linux-stack/tree/rme_release + # ./build.sh && ./archive_artifacts.sh out.tar.xz + ASSET_RME_STACK_VIRT = Asset( + ('https://github.com/pbo-linaro/qemu-linux-stack/' + 'releases/download/build/rme_release-56bc99e.tar.xz'), + '0e3dc6b8a4b828dbae09c951a40dcb710eded084b32432b50c69cf4173ffa4be') + + # This tests the FEAT_RME cpu implementation, by booting a VM supporting it, + # and launching a nested VM using it. 
+ def test_aarch64_rme_virt(self): + self.set_machine('virt') + self.require_accelerator('tcg') + self.require_netdev('user') + + self.vm.set_console() + + stack_path_tar = self.ASSET_RME_STACK_VIRT.fetch() + self.archive_extract(stack_path_tar, format="tar") + + rme_stack = self.scratch_file('.') + kernel = join(rme_stack, 'out', 'Image') + bios = join(rme_stack, 'out', 'flash.bin') + rootfs = join(rme_stack, 'out', 'host.ext4') + + self.vm.add_args('-cpu', 'max,x-rme=on') + self.vm.add_args('-smp', '2') + self.vm.add_args('-m', '2G') + self.vm.add_args('-M', 'virt,acpi=off,' + 'virtualization=on,' + 'secure=on,' + 'gic-version=3') + self.vm.add_args('-bios', bios) + self.vm.add_args('-kernel', kernel) + self.vm.add_args('-drive', f'format=raw,file={rootfs},if=virtio') + self.vm.add_args('-virtfs', + f'local,path={rme_stack}/,mount_tag=host,' + 'security_model=mapped,readonly=off') + # We need to add nokaslr to avoid triggering this sporadic bug: + # https://gitlab.com/qemu-project/qemu/-/issues/2823 + self.vm.add_args('-append', + 'nokaslr root=/dev/vda rw init=/init --' + ' /host/out/lkvm run --realm' + ' -m 256m' + ' --restricted_mem' + ' --kernel /host/out/Image' + ' --disk /host/out/guest.ext4' + ' --params "root=/dev/vda rw init=/init"') + + self.vm.launch() + # Wait for host and guest VM boot to complete. 
+ wait_for_console_pattern(self, 'root@guest', + failure_message='Kernel panic') + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_sbsaref.py b/tests/functional/aarch64/test_sbsaref.py similarity index 100% rename from tests/functional/test_aarch64_sbsaref.py rename to tests/functional/aarch64/test_sbsaref.py diff --git a/tests/functional/test_aarch64_sbsaref_alpine.py b/tests/functional/aarch64/test_sbsaref_alpine.py similarity index 86% rename from tests/functional/test_aarch64_sbsaref_alpine.py rename to tests/functional/aarch64/test_sbsaref_alpine.py index 877699938319a..be84b7adb0c75 100755 --- a/tests/functional/test_aarch64_sbsaref_alpine.py +++ b/tests/functional/aarch64/test_sbsaref_alpine.py @@ -12,7 +12,7 @@ from qemu_test import QemuSystemTest, Asset, skipSlowTest from qemu_test import wait_for_console_pattern -from test_aarch64_sbsaref import fetch_firmware +from test_sbsaref import fetch_firmware class Aarch64SbsarefAlpine(QemuSystemTest): @@ -41,15 +41,9 @@ def boot_alpine_linux(self, cpu=None): self.vm.launch() wait_for_console_pattern(self, "Welcome to Alpine Linux 3.17") - def test_sbsaref_alpine_linux_cortex_a57(self): - self.boot_alpine_linux("cortex-a57") - def test_sbsaref_alpine_linux_default_cpu(self): self.boot_alpine_linux() - def test_sbsaref_alpine_linux_max_pauth_off(self): - self.boot_alpine_linux("max,pauth=off") - def test_sbsaref_alpine_linux_max_pauth_impdef(self): self.boot_alpine_linux("max,pauth-impdef=on") diff --git a/tests/functional/test_aarch64_sbsaref_freebsd.py b/tests/functional/aarch64/test_sbsaref_freebsd.py similarity index 97% rename from tests/functional/test_aarch64_sbsaref_freebsd.py rename to tests/functional/aarch64/test_sbsaref_freebsd.py index 7ef016fba6286..3b942f7795cde 100755 --- a/tests/functional/test_aarch64_sbsaref_freebsd.py +++ b/tests/functional/aarch64/test_sbsaref_freebsd.py @@ -12,7 +12,7 @@ from qemu_test import QemuSystemTest, Asset, skipSlowTest 
from qemu_test import wait_for_console_pattern -from test_aarch64_sbsaref import fetch_firmware +from test_sbsaref import fetch_firmware class Aarch64SbsarefFreeBSD(QemuSystemTest): diff --git a/tests/functional/test_aarch64_smmu.py b/tests/functional/aarch64/test_smmu.py similarity index 100% rename from tests/functional/test_aarch64_smmu.py rename to tests/functional/aarch64/test_smmu.py diff --git a/tests/functional/test_aarch64_tcg_plugins.py b/tests/functional/aarch64/test_tcg_plugins.py similarity index 100% rename from tests/functional/test_aarch64_tcg_plugins.py rename to tests/functional/aarch64/test_tcg_plugins.py diff --git a/tests/functional/test_aarch64_tuxrun.py b/tests/functional/aarch64/test_tuxrun.py similarity index 100% rename from tests/functional/test_aarch64_tuxrun.py rename to tests/functional/aarch64/test_tuxrun.py diff --git a/tests/functional/test_aarch64_virt.py b/tests/functional/aarch64/test_virt.py similarity index 97% rename from tests/functional/test_aarch64_virt.py rename to tests/functional/aarch64/test_virt.py index 4d0ad90ff8928..63071f9b51752 100755 --- a/tests/functional/test_aarch64_virt.py +++ b/tests/functional/aarch64/test_virt.py @@ -72,8 +72,6 @@ def common_aarch64_virt(self, machine): self.set_machine('virt') self.require_accelerator("tcg") - logger = logging.getLogger('aarch64_virt') - kernel_path = self.ASSET_KERNEL.fetch() self.vm.set_console() @@ -91,7 +89,7 @@ def common_aarch64_virt(self, machine): 'rng-random,id=rng0,filename=/dev/urandom') # Also add a scratch block device - logger.info('creating scratch qcow2 image') + self.log.info('creating scratch qcow2 image') image_path = self.scratch_file('scratch.qcow2') qemu_img = get_qemu_img(self) check_call([qemu_img, 'create', '-f', 'qcow2', image_path, '8M'], diff --git a/tests/functional/test_aarch64_virt_gpu.py b/tests/functional/aarch64/test_virt_gpu.py similarity index 97% rename from tests/functional/test_aarch64_virt_gpu.py rename to 
tests/functional/aarch64/test_virt_gpu.py index 38447278579e3..4e50887c3e9fe 100755 --- a/tests/functional/test_aarch64_virt_gpu.py +++ b/tests/functional/aarch64/test_virt_gpu.py @@ -76,6 +76,8 @@ def _launch_virt_gpu(self, gpu_device): self.skipTest("egl-headless support is not available") elif "'type' does not accept value 'dbus'" in excp.output: self.skipTest("dbus display support is not available") + elif "eglInitialize failed: EGL_NOT_INITIALIZED" in excp.output: + self.skipTest("EGL failed to initialize on this host") else: self.log.info("unhandled launch failure: %s", excp.output) raise excp diff --git a/tests/functional/test_aarch64_xen.py b/tests/functional/aarch64/test_xen.py similarity index 100% rename from tests/functional/test_aarch64_xen.py rename to tests/functional/aarch64/test_xen.py diff --git a/tests/functional/test_aarch64_xlnx_versal.py b/tests/functional/aarch64/test_xlnx_versal.py similarity index 77% rename from tests/functional/test_aarch64_xlnx_versal.py rename to tests/functional/aarch64/test_xlnx_versal.py index 4b9c49e5d6464..45aa6e1b88183 100755 --- a/tests/functional/test_aarch64_xlnx_versal.py +++ b/tests/functional/aarch64/test_xlnx_versal.py @@ -6,7 +6,7 @@ from qemu_test import LinuxKernelTest, Asset -class XlnxVersalVirtMachine(LinuxKernelTest): +class AmdVersalVirtMachine(LinuxKernelTest): ASSET_KERNEL = Asset( ('http://ports.ubuntu.com/ubuntu-ports/dists/bionic-updates/main/' @@ -20,8 +20,8 @@ class XlnxVersalVirtMachine(LinuxKernelTest): '/ubuntu-installer/arm64/initrd.gz'), 'e7a5e716b6f516d8be315c06e7331aaf16994fe4222e0e7cfb34bc015698929e') - def test_aarch64_xlnx_versal_virt(self): - self.set_machine('xlnx-versal-virt') + def common_aarch64_amd_versal_virt(self, machine): + self.set_machine(machine) kernel_path = self.ASSET_KERNEL.fetch() initrd_path = self.ASSET_INITRD.fetch() @@ -33,5 +33,11 @@ def test_aarch64_xlnx_versal_virt(self): self.vm.launch() self.wait_for_console_pattern('Checked W+X mappings: passed') + def 
test_aarch64_amd_versal_virt(self): + self.common_aarch64_amd_versal_virt('amd-versal-virt') + + def test_aarch64_amd_versal2_virt(self): + self.common_aarch64_amd_versal_virt('amd-versal2-virt') + if __name__ == '__main__': LinuxKernelTest.main() diff --git a/tests/functional/alpha/meson.build b/tests/functional/alpha/meson.build new file mode 100644 index 0000000000000..26a5b3f2e4b9c --- /dev/null +++ b/tests/functional/alpha/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_alpha_system_quick = [ + 'migration', +] + +tests_alpha_system_thorough = [ + 'clipper', + 'replay', +] diff --git a/tests/functional/test_alpha_clipper.py b/tests/functional/alpha/test_clipper.py similarity index 95% rename from tests/functional/test_alpha_clipper.py rename to tests/functional/alpha/test_clipper.py index c5d7181953152..d2a4c2a4ed94e 100755 --- a/tests/functional/test_alpha_clipper.py +++ b/tests/functional/alpha/test_clipper.py @@ -17,7 +17,6 @@ class AlphaClipperTest(LinuxKernelTest): def test_alpha_clipper(self): self.set_machine('clipper') - kernel_path = self.ASSET_KERNEL.fetch() uncompressed_kernel = self.uncompress(self.ASSET_KERNEL, format="gz") diff --git a/tests/functional/alpha/test_migration.py b/tests/functional/alpha/test_migration.py new file mode 100755 index 0000000000000..f11b523ec9e7d --- /dev/null +++ b/tests/functional/alpha/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Alpha migration test + +from migration import MigrationTest + + +class AlphaMigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('clipper') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('clipper') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('clipper') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git 
a/tests/functional/test_alpha_replay.py b/tests/functional/alpha/test_replay.py similarity index 100% rename from tests/functional/test_alpha_replay.py rename to tests/functional/alpha/test_replay.py diff --git a/tests/functional/arm/meson.build b/tests/functional/arm/meson.build new file mode 100644 index 0000000000000..d1ed076a6aa8e --- /dev/null +++ b/tests/functional/arm/meson.build @@ -0,0 +1,64 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_arm_timeouts = { + 'aspeed_palmetto' : 120, + 'aspeed_romulus' : 120, + 'aspeed_witherspoon' : 120, + 'aspeed_ast2500' : 720, + 'aspeed_ast2600_buildroot' : 720, + 'aspeed_ast2600_sdk' : 1200, + 'aspeed_bletchley' : 480, + 'aspeed_catalina' : 480, + 'aspeed_gb200nvl_bmc' : 480, + 'aspeed_rainier' : 480, + 'bpim2u' : 500, + 'collie' : 180, + 'cubieboard' : 360, + 'orangepi' : 540, + 'quanta_gsj' : 240, + 'raspi2' : 120, + 'replay' : 240, + 'tuxrun' : 240, + 'sx1' : 360, +} + +tests_arm_system_quick = [ + 'migration', +] + +tests_arm_system_thorough = [ + 'aspeed_ast1030', + 'aspeed_palmetto', + 'aspeed_romulus', + 'aspeed_witherspoon', + 'aspeed_ast2500', + 'aspeed_ast2600_buildroot', + 'aspeed_ast2600_sdk', + 'aspeed_bletchley', + 'aspeed_catalina', + 'aspeed_gb200nvl_bmc', + 'aspeed_rainier', + 'bpim2u', + 'canona1100', + 'collie', + 'cubieboard', + 'emcraft_sf2', + 'integratorcp', + 'max78000fthr', + 'microbit', + 'orangepi', + 'quanta_gsj', + 'raspi2', + 'realview', + 'replay', + 'smdkc210', + 'stellaris', + 'sx1', + 'vexpress', + 'virt', + 'tuxrun', +] + +tests_arm_linuxuser_thorough = [ + 'bflt', +] diff --git a/tests/functional/test_arm_aspeed_ast1030.py b/tests/functional/arm/test_aspeed_ast1030.py similarity index 67% rename from tests/functional/test_arm_aspeed_ast1030.py rename to tests/functional/arm/test_aspeed_ast1030.py index 77037f01793ce..60e2b0251c6c6 100755 --- a/tests/functional/test_arm_aspeed_ast1030.py +++ b/tests/functional/arm/test_aspeed_ast1030.py @@ -7,22 +7,23 @@ # 
SPDX-License-Identifier: GPL-2.0-or-later from qemu_test import LinuxKernelTest, Asset +from aspeed import AspeedTest from qemu_test import exec_command_and_wait_for_pattern -class AST1030Machine(LinuxKernelTest): +class AST1030Machine(AspeedTest): - ASSET_ZEPHYR_3_00 = Asset( + ASSET_ZEPHYR_3_03 = Asset( ('https://github.com/AspeedTech-BMC' - '/zephyr/releases/download/v00.03.00/ast1030-evb-demo.zip'), - '37fe3ecd4a1b9d620971a15b96492a81093435396eeac69b6f3e384262ff555f') + '/zephyr/releases/download/v00.03.03/ast1030-evb-demo.zip'), + '27cd73cdee6374bceb4ee58b3ace87989fa3f0684f4e612510804b588b24d4e0') - def test_ast1030_zephyros_3_00(self): + def test_arm_ast1030_zephyros_3_03(self): self.set_machine('ast1030-evb') kernel_name = "ast1030-evb-demo/zephyr.elf" kernel_file = self.archive_extract( - self.ASSET_ZEPHYR_3_00, member=kernel_name) + self.ASSET_ZEPHYR_3_03, member=kernel_name) self.vm.set_console() self.vm.add_args('-kernel', kernel_file, '-nographic') @@ -36,7 +37,7 @@ def test_ast1030_zephyros_3_00(self): '/zephyr/releases/download/v00.01.07/ast1030-evb-demo.zip'), 'ad52e27959746988afaed8429bf4e12ab988c05c4d07c9d90e13ec6f7be4574c') - def test_ast1030_zephyros_1_07(self): + def test_arm_ast1030_zephyros_1_07(self): self.set_machine('ast1030-evb') kernel_name = "ast1030-evb-demo/zephyr.bin" @@ -68,6 +69,22 @@ def test_ast1030_zephyros_1_07(self): 'kernel uptime', ]: exec_command_and_wait_for_pattern(self, shell_cmd, "uart:~$") + def test_arm_ast1030_otp_blockdev_device(self): + self.vm.set_machine("ast1030-evb") + + kernel_name = "ast1030-evb-demo/zephyr.elf" + kernel_file = self.archive_extract(self.ASSET_ZEPHYR_3_03, + member=kernel_name) + otp_img = self.generate_otpmem_image() + + self.vm.set_console() + self.vm.add_args( + "-kernel", kernel_file, + "-blockdev", f"driver=file,filename={otp_img},node-name=otp", + "-global", "aspeed-otp.drive=otp", + ) + self.vm.launch() + self.wait_for_console_pattern("Booting Zephyr OS") if __name__ == '__main__': - 
LinuxKernelTest.main() + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_ast2500.py b/tests/functional/arm/test_aspeed_ast2500.py similarity index 89% rename from tests/functional/test_arm_aspeed_ast2500.py rename to tests/functional/arm/test_aspeed_ast2500.py index 6923fe870170c..5efd104c2b959 100755 --- a/tests/functional/test_arm_aspeed_ast2500.py +++ b/tests/functional/arm/test_aspeed_ast2500.py @@ -37,14 +37,14 @@ def test_arm_ast2500_evb_buildroot(self): self.do_test_arm_aspeed_buildroot_poweroff() - ASSET_SDK_V906_AST2500 = Asset( - 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2500-default-obmc.tar.gz', - '542db84645b4efd8aed50385d7f4dd1caff379a987032311cfa7b563a3addb2a') + ASSET_SDK_V908_AST2500 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.08/ast2500-default-obmc.tar.gz', + 'c0a2ba169efd19be5eb77c50ec2a6afd9d826e196a0be3432f969fc72d4b7c0e') def test_arm_ast2500_evb_sdk(self): self.set_machine('ast2500-evb') - self.archive_extract(self.ASSET_SDK_V906_AST2500) + self.archive_extract(self.ASSET_SDK_V908_AST2500) self.do_test_arm_aspeed_sdk_start( self.scratch_file("ast2500-default", "image-bmc")) diff --git a/tests/functional/test_arm_aspeed_ast2600.py b/tests/functional/arm/test_aspeed_ast2600_buildroot.py similarity index 71% rename from tests/functional/test_arm_aspeed_ast2600.py rename to tests/functional/arm/test_aspeed_ast2600_buildroot.py index fdae4c939d8cc..51f2676c9061f 100755 --- a/tests/functional/test_arm_aspeed_ast2600.py +++ b/tests/functional/arm/test_aspeed_ast2600_buildroot.py @@ -97,44 +97,6 @@ def test_arm_ast2600_evb_buildroot_tpm(self): self.do_test_arm_aspeed_buildroot_poweroff() - ASSET_SDK_V906_AST2600 = Asset( - 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.06/ast2600-default-obmc.tar.gz', - '768d76e247896ad78c154b9cff4f766da2ce65f217d620b286a4a03a8a4f68f5') - - def test_arm_ast2600_evb_sdk(self): - self.set_machine('ast2600-evb') - - 
self.archive_extract(self.ASSET_SDK_V906_AST2600) - - self.vm.add_args('-device', - 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test') - self.vm.add_args('-device', - 'ds1338,bus=aspeed.i2c.bus.5,address=0x32') - self.do_test_arm_aspeed_sdk_start( - self.scratch_file("ast2600-default", "image-bmc")) - - self.wait_for_console_pattern('ast2600-default login:') - - exec_command_and_wait_for_pattern(self, 'root', 'Password:') - exec_command_and_wait_for_pattern(self, '0penBmc', - 'root@ast2600-default:~#') - - exec_command_and_wait_for_pattern(self, - 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device', - 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d') - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') - self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', - property='temperature', value=18000) - exec_command_and_wait_for_pattern(self, - 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') - - exec_command_and_wait_for_pattern(self, - 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device', - 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32') - year = time.strftime("%Y") - exec_command_and_wait_for_pattern(self, - '/sbin/hwclock -f /dev/rtc1', year) if __name__ == '__main__': AspeedTest.main() diff --git a/tests/functional/arm/test_aspeed_ast2600_sdk.py b/tests/functional/arm/test_aspeed_ast2600_sdk.py new file mode 100755 index 0000000000000..e3d4ed09e2eef --- /dev/null +++ b/tests/functional/arm/test_aspeed_ast2600_sdk.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 +# +# Functional test that boots the ASPEED machines +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +import time + +from qemu_test import Asset +from aspeed import AspeedTest +from qemu_test import exec_command_and_wait_for_pattern + + +class AST2600Machine(AspeedTest): + + ASSET_SDK_V908_AST2600 = Asset( + 'https://github.com/AspeedTech-BMC/openbmc/releases/download/v09.08/ast2600-default-obmc.tar.gz', 
+ 'a0414f14ad696550efe083c2156dbeda855c08cc9ae7f40fe1b41bf292295f82') + + def do_ast2600_pcie_test(self): + exec_command_and_wait_for_pattern(self, + 'lspci -s 80:00.0', + '80:00.0 Host bridge: ' + 'ASPEED Technology, Inc. Device 2600') + exec_command_and_wait_for_pattern(self, + 'lspci -s 80:08.0', + '80:08.0 PCI bridge: ' + 'ASPEED Technology, Inc. AST1150 PCI-to-PCI Bridge') + exec_command_and_wait_for_pattern(self, + 'lspci -s 81:00.0', + '81:00.0 Ethernet controller: ' + 'Intel Corporation 82574L Gigabit Network Connection') + exec_command_and_wait_for_pattern(self, + 'ip addr show dev eth4', + 'inet 10.0.2.15/24') + + def test_arm_ast2600_evb_sdk(self): + self.set_machine('ast2600-evb') + self.require_netdev('user') + + self.archive_extract(self.ASSET_SDK_V908_AST2600) + + self.vm.add_args('-device', + 'tmp105,bus=aspeed.i2c.bus.5,address=0x4d,id=tmp-test') + self.vm.add_args('-device', + 'ds1338,bus=aspeed.i2c.bus.5,address=0x32') + self.vm.add_args('-device', 'e1000e,netdev=net1,bus=pcie.0') + self.vm.add_args('-netdev', 'user,id=net1') + self.do_test_arm_aspeed_sdk_start( + self.scratch_file("ast2600-default", "image-bmc")) + + self.wait_for_console_pattern('ast2600-default login:') + + exec_command_and_wait_for_pattern(self, 'root', 'Password:') + exec_command_and_wait_for_pattern(self, '0penBmc', + 'root@ast2600-default:~#') + + exec_command_and_wait_for_pattern(self, + 'echo lm75 0x4d > /sys/class/i2c-dev/i2c-5/device/new_device', + 'i2c i2c-5: new_device: Instantiated device lm75 at 0x4d') + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon19/temp1_input', '0') + self.vm.cmd('qom-set', path='/machine/peripheral/tmp-test', + property='temperature', value=18000) + exec_command_and_wait_for_pattern(self, + 'cat /sys/class/hwmon/hwmon19/temp1_input', '18000') + + exec_command_and_wait_for_pattern(self, + 'echo ds1307 0x32 > /sys/class/i2c-dev/i2c-5/device/new_device', + 'i2c i2c-5: new_device: Instantiated device ds1307 at 0x32') + year 
= time.strftime("%Y") + exec_command_and_wait_for_pattern(self, + '/sbin/hwclock -f /dev/rtc1', year) + self.do_ast2600_pcie_test() + + def test_arm_ast2600_otp_blockdev_device(self): + self.vm.set_machine("ast2600-evb") + + image_path = self.archive_extract(self.ASSET_SDK_V908_AST2600) + otp_img = self.generate_otpmem_image() + + self.vm.set_console() + self.vm.add_args( + "-blockdev", f"driver=file,filename={otp_img},node-name=otp", + "-global", "aspeed-otp.drive=otp", + ) + self.do_test_arm_aspeed_sdk_start( + self.scratch_file("ast2600-default", "image-bmc")) + self.wait_for_console_pattern("ast2600-default login:") + + +if __name__ == '__main__': + AspeedTest.main() diff --git a/tests/functional/test_arm_aspeed_bletchley.py b/tests/functional/arm/test_aspeed_bletchley.py old mode 100644 new mode 100755 similarity index 100% rename from tests/functional/test_arm_aspeed_bletchley.py rename to tests/functional/arm/test_aspeed_bletchley.py diff --git a/tests/functional/test_arm_aspeed_catalina.py b/tests/functional/arm/test_aspeed_catalina.py similarity index 100% rename from tests/functional/test_arm_aspeed_catalina.py rename to tests/functional/arm/test_aspeed_catalina.py diff --git a/tests/functional/test_arm_aspeed_gb200nvl_bmc.py b/tests/functional/arm/test_aspeed_gb200nvl_bmc.py old mode 100644 new mode 100755 similarity index 100% rename from tests/functional/test_arm_aspeed_gb200nvl_bmc.py rename to tests/functional/arm/test_aspeed_gb200nvl_bmc.py diff --git a/tests/functional/test_arm_aspeed_palmetto.py b/tests/functional/arm/test_aspeed_palmetto.py similarity index 100% rename from tests/functional/test_arm_aspeed_palmetto.py rename to tests/functional/arm/test_aspeed_palmetto.py diff --git a/tests/functional/test_arm_aspeed_rainier.py b/tests/functional/arm/test_aspeed_rainier.py similarity index 100% rename from tests/functional/test_arm_aspeed_rainier.py rename to tests/functional/arm/test_aspeed_rainier.py diff --git 
a/tests/functional/test_arm_aspeed_romulus.py b/tests/functional/arm/test_aspeed_romulus.py similarity index 100% rename from tests/functional/test_arm_aspeed_romulus.py rename to tests/functional/arm/test_aspeed_romulus.py diff --git a/tests/functional/test_arm_aspeed_witherspoon.py b/tests/functional/arm/test_aspeed_witherspoon.py old mode 100644 new mode 100755 similarity index 100% rename from tests/functional/test_arm_aspeed_witherspoon.py rename to tests/functional/arm/test_aspeed_witherspoon.py diff --git a/tests/functional/test_arm_bflt.py b/tests/functional/arm/test_bflt.py similarity index 100% rename from tests/functional/test_arm_bflt.py rename to tests/functional/arm/test_bflt.py diff --git a/tests/functional/test_arm_bpim2u.py b/tests/functional/arm/test_bpim2u.py similarity index 100% rename from tests/functional/test_arm_bpim2u.py rename to tests/functional/arm/test_bpim2u.py diff --git a/tests/functional/test_arm_canona1100.py b/tests/functional/arm/test_canona1100.py similarity index 100% rename from tests/functional/test_arm_canona1100.py rename to tests/functional/arm/test_canona1100.py diff --git a/tests/functional/test_arm_collie.py b/tests/functional/arm/test_collie.py similarity index 100% rename from tests/functional/test_arm_collie.py rename to tests/functional/arm/test_collie.py diff --git a/tests/functional/test_arm_cubieboard.py b/tests/functional/arm/test_cubieboard.py similarity index 100% rename from tests/functional/test_arm_cubieboard.py rename to tests/functional/arm/test_cubieboard.py diff --git a/tests/functional/test_arm_emcraft_sf2.py b/tests/functional/arm/test_emcraft_sf2.py similarity index 100% rename from tests/functional/test_arm_emcraft_sf2.py rename to tests/functional/arm/test_emcraft_sf2.py diff --git a/tests/functional/test_arm_integratorcp.py b/tests/functional/arm/test_integratorcp.py similarity index 96% rename from tests/functional/test_arm_integratorcp.py rename to tests/functional/arm/test_integratorcp.py 
index 4f00924aa0327..23ae919359d54 100755 --- a/tests/functional/test_arm_integratorcp.py +++ b/tests/functional/arm/test_integratorcp.py @@ -77,7 +77,6 @@ def test_framebuffer_tux_logo(self): command_line='screendump %s' % screendump_path) if 'unknown command' in res: self.skipTest('screendump not available') - logger = logging.getLogger('framebuffer') cpu_count = 1 match_threshold = 0.92 @@ -88,7 +87,7 @@ def test_framebuffer_tux_logo(self): loc = np.where(result >= match_threshold) tux_count = 0 for tux_count, pt in enumerate(zip(*loc[::-1]), start=1): - logger.debug('found Tux at position [x, y] = %s', pt) + self.log.debug('found Tux at position [x, y] = %s', pt) self.assertGreaterEqual(tux_count, cpu_count) if __name__ == '__main__': diff --git a/tests/functional/test_arm_max78000fthr.py b/tests/functional/arm/test_max78000fthr.py similarity index 100% rename from tests/functional/test_arm_max78000fthr.py rename to tests/functional/arm/test_max78000fthr.py diff --git a/tests/functional/test_arm_microbit.py b/tests/functional/arm/test_microbit.py similarity index 100% rename from tests/functional/test_arm_microbit.py rename to tests/functional/arm/test_microbit.py diff --git a/tests/functional/arm/test_migration.py b/tests/functional/arm/test_migration.py new file mode 100755 index 0000000000000..0aa89f4f61a20 --- /dev/null +++ b/tests/functional/arm/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# arm migration test + +from migration import MigrationTest + + +class ArmMigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('npcm750-evb') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('npcm750-evb') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('npcm750-evb') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git 
a/tests/functional/test_arm_orangepi.py b/tests/functional/arm/test_orangepi.py similarity index 100% rename from tests/functional/test_arm_orangepi.py rename to tests/functional/arm/test_orangepi.py diff --git a/tests/functional/test_arm_quanta_gsj.py b/tests/functional/arm/test_quanta_gsj.py similarity index 100% rename from tests/functional/test_arm_quanta_gsj.py rename to tests/functional/arm/test_quanta_gsj.py diff --git a/tests/functional/test_arm_raspi2.py b/tests/functional/arm/test_raspi2.py similarity index 100% rename from tests/functional/test_arm_raspi2.py rename to tests/functional/arm/test_raspi2.py diff --git a/tests/functional/test_arm_realview.py b/tests/functional/arm/test_realview.py similarity index 100% rename from tests/functional/test_arm_realview.py rename to tests/functional/arm/test_realview.py diff --git a/tests/functional/test_arm_replay.py b/tests/functional/arm/test_replay.py similarity index 100% rename from tests/functional/test_arm_replay.py rename to tests/functional/arm/test_replay.py diff --git a/tests/functional/test_arm_smdkc210.py b/tests/functional/arm/test_smdkc210.py similarity index 100% rename from tests/functional/test_arm_smdkc210.py rename to tests/functional/arm/test_smdkc210.py diff --git a/tests/functional/test_arm_stellaris.py b/tests/functional/arm/test_stellaris.py similarity index 100% rename from tests/functional/test_arm_stellaris.py rename to tests/functional/arm/test_stellaris.py diff --git a/tests/functional/test_arm_sx1.py b/tests/functional/arm/test_sx1.py similarity index 100% rename from tests/functional/test_arm_sx1.py rename to tests/functional/arm/test_sx1.py diff --git a/tests/functional/test_arm_tuxrun.py b/tests/functional/arm/test_tuxrun.py similarity index 100% rename from tests/functional/test_arm_tuxrun.py rename to tests/functional/arm/test_tuxrun.py diff --git a/tests/functional/test_arm_vexpress.py b/tests/functional/arm/test_vexpress.py similarity index 100% rename from 
tests/functional/test_arm_vexpress.py rename to tests/functional/arm/test_vexpress.py diff --git a/tests/functional/test_arm_virt.py b/tests/functional/arm/test_virt.py similarity index 100% rename from tests/functional/test_arm_virt.py rename to tests/functional/arm/test_virt.py diff --git a/tests/functional/aspeed.py b/tests/functional/aspeed.py index b131703c5283a..47e84e035bd0a 100644 --- a/tests/functional/aspeed.py +++ b/tests/functional/aspeed.py @@ -61,3 +61,11 @@ def do_test_arm_aspeed_sdk_start(self, image): self.wait_for_console_pattern('U-Boot 2019.04') self.wait_for_console_pattern('## Loading kernel from FIT Image') self.wait_for_console_pattern('Starting kernel ...') + + def generate_otpmem_image(self): + path = self.scratch_file("otpmem.img") + pattern = b'\x00\x00\x00\x00\xff\xff\xff\xff' * (16 * 1024 // 8) + with open(path, "wb") as f: + f.write(pattern) + return path + diff --git a/tests/functional/avr/meson.build b/tests/functional/avr/meson.build new file mode 100644 index 0000000000000..7a2cb7099e75b --- /dev/null +++ b/tests/functional/avr/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_avr_system_thorough = [ + 'mega2560', + 'uno', +] diff --git a/tests/functional/test_avr_mega2560.py b/tests/functional/avr/test_mega2560.py similarity index 100% rename from tests/functional/test_avr_mega2560.py rename to tests/functional/avr/test_mega2560.py diff --git a/tests/functional/test_avr_uno.py b/tests/functional/avr/test_uno.py similarity index 100% rename from tests/functional/test_avr_uno.py rename to tests/functional/avr/test_uno.py diff --git a/tests/functional/generic/meson.build b/tests/functional/generic/meson.build new file mode 100644 index 0000000000000..013cc96fbf889 --- /dev/null +++ b/tests/functional/generic/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_generic_system = [ + 'empty_cpu_model', + 'info_usernet', + 'version', + 'vnc', +] + +tests_generic_linuxuser = [ 
+] + +tests_generic_bsduser = [ +] diff --git a/tests/functional/test_empty_cpu_model.py b/tests/functional/generic/test_empty_cpu_model.py similarity index 100% rename from tests/functional/test_empty_cpu_model.py rename to tests/functional/generic/test_empty_cpu_model.py diff --git a/tests/functional/test_info_usernet.py b/tests/functional/generic/test_info_usernet.py similarity index 100% rename from tests/functional/test_info_usernet.py rename to tests/functional/generic/test_info_usernet.py diff --git a/tests/functional/test_version.py b/tests/functional/generic/test_version.py similarity index 100% rename from tests/functional/test_version.py rename to tests/functional/generic/test_version.py diff --git a/tests/functional/generic/test_vmstate.py b/tests/functional/generic/test_vmstate.py new file mode 100755 index 0000000000000..387ff5424265a --- /dev/null +++ b/tests/functional/generic/test_vmstate.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +'''This test runs the vmstate-static-checker script with the current QEMU''' + +import subprocess + +from qemu_test import QemuSystemTest, skipFlakyTest + + +@skipFlakyTest("vmstate-static-checker can produce false positives") +class VmStateTest(QemuSystemTest): + ''' + This test helps to check whether there are problems between old + reference data and the current QEMU + ''' + + def test_vmstate_7_2(self): + '''Check reference data from QEMU v7.2''' + + target_machine = { + 'aarch64': 'virt-7.2', + 'm68k': 'virt-7.2', + 'ppc64': 'pseries-7.2', + 's390x': 's390-ccw-virtio-7.2', + 'x86_64': 'pc-q35-7.2', + } + self.set_machine(target_machine[self.arch]) + + # Run QEMU to get the current vmstate json file: + dst_json = self.scratch_file('dest.json') + self.log.info('Dumping vmstate from %s', self.qemu_bin) + cp = subprocess.run([self.qemu_bin, '-nodefaults', + '-M', target_machine[self.arch], + '-dump-vmstate', dst_json], + stdout=subprocess.PIPE, + 
stderr=subprocess.STDOUT, + text=True, check=True) + if cp.stdout: + self.log.info('QEMU output: %s', cp.stdout) + + # Check whether the old vmstate json file is still compatible: + src_json = self.data_file('..', 'data', 'vmstate-static-checker', + self.arch, + target_machine[self.arch] + '.json') + self.log.info('Comparing vmstate with %s', src_json) + checkerscript = self.data_file('..', '..', 'scripts', + 'vmstate-static-checker.py') + cp = subprocess.run([checkerscript, '-s', src_json, '-d', dst_json], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, check=False) + if cp.returncode != 0: + self.fail('Running vmstate-static-checker failed:\n' + cp.stdout + + '\nThis either means that there is a migration bug ' + 'that needs to be fixed, or\nvmstate-static-checker.py ' + 'needs to be improved (e.g. extend the changed_names\n' + 'in case a field has been renamed), or drop the ' + 'problematic field from\n' + src_json + + '\nin case the script cannot be fixed easily.') + if cp.stdout: + self.log.warning('vmstate-static-checker output: %s', cp.stdout) + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_vnc.py b/tests/functional/generic/test_vnc.py similarity index 100% rename from tests/functional/test_vnc.py rename to tests/functional/generic/test_vnc.py diff --git a/tests/functional/hppa/meson.build b/tests/functional/hppa/meson.build new file mode 100644 index 0000000000000..df2f7ccc9c3fd --- /dev/null +++ b/tests/functional/hppa/meson.build @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_hppa_system_quick = [ + 'seabios', +] + +tests_hppa_system_thorough = [ + 'cdboot', +] diff --git a/tests/functional/hppa/test_cdboot.py b/tests/functional/hppa/test_cdboot.py new file mode 100755 index 0000000000000..84421e8d63e17 --- /dev/null +++ b/tests/functional/hppa/test_cdboot.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# +# CD boot test for HPPA machines +# +# SPDX-License-Identifier: 
GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset, exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + + +class HppaCdBoot(QemuSystemTest): + + ASSET_CD = Asset( + ('https://github.com/philmd/qemu-testing-blob/raw/ec1b741/' + 'hppa/hp9000/712/C7120023.frm'), + '32c612ad2074516986bdc27768903c561fa92af2ca48e5ac3f3359ade1c42f70') + + def test_cdboot(self): + self.set_machine('B160L') + cdrom_path = self.ASSET_CD.fetch() + + self.vm.set_console() + self.vm.add_args('-cdrom', cdrom_path, + '-boot', 'd', + '-no-reboot') + self.vm.launch() + wait_for_console_pattern(self, 'Unrecognized MODEL TYPE = 502') + wait_for_console_pattern(self, 'UPDATE PAUSED>') + + exec_command_and_wait_for_pattern(self, 'exit\r', 'UPDATE>') + exec_command_and_wait_for_pattern(self, 'ls\r', 'IMAGE1B') + wait_for_console_pattern(self, 'UPDATE>') + exec_command_and_wait_for_pattern(self, 'exit\r', + 'THIS UTILITY WILL NOW RESET THE SYSTEM.....') + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_hppa_seabios.py b/tests/functional/hppa/test_seabios.py similarity index 100% rename from tests/functional/test_hppa_seabios.py rename to tests/functional/hppa/test_seabios.py diff --git a/tests/functional/i386/meson.build b/tests/functional/i386/meson.build new file mode 100644 index 0000000000000..23d8c216be7b3 --- /dev/null +++ b/tests/functional/i386/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_i386_system_quick = [ + 'migration', +] + +tests_i386_system_thorough = [ + 'replay', + 'tuxrun', +] diff --git a/tests/functional/i386/test_migration.py b/tests/functional/i386/test_migration.py new file mode 100755 index 0000000000000..a57f316404476 --- /dev/null +++ b/tests/functional/i386/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# i386 migration test + +from migration import MigrationTest + + +class 
I386MigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('isapc') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('isapc') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('isapc') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/test_i386_replay.py b/tests/functional/i386/test_replay.py similarity index 100% rename from tests/functional/test_i386_replay.py rename to tests/functional/i386/test_replay.py diff --git a/tests/functional/test_i386_tuxrun.py b/tests/functional/i386/test_tuxrun.py similarity index 100% rename from tests/functional/test_i386_tuxrun.py rename to tests/functional/i386/test_tuxrun.py diff --git a/tests/functional/loongarch64/meson.build b/tests/functional/loongarch64/meson.build new file mode 100644 index 0000000000000..d1687176a3d3f --- /dev/null +++ b/tests/functional/loongarch64/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_loongarch64_system_thorough = [ + 'virt', +] diff --git a/tests/functional/test_loongarch64_virt.py b/tests/functional/loongarch64/test_virt.py similarity index 100% rename from tests/functional/test_loongarch64_virt.py rename to tests/functional/loongarch64/test_virt.py diff --git a/tests/functional/m68k/meson.build b/tests/functional/m68k/meson.build new file mode 100644 index 0000000000000..679faaf86d69e --- /dev/null +++ b/tests/functional/m68k/meson.build @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_m68k_system_quick = [ + 'vmstate', +] + +tests_m68k_system_thorough = [ + 'mcf5208evb', + 'nextcube', + 'replay', + 'q800', + 'tuxrun', +] diff --git a/tests/functional/test_m68k_mcf5208evb.py b/tests/functional/m68k/test_mcf5208evb.py similarity index 100% rename from tests/functional/test_m68k_mcf5208evb.py rename to tests/functional/m68k/test_mcf5208evb.py diff 
--git a/tests/functional/test_m68k_nextcube.py b/tests/functional/m68k/test_nextcube.py similarity index 75% rename from tests/functional/test_m68k_nextcube.py rename to tests/functional/m68k/test_nextcube.py index 13c72bd136a82..e5e1c69dcbd7f 100755 --- a/tests/functional/test_m68k_nextcube.py +++ b/tests/functional/m68k/test_nextcube.py @@ -29,8 +29,15 @@ def check_bootrom_framebuffer(self, screenshot_path): self.vm.launch() self.log.info('VM launched, waiting for display') - # TODO: wait for the 'displaysurface_create 1120x832' trace-event. - time.sleep(2) + # Wait for the FPU test to finish, then the display is available, too: + while True: + res = self.vm.cmd('human-monitor-command', + command_line='info registers') + if ("F0 = 400e 8400000000000000" in res and + "F1 = 400e 83ff000000000000" in res and + "F2 = 400e 83ff000000000000" in res): + break + time.sleep(0.1) res = self.vm.cmd('human-monitor-command', command_line='screendump %s' % screenshot_path) @@ -44,7 +51,8 @@ def test_bootrom_framebuffer_size(self): self.check_bootrom_framebuffer(screenshot_path) from PIL import Image - width, height = Image.open(screenshot_path).size + with Image.open(screenshot_path) as image: + width, height = image.size self.assertEqual(width, 1120) self.assertEqual(height, 832) @@ -55,10 +63,10 @@ def test_bootrom_framebuffer_ocr_with_tesseract(self): self.check_bootrom_framebuffer(screenshot_path) lines = tesseract_ocr(screenshot_path) text = '\n'.join(lines) + self.assertIn('Backplane slot', text) + self.assertIn('Ethernet address', text) self.assertIn('Testing the FPU', text) - self.assertIn('System test failed. 
Error code', text) - self.assertIn('Boot command', text) - self.assertIn('Next>', text) + if __name__ == '__main__': QemuSystemTest.main() diff --git a/tests/functional/test_m68k_q800.py b/tests/functional/m68k/test_q800.py similarity index 100% rename from tests/functional/test_m68k_q800.py rename to tests/functional/m68k/test_q800.py diff --git a/tests/functional/test_m68k_replay.py b/tests/functional/m68k/test_replay.py similarity index 100% rename from tests/functional/test_m68k_replay.py rename to tests/functional/m68k/test_replay.py diff --git a/tests/functional/test_m68k_tuxrun.py b/tests/functional/m68k/test_tuxrun.py similarity index 100% rename from tests/functional/test_m68k_tuxrun.py rename to tests/functional/m68k/test_tuxrun.py diff --git a/tests/functional/meson.build b/tests/functional/meson.build index 8bebcd4d94ec3..725630d30826e 100644 --- a/tests/functional/meson.build +++ b/tests/functional/meson.build @@ -9,343 +9,34 @@ if get_option('tcg_interpreter') subdir_done() endif -# Timeouts for individual tests that can be slow e.g. 
with debugging enabled -test_timeouts = { - 'aarch64_aspeed_ast2700' : 600, - 'aarch64_aspeed_ast2700fc' : 600, - 'aarch64_device_passthrough' : 720, - 'aarch64_imx8mp_evk' : 240, - 'aarch64_raspi4' : 480, - 'aarch64_reverse_debug' : 180, - 'aarch64_rme_virt' : 1200, - 'aarch64_rme_sbsaref' : 1200, - 'aarch64_sbsaref_alpine' : 1200, - 'aarch64_sbsaref_freebsd' : 720, - 'aarch64_smmu' : 720, - 'aarch64_tuxrun' : 240, - 'aarch64_virt' : 360, - 'aarch64_virt_gpu' : 480, - 'acpi_bits' : 420, - 'arm_aspeed_palmetto' : 120, - 'arm_aspeed_romulus' : 120, - 'arm_aspeed_witherspoon' : 120, - 'arm_aspeed_ast2500' : 720, - 'arm_aspeed_ast2600' : 1200, - 'arm_aspeed_bletchley' : 480, - 'arm_aspeed_catalina' : 480, - 'arm_aspeed_gb200nvl_bmc' : 480, - 'arm_aspeed_rainier' : 480, - 'arm_bpim2u' : 500, - 'arm_collie' : 180, - 'arm_cubieboard' : 360, - 'arm_orangepi' : 540, - 'arm_quanta_gsj' : 240, - 'arm_raspi2' : 120, - 'arm_replay' : 240, - 'arm_tuxrun' : 240, - 'arm_sx1' : 360, - 'intel_iommu': 300, - 'mips_malta' : 480, - 'mipsel_malta' : 420, - 'mipsel_replay' : 480, - 'mips64_malta' : 240, - 'mips64el_malta' : 420, - 'mips64el_replay' : 180, - 'netdev_ethtool' : 180, - 'ppc_40p' : 240, - 'ppc64_hv' : 1000, - 'ppc64_powernv' : 480, - 'ppc64_pseries' : 480, - 'ppc64_replay' : 210, - 'ppc64_tuxrun' : 420, - 'ppc64_mac99' : 120, - 'riscv64_tuxrun' : 120, - 's390x_ccw_virtio' : 420, - 'sh4_tuxrun' : 240, - 'virtio_balloon': 120, - 'x86_64_kvm_xen' : 180, - 'x86_64_replay' : 480, -} - -tests_generic_system = [ - 'empty_cpu_model', - 'info_usernet', - 'version', -] - -tests_generic_linuxuser = [ -] - -tests_generic_bsduser = [ -] - -tests_aarch64_system_quick = [ - 'migration', -] - -tests_aarch64_system_thorough = [ - 'aarch64_aspeed_ast2700', - 'aarch64_aspeed_ast2700fc', - 'aarch64_device_passthrough', - 'aarch64_hotplug_pci', - 'aarch64_imx8mp_evk', - 'aarch64_raspi3', - 'aarch64_raspi4', - 'aarch64_replay', - 'aarch64_reverse_debug', - 'aarch64_rme_virt', - 
'aarch64_rme_sbsaref', - 'aarch64_sbsaref', - 'aarch64_sbsaref_alpine', - 'aarch64_sbsaref_freebsd', - 'aarch64_smmu', - 'aarch64_tcg_plugins', - 'aarch64_tuxrun', - 'aarch64_virt', - 'aarch64_virt_gpu', - 'aarch64_xen', - 'aarch64_xlnx_versal', - 'multiprocess', -] - -tests_alpha_system_quick = [ - 'migration', -] - -tests_alpha_system_thorough = [ - 'alpha_clipper', - 'alpha_replay', -] - -tests_arm_system_quick = [ - 'migration', -] - -tests_arm_system_thorough = [ - 'arm_aspeed_ast1030', - 'arm_aspeed_palmetto', - 'arm_aspeed_romulus', - 'arm_aspeed_witherspoon', - 'arm_aspeed_ast2500', - 'arm_aspeed_ast2600', - 'arm_aspeed_bletchley', - 'arm_aspeed_catalina', - 'arm_aspeed_gb200nvl_bmc', - 'arm_aspeed_rainier', - 'arm_bpim2u', - 'arm_canona1100', - 'arm_collie', - 'arm_cubieboard', - 'arm_emcraft_sf2', - 'arm_integratorcp', - 'arm_max78000fthr', - 'arm_microbit', - 'arm_orangepi', - 'arm_quanta_gsj', - 'arm_raspi2', - 'arm_realview', - 'arm_replay', - 'arm_smdkc210', - 'arm_stellaris', - 'arm_sx1', - 'arm_vexpress', - 'arm_virt', - 'arm_tuxrun', -] - -tests_arm_linuxuser_thorough = [ - 'arm_bflt', -] - -tests_avr_system_thorough = [ - 'avr_mega2560', - 'avr_uno', -] - -tests_hppa_system_quick = [ - 'hppa_seabios', -] - -tests_i386_system_quick = [ - 'migration', -] - -tests_i386_system_thorough = [ - 'i386_replay', - 'i386_tuxrun', -] - -tests_loongarch64_system_thorough = [ - 'loongarch64_virt', -] - -tests_m68k_system_thorough = [ - 'm68k_mcf5208evb', - 'm68k_nextcube', - 'm68k_replay', - 'm68k_q800', - 'm68k_tuxrun', -] - -tests_microblaze_system_thorough = [ - 'microblaze_replay', - 'microblaze_s3adsp1800' -] - -tests_microblazeel_system_thorough = [ - 'microblazeel_s3adsp1800' -] - -tests_mips_system_thorough = [ - 'mips_malta', - 'mips_replay', - 'mips_tuxrun', -] - -tests_mipsel_system_thorough = [ - 'mipsel_malta', - 'mipsel_replay', - 'mipsel_tuxrun', -] - -tests_mips64_system_thorough = [ - 'mips64_malta', - 'mips64_tuxrun', -] - 
-tests_mips64el_system_thorough = [ - 'mips64el_fuloong2e', - 'mips64el_loongson3v', - 'mips64el_malta', - 'mips64el_replay', - 'mips64el_tuxrun', -] - -tests_or1k_system_thorough = [ - 'or1k_replay', - 'or1k_sim', -] - -tests_ppc_system_quick = [ - 'migration', - 'ppc_74xx', -] - -tests_ppc_system_thorough = [ - 'ppc_40p', - 'ppc_amiga', - 'ppc_bamboo', - 'ppc_mac', - 'ppc_mpc8544ds', - 'ppc_replay', - 'ppc_sam460ex', - 'ppc_tuxrun', - 'ppc_virtex_ml507', -] - -tests_ppc64_system_quick = [ - 'migration', -] - -tests_ppc64_system_thorough = [ - 'ppc64_e500', - 'ppc64_hv', - 'ppc64_powernv', - 'ppc64_pseries', - 'ppc64_replay', - 'ppc64_reverse_debug', - 'ppc64_tuxrun', - 'ppc64_mac99', -] - -tests_riscv32_system_quick = [ - 'migration', - 'riscv_opensbi', -] - -tests_riscv32_system_thorough = [ - 'riscv32_tuxrun', -] - -tests_riscv64_system_quick = [ - 'migration', - 'riscv_opensbi', -] - -tests_riscv64_system_thorough = [ - 'riscv64_tuxrun', -] - -tests_rx_system_thorough = [ - 'rx_gdbsim', -] - -tests_s390x_system_thorough = [ - 's390x_ccw_virtio', - 's390x_pxelinux', - 's390x_replay', - 's390x_topology', - 's390x_tuxrun', -] - -tests_sh4_system_thorough = [ - 'sh4_r2d', - 'sh4_tuxrun', -] - -tests_sh4eb_system_thorough = [ - 'sh4eb_r2d', -] - -tests_sparc_system_quick = [ - 'migration', -] - -tests_sparc_system_thorough = [ - 'sparc_replay', - 'sparc_sun4m', -] - -tests_sparc64_system_quick = [ - 'migration', -] - -tests_sparc64_system_thorough = [ - 'sparc64_sun4u', - 'sparc64_tuxrun', -] - -tests_x86_64_system_quick = [ - 'cpu_queries', - 'mem_addr_space', - 'migration', - 'pc_cpu_hotplug_props', - 'virtio_version', - 'x86_cpu_model_versions', - 'vnc', - 'memlock', -] - -tests_x86_64_system_thorough = [ - 'acpi_bits', - 'intel_iommu', - 'linux_initrd', - 'multiprocess', - 'netdev_ethtool', - 'virtio_balloon', - 'virtio_gpu', - 'x86_64_hotplug_blk', - 'x86_64_hotplug_cpu', - 'x86_64_kvm_xen', - 'x86_64_replay', - 'x86_64_reverse_debug', - 'x86_64_tuxrun', -] - 
-tests_xtensa_system_thorough = [ - 'xtensa_lx60', - 'xtensa_replay', -] +subdir('aarch64') +subdir('alpha') +subdir('arm') +subdir('avr') +subdir('hppa') +subdir('i386') +subdir('loongarch64') +subdir('m68k') +subdir('microblaze') +subdir('microblazeel') +subdir('mips') +subdir('mipsel') +subdir('mips64') +subdir('mips64el') +subdir('or1k') +subdir('ppc') +subdir('ppc64') +subdir('riscv32') +subdir('riscv64') +subdir('rx') +subdir('s390x') +subdir('sh4') +subdir('sh4eb') +subdir('sparc') +subdir('sparc64') +subdir('x86_64') +subdir('xtensa') +subdir('generic') precache_all = [] foreach speed : ['quick', 'thorough'] @@ -386,9 +77,19 @@ foreach speed : ['quick', 'thorough'] test_env.set('PYTHONPATH', meson.project_source_root() / 'python:' + meson.current_source_dir()) + # Define the GDB environment variable if gdb is available. + gdb = get_option('gdb') + if gdb != '' + test_env.set('QEMU_TEST_GDB', gdb) + endif + foreach test : target_tests testname = '@0@-@1@'.format(target_base, test) - testfile = 'test_' + test + '.py' + if fs.exists('generic' / 'test_' + test + '.py') + testfile = 'generic' / 'test_' + test + '.py' + else + testfile = target_base / 'test_' + test + '.py' + endif testpath = meson.current_source_dir() / testfile teststamp = testname + '.tstamp' test_precache_env = environment() @@ -402,6 +103,11 @@ foreach speed : ['quick', 'thorough'] build_by_default: false, env: test_precache_env) precache_all += precache + if is_variable('test_' + target_base + '_timeouts') + time_out = get_variable('test_' + target_base + '_timeouts').get(test, 90) + else + time_out = 90 + endif # Ideally we would add 'precache' to 'depends' here, such that # 'build_by_default: false' lets the pre-caching automatically @@ -417,8 +123,8 @@ foreach speed : ['quick', 'thorough'] env: test_env, args: [testpath], protocol: 'tap', - timeout: test_timeouts.get(test, 90), - priority: test_timeouts.get(test, 90), + timeout: time_out, + priority: time_out, suite: suites) endforeach 
endforeach diff --git a/tests/functional/microblaze/meson.build b/tests/functional/microblaze/meson.build new file mode 100644 index 0000000000000..8069ca9be6014 --- /dev/null +++ b/tests/functional/microblaze/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_microblaze_system_thorough = [ + 'replay', + 's3adsp1800' +] diff --git a/tests/functional/test_microblaze_replay.py b/tests/functional/microblaze/test_replay.py similarity index 100% rename from tests/functional/test_microblaze_replay.py rename to tests/functional/microblaze/test_replay.py diff --git a/tests/functional/test_microblaze_s3adsp1800.py b/tests/functional/microblaze/test_s3adsp1800.py similarity index 100% rename from tests/functional/test_microblaze_s3adsp1800.py rename to tests/functional/microblaze/test_s3adsp1800.py diff --git a/tests/functional/microblazeel/meson.build b/tests/functional/microblazeel/meson.build new file mode 100644 index 0000000000000..27619dc5a9a63 --- /dev/null +++ b/tests/functional/microblazeel/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_microblazeel_system_thorough = [ + 's3adsp1800' +] diff --git a/tests/functional/test_microblazeel_s3adsp1800.py b/tests/functional/microblazeel/test_s3adsp1800.py similarity index 92% rename from tests/functional/test_microblazeel_s3adsp1800.py rename to tests/functional/microblazeel/test_s3adsp1800.py index 915902d48bdef..75ce8856ed151 100755 --- a/tests/functional/test_microblazeel_s3adsp1800.py +++ b/tests/functional/microblazeel/test_s3adsp1800.py @@ -7,7 +7,7 @@ # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-from test_microblaze_s3adsp1800 import MicroblazeMachine +from microblaze.test_s3adsp1800 import MicroblazeMachine class MicroblazeLittleEndianMachine(MicroblazeMachine): diff --git a/tests/functional/test_migration.py b/tests/functional/migration.py old mode 100755 new mode 100644 similarity index 74% rename from tests/functional/test_migration.py rename to tests/functional/migration.py index c4393c3543469..073955448328d --- a/tests/functional/test_migration.py +++ b/tests/functional/migration.py @@ -1,6 +1,6 @@ -#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-or-later # -# Migration test +# Migration test base class # # Copyright (c) 2019 Red Hat, Inc. # @@ -14,7 +14,7 @@ import tempfile import time -from qemu_test import QemuSystemTest, skipIfMissingCommands +from qemu_test import QemuSystemTest, which from qemu_test.ports import Ports @@ -41,24 +41,7 @@ def assert_migration(self, src_vm, dst_vm): self.assertEqual(dst_vm.cmd('query-status')['status'], 'running') self.assertEqual(src_vm.cmd('query-status')['status'],'postmigrate') - def select_machine(self): - target_machine = { - 'aarch64': 'quanta-gsj', - 'alpha': 'clipper', - 'arm': 'npcm750-evb', - 'i386': 'isapc', - 'ppc': 'sam460ex', - 'ppc64': 'mac99', - 'riscv32': 'spike', - 'riscv64': 'virt', - 'sparc': 'SS-4', - 'sparc64': 'sun4u', - 'x86_64': 'microvm', - } - self.set_machine(target_machine[self.arch]) - def do_migrate(self, dest_uri, src_uri=None): - self.select_machine() dest_vm = self.get_vm('-incoming', dest_uri, name="dest-qemu") dest_vm.add_args('-nodefaults') dest_vm.launch() @@ -76,23 +59,21 @@ def _get_free_port(self, ports): self.skipTest('Failed to find a free port') return port - def test_migration_with_tcp_localhost(self): + def migration_with_tcp_localhost(self): with Ports() as ports: dest_uri = 'tcp:localhost:%u' % self._get_free_port(ports) self.do_migrate(dest_uri) - def test_migration_with_unix(self): + def migration_with_unix(self): with 
tempfile.TemporaryDirectory(prefix='socket_') as socket_path: dest_uri = 'unix:%s/qemu-test.sock' % socket_path self.do_migrate(dest_uri) - @skipIfMissingCommands('ncat') - def test_migration_with_exec(self): + def migration_with_exec(self): + if not which('ncat'): + self.skipTest('ncat is not available') with Ports() as ports: free_port = self._get_free_port(ports) dest_uri = 'exec:ncat -l localhost %u' % free_port src_uri = 'exec:ncat localhost %u' % free_port self.do_migrate(dest_uri, src_uri) - -if __name__ == '__main__': - QemuSystemTest.main() diff --git a/tests/functional/mips/meson.build b/tests/functional/mips/meson.build new file mode 100644 index 0000000000000..49aaf53b02db2 --- /dev/null +++ b/tests/functional/mips/meson.build @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_mips_timeouts = { + 'malta' : 480, +} + +tests_mips_system_thorough = [ + 'malta', + 'replay', + 'tuxrun', +] diff --git a/tests/functional/test_mips_malta.py b/tests/functional/mips/test_malta.py similarity index 100% rename from tests/functional/test_mips_malta.py rename to tests/functional/mips/test_malta.py diff --git a/tests/functional/test_mips_replay.py b/tests/functional/mips/test_replay.py similarity index 100% rename from tests/functional/test_mips_replay.py rename to tests/functional/mips/test_replay.py diff --git a/tests/functional/test_mips_tuxrun.py b/tests/functional/mips/test_tuxrun.py similarity index 100% rename from tests/functional/test_mips_tuxrun.py rename to tests/functional/mips/test_tuxrun.py diff --git a/tests/functional/mips64/meson.build b/tests/functional/mips64/meson.build new file mode 100644 index 0000000000000..3ff2118987927 --- /dev/null +++ b/tests/functional/mips64/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_mips64_timeouts = { + 'malta' : 240, +} + +tests_mips64_system_thorough = [ + 'malta', + 'tuxrun', +] diff --git a/tests/functional/test_mips64_malta.py 
b/tests/functional/mips64/test_malta.py similarity index 96% rename from tests/functional/test_mips64_malta.py rename to tests/functional/mips64/test_malta.py index 53c3e0c122195..a553d3c5bc736 100755 --- a/tests/functional/test_mips64_malta.py +++ b/tests/functional/mips64/test_malta.py @@ -5,7 +5,7 @@ # SPDX-License-Identifier: GPL-2.0-or-later from qemu_test import LinuxKernelTest, Asset -from test_mips_malta import mips_check_wheezy +from mips.test_malta import mips_check_wheezy class MaltaMachineConsole(LinuxKernelTest): diff --git a/tests/functional/test_mips64_tuxrun.py b/tests/functional/mips64/test_tuxrun.py similarity index 100% rename from tests/functional/test_mips64_tuxrun.py rename to tests/functional/mips64/test_tuxrun.py diff --git a/tests/functional/mips64el/meson.build b/tests/functional/mips64el/meson.build new file mode 100644 index 0000000000000..69ec50174c451 --- /dev/null +++ b/tests/functional/mips64el/meson.build @@ -0,0 +1,14 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_mips64el_timeouts = { + 'malta' : 420, + 'replay' : 180, +} + +tests_mips64el_system_thorough = [ + 'fuloong2e', + 'loongson3v', + 'malta', + 'replay', + 'tuxrun', +] diff --git a/tests/functional/test_mips64el_fuloong2e.py b/tests/functional/mips64el/test_fuloong2e.py similarity index 100% rename from tests/functional/test_mips64el_fuloong2e.py rename to tests/functional/mips64el/test_fuloong2e.py diff --git a/tests/functional/test_mips64el_loongson3v.py b/tests/functional/mips64el/test_loongson3v.py similarity index 100% rename from tests/functional/test_mips64el_loongson3v.py rename to tests/functional/mips64el/test_loongson3v.py diff --git a/tests/functional/test_mips64el_malta.py b/tests/functional/mips64el/test_malta.py similarity index 97% rename from tests/functional/test_mips64el_malta.py rename to tests/functional/mips64el/test_malta.py index 3cc79b74c181a..170147bfcc206 100755 --- a/tests/functional/test_mips64el_malta.py +++ 
b/tests/functional/mips64el/test_malta.py @@ -16,7 +16,7 @@ from qemu_test import exec_command_and_wait_for_pattern from qemu_test import skipIfMissingImports, skipFlakyTest, skipUntrustedTest -from test_mips_malta import mips_check_wheezy +from mips.test_malta import mips_check_wheezy class MaltaMachineConsole(LinuxKernelTest): @@ -159,7 +159,6 @@ def do_test_i6400_framebuffer_logo(self, cpu_cores_count): command_line='screendump %s' % screendump_path) if 'unknown command' in res: self.skipTest('screendump not available') - logger = logging.getLogger('framebuffer') match_threshold = 0.95 screendump_bgr = cv2.imread(screendump_path, cv2.IMREAD_COLOR) @@ -171,7 +170,7 @@ def do_test_i6400_framebuffer_logo(self, cpu_cores_count): h, w = tuxlogo_bgr.shape[:2] debug_png = os.getenv('QEMU_TEST_CV2_SCREENDUMP_PNG_PATH') for tuxlogo_count, pt in enumerate(zip(*loc[::-1]), start=1): - logger.debug('found Tux at position (x, y) = %s', pt) + self.log.debug('found Tux at position (x, y) = %s', pt) cv2.rectangle(screendump_bgr, pt, (pt[0] + w, pt[1] + h), (0, 0, 255), 2) if debug_png: @@ -191,7 +190,7 @@ def test_mips_malta_i6400_framebuffer_logo_8cores(self): self.do_test_i6400_framebuffer_logo(8) -from test_mipsel_malta import MaltaMachineYAMON +from mipsel.test_malta import MaltaMachineYAMON if __name__ == '__main__': LinuxKernelTest.main() diff --git a/tests/functional/test_mips64el_replay.py b/tests/functional/mips64el/test_replay.py similarity index 100% rename from tests/functional/test_mips64el_replay.py rename to tests/functional/mips64el/test_replay.py diff --git a/tests/functional/test_mips64el_tuxrun.py b/tests/functional/mips64el/test_tuxrun.py similarity index 100% rename from tests/functional/test_mips64el_tuxrun.py rename to tests/functional/mips64el/test_tuxrun.py diff --git a/tests/functional/mipsel/meson.build b/tests/functional/mipsel/meson.build new file mode 100644 index 0000000000000..8bfdf0649b548 --- /dev/null +++ b/tests/functional/mipsel/meson.build 
@@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_mipsel_timeouts = { + 'malta' : 420, + 'replay' : 480, +} + +tests_mipsel_system_thorough = [ + 'malta', + 'replay', + 'tuxrun', +] diff --git a/tests/functional/test_mipsel_malta.py b/tests/functional/mipsel/test_malta.py similarity index 98% rename from tests/functional/test_mipsel_malta.py rename to tests/functional/mipsel/test_malta.py index 9ee2884da8e0d..427e163d19d7f 100755 --- a/tests/functional/test_mipsel_malta.py +++ b/tests/functional/mipsel/test_malta.py @@ -13,7 +13,7 @@ from qemu_test import interrupt_interactive_console_until_pattern from qemu_test import wait_for_console_pattern -from test_mips_malta import mips_check_wheezy +from mips.test_malta import mips_check_wheezy class MaltaMachineConsole(LinuxKernelTest): diff --git a/tests/functional/test_mipsel_replay.py b/tests/functional/mipsel/test_replay.py old mode 100644 new mode 100755 similarity index 100% rename from tests/functional/test_mipsel_replay.py rename to tests/functional/mipsel/test_replay.py diff --git a/tests/functional/test_mipsel_tuxrun.py b/tests/functional/mipsel/test_tuxrun.py similarity index 100% rename from tests/functional/test_mipsel_tuxrun.py rename to tests/functional/mipsel/test_tuxrun.py diff --git a/tests/functional/test_multiprocess.py b/tests/functional/multiprocess.py old mode 100755 new mode 100644 similarity index 58% rename from tests/functional/test_multiprocess.py rename to tests/functional/multiprocess.py index 751cf10e635cd..6a06c1eda19a8 --- a/tests/functional/test_multiprocess.py +++ b/tests/functional/multiprocess.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-or-later # # Test for multiprocess qemu # @@ -9,33 +9,13 @@ import os import socket -from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern +from qemu_test import QemuSystemTest, wait_for_console_pattern from qemu_test import exec_command, exec_command_and_wait_for_pattern class 
Multiprocess(QemuSystemTest): KERNEL_COMMON_COMMAND_LINE = 'printk.time=0 ' - ASSET_KERNEL_X86 = Asset( - ('https://archives.fedoraproject.org/pub/archive/fedora/linux' - '/releases/31/Everything/x86_64/os/images/pxeboot/vmlinuz'), - 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') - - ASSET_INITRD_X86 = Asset( - ('https://archives.fedoraproject.org/pub/archive/fedora/linux' - '/releases/31/Everything/x86_64/os/images/pxeboot/initrd.img'), - '3b6cb5c91a14c42e2f61520f1689264d865e772a1f0069e660a800d31dd61fb9') - - ASSET_KERNEL_AARCH64 = Asset( - ('https://archives.fedoraproject.org/pub/archive/fedora/linux' - '/releases/31/Everything/aarch64/os/images/pxeboot/vmlinuz'), - '3ae07fcafbfc8e4abeb693035a74fe10698faae15e9ccd48882a9167800c1527') - - ASSET_INITRD_AARCH64 = Asset( - ('https://archives.fedoraproject.org/pub/archive/fedora/linux' - '/releases/31/Everything/aarch64/os/images/pxeboot/initrd.img'), - '9fd230cab10b1dafea41cf00150e6669d37051fad133bd618d2130284e16d526') - def do_test(self, kernel_asset, initrd_asset, kernel_command_line, machine_type): """Main test method""" @@ -83,18 +63,5 @@ def do_test(self, kernel_asset, initrd_asset, 'cat /sys/bus/pci/devices/*/uevent', 'PCI_ID=1000:0012') - def test_multiprocess(self): - kernel_command_line = self.KERNEL_COMMON_COMMAND_LINE - if self.arch == 'x86_64': - kernel_command_line += 'console=ttyS0 rdinit=/bin/bash' - self.do_test(self.ASSET_KERNEL_X86, self.ASSET_INITRD_X86, - kernel_command_line, 'pc') - elif self.arch == 'aarch64': - kernel_command_line += 'rdinit=/bin/bash console=ttyAMA0' - self.do_test(self.ASSET_KERNEL_AARCH64, self.ASSET_INITRD_AARCH64, - kernel_command_line, 'virt,gic-version=3') - else: - assert False - -if __name__ == '__main__': - QemuSystemTest.main() + proxy_sock.close() + remote_sock.close() diff --git a/tests/functional/or1k/meson.build b/tests/functional/or1k/meson.build new file mode 100644 index 0000000000000..e246e2ab08dd0 --- /dev/null +++ 
b/tests/functional/or1k/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_or1k_system_thorough = [ + 'replay', + 'sim', +] diff --git a/tests/functional/test_or1k_replay.py b/tests/functional/or1k/test_replay.py similarity index 100% rename from tests/functional/test_or1k_replay.py rename to tests/functional/or1k/test_replay.py diff --git a/tests/functional/test_or1k_sim.py b/tests/functional/or1k/test_sim.py similarity index 100% rename from tests/functional/test_or1k_sim.py rename to tests/functional/or1k/test_sim.py diff --git a/tests/functional/ppc/meson.build b/tests/functional/ppc/meson.build new file mode 100644 index 0000000000000..ae061fe5a6125 --- /dev/null +++ b/tests/functional/ppc/meson.build @@ -0,0 +1,23 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_ppc_timeouts = { + '40p' : 240, +} + +tests_ppc_system_quick = [ + 'migration', + '74xx', +] + +tests_ppc_system_thorough = [ + '40p', + 'amiga', + 'bamboo', + 'mac', + 'mpc8544ds', + 'ppe42', + 'replay', + 'sam460ex', + 'tuxrun', + 'virtex_ml507', +] diff --git a/tests/functional/test_ppc_40p.py b/tests/functional/ppc/test_40p.py similarity index 100% rename from tests/functional/test_ppc_40p.py rename to tests/functional/ppc/test_40p.py diff --git a/tests/functional/test_ppc_74xx.py b/tests/functional/ppc/test_74xx.py similarity index 100% rename from tests/functional/test_ppc_74xx.py rename to tests/functional/ppc/test_74xx.py diff --git a/tests/functional/test_ppc_amiga.py b/tests/functional/ppc/test_amiga.py similarity index 100% rename from tests/functional/test_ppc_amiga.py rename to tests/functional/ppc/test_amiga.py diff --git a/tests/functional/test_ppc_bamboo.py b/tests/functional/ppc/test_bamboo.py similarity index 100% rename from tests/functional/test_ppc_bamboo.py rename to tests/functional/ppc/test_bamboo.py diff --git a/tests/functional/test_ppc_mac.py b/tests/functional/ppc/test_mac.py similarity index 100% rename from 
tests/functional/test_ppc_mac.py rename to tests/functional/ppc/test_mac.py diff --git a/tests/functional/ppc/test_migration.py b/tests/functional/ppc/test_migration.py new file mode 100755 index 0000000000000..a8692826d35e4 --- /dev/null +++ b/tests/functional/ppc/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# ppc migration test + +from migration import MigrationTest + + +class PpcMigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('sam460ex') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('sam460ex') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('sam460ex') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/test_ppc_mpc8544ds.py b/tests/functional/ppc/test_mpc8544ds.py similarity index 100% rename from tests/functional/test_ppc_mpc8544ds.py rename to tests/functional/ppc/test_mpc8544ds.py diff --git a/tests/functional/ppc/test_ppe42.py b/tests/functional/ppc/test_ppe42.py new file mode 100644 index 0000000000000..26bbe11b2d37d --- /dev/null +++ b/tests/functional/ppc/test_ppe42.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +# +# Functional tests for the IBM PPE42 processor +# +# Copyright (c) 2025, IBM Corporation +# +# SPDX-License-Identifier: GPL-2.0-or-later + +from qemu_test import QemuSystemTest, Asset +import asyncio + +class Ppe42Machine(QemuSystemTest): + + timeout = 90 + poll_period = 1.0 + + ASSET_PPE42_TEST_IMAGE = Asset( + ('https://github.com/milesg-github/ppe42-tests/raw/refs/heads/main/' + 'images/ppe42-test.out'), + '03c1ac0fb7f6c025102a02776a93b35101dae7c14b75e4eab36a337e39042ea8') + + def _test_completed(self): + self.log.info("Checking for test completion...") + try: + output = self.vm.cmd('human-monitor-command', + command_line='info registers') + except Exception as err: + 
self.log.debug(f"'info registers' cmd failed due to {err=}," + " {type(err)=}") + raise + + self.log.info(output) + if "NIP fff80200" in output: + self.log.info("") + return True + else: + self.log.info("") + return False + + def _wait_pass_fail(self, timeout): + while not self._test_completed(): + if timeout >= self.poll_period: + timeout = timeout - self.poll_period + self.log.info(f"Waiting {self.poll_period} seconds for test" + " to complete...") + e = None + try: + e = self.vm.event_wait('STOP', self.poll_period) + + except asyncio.TimeoutError: + self.log.info("Poll period ended.") + pass + + except Exception as err: + self.log.debug(f"event_wait() failed due to {err=}," + " {type(err)=}") + raise + + if e != None: + self.log.debug(f"Execution stopped: {e}") + self.log.debug("Exiting due to test failure") + self.fail("Failure detected!") + break + else: + self.fail("Timed out waiting for test completion.") + + def test_ppe42_instructions(self): + self.set_machine('ppe42_machine') + self.require_accelerator("tcg") + image_path = self.ASSET_PPE42_TEST_IMAGE.fetch() + self.vm.add_args('-nographic') + self.vm.add_args('-device', f'loader,file={image_path}') + self.vm.add_args('-device', 'loader,addr=0xfff80040,cpu-num=0') + self.vm.add_args('-action', 'panic=pause') + self.vm.launch() + self._wait_pass_fail(self.timeout) + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_ppc_replay.py b/tests/functional/ppc/test_replay.py similarity index 100% rename from tests/functional/test_ppc_replay.py rename to tests/functional/ppc/test_replay.py diff --git a/tests/functional/test_ppc_sam460ex.py b/tests/functional/ppc/test_sam460ex.py old mode 100644 new mode 100755 similarity index 100% rename from tests/functional/test_ppc_sam460ex.py rename to tests/functional/ppc/test_sam460ex.py diff --git a/tests/functional/test_ppc_tuxrun.py b/tests/functional/ppc/test_tuxrun.py similarity index 100% rename from 
tests/functional/test_ppc_tuxrun.py rename to tests/functional/ppc/test_tuxrun.py diff --git a/tests/functional/test_ppc_virtex_ml507.py b/tests/functional/ppc/test_virtex_ml507.py similarity index 100% rename from tests/functional/test_ppc_virtex_ml507.py rename to tests/functional/ppc/test_virtex_ml507.py diff --git a/tests/functional/ppc64/meson.build b/tests/functional/ppc64/meson.build new file mode 100644 index 0000000000000..1fa0a70f7ed88 --- /dev/null +++ b/tests/functional/ppc64/meson.build @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_ppc64_timeouts = { + 'hv' : 1000, + 'mac99' : 120, + 'powernv' : 480, + 'pseries' : 480, + 'replay' : 210, + 'tuxrun' : 420, +} + +tests_ppc64_system_quick = [ + 'migration', + 'vmstate', +] + +tests_ppc64_system_thorough = [ + 'e500', + 'hv', + 'mac99', + 'powernv', + 'pseries', + 'replay', + 'reverse_debug', + 'tuxrun', +] diff --git a/tests/functional/test_ppc64_e500.py b/tests/functional/ppc64/test_e500.py similarity index 100% rename from tests/functional/test_ppc64_e500.py rename to tests/functional/ppc64/test_e500.py diff --git a/tests/functional/test_ppc64_hv.py b/tests/functional/ppc64/test_hv.py similarity index 100% rename from tests/functional/test_ppc64_hv.py rename to tests/functional/ppc64/test_hv.py diff --git a/tests/functional/test_ppc64_mac99.py b/tests/functional/ppc64/test_mac99.py similarity index 100% rename from tests/functional/test_ppc64_mac99.py rename to tests/functional/ppc64/test_mac99.py diff --git a/tests/functional/ppc64/test_migration.py b/tests/functional/ppc64/test_migration.py new file mode 100755 index 0000000000000..5dfdaaf709ac7 --- /dev/null +++ b/tests/functional/ppc64/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# ppc migration test + +from migration import MigrationTest + + +class PpcMigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('mac99') + 
self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('mac99') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('mac99') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/test_ppc64_powernv.py b/tests/functional/ppc64/test_powernv.py similarity index 79% rename from tests/functional/test_ppc64_powernv.py rename to tests/functional/ppc64/test_powernv.py index 685e2178ed78f..9ada832b78161 100755 --- a/tests/functional/test_ppc64_powernv.py +++ b/tests/functional/ppc64/test_powernv.py @@ -18,9 +18,14 @@ class powernvMachine(LinuxKernelTest): good_message = 'VFS: Cannot open root device' ASSET_KERNEL = Asset( - ('https://archives.fedoraproject.org/pub/archive/fedora-secondary/' - 'releases/29/Everything/ppc64le/os/ppc/ppc64/vmlinuz'), - '383c2f5c23bc0d9d32680c3924d3fd7ee25cc5ef97091ac1aa5e1d853422fc5f') + ('https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/' + 'buildroot/qemu_ppc64le_powernv8-2025.02/vmlinux'), + '6fd29aff9ad4362511ea5d0acbb510667c7031928e97d64ec15bbc5daf4b8151') + + ASSET_INITRD = Asset( + ('https://github.com/legoater/qemu-ppc-boot/raw/refs/heads/main/' + 'buildroot/qemu_ppc64le_powernv8-2025.02/rootfs.ext2'), + 'aee2192b692077c4bde31cb56ce474424b358f17cec323d5c94af3970c9aada2') def do_test_linux_boot(self, command_line = KERNEL_COMMON_COMMAND_LINE): self.require_accelerator("tcg") @@ -78,27 +83,24 @@ def test_linux_big_boot(self): wait_for_console_pattern(self, console_pattern, self.panic_message) wait_for_console_pattern(self, self.good_message, self.panic_message) - - ASSET_EPAPR_KERNEL = Asset( - ('https://github.com/open-power/op-build/releases/download/v2.7/' - 'zImage.epapr'), - '0ab237df661727e5392cee97460e8674057a883c5f74381a128fa772588d45cd') - def do_test_ppc64_powernv(self, proc): self.require_accelerator("tcg") - kernel_path = self.ASSET_EPAPR_KERNEL.fetch() + kernel_path = 
self.ASSET_KERNEL.fetch() + initrd_path = self.ASSET_INITRD.fetch() self.vm.set_console() self.vm.add_args('-kernel', kernel_path, - '-append', 'console=tty0 console=hvc0', + '-drive', + f'file={initrd_path},format=raw,if=none,id=drive0,readonly=on', + '-append', 'root=/dev/nvme0n1 console=tty0 console=hvc0', '-device', 'pcie-pci-bridge,id=bridge1,bus=pcie.1,addr=0x0', - '-device', 'nvme,bus=pcie.2,addr=0x0,serial=1234', + '-device', 'nvme,drive=drive0,bus=pcie.2,addr=0x0,serial=1234', '-device', 'e1000e,bus=bridge1,addr=0x3', '-device', 'nec-usb-xhci,bus=bridge1,addr=0x2') self.vm.launch() self.wait_for_console_pattern("CPU: " + proc + " generation processor") - self.wait_for_console_pattern("zImage starting: loaded") - self.wait_for_console_pattern("Run /init as init process") + self.wait_for_console_pattern("INIT: Starting kernel at ") + self.wait_for_console_pattern("Run /sbin/init as init process") # Device detection output driven by udev probing is sometimes cut off # from console output, suspect S14silence-console init script. 
@@ -114,5 +116,9 @@ def test_powernv10(self): self.set_machine('powernv10') self.do_test_ppc64_powernv('P10') + def test_powernv11(self): + self.set_machine('powernv11') + self.do_test_ppc64_powernv('Power11') + if __name__ == '__main__': LinuxKernelTest.main() diff --git a/tests/functional/test_ppc64_pseries.py b/tests/functional/ppc64/test_pseries.py similarity index 100% rename from tests/functional/test_ppc64_pseries.py rename to tests/functional/ppc64/test_pseries.py diff --git a/tests/functional/test_ppc64_replay.py b/tests/functional/ppc64/test_replay.py similarity index 100% rename from tests/functional/test_ppc64_replay.py rename to tests/functional/ppc64/test_replay.py diff --git a/tests/functional/test_ppc64_reverse_debug.py b/tests/functional/ppc64/test_reverse_debug.py similarity index 76% rename from tests/functional/test_ppc64_reverse_debug.py rename to tests/functional/ppc64/test_reverse_debug.py index 5931adef5a9b5..69551fb84df00 100755 --- a/tests/functional/test_ppc64_reverse_debug.py +++ b/tests/functional/ppc64/test_reverse_debug.py @@ -2,39 +2,36 @@ # # SPDX-License-Identifier: GPL-2.0-or-later # -# Reverse debugging test +# Reverse debugging test for ppc64 # # Copyright (c) 2020 ISP RAS +# Copyright (c) 2025 Linaro Limited # # Author: # Pavel Dovgalyuk +# Gustavo Romero (Run without Avocado) # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from qemu_test import skipIfMissingImports, skipFlakyTest +from qemu_test import skipFlakyTest from reverse_debugging import ReverseDebugging -@skipIfMissingImports('avocado.utils') class ReverseDebugging_ppc64(ReverseDebugging): - REG_PC = 0x40 - @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/1992") def test_ppc64_pseries(self): self.set_machine('pseries') # SLOF branches back to its entry point, which causes this test # to take the 'hit a breakpoint again' path. 
That's not a problem, # just slightly different than the other machines. - self.endian_is_le = False - self.reverse_debugging() + self.reverse_debugging(gdb_arch='powerpc:common64') @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/1992") def test_ppc64_powernv(self): self.set_machine('powernv') - self.endian_is_le = False - self.reverse_debugging() + self.reverse_debugging(gdb_arch='powerpc:common64') if __name__ == '__main__': diff --git a/tests/functional/test_ppc64_tuxrun.py b/tests/functional/ppc64/test_tuxrun.py similarity index 100% rename from tests/functional/test_ppc64_tuxrun.py rename to tests/functional/ppc64/test_tuxrun.py diff --git a/tests/functional/qemu_test/__init__.py b/tests/functional/qemu_test/__init__.py index 6e666a059fc69..320193591b28f 100644 --- a/tests/functional/qemu_test/__init__.py +++ b/tests/functional/qemu_test/__init__.py @@ -15,6 +15,8 @@ from .linuxkernel import LinuxKernelTest from .decorators import skipIfMissingCommands, skipIfNotMachine, \ skipFlakyTest, skipUntrustedTest, skipBigDataTest, skipSlowTest, \ - skipIfMissingImports, skipIfOperatingSystem, skipLockedMemoryTest + skipIfMissingImports, skipIfOperatingSystem, skipLockedMemoryTest, \ + skipIfMissingEnv from .archive import archive_extract from .uncompress import uncompress +from .gdb import GDB diff --git a/tests/functional/qemu_test/asset.py b/tests/functional/qemu_test/asset.py index 704b84d0ea6ef..ab3a7bb591d0a 100644 --- a/tests/functional/qemu_test/asset.py +++ b/tests/functional/qemu_test/asset.py @@ -10,12 +10,13 @@ import os import stat import sys +import time import unittest import urllib.request from time import sleep from pathlib import Path from shutil import copyfileobj -from urllib.error import HTTPError +from urllib.error import HTTPError, URLError class AssetError(Exception): def __init__(self, asset, msg, transient=False): @@ -72,6 +73,10 @@ def _check(self, cache_file): return self.hash == hl.hexdigest() def valid(self): + if 
os.getenv("QEMU_TEST_REFRESH_CACHE", None) is not None: + self.log.info("Force refresh of asset %s", self.url) + return False + return self.cache_file.exists() and self._check(self.cache_file) def fetchable(self): @@ -109,6 +114,16 @@ def _wait_for_other_download(self, tmp_cache_file): self.log.debug("Time out while waiting for %s!", tmp_cache_file) raise + def _save_time_stamp(self): + ''' + Update the time stamp of the asset in the cache. Unfortunately, we + cannot use the modification or access time of the asset file itself, + since e.g. the functional jobs in the gitlab CI reload the files + from the gitlab cache and thus always have recent file time stamps, + so we have to save our asset time stamp to a separate file instead. + ''' + self.cache_file.with_suffix(".stamp").write_text(f"{int(time.time())}") + def fetch(self): if not self.cache_dir.exists(): self.cache_dir.mkdir(parents=True, exist_ok=True) @@ -116,6 +131,7 @@ def fetch(self): if self.valid(): self.log.debug("Using cached asset %s for %s", self.cache_file, self.url) + self._save_time_stamp() return str(self.cache_file) if not self.fetchable(): @@ -167,9 +183,25 @@ def fetch(self): raise AssetError(self, "Unable to download: " "HTTP error %d" % e.code) continue + except URLError as e: + # This is typically a network/service level error + # eg urlopen error [Errno 110] Connection timed out> + tmp_cache_file.unlink() + self.log.error("Unable to download %s: URL error %s", + self.url, e.reason) + raise AssetError(self, "Unable to download: URL error %s" % + e.reason, transient=True) + except ConnectionError as e: + # A socket connection failure, such as dropped conn + # or refused conn + tmp_cache_file.unlink() + self.log.error("Unable to download %s: Connection error %s", + self.url, e) + continue except Exception as e: tmp_cache_file.unlink() - raise AssetError(self, "Unable to download: " % e) + raise AssetError(self, "Unable to download: %s" % e, + transient=True) if not 
os.path.exists(tmp_cache_file): raise AssetError(self, "Download retries exceeded", transient=True) @@ -188,6 +220,7 @@ def fetch(self): tmp_cache_file.unlink() raise AssetError(self, "Hash does not match %s" % self.hash) tmp_cache_file.replace(self.cache_file) + self._save_time_stamp() # Remove write perms to stop tests accidentally modifying them os.chmod(self.cache_file, stat.S_IRUSR | stat.S_IRGRP) @@ -205,7 +238,6 @@ def precache_test(test): log.addHandler(handler) for name, asset in vars(test.__class__).items(): if name.startswith("ASSET_") and type(asset) == Asset: - log.info("Attempting to cache '%s'" % asset) try: asset.fetch() except AssetError as e: diff --git a/tests/functional/qemu_test/cmd.py b/tests/functional/qemu_test/cmd.py index dc5f422b77db5..f544566245ba3 100644 --- a/tests/functional/qemu_test/cmd.py +++ b/tests/functional/qemu_test/cmd.py @@ -45,13 +45,16 @@ def is_readable_executable_file(path): # If end of line is seen, with neither @success or @failure # return False # +# In both cases, also return the contents of the line (in bytes) +# up to that point. +# # If @failure is seen, then mark @test as failed def _console_read_line_until_match(test, vm, success, failure): msg = bytes([]) done = False while True: c = vm.console_socket.recv(1) - if c is None: + if not c: done = True test.fail( f"EOF in console, expected '{success}'") @@ -76,10 +79,23 @@ def _console_read_line_until_match(test, vm, success, failure): except: console_logger.debug(msg) - return done + return done, msg def _console_interaction(test, success_message, failure_message, send_string, keep_sending=False, vm=None): + """ + Interact with the console until either message is seen. 
+ + :param success_message: if this message appears, finish interaction + :param failure_message: if this message appears, test fails + :param send_string: a string to send to the console before trying + to read a new line + :param keep_sending: keep sending the send string each time + :param vm: the VM to interact with + + :return: The collected output (in bytes form). + """ + assert not keep_sending or send_string assert success_message or send_string @@ -101,6 +117,8 @@ def _console_interaction(test, success_message, failure_message, if failure_message is not None: failure_message_b = failure_message.encode() + out = bytes([]) + while True: if send_string: vm.console_socket.sendall(send_string.encode()) @@ -113,14 +131,21 @@ def _console_interaction(test, success_message, failure_message, break continue - if _console_read_line_until_match(test, vm, - success_message_b, - failure_message_b): + done, line = _console_read_line_until_match(test, vm, + success_message_b, + failure_message_b) + + out += line + + if done: break + return out + def interrupt_interactive_console_until_pattern(test, success_message, failure_message=None, - interrupt_string='\r'): + interrupt_string='\r', + vm=None): """ Keep sending a string to interrupt a console prompt, while logging the console output. Typical use case is to break a boot loader prompt, such: @@ -140,10 +165,13 @@ def interrupt_interactive_console_until_pattern(test, success_message, :param failure_message: if this message appears, test fails :param interrupt_string: a string to send to the console before trying to read a new line + :param vm: VM to use + + :return: The collected output (in bytes form). 
""" assert success_message - _console_interaction(test, success_message, failure_message, - interrupt_string, True) + return _console_interaction(test, success_message, failure_message, + interrupt_string, True, vm=vm) def wait_for_console_pattern(test, success_message, failure_message=None, vm=None): @@ -155,11 +183,15 @@ def wait_for_console_pattern(test, success_message, failure_message=None, :type test: :class:`qemu_test.QemuSystemTest` :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails + :param vm: VM to use + + :return: The collected output (in bytes form). """ assert success_message - _console_interaction(test, success_message, failure_message, None, vm=vm) + return _console_interaction(test, success_message, failure_message, + None, vm=vm) -def exec_command(test, command): +def exec_command(test, command, vm=None): """ Send a command to a console (appending CRLF characters), while logging the content. @@ -167,12 +199,16 @@ def exec_command(test, command): :param test: a test containing a VM. :type test: :class:`qemu_test.QemuSystemTest` :param command: the command to send + :param vm: VM to use :type command: str + + :return: The collected output (in bytes form). """ - _console_interaction(test, None, None, command + '\r') + return _console_interaction(test, None, None, command + '\r', vm=vm) def exec_command_and_wait_for_pattern(test, command, - success_message, failure_message=None): + success_message, failure_message=None, + vm=None): """ Send a command to a console (appending CRLF characters), then wait for success_message to appear on the console, while logging the. @@ -184,9 +220,14 @@ def exec_command_and_wait_for_pattern(test, command, :param command: the command to send :param success_message: if this message appears, test succeeds :param failure_message: if this message appears, test fails + :param vm: VM to use + + :return: The collected output (in bytes form). 
""" assert success_message - _console_interaction(test, success_message, failure_message, command + '\r') + + return _console_interaction(test, success_message, failure_message, + command + '\r', vm=vm) def get_qemu_img(test): test.log.debug('Looking for and selecting a qemu-img binary') diff --git a/tests/functional/qemu_test/decorators.py b/tests/functional/qemu_test/decorators.py index c0d1567b1422c..b2392958041fb 100644 --- a/tests/functional/qemu_test/decorators.py +++ b/tests/functional/qemu_test/decorators.py @@ -11,6 +11,24 @@ from .cmd import which ''' +Decorator to skip execution of a test if the provided +environment variables are not set. +Example: + + @skipIfMissingEnv("QEMU_ENV_VAR0", "QEMU_ENV_VAR1") +''' +def skipIfMissingEnv(*vars_): + missing_vars = [] + for var in vars_: + if os.getenv(var) == None: + missing_vars.append(var) + + has_vars = True if len(missing_vars) == 0 else False + + return skipUnless(has_vars, f"Missing env var(s): {', '.join(missing_vars)}") + +''' + Decorator to skip execution of a test if the list of command binaries is not available in $PATH. Example: diff --git a/tests/functional/qemu_test/gdb.py b/tests/functional/qemu_test/gdb.py new file mode 100644 index 0000000000000..558d476a68296 --- /dev/null +++ b/tests/functional/qemu_test/gdb.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# A simple interface module built around pygdbmi for handling GDB commands. 
+# +# Copyright (c) 2025 Linaro Limited +# +# Author: +# Gustavo Romero +# + +import re + + +class GDB: + """Provides methods to run and capture GDB command output.""" + + + def __init__(self, gdb_path, echo=True, suffix='# ', prompt="$ "): + from pygdbmi.gdbcontroller import GdbController + from pygdbmi.constants import GdbTimeoutError + type(self).TimeoutError = GdbTimeoutError + + gdb_cmd = [gdb_path, "-q", "--interpreter=mi2"] + self.gdbmi = GdbController(gdb_cmd) + self.echo = echo + self.suffix = suffix + self.prompt = prompt + self.response = None + self.cmd_output = None + + + def get_payload(self, response, kind): + output = [] + for o in response: + # Unpack payloads of the same type. + _type, _, payload, *_ = o.values() + if _type == kind: + output += [payload] + + # Some output lines do not end with \n but begin with it, + # so remove the leading \n and merge them with the next line + # that ends with \n. + lines = [line.lstrip('\n') for line in output] + lines = "".join(lines) + lines = lines.splitlines(keepends=True) + + return lines + + + def cli(self, cmd, timeout=32.0): + self.response = self.gdbmi.write(cmd, timeout_sec=timeout) + self.cmd_output = self.get_payload(self.response, kind="console") + if self.echo: + print(self.suffix + self.prompt + cmd) + + if len(self.cmd_output) > 0: + cmd_output = self.suffix.join(self.cmd_output) + print(self.suffix + cmd_output, end="") + + return self + + + def get_addr(self): + address_pattern = r"0x[0-9A-Fa-f]+" + cmd_output = "".join(self.cmd_output) # Concat output lines. 
+ + match = re.search(address_pattern, cmd_output) + + return int(match[0], 16) if match else None + + + def get_log(self): + r = self.get_payload(self.response, kind="log") + r = "".join(r) + + return r + + + def get_console(self): + r = "".join(self.cmd_output) + + return r + + + def exit(self): + self.gdbmi.exit() diff --git a/tests/functional/qemu_test/ports.py b/tests/functional/qemu_test/ports.py index 631b77abf6bc2..81174a6153280 100644 --- a/tests/functional/qemu_test/ports.py +++ b/tests/functional/qemu_test/ports.py @@ -23,8 +23,9 @@ class Ports(): PORTS_END = PORTS_START + PORTS_RANGE_SIZE def __enter__(self): - lock_file = os.path.join(BUILD_DIR, "tests", "functional", "port_lock") - self.lock_fh = os.open(lock_file, os.O_CREAT) + lock_file = os.path.join(BUILD_DIR, "tests", "functional", + f".port_lock.{self.PORTS_START}") + self.lock_fh = os.open(lock_file, os.O_CREAT, mode=0o666) fcntl.flock(self.lock_fh, fcntl.LOCK_EX) return self diff --git a/tests/functional/qemu_test/testcase.py b/tests/functional/qemu_test/testcase.py index 2082c6fce43b0..2c0abde395785 100644 --- a/tests/functional/qemu_test/testcase.py +++ b/tests/functional/qemu_test/testcase.py @@ -19,6 +19,7 @@ from subprocess import run import sys import tempfile +import warnings import unittest import uuid @@ -204,6 +205,10 @@ def setUp(self): self.outputdir = self.build_file('tests', 'functional', self.arch, self.id()) self.workdir = os.path.join(self.outputdir, 'scratch') + if os.path.exists(self.workdir): + # Purge as safety net in case of unclean termination of + # previous test, or use of QEMU_TEST_KEEP_SCRATCH + shutil.rmtree(self.workdir) os.makedirs(self.workdir, exist_ok=True) self.log_filename = self.log_file('base.log') @@ -232,8 +237,13 @@ def tearDown(self): self.socketdir = None self.machinelog.removeHandler(self._log_fh) self.log.removeHandler(self._log_fh) + self._log_fh.close() + @staticmethod def main(): + warnings.simplefilter("default") + os.environ["PYTHONWARNINGS"] = 
"default" + path = os.path.basename(sys.argv[0])[:-3] cache = os.environ.get("QEMU_TEST_PRECACHE", None) @@ -244,14 +254,15 @@ def main(): tr = pycotap.TAPTestRunner(message_log = pycotap.LogMode.LogToError, test_output_log = pycotap.LogMode.LogToError) res = unittest.main(module = None, testRunner = tr, exit = False, - argv=["__dummy__", path]) + argv=[sys.argv[0], path] + sys.argv[1:]) + failed = {} for (test, message) in res.result.errors + res.result.failures: - - if hasattr(test, "log_filename"): + if hasattr(test, "log_filename") and not test.id() in failed: print('More information on ' + test.id() + ' could be found here:' '\n %s' % test.log_filename, file=sys.stderr) if hasattr(test, 'console_log_name'): print(' %s' % test.console_log_name, file=sys.stderr) + failed[test.id()] = True sys.exit(not res.result.wasSuccessful()) @@ -397,6 +408,10 @@ def set_vm_arg(self, arg, value): def tearDown(self): for vm in self._vms.values(): - vm.shutdown() + try: + vm.shutdown() + except Exception as ex: + self.log.error("Failed to teardown VM: %s" % ex) logging.getLogger('console').removeHandler(self._console_log_fh) + self._console_log_fh.close() super().tearDown() diff --git a/tests/functional/replay_kernel.py b/tests/functional/replay_kernel.py index 80795eb0520e9..acb1d29a1b525 100644 --- a/tests/functional/replay_kernel.py +++ b/tests/functional/replay_kernel.py @@ -32,15 +32,14 @@ def run_vm(self, kernel_path, kernel_command_line, console_pattern, # icount requires TCG to be available self.require_accelerator('tcg') - logger = logging.getLogger('replay') start_time = time.time() vm = self.get_vm(name='recording' if record else 'replay') vm.set_console() if record: - logger.info('recording the execution...') + self.log.info('recording the execution...') mode = 'record' else: - logger.info('replaying the execution...') + self.log.info('replaying the execution...') mode = 'replay' vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s' % (shift, mode, replay_path), @@ 
-54,15 +53,15 @@ def run_vm(self, kernel_path, kernel_command_line, console_pattern, self.wait_for_console_pattern(console_pattern, vm) if record: vm.shutdown() - logger.info('finished the recording with log size %s bytes' + self.log.info('finished the recording with log size %s bytes' % os.path.getsize(replay_path)) self.run_replay_dump(replay_path) - logger.info('successfully tested replay-dump.py') + self.log.info('successfully tested replay-dump.py') else: vm.wait() - logger.info('successfully finished the replay') + self.log.info('successfully finished the replay') elapsed = time.time() - start_time - logger.info('elapsed time %.2f sec' % elapsed) + self.log.info('elapsed time %.2f sec' % elapsed) return elapsed def run_replay_dump(self, replay_path): @@ -80,5 +79,4 @@ def run_rr(self, kernel_path, kernel_command_line, console_pattern, True, shift, args, replay_path) t2 = self.run_vm(kernel_path, kernel_command_line, console_pattern, False, shift, args, replay_path) - logger = logging.getLogger('replay') - logger.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) + self.log.info('replay overhead {:.2%}'.format(t2 / t1 - 1)) diff --git a/tests/functional/reverse_debugging.py b/tests/functional/reverse_debugging.py index f9a1d395f1d1a..86fca8d81f1c1 100644 --- a/tests/functional/reverse_debugging.py +++ b/tests/functional/reverse_debugging.py @@ -1,18 +1,23 @@ -# Reverse debugging test -# # SPDX-License-Identifier: GPL-2.0-or-later # +# Reverse debugging test +# # Copyright (c) 2020 ISP RAS +# Copyright (c) 2025 Linaro Limited # # Author: # Pavel Dovgalyuk +# Gustavo Romero (Run without Avocado) # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. 
-import os + import logging +import os +from subprocess import check_output -from qemu_test import LinuxKernelTest, get_qemu_img +from qemu_test import LinuxKernelTest, get_qemu_img, GDB, \ + skipIfMissingEnv, skipIfMissingImports from qemu_test.ports import Ports @@ -28,21 +33,16 @@ class ReverseDebugging(LinuxKernelTest): that the execution is stopped at the last of them. """ - timeout = 10 STEPS = 10 - endian_is_le = True def run_vm(self, record, shift, args, replay_path, image_path, port): - from avocado.utils import datadrainer - - logger = logging.getLogger('replay') vm = self.get_vm(name='record' if record else 'replay') vm.set_console() if record: - logger.info('recording the execution...') + self.log.info('recording the execution...') mode = 'record' else: - logger.info('replaying the execution...') + self.log.info('replaying the execution...') mode = 'replay' vm.add_args('-gdb', 'tcp::%d' % port, '-S') vm.add_args('-icount', 'shift=%s,rr=%s,rrfile=%s,rrsnapshot=init' % @@ -52,67 +52,31 @@ def run_vm(self, record, shift, args, replay_path, image_path, port): if args: vm.add_args(*args) vm.launch() - console_drainer = datadrainer.LineLogger(vm.console_socket.fileno(), - logger=self.log.getChild('console'), - stop_check=(lambda : not vm.is_running())) - console_drainer.start() return vm @staticmethod - def get_reg_le(g, reg): - res = g.cmd(b'p%x' % reg) - num = 0 - for i in range(len(res))[-2::-2]: - num = 0x100 * num + int(res[i:i + 2], 16) - return num - - @staticmethod - def get_reg_be(g, reg): - res = g.cmd(b'p%x' % reg) - return int(res, 16) - - def get_reg(self, g, reg): - # value may be encoded in BE or LE order - if self.endian_is_le: - return self.get_reg_le(g, reg) - else: - return self.get_reg_be(g, reg) - - def get_pc(self, g): - return self.get_reg(g, self.REG_PC) - - def check_pc(self, g, addr): - pc = self.get_pc(g) - if pc != addr: - self.fail('Invalid PC (read %x instead of %x)' % (pc, addr)) - - @staticmethod - def gdb_step(g): - 
g.cmd(b's', b'T05thread:01;') - - @staticmethod - def gdb_bstep(g): - g.cmd(b'bs', b'T05thread:01;') + def get_pc(gdb: GDB): + return gdb.cli("print $pc").get_addr() @staticmethod def vm_get_icount(vm): return vm.qmp('query-replay')['return']['icount'] - def reverse_debugging(self, shift=7, args=None): - from avocado.utils import gdb - from avocado.utils import process - - logger = logging.getLogger('replay') + @skipIfMissingImports("pygdbmi") # Required by GDB class + @skipIfMissingEnv("QEMU_TEST_GDB") + def reverse_debugging(self, gdb_arch, shift=7, args=None): + from qemu_test import GDB # create qcow2 for snapshots - logger.info('creating qcow2 image for VM snapshots') + self.log.info('creating qcow2 image for VM snapshots') image_path = os.path.join(self.workdir, 'disk.qcow2') qemu_img = get_qemu_img(self) if qemu_img is None: self.skipTest('Could not find "qemu-img", which is required to ' 'create the temporary qcow2 image') - cmd = '%s create -f qcow2 %s 128M' % (qemu_img, image_path) - process.run(cmd) + out = check_output([qemu_img, 'create', '-f', 'qcow2', image_path, '128M'], + encoding='utf8') + self.log.info("qemu-img: %s" % out) replay_path = os.path.join(self.workdir, 'replay.bin') @@ -123,74 +87,111 @@ def reverse_debugging(self, shift=7, args=None): last_icount = self.vm_get_icount(vm) vm.shutdown() - logger.info("recorded log with %s+ steps" % last_icount) + self.log.info("recorded log with %s+ steps" % last_icount) # replay and run debug commands with Ports() as ports: port = ports.find_free_port() vm = self.run_vm(False, shift, args, replay_path, image_path, port) - logger.info('connecting to gdbstub') - g = gdb.GDBRemote('127.0.0.1', port, False, False) - g.connect() - r = g.cmd(b'qSupported') - if b'qXfer:features:read+' in r: - g.cmd(b'qXfer:features:read:target.xml:0,ffb') - if b'ReverseStep+' not in r: + + try: + self.log.info('Connecting to gdbstub...') + gdb_cmd = os.getenv('QEMU_TEST_GDB') + gdb = GDB(gdb_cmd) + try: + 
self.reverse_debugging_run(gdb, vm, port, gdb_arch, last_icount) + finally: + self.log.info('exiting gdb and qemu') + gdb.exit() + vm.shutdown() + self.log.info('Test passed.') + except GDB.TimeoutError: + # Convert a GDB timeout exception into a unittest failure exception. + raise self.failureException("Timeout while connecting to or " + "communicating with gdbstub...") from None + except Exception: + # Re-throw exceptions from unittest, like the ones caused by fail(), + # skipTest(), etc. + raise + + def reverse_debugging_run(self, gdb, vm, port, gdb_arch, last_icount): + r = gdb.cli("set architecture").get_log() + if gdb_arch not in r: + self.skipTest(f"GDB does not support arch '{gdb_arch}'") + + gdb.cli("set debug remote 1") + + c = gdb.cli(f"target remote localhost:{port}").get_console() + if not f"Remote debugging using localhost:{port}" in c: + self.fail("Could not connect to gdbstub!") + + # Remote debug messages are in 'log' payloads. + r = gdb.get_log() + if 'ReverseStep+' not in r: self.fail('Reverse step is not supported by QEMU') - if b'ReverseContinue+' not in r: + if 'ReverseContinue+' not in r: self.fail('Reverse continue is not supported by QEMU') - logger.info('stepping forward') + gdb.cli("set debug remote 0") + + self.log.info('stepping forward') steps = [] # record first instruction addresses for _ in range(self.STEPS): - pc = self.get_pc(g) - logger.info('saving position %x' % pc) + pc = self.get_pc(gdb) + self.log.info('saving position %x' % pc) steps.append(pc) - self.gdb_step(g) + gdb.cli("stepi") # visit the recorded instruction in reverse order - logger.info('stepping backward') + self.log.info('stepping backward') for addr in steps[::-1]: - self.gdb_bstep(g) - self.check_pc(g, addr) - logger.info('found position %x' % addr) + self.log.info('found position %x' % addr) + gdb.cli("reverse-stepi") + pc = self.get_pc(gdb) + if pc != addr: + self.log.info('Invalid PC (read %x instead of %x)' % (pc, addr)) + self.fail('Reverse stepping 
failed!') # visit the recorded instruction in forward order - logger.info('stepping forward') + self.log.info('stepping forward') for addr in steps: - self.check_pc(g, addr) - self.gdb_step(g) - logger.info('found position %x' % addr) + self.log.info('found position %x' % addr) + pc = self.get_pc(gdb) + if pc != addr: + self.log.info('Invalid PC (read %x instead of %x)' % (pc, addr)) + self.fail('Forward stepping failed!') + gdb.cli("stepi") # set breakpoints for the instructions just stepped over - logger.info('setting breakpoints') + self.log.info('setting breakpoints') for addr in steps: - # hardware breakpoint at addr with len=1 - g.cmd(b'Z1,%x,1' % addr, b'OK') + gdb.cli(f"break *{hex(addr)}") # this may hit a breakpoint if first instructions are executed # again - logger.info('continuing execution') + self.log.info('continuing execution') vm.qmp('replay-break', icount=last_icount - 1) # continue - will return after pausing - # This could stop at the end and get a T02 return, or by - # re-executing one of the breakpoints and get a T05 return. - g.cmd(b'c') + # This can stop at the end of the replay-break and gdb gets a SIGINT, + # or by re-executing one of the breakpoints and gdb stops at a + # breakpoint. 
+ gdb.cli("continue") + if self.vm_get_icount(vm) == last_icount - 1: - logger.info('reached the end (icount %s)' % (last_icount - 1)) + self.log.info('reached the end (icount %s)' % (last_icount - 1)) else: - logger.info('hit a breakpoint again at %x (icount %s)' % - (self.get_pc(g), self.vm_get_icount(vm))) + self.log.info('hit a breakpoint again at %x (icount %s)' % + (self.get_pc(gdb), self.vm_get_icount(vm))) - logger.info('running reverse continue to reach %x' % steps[-1]) + self.log.info('running reverse continue to reach %x' % steps[-1]) # reverse continue - will return after stopping at the breakpoint - g.cmd(b'bc', b'T05thread:01;') + gdb.cli("reverse-continue") # assume that none of the first instructions is executed again # breaking the order of the breakpoints - self.check_pc(g, steps[-1]) - logger.info('successfully reached %x' % steps[-1]) + pc = self.get_pc(gdb) + if pc != steps[-1]: + self.fail("'reverse-continue' did not hit the first PC in reverse order!") - logger.info('exiting gdb and qemu') - vm.shutdown() + self.log.info('successfully reached %x' % steps[-1]) diff --git a/tests/functional/riscv32/meson.build b/tests/functional/riscv32/meson.build new file mode 100644 index 0000000000000..f3ebbb8db5d00 --- /dev/null +++ b/tests/functional/riscv32/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_riscv32_system_quick = [ + 'migration', + 'opensbi', +] + +tests_riscv32_system_thorough = [ + 'tuxrun', +] diff --git a/tests/functional/riscv32/test_migration.py b/tests/functional/riscv32/test_migration.py new file mode 100755 index 0000000000000..30acbbe69f93d --- /dev/null +++ b/tests/functional/riscv32/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# riscv32 migration test + +from migration import MigrationTest + + +class Rv32MigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('spike') + 
self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('virt') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('spike') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/riscv32/test_opensbi.py b/tests/functional/riscv32/test_opensbi.py new file mode 100755 index 0000000000000..d1ac706f0bb9f --- /dev/null +++ b/tests/functional/riscv32/test_opensbi.py @@ -0,0 +1,10 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Reuse the 64-bit OpenSBI test for RISC-V 32-bit machines + +from riscv64.test_opensbi import RiscvOpenSBI + +if __name__ == '__main__': + RiscvOpenSBI.main() diff --git a/tests/functional/test_riscv32_tuxrun.py b/tests/functional/riscv32/test_tuxrun.py similarity index 100% rename from tests/functional/test_riscv32_tuxrun.py rename to tests/functional/riscv32/test_tuxrun.py diff --git a/tests/functional/riscv64/meson.build b/tests/functional/riscv64/meson.build new file mode 100644 index 0000000000000..c1704d92751be --- /dev/null +++ b/tests/functional/riscv64/meson.build @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_riscv64_timeouts = { + 'tuxrun' : 120, +} + +tests_riscv64_system_quick = [ + 'migration', + 'opensbi', +] + +tests_riscv64_system_thorough = [ + 'sifive_u', + 'tuxrun', +] diff --git a/tests/functional/riscv64/test_migration.py b/tests/functional/riscv64/test_migration.py new file mode 100755 index 0000000000000..2d613a29ec4f5 --- /dev/null +++ b/tests/functional/riscv64/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# riscv64 migration test + +from migration import MigrationTest + + +class Rv64MigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('virt') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + 
self.set_machine('spike') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('virt') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/test_riscv_opensbi.py b/tests/functional/riscv64/test_opensbi.py similarity index 100% rename from tests/functional/test_riscv_opensbi.py rename to tests/functional/riscv64/test_opensbi.py diff --git a/tests/functional/riscv64/test_sifive_u.py b/tests/functional/riscv64/test_sifive_u.py new file mode 100755 index 0000000000000..358ff0d1f6090 --- /dev/null +++ b/tests/functional/riscv64/test_sifive_u.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python3 +# +# Functional test that boots a Linux kernel on a Sifive U machine +# and checks the console +# +# Copyright (c) Linaro Ltd. +# +# Author: +# Philippe Mathieu-Daudé +# +# SPDX-License-Identifier: GPL-2.0-or-later + +import os + +from qemu_test import Asset, LinuxKernelTest +from qemu_test import skipIfMissingCommands + + +class SifiveU(LinuxKernelTest): + + ASSET_KERNEL = Asset( + 'https://storage.tuxboot.com/buildroot/20241119/riscv64/Image', + '2bd8132a3bf21570290042324fff48c987f42f2a00c08de979f43f0662ebadba') + ASSET_ROOTFS = Asset( + ('https://github.com/groeck/linux-build-test/raw/' + '9819da19e6eef291686fdd7b029ea00e764dc62f/rootfs/riscv64/' + 'rootfs.ext2.gz'), + 'b6ed95610310b7956f9bf20c4c9c0c05fea647900df441da9dfe767d24e8b28b') + + def do_test_riscv64_sifive_u_mmc_spi(self, connect_card): + self.set_machine('sifive_u') + kernel_path = self.ASSET_KERNEL.fetch() + rootfs_path = self.uncompress(self.ASSET_ROOTFS) + + self.vm.set_console() + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'earlycon=sbi console=ttySIF0 ' + 'root=/dev/mmcblk0 ') + self.vm.add_args('-kernel', kernel_path, + '-append', kernel_command_line, + '-no-reboot') + if connect_card: + kernel_command_line += 'panic=-1 noreboot rootwait ' + self.vm.add_args('-drive', f'file={rootfs_path},if=sd,format=raw') + 
pattern = 'Boot successful.' + else: + kernel_command_line += 'panic=0 noreboot ' + pattern = 'Cannot open root device "mmcblk0" or unknown-block(0,0)' + + self.vm.launch() + self.wait_for_console_pattern(pattern) + + os.remove(rootfs_path) + + def test_riscv64_sifive_u_nommc_spi(self): + self.do_test_riscv64_sifive_u_mmc_spi(False) + + def test_riscv64_sifive_u_mmc_spi(self): + self.do_test_riscv64_sifive_u_mmc_spi(True) + + +if __name__ == '__main__': + LinuxKernelTest.main() diff --git a/tests/functional/test_riscv64_tuxrun.py b/tests/functional/riscv64/test_tuxrun.py similarity index 100% rename from tests/functional/test_riscv64_tuxrun.py rename to tests/functional/riscv64/test_tuxrun.py diff --git a/tests/functional/rx/meson.build b/tests/functional/rx/meson.build new file mode 100644 index 0000000000000..6af83a9f23fb5 --- /dev/null +++ b/tests/functional/rx/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_rx_system_thorough = [ + 'gdbsim', +] diff --git a/tests/functional/test_rx_gdbsim.py b/tests/functional/rx/test_gdbsim.py similarity index 100% rename from tests/functional/test_rx_gdbsim.py rename to tests/functional/rx/test_gdbsim.py diff --git a/tests/functional/s390x/meson.build b/tests/functional/s390x/meson.build new file mode 100644 index 0000000000000..70cd36e2913d8 --- /dev/null +++ b/tests/functional/s390x/meson.build @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_s390x_timeouts = { + 'ccw_virtio' : 420, +} + +tests_s390x_system_quick = [ + 'vmstate', +] + +tests_s390x_system_thorough = [ + 'ccw_virtio', + 'pxelinux', + 'replay', + 'topology', + 'tuxrun', +] diff --git a/tests/functional/test_s390x_ccw_virtio.py b/tests/functional/s390x/test_ccw_virtio.py similarity index 90% rename from tests/functional/test_s390x_ccw_virtio.py rename to tests/functional/s390x/test_ccw_virtio.py index 453711aa0f5e4..045533785670a 100755 --- a/tests/functional/test_s390x_ccw_virtio.py +++ 
b/tests/functional/s390x/test_ccw_virtio.py @@ -15,6 +15,7 @@ import tempfile from qemu_test import QemuSystemTest, Asset +from qemu_test import exec_command from qemu_test import exec_command_and_wait_for_pattern from qemu_test import wait_for_console_pattern @@ -270,5 +271,30 @@ def test_s390x_fedora(self): 'while ! (dmesg -c | grep Start.virtcrypto_remove) ; do' ' sleep 1 ; done', 'Start virtcrypto_remove.') + # Test SCLP event Control-Program Identification (CPI) + cpi = '/sys/firmware/cpi/' + sclpcpi = '/machine/sclp/s390-sclp-event-facility/sclpcpi' + self.log.info("Test SCLP event CPI") + exec_command(self, 'echo TESTVM > ' + cpi + 'system_name') + exec_command(self, 'echo LINUX > ' + cpi + 'system_type') + exec_command(self, 'echo TESTPLEX > ' + cpi + 'sysplex_name') + exec_command(self, 'echo 0x001a000000060b00 > ' + cpi + 'system_level') + exec_command_and_wait_for_pattern(self, + 'echo 1 > ' + cpi + 'set', ':/#') + try: + event = self.vm.event_wait('SCLP_CPI_INFO_AVAILABLE') + except TimeoutError: + self.fail('Timed out waiting for the SCLP_CPI_INFO_AVAILABLE event') + ts = self.vm.cmd('qom-get', path=sclpcpi, property='timestamp') + self.assertNotEqual(ts, 0) + name = self.vm.cmd('qom-get', path=sclpcpi, property='system_name') + self.assertEqual(name.strip(), 'TESTVM') + typ = self.vm.cmd('qom-get', path=sclpcpi, property='system_type') + self.assertEqual(typ.strip(), 'LINUX') + sysplex = self.vm.cmd('qom-get', path=sclpcpi, property='sysplex_name') + self.assertEqual(sysplex.strip(), 'TESTPLEX') + level = self.vm.cmd('qom-get', path=sclpcpi, property='system_level') + self.assertEqual(level, 0x001a000000060b00) + if __name__ == '__main__': QemuSystemTest.main() diff --git a/tests/functional/test_s390x_pxelinux.py b/tests/functional/s390x/test_pxelinux.py similarity index 82% rename from tests/functional/test_s390x_pxelinux.py rename to tests/functional/s390x/test_pxelinux.py index 4fc33b8c46de4..c00cce6a5a437 100755 --- 
a/tests/functional/test_s390x_pxelinux.py +++ b/tests/functional/s390x/test_pxelinux.py @@ -1,10 +1,11 @@ #!/usr/bin/env python3 # # SPDX-License-Identifier: GPL-2.0-or-later -# -# Functional test that checks the pxelinux.cfg network booting of a s390x VM -# (TFTP booting without config file is already tested by the pxe qtest, so -# we don't repeat that here). +''' +Functional test that checks the pxelinux.cfg network booting of a s390x VM +(TFTP booting without config file is already tested by the pxe qtest, so +we don't repeat that here). +''' import os import shutil @@ -12,7 +13,7 @@ from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern -pxelinux_cfg_contents='''# pxelinux.cfg style config file +PXELINUX_CFG_CONTENTS='''# pxelinux.cfg style config file default Debian label Nonexisting kernel kernel.notavailable @@ -26,6 +27,10 @@ ''' class S390PxeLinux(QemuSystemTest): + ''' + Test various ways of booting via a pxelinux.cfg file, for details see: + https://wiki.syslinux.org/wiki/index.php?title=PXELINUX#Configuration + ''' ASSET_DEBIAN_KERNEL = Asset( ('https://snapshot.debian.org/archive/debian/' @@ -46,6 +51,7 @@ class S390PxeLinux(QemuSystemTest): '480859574f3f44caa6cd35c62d70e1ac0609134e22ce2a954bbed9b110c06e0b') def pxelinux_launch(self, pl_name='default', extra_opts=None): + '''Create a pxelinux.cfg file in the right location and launch QEMU''' self.require_netdev('user') self.set_machine('s390-ccw-virtio') @@ -66,11 +72,11 @@ def pxelinux_launch(self, pl_name='default', extra_opts=None): cfg_fname = self.scratch_file('tftp', 'pxelinux.cfg', pl_name) with open(cfg_fname, 'w', encoding='utf-8') as f: - f.write(pxelinux_cfg_contents) + f.write(PXELINUX_CFG_CONTENTS) virtio_net_dev = 'virtio-net-ccw,netdev=n1,bootindex=1' if extra_opts: - virtio_net_dev += ',' + extra_opts + virtio_net_dev += ',' + extra_opts self.vm.add_args('-m', '384', '-netdev', f'user,id=n1,tftp={tftpdir}', @@ -80,6 +86,7 @@ def pxelinux_launch(self, pl_name='default', 
extra_opts=None): def test_default(self): + '''Check whether the guest uses the "default" file name''' self.pxelinux_launch() # The kernel prints its arguments to the console, so we can use # this to check whether the kernel parameters are correctly handled: @@ -89,11 +96,13 @@ def test_default(self): wait_for_console_pattern(self, 'Run /init as init process') def test_mac(self): + '''Check whether the guest uses file name based on its MAC address''' self.pxelinux_launch(pl_name='01-02-ca-fe-ba-be-42', extra_opts='mac=02:ca:fe:ba:be:42,loadparm=3') wait_for_console_pattern(self, 'Linux version 5.3.7-301.fc31.s390x') def test_uuid(self): + '''Check whether the guest uses file name based on its UUID''' # Also add a non-bootable disk to check the fallback to network boot: self.vm.add_args('-blockdev', 'null-co,size=65536,node-name=d1', '-device', 'virtio-blk,drive=d1,bootindex=0,loadparm=1', @@ -102,11 +111,13 @@ def test_uuid(self): wait_for_console_pattern(self, 'Debian 4.19.146-1 (2020-09-17)') def test_ip(self): + '''Check whether the guest uses file name based on its IP address''' self.vm.add_args('-M', 'loadparm=3') self.pxelinux_launch(pl_name='0A00020F') wait_for_console_pattern(self, 'Linux version 5.3.7-301.fc31.s390x') def test_menu(self): + '''Check whether the boot menu works for pxelinux.cfg booting''' self.vm.add_args('-boot', 'menu=on,splash-time=10') self.pxelinux_launch(pl_name='0A00') wait_for_console_pattern(self, '[1] Nonexisting') diff --git a/tests/functional/test_s390x_replay.py b/tests/functional/s390x/test_replay.py similarity index 100% rename from tests/functional/test_s390x_replay.py rename to tests/functional/s390x/test_replay.py diff --git a/tests/functional/test_s390x_topology.py b/tests/functional/s390x/test_topology.py similarity index 100% rename from tests/functional/test_s390x_topology.py rename to tests/functional/s390x/test_topology.py diff --git a/tests/functional/test_s390x_tuxrun.py b/tests/functional/s390x/test_tuxrun.py 
similarity index 100% rename from tests/functional/test_s390x_tuxrun.py rename to tests/functional/s390x/test_tuxrun.py diff --git a/tests/functional/sh4/meson.build b/tests/functional/sh4/meson.build new file mode 100644 index 0000000000000..56f824e1e717a --- /dev/null +++ b/tests/functional/sh4/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_sh4_timeouts = { + 'tuxrun' : 240, +} + +tests_sh4_system_thorough = [ + 'r2d', + 'tuxrun', +] diff --git a/tests/functional/test_sh4_r2d.py b/tests/functional/sh4/test_r2d.py similarity index 100% rename from tests/functional/test_sh4_r2d.py rename to tests/functional/sh4/test_r2d.py diff --git a/tests/functional/test_sh4_tuxrun.py b/tests/functional/sh4/test_tuxrun.py similarity index 100% rename from tests/functional/test_sh4_tuxrun.py rename to tests/functional/sh4/test_tuxrun.py diff --git a/tests/functional/sh4eb/meson.build b/tests/functional/sh4eb/meson.build new file mode 100644 index 0000000000000..25e9a6e40410f --- /dev/null +++ b/tests/functional/sh4eb/meson.build @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_sh4eb_system_thorough = [ + 'r2d', +] diff --git a/tests/functional/test_sh4eb_r2d.py b/tests/functional/sh4eb/test_r2d.py similarity index 100% rename from tests/functional/test_sh4eb_r2d.py rename to tests/functional/sh4eb/test_r2d.py diff --git a/tests/functional/sparc/meson.build b/tests/functional/sparc/meson.build new file mode 100644 index 0000000000000..88732becd8123 --- /dev/null +++ b/tests/functional/sparc/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_sparc_system_quick = [ + 'migration', +] + +tests_sparc_system_thorough = [ + 'replay', + 'sun4m', +] diff --git a/tests/functional/sparc/test_migration.py b/tests/functional/sparc/test_migration.py new file mode 100755 index 0000000000000..dd6d5783b11a5 --- /dev/null +++ b/tests/functional/sparc/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# 
+# SPDX-License-Identifier: GPL-2.0-or-later +# +# Sparc migration test + +from migration import MigrationTest + + +class SparcMigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('SS-4') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('SS-5') + self.migration_with_unix() + + def test_migration_with_exec(self): + self.set_machine('SS-4') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/test_sparc_replay.py b/tests/functional/sparc/test_replay.py similarity index 100% rename from tests/functional/test_sparc_replay.py rename to tests/functional/sparc/test_replay.py diff --git a/tests/functional/test_sparc_sun4m.py b/tests/functional/sparc/test_sun4m.py similarity index 100% rename from tests/functional/test_sparc_sun4m.py rename to tests/functional/sparc/test_sun4m.py diff --git a/tests/functional/sparc64/meson.build b/tests/functional/sparc64/meson.build new file mode 100644 index 0000000000000..2e04e7d4f3dfa --- /dev/null +++ b/tests/functional/sparc64/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_sparc64_system_quick = [ + 'migration', +] + +tests_sparc64_system_thorough = [ + 'sun4u', + 'tuxrun', +] diff --git a/tests/functional/sparc64/test_migration.py b/tests/functional/sparc64/test_migration.py new file mode 100755 index 0000000000000..a8a6c73c3544d --- /dev/null +++ b/tests/functional/sparc64/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Sparc64 migration test + +from migration import MigrationTest + + +class Sparc64MigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('sun4u') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('sun4u') + self.migration_with_unix() + + def test_migration_with_exec(self): + 
self.set_machine('sun4u') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/test_sparc64_sun4u.py b/tests/functional/sparc64/test_sun4u.py similarity index 100% rename from tests/functional/test_sparc64_sun4u.py rename to tests/functional/sparc64/test_sun4u.py diff --git a/tests/functional/test_sparc64_tuxrun.py b/tests/functional/sparc64/test_tuxrun.py similarity index 100% rename from tests/functional/test_sparc64_tuxrun.py rename to tests/functional/sparc64/test_tuxrun.py diff --git a/tests/functional/test_aarch64_rme_sbsaref.py b/tests/functional/test_aarch64_rme_sbsaref.py deleted file mode 100755 index 746770e776df9..0000000000000 --- a/tests/functional/test_aarch64_rme_sbsaref.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 -# -# Functional test that boots a Realms environment on sbsa-ref machine and a -# nested guest VM using it. -# -# Copyright (c) 2024 Linaro Ltd. -# -# Author: Pierrick Bouvier -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os - -from qemu_test import QemuSystemTest, Asset, wait_for_console_pattern -from qemu_test import exec_command_and_wait_for_pattern -from test_aarch64_rme_virt import test_realms_guest - - -class Aarch64RMESbsaRefMachine(QemuSystemTest): - - # Stack is built with OP-TEE build environment from those instructions: - # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/ - # https://github.com/pbo-linaro/qemu-rme-stack - ASSET_RME_STACK_SBSA = Asset( - ('https://fileserver.linaro.org/s/KJyeBxL82mz2r7F/' - 'download/rme-stack-op-tee-4.2.0-cca-v4-sbsa.tar.gz'), - 'dd9ab28ec869bdf3b5376116cb3689103b43433fd5c4bca0f4a8d8b3c104999e') - - # This tests the FEAT_RME cpu implementation, by booting a VM supporting it, - # and launching a nested VM using it. 
- def test_aarch64_rme_sbsaref(self): - self.set_machine('sbsa-ref') - self.require_accelerator('tcg') - self.require_netdev('user') - - self.vm.set_console() - - stack_path_tar_gz = self.ASSET_RME_STACK_SBSA.fetch() - self.archive_extract(stack_path_tar_gz, format="tar") - - rme_stack = self.scratch_file('rme-stack-op-tee-4.2.0-cca-v4-sbsa') - pflash0 = os.path.join(rme_stack, 'images', 'SBSA_FLASH0.fd') - pflash1 = os.path.join(rme_stack, 'images', 'SBSA_FLASH1.fd') - virtual = os.path.join(rme_stack, 'images', 'disks', 'virtual') - drive = os.path.join(rme_stack, 'out-br', 'images', 'rootfs.ext4') - - self.vm.add_args('-cpu', 'max,x-rme=on,pauth-impdef=on') - self.vm.add_args('-m', '2G') - self.vm.add_args('-M', 'sbsa-ref') - self.vm.add_args('-drive', f'file={pflash0},format=raw,if=pflash') - self.vm.add_args('-drive', f'file={pflash1},format=raw,if=pflash') - self.vm.add_args('-drive', f'file=fat:rw:{virtual},format=raw') - self.vm.add_args('-drive', f'format=raw,if=none,file={drive},id=hd0') - self.vm.add_args('-device', 'virtio-blk-pci,drive=hd0') - self.vm.add_args('-device', 'virtio-9p-pci,fsdev=shr0,mount_tag=shr0') - self.vm.add_args('-fsdev', f'local,security_model=none,path={rme_stack},id=shr0') - self.vm.add_args('-device', 'virtio-net-pci,netdev=net0') - self.vm.add_args('-netdev', 'user,id=net0') - - self.vm.launch() - # Wait for host VM boot to complete. 
- wait_for_console_pattern(self, 'Welcome to Buildroot', - failure_message='Synchronous Exception at') - exec_command_and_wait_for_pattern(self, 'root', '#') - - test_realms_guest(self) - -if __name__ == '__main__': - QemuSystemTest.main() diff --git a/tests/functional/test_aarch64_rme_virt.py b/tests/functional/test_aarch64_rme_virt.py deleted file mode 100755 index 8452d27928fbd..0000000000000 --- a/tests/functional/test_aarch64_rme_virt.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python3 -# -# Functional test that boots a Realms environment on virt machine and a nested -# guest VM using it. -# -# Copyright (c) 2024 Linaro Ltd. -# -# Author: Pierrick Bouvier -# -# SPDX-License-Identifier: GPL-2.0-or-later - -import os - -from qemu_test import QemuSystemTest, Asset -from qemu_test import exec_command, wait_for_console_pattern -from qemu_test import exec_command_and_wait_for_pattern - -def test_realms_guest(test_rme_instance): - - # Boot the (nested) guest VM - exec_command(test_rme_instance, - 'qemu-system-aarch64 -M virt,gic-version=3 ' - '-cpu host -enable-kvm -m 512M ' - '-M confidential-guest-support=rme0 ' - '-object rme-guest,id=rme0 ' - '-device virtio-net-pci,netdev=net0,romfile= ' - '-netdev user,id=net0 ' - '-kernel /mnt/out/bin/Image ' - '-initrd /mnt/out-br/images/rootfs.cpio ' - '-serial stdio') - # Detect Realm activation during (nested) guest boot. - wait_for_console_pattern(test_rme_instance, - 'SMC_RMI_REALM_ACTIVATE') - # Wait for (nested) guest boot to complete. 
- wait_for_console_pattern(test_rme_instance, - 'Welcome to Buildroot') - exec_command_and_wait_for_pattern(test_rme_instance, 'root', '#') - # query (nested) guest cca report - exec_command(test_rme_instance, 'cca-workload-attestation report') - wait_for_console_pattern(test_rme_instance, - '"cca-platform-hash-algo-id": "sha-256"') - wait_for_console_pattern(test_rme_instance, - '"cca-realm-hash-algo-id": "sha-512"') - wait_for_console_pattern(test_rme_instance, - '"cca-realm-public-key-hash-algo-id": "sha-256"') - -class Aarch64RMEVirtMachine(QemuSystemTest): - - # Stack is built with OP-TEE build environment from those instructions: - # https://linaro.atlassian.net/wiki/spaces/QEMU/pages/29051027459/ - # https://github.com/pbo-linaro/qemu-rme-stack - ASSET_RME_STACK_VIRT = Asset( - ('https://fileserver.linaro.org/s/iaRsNDJp2CXHMSJ/' - 'download/rme-stack-op-tee-4.2.0-cca-v4-qemu_v8.tar.gz'), - '1851adc232b094384d8b879b9a2cfff07ef3d6205032b85e9b3a4a9ae6b0b7ad') - - # This tests the FEAT_RME cpu implementation, by booting a VM supporting it, - # and launching a nested VM using it. 
- def test_aarch64_rme_virt(self): - self.set_machine('virt') - self.require_accelerator('tcg') - self.require_netdev('user') - - self.vm.set_console() - - stack_path_tar_gz = self.ASSET_RME_STACK_VIRT.fetch() - self.archive_extract(stack_path_tar_gz, format="tar") - - rme_stack = self.scratch_file('rme-stack-op-tee-4.2.0-cca-v4-qemu_v8') - kernel = os.path.join(rme_stack, 'out', 'bin', 'Image') - bios = os.path.join(rme_stack, 'out', 'bin', 'flash.bin') - drive = os.path.join(rme_stack, 'out-br', 'images', 'rootfs.ext4') - - self.vm.add_args('-cpu', 'max,x-rme=on,pauth-impdef=on') - self.vm.add_args('-m', '2G') - self.vm.add_args('-M', 'virt,acpi=off,' - 'virtualization=on,' - 'secure=on,' - 'gic-version=3') - self.vm.add_args('-bios', bios) - self.vm.add_args('-kernel', kernel) - self.vm.add_args('-drive', f'format=raw,if=none,file={drive},id=hd0') - self.vm.add_args('-device', 'virtio-blk-pci,drive=hd0') - self.vm.add_args('-device', 'virtio-9p-device,fsdev=shr0,mount_tag=shr0') - self.vm.add_args('-fsdev', f'local,security_model=none,path={rme_stack},id=shr0') - self.vm.add_args('-device', 'virtio-net-pci,netdev=net0') - self.vm.add_args('-netdev', 'user,id=net0') - # We need to add nokaslr to avoid triggering this sporadic bug: - # https://gitlab.com/qemu-project/qemu/-/issues/2823 - self.vm.add_args('-append', 'root=/dev/vda nokaslr') - - self.vm.launch() - # Wait for host VM boot to complete. 
- wait_for_console_pattern(self, 'Welcome to Buildroot', - failure_message='Synchronous Exception at') - exec_command_and_wait_for_pattern(self, 'root', '#') - - test_realms_guest(self) - -if __name__ == '__main__': - QemuSystemTest.main() diff --git a/tests/functional/x86_64/meson.build b/tests/functional/x86_64/meson.build new file mode 100644 index 0000000000000..f78eec5e6cf43 --- /dev/null +++ b/tests/functional/x86_64/meson.build @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_x86_64_timeouts = { + 'acpi_bits' : 420, + 'intel_iommu': 300, + 'kvm_xen' : 180, + 'netdev_ethtool' : 180, + 'replay' : 480, + 'virtio_balloon': 120, +} + +tests_x86_64_system_quick = [ + 'bad_vmstate', + 'cpu_model_versions', + 'cpu_queries', + 'mem_addr_space', + 'memlock', + 'migration', + 'pc_cpu_hotplug_props', + 'virtio_version', + 'vmstate', +] + +tests_x86_64_system_thorough = [ + 'acpi_bits', + 'hotplug_blk', + 'hotplug_cpu', + 'intel_iommu', + 'kvm_xen', + 'linux_initrd', + 'multiprocess', + 'netdev_ethtool', + 'replay', + 'reverse_debug', + 'tuxrun', + 'vfio_user_client', + 'virtio_balloon', + 'virtio_gpu', +] diff --git a/tests/functional/test_acpi_bits.py b/tests/functional/x86_64/test_acpi_bits.py similarity index 92% rename from tests/functional/test_acpi_bits.py rename to tests/functional/x86_64/test_acpi_bits.py index 8e0563a97b18c..9a2816533d6a9 100755 --- a/tests/functional/test_acpi_bits.py +++ b/tests/functional/x86_64/test_acpi_bits.py @@ -121,10 +121,10 @@ def __init__(self, *args, **kwargs): self._debugcon_log = 'debugcon-log.txt' def _print_log(self, log): - self.logger.info('\nlogs from biosbits follows:') - self.logger.info('==========================================\n') - self.logger.info(log) - self.logger.info('==========================================\n') + self.log.info('\nlogs from biosbits follows:') + self.log.info('==========================================\n') + self.log.info(log) + 
self.log.info('==========================================\n') def copy_bits_config(self): """ copies the bios bits config file into bits. @@ -138,8 +138,8 @@ def copy_bits_config(self): self.assertTrue(os.path.exists(bits_config_file)) self.assertTrue(os.path.exists(target_config_dir)) shutil.copy2(bits_config_file, target_config_dir) - self.logger.info('copied config file %s to %s', - bits_config_file, target_config_dir) + self.log.info('copied config file %s to %s', + bits_config_file, target_config_dir) def copy_test_scripts(self): """copies the python test scripts into bits. """ @@ -163,8 +163,8 @@ def copy_test_scripts(self): newfilename = os.path.splitext(filename)[0] + '.py' shutil.copy2(os.path.join(bits_test_dir, filename), os.path.join(target_test_dir, newfilename)) - self.logger.info('copied test file %s to %s', - filename, target_test_dir) + self.log.info('copied test file %s to %s', + filename, target_test_dir) # now remove the pyc test file if it exists, otherwise the # changes in the python test script won't be executed. 
@@ -172,9 +172,9 @@ def copy_test_scripts(self): if os.access(os.path.join(target_test_dir, testfile_pyc), os.F_OK): os.remove(os.path.join(target_test_dir, testfile_pyc)) - self.logger.info('removed compiled file %s', - os.path.join(target_test_dir, - testfile_pyc)) + self.log.info('removed compiled file %s', + os.path.join(target_test_dir, + testfile_pyc)) def fix_mkrescue(self, mkrescue): """ grub-mkrescue is a bash script with two variables, 'prefix' and @@ -216,7 +216,7 @@ def generate_bits_iso(self): self.fix_mkrescue(mkrescue_script) - self.logger.info('using grub-mkrescue for generating biosbits iso ...') + self.log.info('using grub-mkrescue for generating biosbits iso ...') try: if os.getenv('V') or os.getenv('BITS_DEBUG'): @@ -225,7 +225,7 @@ def generate_bits_iso(self): stdout=subprocess.PIPE, stderr=subprocess.STDOUT, check=True) - self.logger.info("grub-mkrescue output %s" % proc.stdout) + self.log.info("grub-mkrescue output %s" % proc.stdout) else: subprocess.check_call([mkrescue_script, '-o', iso_file, bits_dir], @@ -238,11 +238,10 @@ def generate_bits_iso(self): self.assertTrue(os.access(iso_file, os.R_OK)) - self.logger.info('iso file %s successfully generated.', iso_file) + self.log.info('iso file %s successfully generated.', iso_file) def setUp(self): # pylint: disable=arguments-differ super().setUp() - self.logger = self.log prebuiltDir = self.scratch_file('prebuilt') if not os.path.isdir(prebuiltDir): @@ -333,7 +332,7 @@ def test_acpi_smbios_bits(self): # in batch mode and then automatically initiate a vm shutdown. 
self._vm.event_wait('SHUTDOWN', timeout=BITS_TIMEOUT) self._vm.wait(timeout=None) - self.logger.debug("Checking console output ...") + self.log.debug("Checking console output ...") self.parse_log() if __name__ == '__main__': diff --git a/tests/functional/x86_64/test_bad_vmstate.py b/tests/functional/x86_64/test_bad_vmstate.py new file mode 100755 index 0000000000000..40098a8490b06 --- /dev/null +++ b/tests/functional/x86_64/test_bad_vmstate.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +'''Test whether the vmstate-static-checker script detects problems correctly''' + +import subprocess + +from qemu_test import QemuBaseTest + + +EXPECTED_OUTPUT='''Warning: checking incompatible machine types: "pc-i440fx-2.1", "pc-i440fx-2.2" +Section "fw_cfg" does not exist in dest +Section "fusbh200-ehci-usb" version error: 2 > 1 +Section "fusbh200-ehci-usb", Description "ehci-core": expected field "usbsts", got "usbsts_pending"; skipping rest +Section "pci-serial-4x" Description "pci-serial-multi": Entry "Fields" missing +Section "intel-hda-generic", Description "intel-hda", Field "pci": missing description +Section "cfi.pflash01": Entry "Description" missing +Section "megasas", Description "PCIDevice": expected field "irq_state", while dest has no further fields +Section "PIIX3-xen" Description "PIIX3": minimum version error: 1 < 2 +Section "PIIX3-xen" Description "PIIX3": Entry "Subsections" missing +Section "tpci200": Description "tpci200" missing, got "tpci2002" instead; skipping +Section "sun-fdtwo" Description "fdc": version error: 2 > 1 +Section "sun-fdtwo", Description "fdrive": Subsection "fdrive/media_rate" not found +Section "usb-kbd" Description "usb-kbd" Field "kbd.keycodes" size mismatch: 4 , 2 +''' + +class BadVmStateTest(QemuBaseTest): + '''Test class for testing vmstat-static-checker script with bad input''' + + def test_checker(self): + """ + Test whether the checker script correctly detects the changes + between 
dump1.json and dump2.json. + """ + src_json = self.data_file('..', 'data', 'vmstate-static-checker', + 'dump1.json') + dst_json = self.data_file('..', 'data', 'vmstate-static-checker', + 'dump2.json') + checkerscript = self.data_file('..', '..', 'scripts', + 'vmstate-static-checker.py') + + self.log.info('Comparing %s with %s', src_json, dst_json) + cp = subprocess.run([checkerscript, '-s', src_json, '-d', dst_json], + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, check=False) + if cp.returncode != 13: + self.fail('Unexpected return code of vmstate-static-checker: ' + + cp.returncode) + if cp.stdout != EXPECTED_OUTPUT: + self.log.info('vmstate-static-checker output:\n%s', cp.stdout) + self.log.info('expected output:\n%s', EXPECTED_OUTPUT) + self.fail('Unexpected vmstate-static-checker output!') + + +if __name__ == '__main__': + QemuBaseTest.main() diff --git a/tests/functional/test_x86_cpu_model_versions.py b/tests/functional/x86_64/test_cpu_model_versions.py similarity index 100% rename from tests/functional/test_x86_cpu_model_versions.py rename to tests/functional/x86_64/test_cpu_model_versions.py diff --git a/tests/functional/test_cpu_queries.py b/tests/functional/x86_64/test_cpu_queries.py similarity index 100% rename from tests/functional/test_cpu_queries.py rename to tests/functional/x86_64/test_cpu_queries.py diff --git a/tests/functional/test_x86_64_hotplug_blk.py b/tests/functional/x86_64/test_hotplug_blk.py similarity index 100% rename from tests/functional/test_x86_64_hotplug_blk.py rename to tests/functional/x86_64/test_hotplug_blk.py diff --git a/tests/functional/test_x86_64_hotplug_cpu.py b/tests/functional/x86_64/test_hotplug_cpu.py similarity index 100% rename from tests/functional/test_x86_64_hotplug_cpu.py rename to tests/functional/x86_64/test_hotplug_cpu.py diff --git a/tests/functional/test_intel_iommu.py b/tests/functional/x86_64/test_intel_iommu.py similarity index 100% rename from tests/functional/test_intel_iommu.py 
rename to tests/functional/x86_64/test_intel_iommu.py diff --git a/tests/functional/test_x86_64_kvm_xen.py b/tests/functional/x86_64/test_kvm_xen.py similarity index 100% rename from tests/functional/test_x86_64_kvm_xen.py rename to tests/functional/x86_64/test_kvm_xen.py diff --git a/tests/functional/test_linux_initrd.py b/tests/functional/x86_64/test_linux_initrd.py similarity index 100% rename from tests/functional/test_linux_initrd.py rename to tests/functional/x86_64/test_linux_initrd.py diff --git a/tests/functional/test_mem_addr_space.py b/tests/functional/x86_64/test_mem_addr_space.py similarity index 100% rename from tests/functional/test_mem_addr_space.py rename to tests/functional/x86_64/test_mem_addr_space.py diff --git a/tests/functional/test_memlock.py b/tests/functional/x86_64/test_memlock.py similarity index 96% rename from tests/functional/test_memlock.py rename to tests/functional/x86_64/test_memlock.py index 2b515ff979ff3..81bce80b0c4ec 100755 --- a/tests/functional/test_memlock.py +++ b/tests/functional/x86_64/test_memlock.py @@ -37,7 +37,8 @@ def test_memlock_off(self): status = self.get_process_status_values(self.vm.get_pid()) - self.assertTrue(status['VmLck'] == 0) + # libgcrypt may mlock a few pages + self.assertTrue(status['VmLck'] < 32) def test_memlock_on(self): self.common_vm_setup_with_memlock('on') diff --git a/tests/functional/x86_64/test_migration.py b/tests/functional/x86_64/test_migration.py new file mode 100755 index 0000000000000..f3a517ae1f620 --- /dev/null +++ b/tests/functional/x86_64/test_migration.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# x86_64 migration test + +from migration import MigrationTest + + +class X8664MigrationTest(MigrationTest): + + def test_migration_with_tcp_localhost(self): + self.set_machine('microvm') + self.migration_with_tcp_localhost() + + def test_migration_with_unix(self): + self.set_machine('microvm') + self.migration_with_unix() + + def 
test_migration_with_exec(self): + self.set_machine('microvm') + self.migration_with_exec() + + +if __name__ == '__main__': + MigrationTest.main() diff --git a/tests/functional/x86_64/test_multiprocess.py b/tests/functional/x86_64/test_multiprocess.py new file mode 100755 index 0000000000000..756629dd446bb --- /dev/null +++ b/tests/functional/x86_64/test_multiprocess.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Test for multiprocess qemu on x86 + +from multiprocess import Multiprocess +from qemu_test import Asset + + +class X86Multiprocess(Multiprocess): + + ASSET_KERNEL_X86 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/x86_64/os/images/pxeboot/vmlinuz'), + 'd4738d03dbbe083ca610d0821d0a8f1488bebbdccef54ce33e3adb35fda00129') + + ASSET_INITRD_X86 = Asset( + ('https://archives.fedoraproject.org/pub/archive/fedora/linux' + '/releases/31/Everything/x86_64/os/images/pxeboot/initrd.img'), + '3b6cb5c91a14c42e2f61520f1689264d865e772a1f0069e660a800d31dd61fb9') + + def test_multiprocess(self): + kernel_command_line = (self.KERNEL_COMMON_COMMAND_LINE + + 'console=ttyS0 rdinit=/bin/bash') + self.do_test(self.ASSET_KERNEL_X86, self.ASSET_INITRD_X86, + kernel_command_line, 'pc') + + +if __name__ == '__main__': + Multiprocess.main() diff --git a/tests/functional/test_netdev_ethtool.py b/tests/functional/x86_64/test_netdev_ethtool.py similarity index 100% rename from tests/functional/test_netdev_ethtool.py rename to tests/functional/x86_64/test_netdev_ethtool.py diff --git a/tests/functional/test_pc_cpu_hotplug_props.py b/tests/functional/x86_64/test_pc_cpu_hotplug_props.py similarity index 100% rename from tests/functional/test_pc_cpu_hotplug_props.py rename to tests/functional/x86_64/test_pc_cpu_hotplug_props.py diff --git a/tests/functional/test_x86_64_replay.py b/tests/functional/x86_64/test_replay.py similarity index 100% rename from tests/functional/test_x86_64_replay.py 
rename to tests/functional/x86_64/test_replay.py diff --git a/tests/functional/test_x86_64_reverse_debug.py b/tests/functional/x86_64/test_reverse_debug.py similarity index 65% rename from tests/functional/test_x86_64_reverse_debug.py rename to tests/functional/x86_64/test_reverse_debug.py index d713e91e14460..2b31ae8724331 100755 --- a/tests/functional/test_x86_64_reverse_debug.py +++ b/tests/functional/x86_64/test_reverse_debug.py @@ -2,34 +2,29 @@ # # SPDX-License-Identifier: GPL-2.0-or-later # -# Reverse debugging test +# Reverse debugging test for x86_64 # # Copyright (c) 2020 ISP RAS +# Copyright (c) 2025 Linaro Limited # # Author: # Pavel Dovgalyuk +# Gustavo Romero (Run without Avocado) # # This work is licensed under the terms of the GNU GPL, version 2 or # later. See the COPYING file in the top-level directory. -from qemu_test import skipIfMissingImports, skipFlakyTest +from qemu_test import skipFlakyTest from reverse_debugging import ReverseDebugging -@skipIfMissingImports('avocado.utils') class ReverseDebugging_X86_64(ReverseDebugging): - REG_PC = 0x10 - REG_CS = 0x12 - def get_pc(self, g): - return self.get_reg_le(g, self.REG_PC) \ - + self.get_reg_le(g, self.REG_CS) * 0x10 - @skipFlakyTest("https://gitlab.com/qemu-project/qemu/-/issues/2922") def test_x86_64_pc(self): self.set_machine('pc') # start with BIOS only - self.reverse_debugging() + self.reverse_debugging(gdb_arch='x86-64') if __name__ == '__main__': diff --git a/tests/functional/test_x86_64_tuxrun.py b/tests/functional/x86_64/test_tuxrun.py similarity index 100% rename from tests/functional/test_x86_64_tuxrun.py rename to tests/functional/x86_64/test_tuxrun.py diff --git a/tests/functional/x86_64/test_vfio_user_client.py b/tests/functional/x86_64/test_vfio_user_client.py new file mode 100755 index 0000000000000..8bc16e5e31490 --- /dev/null +++ b/tests/functional/x86_64/test_vfio_user_client.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2025 Nutanix, Inc. 
+# +# Author: +# Mark Cave-Ayland +# John Levon +# +# SPDX-License-Identifier: GPL-2.0-or-later +""" +Check basic vfio-user-pci client functionality. The test starts two VMs: + + - the server VM runs the libvfio-user "gpio" example server inside it, + piping vfio-user traffic between a local UNIX socket and a virtio-serial + port. On the host, the virtio-serial port is backed by a local socket. + + - the client VM loads the gpio-pci-idio-16 kernel module, with the + vfio-user client connecting to the above local UNIX socket. + +This way, we don't depend on trying to run a vfio-user server on the host +itself. + +Once both VMs are running, we run some basic configuration on the gpio device +and verify that the server is logging the expected out. As this is consistent +given the same VM images, we just do a simple direct comparison. +""" + +import os + +from qemu_test import Asset +from qemu_test import QemuSystemTest +from qemu_test import exec_command_and_wait_for_pattern +from qemu_test import wait_for_console_pattern + +# Exact output can vary, so we just sample for some expected lines. 
+EXPECTED_SERVER_LINES = [ + "gpio: adding DMA region [0, 0xc0000) offset=0 flags=0x3", + "gpio: devinfo flags 0x3, num_regions 9, num_irqs 5", + "gpio: region_info[0] offset 0 flags 0 size 0 argsz 32", + "gpio: region_info[1] offset 0 flags 0 size 0 argsz 32", + "gpio: region_info[2] offset 0 flags 0x3 size 256 argsz 32", + "gpio: region_info[3] offset 0 flags 0 size 0 argsz 32", + "gpio: region_info[4] offset 0 flags 0 size 0 argsz 32", + "gpio: region_info[5] offset 0 flags 0 size 0 argsz 32", + "gpio: region_info[7] offset 0 flags 0x3 size 256 argsz 32", + "gpio: region7: read 256 bytes at 0", + "gpio: region7: read 0 from (0x30:4)", + "gpio: cleared EROM", + "gpio: I/O space enabled", + "gpio: memory space enabled", + "gpio: SERR# enabled", + "gpio: region7: wrote 0x103 to (0x4:2)", + "gpio: I/O space enabled", + "gpio: memory space enabled", +] + +class VfioUserClient(QemuSystemTest): + """vfio-user testing class.""" + + ASSET_REPO = 'https://github.com/mcayland-ntx/libvfio-user-test' + + ASSET_KERNEL = Asset( + f'{ASSET_REPO}/raw/refs/heads/main/images/bzImage', + '40292fa6ce95d516e26bccf5974e138d0db65a6de0bc540cabae060fe9dea605' + ) + + ASSET_ROOTFS = Asset( + f'{ASSET_REPO}/raw/refs/heads/main/images/rootfs.ext2', + 'e1e3abae8aebb8e6e77f08b1c531caeacf46250c94c815655c6bbea59fc3d1c1' + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.kernel_path = None + self.rootfs_path = None + + def configure_server_vm_args(self, server_vm, sock_path): + """ + Configuration for the server VM. Set up virtio-serial device backed by + the given socket path. 
+ """ + server_vm.add_args('-kernel', self.kernel_path) + server_vm.add_args('-append', 'console=ttyS0 root=/dev/sda') + server_vm.add_args('-drive', + f"file={self.rootfs_path},if=ide,format=raw,id=drv0") + server_vm.add_args('-snapshot') + server_vm.add_args('-chardev', + f"socket,id=sock0,path={sock_path},telnet=off,server=on,wait=off") + server_vm.add_args('-device', 'virtio-serial') + server_vm.add_args('-device', + 'virtserialport,chardev=sock0,name=org.fedoraproject.port.0') + + def configure_client_vm_args(self, client_vm, sock_path): + """ + Configuration for the client VM. Point the vfio-user-pci device to the + socket path configured above. + """ + + client_vm.add_args('-kernel', self.kernel_path) + client_vm.add_args('-append', 'console=ttyS0 root=/dev/sda') + client_vm.add_args('-drive', + f'file={self.rootfs_path},if=ide,format=raw,id=drv0') + client_vm.add_args('-snapshot') + client_vm.add_args('-device', + '{"driver":"vfio-user-pci",' + + '"socket":{"path": "%s", "type": "unix"}}' % sock_path) + + def setup_vfio_user_pci_server(self, server_vm): + """ + Start the libvfio-user server within the server VM, and arrange + for data to shuttle between its socket and the virtio serial port. 
+ """ + wait_for_console_pattern(self, 'login:', None, server_vm) + exec_command_and_wait_for_pattern(self, 'root', '#', None, server_vm) + + exec_command_and_wait_for_pattern(self, + 'gpio-pci-idio-16 -v /tmp/vfio-user.sock >/var/tmp/gpio.out 2>&1 &', + '#', None, server_vm) + + # wait for libvfio-user socket to appear + while True: + out = exec_command_and_wait_for_pattern(self, + 'ls --color=no /tmp/vfio-user.sock', '#', None, server_vm) + ls_out = out.decode().splitlines()[1].strip() + if ls_out == "/tmp/vfio-user.sock": + break + + exec_command_and_wait_for_pattern(self, + 'socat UNIX-CONNECT:/tmp/vfio-user.sock /dev/vport0p1,ignoreeof ' + + ' &', '#', None, server_vm) + + def test_vfio_user_pci(self): + """Run basic sanity test.""" + + self.set_machine('pc') + self.require_device('virtio-serial') + self.require_device('vfio-user-pci') + + self.kernel_path = self.ASSET_KERNEL.fetch() + self.rootfs_path = self.ASSET_ROOTFS.fetch() + + sock_dir = self.socket_dir() + socket_path = os.path.join(sock_dir.name, 'vfio-user.sock') + + server_vm = self.get_vm(name='server') + server_vm.set_console() + self.configure_server_vm_args(server_vm, socket_path) + + server_vm.launch() + + self.log.debug('starting libvfio-user server') + + self.setup_vfio_user_pci_server(server_vm) + + client_vm = self.get_vm(name="client") + client_vm.set_console() + self.configure_client_vm_args(client_vm, socket_path) + + try: + client_vm.launch() + except: + self.log.error('client VM failed to start, dumping server logs') + exec_command_and_wait_for_pattern(self, 'cat /var/tmp/gpio.out', + '#', None, server_vm) + raise + + self.log.debug('waiting for client VM boot') + + wait_for_console_pattern(self, 'login:', None, client_vm) + exec_command_and_wait_for_pattern(self, 'root', '#', None, client_vm) + + # + # Here, we'd like to actually interact with the gpio device a little + # more as described at: + # + # https://github.com/nutanix/libvfio-user/blob/master/docs/qemu.md + # + # 
Unfortunately, the buildroot Linux kernel has some undiagnosed issue + # so we don't get /sys/class/gpio. Nonetheless just the basic + # initialization and setup is enough for basic testing of vfio-user. + # + + self.log.debug('collecting libvfio-user server output') + + out = exec_command_and_wait_for_pattern(self, + 'cat /var/tmp/gpio.out', + 'gpio: region2: wrote 0 to (0x1:1)', + None, server_vm) + + gpio_server_out = [s for s in out.decode().splitlines() + if s.startswith("gpio:")] + + for line in EXPECTED_SERVER_LINES: + if line not in gpio_server_out: + self.log.error(f'Missing server debug line: {line}') + self.fail(False) + + +if __name__ == '__main__': + QemuSystemTest.main() diff --git a/tests/functional/test_virtio_balloon.py b/tests/functional/x86_64/test_virtio_balloon.py similarity index 100% rename from tests/functional/test_virtio_balloon.py rename to tests/functional/x86_64/test_virtio_balloon.py diff --git a/tests/functional/test_virtio_gpu.py b/tests/functional/x86_64/test_virtio_gpu.py similarity index 98% rename from tests/functional/test_virtio_gpu.py rename to tests/functional/x86_64/test_virtio_gpu.py index 81c9156d63876..be96de24da219 100755 --- a/tests/functional/test_virtio_gpu.py +++ b/tests/functional/x86_64/test_virtio_gpu.py @@ -108,6 +108,7 @@ def test_vhost_user_vga_virgl(self): shell=False, close_fds=False, ) + self._vug_log_file.close() self.vm.set_console() self.vm.add_args("-cpu", "host") @@ -135,6 +136,7 @@ def test_vhost_user_vga_virgl(self): "features: +virgl +edid") self.vm.shutdown() qemu_sock.close() + vug_sock.close() vugp.terminate() vugp.wait() diff --git a/tests/functional/test_virtio_version.py b/tests/functional/x86_64/test_virtio_version.py similarity index 100% rename from tests/functional/test_virtio_version.py rename to tests/functional/x86_64/test_virtio_version.py diff --git a/tests/functional/xtensa/meson.build b/tests/functional/xtensa/meson.build new file mode 100644 index 0000000000000..d61d82a1356f9 --- 
/dev/null +++ b/tests/functional/xtensa/meson.build @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +tests_xtensa_system_thorough = [ + 'lx60', + 'replay', +] diff --git a/tests/functional/test_xtensa_lx60.py b/tests/functional/xtensa/test_lx60.py similarity index 100% rename from tests/functional/test_xtensa_lx60.py rename to tests/functional/xtensa/test_lx60.py diff --git a/tests/functional/test_xtensa_replay.py b/tests/functional/xtensa/test_replay.py similarity index 100% rename from tests/functional/test_xtensa_replay.py rename to tests/functional/xtensa/test_replay.py diff --git a/tests/guest-debug/test_gdbstub.py b/tests/guest-debug/test_gdbstub.py index 4f08089e6a982..e017ccb55d790 100644 --- a/tests/guest-debug/test_gdbstub.py +++ b/tests/guest-debug/test_gdbstub.py @@ -1,7 +1,6 @@ """Helper functions for gdbstub testing """ -from __future__ import print_function import argparse import gdb import os diff --git a/tests/lcitool/libvirt-ci b/tests/lcitool/libvirt-ci index 18c4bfe02c467..9da20ff7c3bc9 160000 --- a/tests/lcitool/libvirt-ci +++ b/tests/lcitool/libvirt-ci @@ -1 +1 @@ -Subproject commit 18c4bfe02c467e5639bf9a687139735ccd7a3fff +Subproject commit 9da20ff7c3bc9067804a7561c2ff87583b434853 diff --git a/tests/lcitool/projects/qemu.yml b/tests/lcitool/projects/qemu.yml index c07242f272873..82812e773651b 100644 --- a/tests/lcitool/projects/qemu.yml +++ b/tests/lcitool/projects/qemu.yml @@ -44,6 +44,7 @@ packages: - libcacard - libcap-ng - libcbor + - libclang-rt - libcurl - libdrm - libepoxy @@ -91,7 +92,6 @@ packages: - pkg-config - pulseaudio - python3 - - python3-imp - python3-numpy - python3-opencv - python3-pillow @@ -104,6 +104,7 @@ packages: - python3-venv - rpm2cpio - rust + - rust-std - sdl2 - sdl2-image - sed diff --git a/tests/lcitool/refresh b/tests/lcitool/refresh index d3488b2679e9f..056cfb6e9d79c 100755 --- a/tests/lcitool/refresh +++ b/tests/lcitool/refresh @@ -63,7 +63,8 @@ add_user_mapping = [ " id ${USER} 2>/dev/null || 
useradd -u ${UID} -U ${USER}; fi\n" ] -def generate_dockerfile(host, target, project="qemu", cross=None, trailer=None): +def generate_dockerfile(host, target, project="qemu", cross=None, trailer=None, + enable_rust=True): filename = Path(src_dir, "tests", "docker", "dockerfiles", host + ".docker") cmd = lcitool_cmd + ["dockerfile"] if cross is not None: @@ -75,6 +76,8 @@ def generate_dockerfile(host, target, project="qemu", cross=None, trailer=None): else: trailer = "\n".join(add_user_mapping) + if enable_rust: + trailer += "\nENV ENABLE_RUST 1\n" generate(filename, cmd, trailer) @@ -97,10 +100,15 @@ def generate_yaml(os, target, arch, trailer=None): generate(filename, cmd, trailer) +alpine_extras = [ + "# https://gitlab.alpinelinux.org/alpine/aports/-/issues/17463\n", + "RUN apk add clang19-libclang\n", +] + # Netmap still needs to be manually built as it is yet to be packaged # into a distro. We also add cscope and gtags which are used in the CI # test -debian12_extras = [ +debian13_extras = [ "# netmap/cscope/global\n", "RUN DEBIAN_FRONTEND=noninteractive eatmydata \\\n", " apt install -y --no-install-recommends \\\n", @@ -167,48 +175,51 @@ try: # # Standard native builds # - generate_dockerfile("alpine", "alpine-321") + generate_dockerfile("alpine", "alpine-321", + trailer="".join(alpine_extras)) generate_dockerfile("centos9", "centos-stream-9") - generate_dockerfile("debian", "debian-12", - trailer="".join(debian12_extras)) - generate_dockerfile("fedora", "fedora-40") + generate_dockerfile("debian", "debian-13", + trailer="".join(debian13_extras)) + generate_dockerfile("fedora", "fedora-41") generate_dockerfile("opensuse-leap", "opensuse-leap-15") generate_dockerfile("ubuntu2204", "ubuntu-2204", - trailer="".join(ubuntu2204_rust_extras)) + trailer="".join(ubuntu2204_rust_extras), + # https://bugs.launchpad.net/ubuntu/+source/rustc-1.83/+bug/2120318 + enable_rust=False) # # Non-fatal Rust-enabled build # - generate_dockerfile("fedora-rust-nightly", "fedora-40", 
+ generate_dockerfile("fedora-rust-nightly", "fedora-41", trailer="".join(fedora_rustup_nightly_extras)) # # Cross compiling builds # - generate_dockerfile("debian-amd64-cross", "debian-12", + generate_dockerfile("debian-amd64-cross", "debian-13", cross="x86_64", trailer=cross_build("x86_64-linux-gnu-", "x86_64-softmmu," "x86_64-linux-user," "i386-softmmu,i386-linux-user")) - generate_dockerfile("debian-arm64-cross", "debian-12", + generate_dockerfile("debian-arm64-cross", "debian-13", cross="aarch64", trailer=cross_build("aarch64-linux-gnu-", "aarch64-softmmu,aarch64-linux-user")) - generate_dockerfile("debian-armhf-cross", "debian-12", + generate_dockerfile("debian-armhf-cross", "debian-13", cross="armv7l", trailer=cross_build("arm-linux-gnueabihf-", "arm-softmmu,arm-linux-user")) - generate_dockerfile("debian-i686-cross", "debian-12", + generate_dockerfile("debian-i686-cross", "debian-13", cross="i686", trailer=cross_build("i686-linux-gnu-", - "x86_64-softmmu," - "x86_64-linux-user," "i386-softmmu,i386-linux-user")) + # mips no longer supported in debian-13 + # https://www.debian.org/releases/trixie/release-notes/issues.html#mips-architectures-removed generate_dockerfile("debian-mips64el-cross", "debian-12", cross="mips64el", trailer=cross_build("mips64el-linux-gnuabi64-", @@ -219,7 +230,7 @@ try: trailer=cross_build("mipsel-linux-gnu-", "mipsel-softmmu,mipsel-linux-user")) - generate_dockerfile("debian-ppc64el-cross", "debian-12", + generate_dockerfile("debian-ppc64el-cross", "debian-13", cross="ppc64le", trailer=cross_build("powerpc64le-linux-gnu-", "ppc64-softmmu,ppc64-linux-user")) @@ -227,21 +238,23 @@ try: # while not yet a release architecture the packages are still # build while part of testing generate_dockerfile("debian-riscv64-cross", "debian-13", - project="qemu-minimal", cross="riscv64", trailer=cross_build("riscv64-linux-gnu-", "riscv64-softmmu,riscv64-linux-user")) - generate_dockerfile("debian-s390x-cross", "debian-12", + 
generate_dockerfile("debian-s390x-cross", "debian-13", cross="s390x", trailer=cross_build("s390x-linux-gnu-", "s390x-softmmu,s390x-linux-user")) - generate_dockerfile("fedora-win64-cross", "fedora-40", + generate_dockerfile("fedora-win64-cross", "fedora-41", project='qemu,qemu-win-installer', cross="mingw64", trailer=cross_build("x86_64-w64-mingw32-", - "x86_64-softmmu")) + "x86_64-softmmu"), + # linking with rust is buggy: + # https://github.com/mesonbuild/meson/pull/14991 + enable_rust=False) # # Cirrus packages lists for GitLab @@ -257,8 +270,8 @@ try: # # Ansible package lists # - generate_yaml("ubuntu", "ubuntu-2204", "aarch64") - generate_yaml("ubuntu", "ubuntu-2204", "s390x") + generate_yaml("ubuntu", "ubuntu-2404", "aarch64") + generate_yaml("ubuntu", "ubuntu-2404", "s390x") sys.exit(0) diff --git a/tests/meson.build b/tests/meson.build index c59619220f71f..cbe79162411b0 100644 --- a/tests/meson.build +++ b/tests/meson.build @@ -88,3 +88,4 @@ subdir('qapi-schema') subdir('qtest') subdir('migration-stress') subdir('functional') +subdir('tracetool') diff --git a/tests/qapi-schema/doc-non-first-section.err b/tests/qapi-schema/doc-non-first-section.err deleted file mode 100644 index eeced2bca71e1..0000000000000 --- a/tests/qapi-schema/doc-non-first-section.err +++ /dev/null @@ -1 +0,0 @@ -doc-non-first-section.json:5:1: '=' heading must come first in a comment block diff --git a/tests/qapi-schema/doc-non-first-section.json b/tests/qapi-schema/doc-non-first-section.json deleted file mode 100644 index 1590876061d57..0000000000000 --- a/tests/qapi-schema/doc-non-first-section.json +++ /dev/null @@ -1,6 +0,0 @@ -# = section must be first line - -## -# -# = Not first -## diff --git a/tests/qapi-schema/doc-non-first-section.out b/tests/qapi-schema/doc-non-first-section.out deleted file mode 100644 index e69de29bb2d1d..0000000000000 diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index 4be930228cc31..cf7fb8a6df5cc 100755 --- 
a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -165,7 +165,7 @@ def test_and_diff(test_name, dir_name, update): if actual_out == expected_out and actual_err == expected_err: return 0 - print("%s %s" % (test_name, 'UPDATE' if update else 'FAIL'), + print("%s: %s" % (test_name, 'UPDATE' if update else 'FAIL'), file=sys.stderr) out_diff = difflib.unified_diff(expected_out, actual_out, outfp.name) err_diff = difflib.unified_diff(expected_err, actual_err, errfp.name) @@ -173,6 +173,9 @@ def test_and_diff(test_name, dir_name, update): sys.stdout.writelines(err_diff) if not update: + print(("\n%s: set QEMU_TEST_REGENERATE=1 to recreate reference output" + + "if the QAPI schema generator was intentionally changed") % test_name, + file=sys.stderr) return 1 try: @@ -197,7 +200,7 @@ def main(argv): parser.add_argument('-d', '--dir', action='store', default='', help="directory containing tests") parser.add_argument('-u', '--update', action='store_true', - default='QAPI_TEST_UPDATE' in os.environ, + default='QEMU_TEST_REGENERATE' in os.environ, help="update expected test results") parser.add_argument('tests', nargs='*', metavar='TEST', action='store') args = parser.parse_args() diff --git a/tests/qemu-iotests/039.out b/tests/qemu-iotests/039.out index e52484d4be1bd..8fdbcc528aaa1 100644 --- a/tests/qemu-iotests/039.out +++ b/tests/qemu-iotests/039.out @@ -11,7 +11,7 @@ No errors were found on the image. 
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [0] ERROR cluster 5 refcount=0 reference=1 ERROR OFLAG_COPIED data cluster: l2_entry=8000000000050000 refcount=0 @@ -46,7 +46,7 @@ read 512/512 bytes at offset 0 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [0] ERROR cluster 5 refcount=0 reference=1 Rebuilding refcount structure @@ -60,7 +60,7 @@ incompatible_features [] Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [] No errors were found on the image. @@ -79,7 +79,7 @@ No errors were found on the image. 
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [0] ERROR cluster 5 refcount=0 reference=1 ERROR OFLAG_COPIED data cluster: l2_entry=8000000000050000 refcount=0 @@ -89,7 +89,7 @@ Data may be corrupted, or further writes to the image may corrupt it. Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728 wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) incompatible_features [] No errors were found on the image. *** done diff --git a/tests/qemu-iotests/061.out b/tests/qemu-iotests/061.out index 24c33add7ce60..951c6bf3e62c2 100644 --- a/tests/qemu-iotests/061.out +++ b/tests/qemu-iotests/061.out @@ -118,7 +118,7 @@ No errors were found on the image. Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 wrote 131072/131072 bytes at offset 0 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) magic 0x514649fb version 3 backing_file_offset 0x0 @@ -304,7 +304,7 @@ No errors were found on the image. 
Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 wrote 131072/131072 bytes at offset 0 128 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) magic 0x514649fb version 3 backing_file_offset 0x0 diff --git a/tests/qemu-iotests/137.out b/tests/qemu-iotests/137.out index 86377c80cde6c..e19df5b6ba82a 100644 --- a/tests/qemu-iotests/137.out +++ b/tests/qemu-iotests/137.out @@ -35,7 +35,7 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 qemu-io: Unsupported value 'blubb' for qcow2 option 'overlap-check'. Allowed are any of the following: none, constant, cached, all wrote 512/512 bytes at offset 0 512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) -./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) +./common.rc: Killed ( VALGRIND_QEMU="${VALGRIND_QEMU_IO}" _qemu_proc_exec "${VALGRIND_LOGFILE}" "$QEMU_IO_PROG" $QEMU_IO_ARGS "$@" ) OK: Dirty bit not set Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 qemu-io: Parameter 'lazy-refcounts' expects 'on' or 'off' diff --git a/tests/qemu-iotests/147 b/tests/qemu-iotests/147 index 6d6f077a14d42..3e14bd389a4ed 100755 --- a/tests/qemu-iotests/147 +++ b/tests/qemu-iotests/147 @@ -277,6 +277,7 @@ class BuiltinNBD(NBDBlockdevAddBase): } } self.client_test(filename, flatten_sock_addr(address), 'nbd-export') + sockfd.close() self._server_down() diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151 index f2ff9c5dac221..06ee3585db904 100755 --- a/tests/qemu-iotests/151 +++ b/tests/qemu-iotests/151 @@ -263,6 +263,11 @@ class TestThrottledWithNbdExportBase(iotests.QMPTestCase): break except subprocess.TimeoutExpired: self.vm.qtest(f'clock_step {1 * 1000 * 
1000 * 1000}') + try: + p.kill() + p.stdout.close() + except: + pass except IndexError: pass diff --git a/tests/qemu-iotests/check b/tests/qemu-iotests/check index 545f9ec7bdd8c..d9b7c1d598924 100755 --- a/tests/qemu-iotests/check +++ b/tests/qemu-iotests/check @@ -21,6 +21,7 @@ import sys import argparse import shutil from pathlib import Path +import warnings from findtests import TestFinder from testenv import TestEnv @@ -137,6 +138,9 @@ def make_argparser() -> argparse.ArgumentParser: if __name__ == '__main__': + warnings.simplefilter("default") + os.environ["PYTHONWARNINGS"] = "default" + args = make_argparser().parse_args() env = TestEnv(source_dir=args.source_dir, diff --git a/tests/qemu-iotests/common.filter b/tests/qemu-iotests/common.filter index 67f819d866a86..511a55b1e88e2 100644 --- a/tests/qemu-iotests/common.filter +++ b/tests/qemu-iotests/common.filter @@ -74,7 +74,7 @@ _filter_qemu_io() { _filter_win32 | \ gsed -e "s/[0-9]* ops\; [0-9/:. sec]* ([0-9/.inf]* [EPTGMKiBbytes]*\/sec and [0-9/.inf]* ops\/sec)/X ops\; XX:XX:XX.X (XXX YYY\/sec and XXX ops\/sec)/" \ - -e "s/: line [0-9][0-9]*: *[0-9][0-9]*\( Aborted\| Killed\)/:\1/" \ + -e "s/: line [0-9][0-9]*: *[0-9][0-9]*\( Aborted\| Killed\) \{2,\}/:\1 /" \ -e "s/qemu-io> //g" } diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py index 6326e46b7b100..29caaa8a349c3 100644 --- a/tests/qemu-iotests/testenv.py +++ b/tests/qemu-iotests/testenv.py @@ -22,15 +22,12 @@ from pathlib import Path import shutil import collections +import contextlib import random import subprocess import glob from typing import List, Dict, Any, Optional -if sys.version_info >= (3, 9): - from contextlib import AbstractContextManager as ContextManager -else: - from typing import ContextManager DEF_GDB_OPTIONS = 'localhost:12345' @@ -58,7 +55,7 @@ def get_default_machine(qemu_prog: str) -> str: return default_machine -class TestEnv(ContextManager['TestEnv']): +class 
TestEnv(contextlib.AbstractContextManager['TestEnv']): """ Manage system environment for running tests diff --git a/tests/qemu-iotests/testrunner.py b/tests/qemu-iotests/testrunner.py index 2e236c8fa3906..14cc8492f9fb0 100644 --- a/tests/qemu-iotests/testrunner.py +++ b/tests/qemu-iotests/testrunner.py @@ -30,11 +30,6 @@ from typing import List, Optional, Any, Sequence, Dict from testenv import TestEnv -if sys.version_info >= (3, 9): - from contextlib import AbstractContextManager as ContextManager -else: - from typing import ContextManager - def silent_unlink(path: Path) -> None: try: @@ -57,7 +52,7 @@ def file_diff(file1: str, file2: str) -> List[str]: return res -class LastElapsedTime(ContextManager['LastElapsedTime']): +class LastElapsedTime(contextlib.AbstractContextManager['LastElapsedTime']): """ Cache for elapsed time for tests, to show it during new test run It is safe to use get() at any time. To use update(), you must either @@ -112,7 +107,7 @@ def __init__(self, status: str, description: str = '', self.interrupted = interrupted -class TestRunner(ContextManager['TestRunner']): +class TestRunner(contextlib.AbstractContextManager['TestRunner']): shared_self = None @staticmethod diff --git a/tests/qemu-iotests/tests/mirror-sparse b/tests/qemu-iotests/tests/mirror-sparse index cfcaa600ab4fb..ee7101bd50ec8 100755 --- a/tests/qemu-iotests/tests/mirror-sparse +++ b/tests/qemu-iotests/tests/mirror-sparse @@ -40,6 +40,7 @@ cd .. _supported_fmt qcow2 raw # Format of the source. 
dst is always raw file _supported_proto file _supported_os Linux +_require_o_direct _require_disk_usage echo diff --git a/tests/qtest/aspeed_gpio-test.c b/tests/qtest/aspeed_gpio-test.c index 12675d4cbba93..c2f9ca2298a7f 100644 --- a/tests/qtest/aspeed_gpio-test.c +++ b/tests/qtest/aspeed_gpio-test.c @@ -27,28 +27,115 @@ #include "qemu/timer.h" #include "qobject/qdict.h" #include "libqtest-single.h" +#include "qemu/typedefs.h" #define AST2600_GPIO_BASE 0x1E780000 #define GPIO_ABCD_DATA_VALUE 0x000 #define GPIO_ABCD_DIRECTION 0x004 +static uint32_t qtest_qom_get_uint32(QTestState *s, const char *path, + const char *property) +{ + QDict *r; + + uint32_t res; + r = qtest_qmp(s, "{ 'execute': 'qom-get', 'arguments': " + "{ 'path': %s, 'property': %s } }", path, property); + res = qdict_get_uint(r, "return"); + qobject_unref(r); + + return res; +} + +static void qtest_qom_set_uint32(QTestState *s, const char *path, + const char *property, uint32_t value) +{ + QDict *r; + + r = qtest_qmp(s, "{ 'execute': 'qom-set', 'arguments': " + "{ 'path': %s, 'property': %s, 'value': %" PRIu32 " } }", + path, property, value); + qobject_unref(r); +} + +static const char *resp_get_error(QDict *r, const char* error_key) +{ + QDict *qdict; + + g_assert(r); + + qdict = qdict_get_qdict(r, "error"); + if (qdict) { + return qdict_get_str(qdict, error_key); + } + + return NULL; +} + +static bool qtest_qom_check_error(QTestState *s, const char *path, + const char *property, const char *error_msg, + const char *error_msg_key) +{ + QDict *r; + bool b; + + r = qtest_qmp(s, "{ 'execute': 'qom-get', 'arguments': " + "{ 'path': %s, 'property': %s } }", path, property); + b = g_str_equal(resp_get_error(r, error_msg_key), error_msg); + qobject_unref(r); + + return b; +} + static void test_set_colocated_pins(const void *data) { QTestState *s = (QTestState *)data; - + const char path[] = "/machine/soc/gpio"; /* * gpioV4-7 occupy bits within a single 32-bit value, so we want to make * sure that 
modifying one doesn't affect the other. */ - qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV4", true); - qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV5", false); - qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV6", true); - qtest_qom_set_bool(s, "/machine/soc/gpio", "gpioV7", false); - g_assert(qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV4")); - g_assert(!qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV5")); - g_assert(qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV6")); - g_assert(!qtest_qom_get_bool(s, "/machine/soc/gpio", "gpioV7")); + qtest_qom_set_bool(s, path, "gpioV4", true); + qtest_qom_set_bool(s, path, "gpioV5", false); + qtest_qom_set_bool(s, path, "gpioV6", true); + qtest_qom_set_bool(s, path, "gpioV7", false); + g_assert(qtest_qom_get_bool(s, path, "gpioV4")); + g_assert(!qtest_qom_get_bool(s, path, "gpioV5")); + g_assert(qtest_qom_get_bool(s, path, "gpioV6")); + g_assert(!qtest_qom_get_bool(s, path, "gpioV7")); + + /* + * Testing the gpio-set[%d] properties, using individual gpio boolean + * properties to do cross check. + * We use gpioR4-7 for test, Setting them to be 0b1010. + */ + qtest_qom_set_uint32(s, path, "gpio-set[4]", 0x0); + g_assert(qtest_qom_get_uint32(s, path, "gpio-set[4]") == 0x0); + qtest_qom_set_uint32(s, path, "gpio-set[4]", 0xa000); + g_assert(qtest_qom_get_uint32(s, path, "gpio-set[4]") == 0xa000); + + g_assert(!qtest_qom_get_bool(s, path, "gpioR4")); + g_assert(qtest_qom_get_bool(s, path, "gpioR5")); + g_assert(!qtest_qom_get_bool(s, path, "gpioR6")); + g_assert(qtest_qom_get_bool(s, path, "gpioR7")); + + /* + * Testing the invalid indexing, the response info should contain following + * info: + * {key: "class", value: "GenericError"} + * + * For pins, it should follow "gpio%2[A-Z]%1d" or "gpio%3[18A-E]%1d" format. 
+ */ + const char error_msg[] = "GenericError"; + const char error_msg_key[] = "class"; + + g_assert(qtest_qom_check_error(s, path, "gpioR+1", error_msg, + error_msg_key)); + g_assert(qtest_qom_check_error(s, path, "gpio-set[99]", error_msg, + error_msg_key)); + g_assert(qtest_qom_check_error(s, path, "gpio-set[-3]", error_msg, + error_msg_key)); } static void test_set_input_pins(const void *data) diff --git a/tests/qtest/aspeed_smc-test.c b/tests/qtest/aspeed_smc-test.c index 52a00e6f0a7e1..50a87e6250016 100644 --- a/tests/qtest/aspeed_smc-test.c +++ b/tests/qtest/aspeed_smc-test.c @@ -134,10 +134,10 @@ static void test_ast2600_evb(AspeedSMCTestData *data) "-drive file=%s,format=raw,if=mtd", data->tmp_path); - /* fmc cs0 with mx66u51235f flash */ + /* fmc cs0 with w25q512jv flash */ data->flash_base = 0x20000000; data->spi_base = 0x1E620000; - data->jedec_id = 0xc2253a; + data->jedec_id = 0xef4020; data->cs = 0; data->node = "/machine/soc/fmc/ssi.0/child[0]"; /* beyond 16MB */ diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index 6aec68deccfa3..6b892ef23e1a7 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -2208,7 +2208,7 @@ static void test_acpi_aarch64_virt_tcg(void) data.smbios_cpu_max_speed = 2900; data.smbios_cpu_curr_speed = 2700; - test_acpi_one("-cpu cortex-a57 " + test_acpi_one("-cpu cortex-a57 -machine ras=on " "-smbios type=4,max-speed=2900,current-speed=2700", &data); free_test_data(&data); } @@ -2337,6 +2337,86 @@ static void test_acpi_aarch64_virt_viot(void) free_test_data(&data); } +static void test_acpi_aarch64_virt_smmuv3_legacy(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * MiB, + }; + + /* + * cdrom is plugged into scsi controller to avoid conflict + * with pxb-pcie. 
See comments in test_acpi_aarch64_virt_tcg_pxb() for + * details. + * + * The setup includes three PCIe root complexes, one of which has + * bypass_iommu enabled. The generated IORT table contains a single + * SMMUv3 node and a Root Complex node with three ID mappings. Two + * of the ID mappings have output references pointing to the SMMUv3 + * node and the remaining one points to ITS. + */ + data.variant = ".smmuv3-legacy"; + test_acpi_one(" -device pcie-root-port,chassis=1,id=pci.1" + " -device virtio-scsi-pci,id=scsi0,bus=pci.1" + " -drive file=" + "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2," + "if=none,media=cdrom,id=drive-scsi0-0-0-1,readonly=on" + " -device scsi-cd,bus=scsi0.0,scsi-id=0," + "drive=drive-scsi0-0-0-1,id=scsi0-0-0-1,bootindex=1" + " -cpu cortex-a57" + " -M iommu=smmuv3" + " -device pxb-pcie,id=pcie.1,bus=pcie.0,bus_nr=0x10" + " -device pxb-pcie,id=pcie.2,bus=pcie.0,bus_nr=0x20,bypass_iommu=on", + &data); + free_test_data(&data); +} + +static void test_acpi_aarch64_virt_smmuv3_dev(void) +{ + test_data data = { + .machine = "virt", + .arch = "aarch64", + .tcg_only = true, + .uefi_fl1 = "pc-bios/edk2-aarch64-code.fd", + .uefi_fl2 = "pc-bios/edk2-arm-vars.fd", + .ram_start = 0x40000000ULL, + .scan_len = 128ULL * MiB, + }; + + /* + * cdrom is plugged into scsi controller to avoid conflict + * with pxb-pcie. See comments in test_acpi_aarch64_virt_tcg_pxb() + * for details. + * + * The setup includes three PCie root complexes, two of which are + * connected to separate SMMUv3 devices. The resulting IORT table + * contains two SMMUv3 nodes and a Root Complex node with ID mappings + * of which two of the ID mappings have output references pointing + * to two different SMMUv3 nodes and the remaining ones pointing to + * ITS. 
+ */ + data.variant = ".smmuv3-dev"; + test_acpi_one(" -device pcie-root-port,chassis=1,id=pci.1" + " -device virtio-scsi-pci,id=scsi0,bus=pci.1" + " -drive file=" + "tests/data/uefi-boot-images/bios-tables-test.aarch64.iso.qcow2," + "if=none,media=cdrom,id=drive-scsi0-0-0-1,readonly=on" + " -device scsi-cd,bus=scsi0.0,scsi-id=0," + "drive=drive-scsi0-0-0-1,id=scsi0-0-0-1,bootindex=1" + " -cpu cortex-a57" + " -device arm-smmuv3,primary-bus=pcie.0,id=smmuv3.0" + " -device pxb-pcie,id=pcie.1,bus=pcie.0,bus_nr=0x10" + " -device arm-smmuv3,primary-bus=pcie.1,id=smmuv3.1" + " -device pxb-pcie,id=pcie.2,bus=pcie.0,bus_nr=0x20", + &data); + free_test_data(&data); +} + #ifndef _WIN32 # define DEV_NULL "/dev/null" #else @@ -2768,6 +2848,12 @@ int main(int argc, char *argv[]) if (qtest_has_device("virtio-iommu-pci")) { qtest_add_func("acpi/virt/viot", test_acpi_aarch64_virt_viot); } + qtest_add_func("acpi/virt/smmuv3-legacy", + test_acpi_aarch64_virt_smmuv3_legacy); + if (qtest_has_device("arm-smmuv3")) { + qtest_add_func("acpi/virt/smmuv3-dev", + test_acpi_aarch64_virt_smmuv3_dev); + } } } else if (strcmp(arch, "riscv64") == 0) { if (has_tcg && qtest_has_device("virtio-blk-pci")) { @@ -2778,7 +2864,7 @@ int main(int argc, char *argv[]) test_acpi_riscv64_virt_tcg_acpi_spcr); } } else if (strcmp(arch, "loongarch64") == 0) { - if (has_tcg) { + if (has_tcg && qtest_has_machine("virt")) { qtest_add_func("acpi/virt", test_acpi_loongarch64_virt); qtest_add_func("acpi/virt/topology", test_acpi_loongarch64_virt_topology); diff --git a/tests/qtest/cpu-plug-test.c b/tests/qtest/cpu-plug-test.c index 44d704680b146..0aa4ccc5b6191 100644 --- a/tests/qtest/cpu-plug-test.c +++ b/tests/qtest/cpu-plug-test.c @@ -190,7 +190,7 @@ int main(int argc, char **argv) qtest_cb_for_every_machine(add_pseries_test_case, g_test_quick()); } else if (g_str_equal(arch, "s390x")) { qtest_cb_for_every_machine(add_s390x_test_case, g_test_quick()); - } else if (g_str_equal(arch, "loongarch64")) { + } else if 
(g_str_equal(arch, "loongarch64") && qtest_has_machine("virt")) { add_loongarch_test_case("virt"); } diff --git a/tests/qtest/ds1338-test.c b/tests/qtest/ds1338-test.c index d12424d27f676..b8d0e65ec44c6 100644 --- a/tests/qtest/ds1338-test.c +++ b/tests/qtest/ds1338-test.c @@ -18,16 +18,12 @@ */ #include "qemu/osdep.h" +#include "qemu/bcd.h" #include "libqtest.h" #include "libqos/i2c.h" #define DS1338_ADDR 0x68 -static inline uint8_t bcd2bin(uint8_t x) -{ - return ((x) & 0x0f) + ((x) >> 4) * 10; -} - static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc) { QI2CDevice *i2cdev = (QI2CDevice *)obj; @@ -39,9 +35,9 @@ static void send_and_receive(void *obj, void *data, QGuestAllocator *alloc) i2c_read_block(i2cdev, 0, resp, sizeof(resp)); /* check retrieved time against local time */ - g_assert_cmpuint(bcd2bin(resp[4]), == , tm_ptr->tm_mday); - g_assert_cmpuint(bcd2bin(resp[5]), == , 1 + tm_ptr->tm_mon); - g_assert_cmpuint(2000 + bcd2bin(resp[6]), == , 1900 + tm_ptr->tm_year); + g_assert_cmpuint(from_bcd(resp[4]), == , tm_ptr->tm_mday); + g_assert_cmpuint(from_bcd(resp[5]), == , 1 + tm_ptr->tm_mon); + g_assert_cmpuint(2000 + from_bcd(resp[6]), == , 1900 + tm_ptr->tm_year); } static void ds1338_register_nodes(void) diff --git a/tests/qtest/fuzz/virtio_net_fuzz.c b/tests/qtest/fuzz/virtio_net_fuzz.c index e239875e3b429..e9b13d3e4fabf 100644 --- a/tests/qtest/fuzz/virtio_net_fuzz.c +++ b/tests/qtest/fuzz/virtio_net_fuzz.c @@ -132,7 +132,7 @@ static void *virtio_net_test_setup_socket(GString *cmd_line, void *arg) { int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, sockfds); g_assert_cmpint(ret, !=, -1); - g_unix_set_fd_nonblocking(sockfds[0], true, NULL); + qemu_set_blocking(sockfds[0], false, &error_abort); sockfds_initialized = true; g_string_append_printf(cmd_line, " -netdev socket,fd=%d,id=hs0 ", sockfds[1]); diff --git a/tests/qtest/libqos/virtio.c b/tests/qtest/libqos/virtio.c index 5a709d0bc59f8..010ff40834021 100644 --- a/tests/qtest/libqos/virtio.c 
+++ b/tests/qtest/libqos/virtio.c @@ -265,8 +265,9 @@ void qvring_init(QTestState *qts, const QGuestAllocator *alloc, QVirtQueue *vq, /* vq->avail->flags */ qvirtio_writew(vq->vdev, qts, vq->avail, 0); - /* vq->avail->idx */ - qvirtio_writew(vq->vdev, qts, vq->avail + 2, 0); + + qvirtqueue_set_avail_idx(qts, vq->vdev, vq, 0); + /* vq->avail->used_event */ qvirtio_writew(vq->vdev, qts, vq->avail + 4 + (2 * vq->size), 0); @@ -388,6 +389,13 @@ uint32_t qvirtqueue_add_indirect(QTestState *qts, QVirtQueue *vq, return vq->free_head++; /* Return and increase, in this order */ } +void qvirtqueue_set_avail_idx(QTestState *qts, QVirtioDevice *d, + QVirtQueue *vq, uint16_t idx) +{ + /* vq->avail->idx */ + qvirtio_writew(d, qts, vq->avail + 2, idx); +} + void qvirtqueue_kick(QTestState *qts, QVirtioDevice *d, QVirtQueue *vq, uint32_t free_head) { @@ -400,8 +408,8 @@ void qvirtqueue_kick(QTestState *qts, QVirtioDevice *d, QVirtQueue *vq, /* vq->avail->ring[idx % vq->size] */ qvirtio_writew(d, qts, vq->avail + 4 + (2 * (idx % vq->size)), free_head); - /* vq->avail->idx */ - qvirtio_writew(d, qts, vq->avail + 2, idx + 1); + + qvirtqueue_set_avail_idx(qts, d, vq, idx + 1); /* Must read after idx is updated */ flags = qvirtio_readw(d, qts, vq->used); diff --git a/tests/qtest/libqos/virtio.h b/tests/qtest/libqos/virtio.h index 7adc7cbd10502..e238f1726f15e 100644 --- a/tests/qtest/libqos/virtio.h +++ b/tests/qtest/libqos/virtio.h @@ -143,6 +143,8 @@ uint32_t qvirtqueue_add(QTestState *qts, QVirtQueue *vq, uint64_t data, uint32_t len, bool write, bool next); uint32_t qvirtqueue_add_indirect(QTestState *qts, QVirtQueue *vq, QVRingIndirectDesc *indirect); +void qvirtqueue_set_avail_idx(QTestState *qts, QVirtioDevice *d, + QVirtQueue *vq, uint16_t idx); void qvirtqueue_kick(QTestState *qts, QVirtioDevice *d, QVirtQueue *vq, uint32_t free_head); bool qvirtqueue_get_buf(QTestState *qts, QVirtQueue *vq, uint32_t *desc_idx, diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c index 
94526b7f9cc5f..933d0858697c2 100644 --- a/tests/qtest/libqtest.c +++ b/tests/qtest/libqtest.c @@ -357,7 +357,7 @@ void qtest_remove_abrt_handler(void *data) } } -static const char *qtest_qemu_binary(const char *var) +const char *qtest_qemu_binary(const char *var) { const char *qemu_bin; @@ -409,30 +409,30 @@ static pid_t qtest_create_process(char *cmd) } #endif /* _WIN32 */ -static QTestState *G_GNUC_PRINTF(2, 3) qtest_spawn_qemu(const char *qemu_bin, - const char *fmt, ...) +static QTestState *qtest_create_test_state(int pid) { - va_list ap; QTestState *s = g_new0(QTestState, 1); - const char *trace = g_getenv("QTEST_TRACE"); - g_autofree char *tracearg = trace ? - g_strdup_printf("-trace %s ", trace) : g_strdup(""); - g_autoptr(GString) command = g_string_new(""); - - va_start(ap, fmt); - g_string_append_printf(command, CMD_EXEC "%s %s", qemu_bin, tracearg); - g_string_append_vprintf(command, fmt, ap); - va_end(ap); + s->qemu_pid = pid; qtest_add_abrt_handler(kill_qemu_hook_func, s); + return s; +} + +static QTestState *qtest_spawn_qemu(const char *qemu_bin, const char *args, + void *opaque) +{ + int pid; + g_autoptr(GString) command = g_string_new(""); + + g_string_printf(command, CMD_EXEC "%s %s", qemu_bin, args); if (!silence_spawn_log) { g_test_message("starting QEMU: %s", command->str); } #ifndef _WIN32 - s->qemu_pid = fork(); - if (s->qemu_pid == 0) { + pid = fork(); + if (pid == 0) { #ifdef __linux__ /* * Although we register a ABRT handler to kill off QEMU @@ -455,10 +455,10 @@ static QTestState *G_GNUC_PRINTF(2, 3) qtest_spawn_qemu(const char *qemu_bin, exit(1); } #else - s->qemu_pid = qtest_create_process(command->str); + pid = qtest_create_process(command->str); #endif /* _WIN32 */ - return s; + return qtest_create_test_state(pid); } static char *qtest_socket_path(const char *suffix) @@ -466,14 +466,48 @@ static char *qtest_socket_path(const char *suffix) return g_strdup_printf("%s/qtest-%d.%s", g_get_tmp_dir(), getpid(), suffix); } +gchar 
*qtest_qemu_args(const char *extra_args) +{ + g_autofree gchar *socket_path = qtest_socket_path("sock"); + g_autofree gchar *qmp_socket_path = qtest_socket_path("qmp"); + const char *trace = g_getenv("QTEST_TRACE"); + g_autofree char *tracearg = trace ? g_strdup_printf("-trace %s ", trace) : + g_strdup(""); + gchar *args = g_strdup_printf( + "%s" + "-qtest unix:%s " + "-qtest-log %s " + "-chardev socket,path=%s,id=char0 " + "-mon chardev=char0,mode=control " + "-display none " + "-audio none " + "%s" + " -accel qtest", + + tracearg, + socket_path, + getenv("QTEST_LOG") ? DEV_STDERR : DEV_NULL, + qmp_socket_path, + extra_args ?: ""); + + return args; +} + +typedef QTestState *(*qtest_qemu_spawn_func)(const char *qemu_bin, + const char *extra_args, + void *opaque); + static QTestState *qtest_init_internal(const char *qemu_bin, const char *extra_args, - bool do_connect) + bool do_connect, + qtest_qemu_spawn_func spawn, + void *opaque) { QTestState *s; int sock, qmpsock, i; g_autofree gchar *socket_path = qtest_socket_path("sock"); g_autofree gchar *qmp_socket_path = qtest_socket_path("qmp"); + g_autofree gchar *args = qtest_qemu_args(extra_args); /* * It's possible that if an earlier test run crashed it might @@ -488,19 +522,7 @@ static QTestState *qtest_init_internal(const char *qemu_bin, sock = init_socket(socket_path); qmpsock = init_socket(qmp_socket_path); - s = qtest_spawn_qemu(qemu_bin, - "-qtest unix:%s " - "-qtest-log %s " - "-chardev socket,path=%s,id=char0 " - "-mon chardev=char0,mode=control " - "-display none " - "-audio none " - "%s" - " -accel qtest", - socket_path, - getenv("QTEST_LOG") ? 
DEV_STDERR : DEV_NULL, - qmp_socket_path, - extra_args ?: ""); + s = spawn(qemu_bin, args, opaque); qtest_client_set_rx_handler(s, qtest_client_socket_recv_line); qtest_client_set_tx_handler(s, qtest_client_socket_send); @@ -555,7 +577,8 @@ void qtest_connect(QTestState *s) QTestState *qtest_init_without_qmp_handshake(const char *extra_args) { - return qtest_init_internal(qtest_qemu_binary(NULL), extra_args, true); + return qtest_init_internal(qtest_qemu_binary(NULL), extra_args, true, + qtest_spawn_qemu, NULL); } void qtest_qmp_handshake(QTestState *s, QList *capabilities) @@ -578,7 +601,7 @@ QTestState *qtest_init_ext(const char *var, const char *extra_args, QList *capabilities, bool do_connect) { QTestState *s = qtest_init_internal(qtest_qemu_binary(var), extra_args, - do_connect); + do_connect, qtest_spawn_qemu, NULL); if (do_connect) { qtest_qmp_handshake(s, capabilities); @@ -592,6 +615,25 @@ QTestState *qtest_init_ext(const char *var, const char *extra_args, return s; } +static QTestState *qtest_attach_qemu(const char *qemu_bin, + const char *extra_args, + void *opaque) +{ + int pid = *(int *)opaque; + return qtest_create_test_state(pid); +} + +QTestState *qtest_init_after_exec(QTestState *qts) +{ + void *opaque = (void *)&qts->qemu_pid; + QTestState *s; + + s = qtest_init_internal(NULL, NULL, true, qtest_attach_qemu, opaque); + qts->qemu_pid = -1; + qtest_qmp_handshake(s, NULL); + return s; +} + QTestState *qtest_init(const char *extra_args) { return qtest_init_ext(NULL, extra_args, NULL, true); @@ -1630,7 +1672,8 @@ static void qtest_free_machine_list(struct MachInfo *machines) static struct MachInfo *qtest_get_machines(const char *var) { static struct MachInfo *machines; - static char *qemu_var; + static char *qemu_bin; + const char *new_qemu_bin; QDict *response, *minfo; QList *list; const QListEntry *p; @@ -1639,9 +1682,10 @@ static struct MachInfo *qtest_get_machines(const char *var) QTestState *qts; int idx; - if (g_strcmp0(qemu_var, var)) { - 
g_free(qemu_var); - qemu_var = g_strdup(var); + new_qemu_bin = qtest_qemu_binary(var); + if (g_strcmp0(qemu_bin, new_qemu_bin)) { + g_free(qemu_bin); + qemu_bin = g_strdup(new_qemu_bin); /* new qemu, clear the cache */ qtest_free_machine_list(machines); @@ -1654,7 +1698,7 @@ static struct MachInfo *qtest_get_machines(const char *var) silence_spawn_log = !g_test_verbose(); - qts = qtest_init_ext(qemu_var, "-machine none", NULL, true); + qts = qtest_init_ext(var, "-machine none", NULL, true); response = qtest_qmp(qts, "{ 'execute': 'query-machines' }"); g_assert(response); list = qdict_get_qlist(response, "return"); diff --git a/tests/qtest/libqtest.h b/tests/qtest/libqtest.h index b3f2e7fbefd8d..9c118c89ca0bc 100644 --- a/tests/qtest/libqtest.h +++ b/tests/qtest/libqtest.h @@ -47,6 +47,31 @@ QTestState *qtest_initf(const char *fmt, ...) G_GNUC_PRINTF(1, 2); */ QTestState *qtest_vinitf(const char *fmt, va_list ap) G_GNUC_PRINTF(1, 0); +/** + * qtest_qemu_binary: + * @var: environment variable name + * + * Look up @var and return its value as the qemu binary path. + * If @var is NULL, look up the default var name. + */ +const char *qtest_qemu_binary(const char *var); + +/** + * qtest_init_after_exec: + * @qts: the previous QEMU state + * + * Return a test state representing new QEMU after @qts exec's it. + */ +QTestState *qtest_init_after_exec(QTestState *qts); + +/** + * qtest_qemu_args: + * @extra_args: Other arguments to pass to QEMU. + * + * Return the command line used to start QEMU, sans binary. + */ +gchar *qtest_qemu_args(const char *extra_args); + /** * qtest_init: * @extra_args: other arguments to pass to QEMU. CAUTION: these @@ -977,7 +1002,7 @@ void qtest_qmp_fds_assert_success(QTestState *qts, int *fds, size_t nfds, * @cb: Pointer to the callback function * @skip_old_versioned: true if versioned old machine types should be skipped * - * Call a callback function for every name of all available machines. 
+ * Call a callback function for every name of all available machines. */ void qtest_cb_for_every_machine(void (*cb)(const char *machine), bool skip_old_versioned); diff --git a/tests/qtest/migration/bootfile.c b/tests/qtest/migration/bootfile.c index fac059d11d74a..479c43231d717 100644 --- a/tests/qtest/migration/bootfile.c +++ b/tests/qtest/migration/bootfile.c @@ -68,3 +68,8 @@ char *bootfile_create(const char *arch, const char *dir, bool suspend_me) return bootpath; } + +char *bootfile_get(void) +{ + return bootpath; +} diff --git a/tests/qtest/migration/bootfile.h b/tests/qtest/migration/bootfile.h index 6d6a67386e1be..96e784b163403 100644 --- a/tests/qtest/migration/bootfile.h +++ b/tests/qtest/migration/bootfile.h @@ -35,5 +35,6 @@ void bootfile_delete(void); char *bootfile_create(const char *arch, const char *dir, bool suspend_me); +char *bootfile_get(void); #endif /* BOOTFILE_H */ diff --git a/tests/qtest/migration/cpr-tests.c b/tests/qtest/migration/cpr-tests.c index 5e764a67876e0..9388ad64be4c3 100644 --- a/tests/qtest/migration/cpr-tests.c +++ b/tests/qtest/migration/cpr-tests.c @@ -97,7 +97,10 @@ static void test_mode_transfer_common(bool incoming_defer) .start_hook = test_mode_transfer_start, }; - test_precopy_common(&args); + if (test_precopy_common(&args) < 0) { + close(cpr_sockfd); + unlink(cpr_path); + } } static void test_mode_transfer(void) @@ -110,6 +113,138 @@ static void test_mode_transfer_defer(void) test_mode_transfer_common(true); } +static void set_cpr_exec_args(QTestState *who, MigrateCommon *args) +{ + g_autofree char *qtest_from_args = NULL; + g_autofree char *from_args = NULL; + g_autofree char *to_args = NULL; + g_autofree char *exec_args = NULL; + g_auto(GStrv) argv = NULL; + char *from_str, *src, *dst; + int ret; + + /* + * hide_stderr appends "2>/dev/null" to the command line, but cpr-exec + * passes the command-line words to execv, not to the shell, so suppress it + * here. 
fd 2 was already bound in the source VM, and execv preserves it. + */ + g_assert(args->start.hide_stderr == false); + + ret = migrate_args(&from_args, &to_args, args->listen_uri, &args->start); + g_assert(!ret); + qtest_from_args = qtest_qemu_args(from_args); + + /* + * The generated args may have been formatted using "%s %s" with empty + * strings, which can produce consecutive spaces, which g_strsplit would + * convert into empty strings. Ditto for leading and trailing space. + * De-dup spaces to avoid that. + */ + + from_str = src = dst = g_strstrip(qtest_from_args); + do { + if (*src != ' ' || src[-1] != ' ') { + *dst++ = *src; + } + } while (*src++); + + exec_args = g_strconcat(qtest_qemu_binary(migration_get_env()->qemu_dst), + " -incoming defer ", from_str, NULL); + argv = g_strsplit(exec_args, " ", -1); + migrate_set_parameter_strv(who, "cpr-exec-command", argv); +} + +static void wait_for_migration_event(QTestState *who, const char *waitfor) +{ + QDict *rsp, *data; + char *status; + bool done = false; + + while (!done) { + rsp = qtest_qmp_eventwait_ref(who, "MIGRATION"); + g_assert(qdict_haskey(rsp, "data")); + data = qdict_get_qdict(rsp, "data"); + g_assert(qdict_haskey(data, "status")); + status = g_strdup(qdict_get_str(data, "status")); + g_assert(strcmp(status, "failed")); + done = !strcmp(status, waitfor); + qobject_unref(rsp); + } +} + +static void test_cpr_exec(MigrateCommon *args) +{ + QTestState *from, *to; + void *data_hook = NULL; + g_autofree char *connect_uri = g_strdup(args->connect_uri); + g_autofree char *filename = g_strdup_printf("%s/%s", tmpfs, + FILE_TEST_FILENAME); + + if (migrate_start(&from, NULL, args->listen_uri, &args->start)) { + return; + } + + /* Source and dest never run concurrently */ + g_assert_false(args->live); + + if (args->start_hook) { + data_hook = args->start_hook(from, NULL); + } + + wait_for_serial("src_serial"); + set_cpr_exec_args(from, args); + migrate_set_capability(from, "events", true); + migrate_qmp(from, 
NULL, connect_uri, NULL, "{}"); + wait_for_migration_event(from, "completed"); + + to = qtest_init_after_exec(from); + + qtest_qmp_assert_success(to, "{ 'execute': 'migrate-incoming'," + " 'arguments': { " + " 'channels': [ { 'channel-type': 'main'," + " 'addr': { 'transport': 'file'," + " 'filename': %s," + " 'offset': 0 } } ] } }", + filename); + wait_for_migration_complete(to); + + wait_for_resume(to, get_dst()); + /* Device on target is still named src_serial because args do not change */ + wait_for_serial("src_serial"); + + if (args->end_hook) { + args->end_hook(from, to, data_hook); + } + + migrate_end(from, to, args->result == MIG_TEST_SUCCEED); +} + +static void *test_mode_exec_start(QTestState *from, QTestState *to) +{ + assert(!to); + migrate_set_parameter_str(from, "mode", "cpr-exec"); + return NULL; +} + +static void test_mode_exec(void) +{ + g_autofree char *uri = g_strdup_printf("file:%s/%s", tmpfs, + FILE_TEST_FILENAME); + g_autofree char *listen_uri = g_strdup_printf("defer"); + + MigrateCommon args = { + .start.only_source = true, + .start.opts_source = "-machine aux-ram-share=on -nodefaults", + .start.memory_backend = "-object memory-backend-memfd,id=pc.ram,size=%s" + " -machine memory-backend=pc.ram", + .connect_uri = uri, + .listen_uri = listen_uri, + .start_hook = test_mode_exec_start, + }; + + test_cpr_exec(&args); +} + void migration_test_add_cpr(MigrationTestEnv *env) { tmpfs = env->tmpfs; @@ -132,5 +267,6 @@ void migration_test_add_cpr(MigrationTestEnv *env) migration_test_add("/migration/mode/transfer", test_mode_transfer); migration_test_add("/migration/mode/transfer/defer", test_mode_transfer_defer); + migration_test_add("/migration/mode/exec", test_mode_exec); } } diff --git a/tests/qtest/migration/framework.c b/tests/qtest/migration/framework.c index 407c9023c0511..a9be9c2dbf8d1 100644 --- a/tests/qtest/migration/framework.c +++ b/tests/qtest/migration/framework.c @@ -234,7 +234,7 @@ static void 
migrate_start_set_capabilities(QTestState *from, QTestState *to, * to mimic as closer as that. */ migrate_set_capability(from, "events", true); - if (!args->defer_target_connect) { + if (!args->defer_target_connect && to) { migrate_set_capability(to, "events", true); } @@ -246,20 +246,26 @@ static void migrate_start_set_capabilities(QTestState *from, QTestState *to, if (args->caps[MIGRATION_CAPABILITY_MULTIFD]) { migrate_set_parameter_int(from, "multifd-channels", MULTIFD_TEST_CHANNELS); - migrate_set_parameter_int(to, "multifd-channels", - MULTIFD_TEST_CHANNELS); + if (to) { + migrate_set_parameter_int(to, "multifd-channels", + MULTIFD_TEST_CHANNELS); + } } return; } -int migrate_start(QTestState **from, QTestState **to, const char *uri, - MigrateStart *args) +static char *test_shmem_path(void) +{ + return g_strdup_printf("/dev/shm/qemu-%d", getpid()); +} + +int migrate_args(char **from, char **to, const char *uri, MigrateStart *args) { /* options for source and target */ g_autofree gchar *arch_opts = NULL; - g_autofree gchar *cmd_source = NULL; - g_autofree gchar *cmd_target = NULL; + gchar *cmd_source = NULL; + gchar *cmd_target = NULL; const gchar *ignore_stderr; g_autofree char *shmem_opts = NULL; g_autofree char *shmem_path = NULL; @@ -268,23 +274,10 @@ int migrate_start(QTestState **from, QTestState **to, const char *uri, const char *memory_size; const char *machine_alias, *machine_opts = ""; g_autofree char *machine = NULL; - const char *bootpath; - g_autoptr(QList) capabilities = migrate_start_get_qmp_capabilities(args); + const char *bootpath = bootfile_get(); g_autofree char *memory_backend = NULL; const char *events; - if (args->use_shmem) { - if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) { - g_test_skip("/dev/shm is not supported"); - return -1; - } - } - - dst_state = (QTestMigrationState) { }; - src_state = (QTestMigrationState) { }; - bootpath = bootfile_create(arch, tmpfs, args->suspend_me); - src_state.suspend_me = args->suspend_me; - if 
(strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { memory_size = "150M"; @@ -340,7 +333,7 @@ int migrate_start(QTestState **from, QTestState **to, const char *uri, } if (args->use_shmem) { - shmem_path = g_strdup_printf("/dev/shm/qemu-%d", getpid()); + shmem_path = test_shmem_path(); shmem_opts = g_strdup_printf( "-object memory-backend-file,id=mem0,size=%s" ",mem-path=%s,share=on -numa node,memdev=mem0", @@ -381,12 +374,6 @@ int migrate_start(QTestState **from, QTestState **to, const char *uri, shmem_opts ? shmem_opts : "", args->opts_source ? args->opts_source : "", ignore_stderr); - if (!args->only_target) { - *from = qtest_init_ext(QEMU_ENV_SRC, cmd_source, capabilities, true); - qtest_qmp_set_event_callback(*from, - migrate_watch_for_events, - &src_state); - } /* * If the monitor connection is deferred, enable events on the command line @@ -410,21 +397,62 @@ int migrate_start(QTestState **from, QTestState **to, const char *uri, shmem_opts ? shmem_opts : "", args->opts_target ? 
args->opts_target : "", ignore_stderr); - *to = qtest_init_ext(QEMU_ENV_DST, cmd_target, capabilities, - !args->defer_target_connect); - qtest_qmp_set_event_callback(*to, - migrate_watch_for_events, - &dst_state); + + *from = cmd_source; + *to = cmd_target; + return 0; +} + +int migrate_start(QTestState **from, QTestState **to, const char *uri, + MigrateStart *args) +{ + g_autofree gchar *cmd_source = NULL; + g_autofree gchar *cmd_target = NULL; + g_autoptr(QList) capabilities = migrate_start_get_qmp_capabilities(args); + + if (args->use_shmem) { + if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) { + g_test_skip("/dev/shm is not supported"); + return -1; + } + } + + dst_state = (QTestMigrationState) { }; + src_state = (QTestMigrationState) { }; + bootfile_create(qtest_get_arch(), tmpfs, args->suspend_me); + src_state.suspend_me = args->suspend_me; + + if (migrate_args(&cmd_source, &cmd_target, uri, args)) { + return -1; + } + + if (!args->only_target) { + *from = qtest_init_ext(QEMU_ENV_SRC, cmd_source, capabilities, true); + qtest_qmp_set_event_callback(*from, + migrate_watch_for_events, + &src_state); + } + + if (!args->only_source) { + *to = qtest_init_ext(QEMU_ENV_DST, cmd_target, capabilities, + !args->defer_target_connect); + qtest_qmp_set_event_callback(*to, + migrate_watch_for_events, + &dst_state); + } /* * Remove shmem file immediately to avoid memory leak in test failed case. * It's valid because QEMU has already opened this file */ if (args->use_shmem) { + g_autofree char *shmem_path = test_shmem_path(); unlink(shmem_path); } - migrate_start_set_capabilities(*from, *to, args); + migrate_start_set_capabilities(*from, + args->only_source ? 
NULL : *to, + args); return 0; } @@ -736,7 +764,7 @@ void test_postcopy_recovery_common(MigrateCommon *args) migrate_postcopy_complete(from, to, args); } -void test_precopy_common(MigrateCommon *args) +int test_precopy_common(MigrateCommon *args) { QTestState *from, *to; void *data_hook = NULL; @@ -746,7 +774,7 @@ void test_precopy_common(MigrateCommon *args) g_assert(!args->cpr_channel || args->connect_channels); if (migrate_start(&from, &to, args->listen_uri, &args->start)) { - return; + return -1; } if (args->start_hook) { @@ -869,6 +897,8 @@ void test_precopy_common(MigrateCommon *args) } migrate_end(from, to, args->result == MIG_TEST_SUCCEED); + + return 0; } static void file_dirty_offset_region(void) @@ -994,6 +1024,11 @@ QTestMigrationState *get_src(void) return &src_state; } +QTestMigrationState *get_dst(void) +{ + return &dst_state; +} + MigrationTestEnv *migration_get_env(void) { static MigrationTestEnv *env; diff --git a/tests/qtest/migration/framework.h b/tests/qtest/migration/framework.h index 01e425e64e2d7..9bb584a6bb1db 100644 --- a/tests/qtest/migration/framework.h +++ b/tests/qtest/migration/framework.h @@ -103,6 +103,8 @@ typedef struct { */ bool hide_stderr; bool use_shmem; + /* only launch the source process */ + bool only_source; /* only launch the target process */ bool only_target; /* Use dirty ring if true; dirty logging otherwise */ @@ -221,13 +223,15 @@ typedef struct { void wait_for_serial(const char *side); void migrate_prepare_for_dirty_mem(QTestState *from); void migrate_wait_for_dirty_mem(QTestState *from, QTestState *to); + +int migrate_args(char **from, char **to, const char *uri, MigrateStart *args); int migrate_start(QTestState **from, QTestState **to, const char *uri, MigrateStart *args); void migrate_end(QTestState *from, QTestState *to, bool test_dest); void test_postcopy_common(MigrateCommon *args); void test_postcopy_recovery_common(MigrateCommon *args); -void test_precopy_common(MigrateCommon *args); +int 
test_precopy_common(MigrateCommon *args); void test_file_common(MigrateCommon *args, bool stop_src); void *migrate_hook_start_precopy_tcp_multifd_common(QTestState *from, QTestState *to, @@ -235,6 +239,7 @@ void *migrate_hook_start_precopy_tcp_multifd_common(QTestState *from, typedef struct QTestMigrationState QTestMigrationState; QTestMigrationState *get_src(void); +QTestMigrationState *get_dst(void); #ifdef CONFIG_GNUTLS void migration_test_add_tls(MigrationTestEnv *env); diff --git a/tests/qtest/migration/migration-qmp.c b/tests/qtest/migration/migration-qmp.c index 66dd369ba7ec9..c803fcee9d36d 100644 --- a/tests/qtest/migration/migration-qmp.c +++ b/tests/qtest/migration/migration-qmp.c @@ -442,6 +442,22 @@ void migrate_set_parameter_str(QTestState *who, const char *parameter, migrate_check_parameter_str(who, parameter, value); } +void migrate_set_parameter_strv(QTestState *who, const char *parameter, + char **strv) +{ + g_autofree char *args = g_strjoinv("\",\"", strv); + g_autoptr(GString) value = g_string_new(""); + g_autofree char *command = NULL; + + g_string_printf(value, "\"%s\"", args); + + command = g_strdup_printf("{ 'execute': 'migrate-set-parameters'," + "'arguments': { %%s: [ %s ]}}", + value->str); + + qtest_qmp_assert_success(who, command, parameter); +} + static long long migrate_get_parameter_bool(QTestState *who, const char *parameter) { diff --git a/tests/qtest/migration/migration-qmp.h b/tests/qtest/migration/migration-qmp.h index faa8181d9164a..44482d250f352 100644 --- a/tests/qtest/migration/migration-qmp.h +++ b/tests/qtest/migration/migration-qmp.h @@ -34,6 +34,8 @@ void read_blocktime(QTestState *who); void wait_for_migration_pass(QTestState *who, QTestMigrationState *src_state); void migrate_set_parameter_str(QTestState *who, const char *parameter, const char *value); +void migrate_set_parameter_strv(QTestState *who, const char *parameter, + char **strv); void migrate_set_parameter_bool(QTestState *who, const char *parameter, int 
value); void migrate_ensure_non_converge(QTestState *who); diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index cf718761861de..279a8f5614e96 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -52,6 +52,8 @@ static int query_error_class(const char *cmd) /* Only valid with accel=tcg */ { "x-query-jit", ERROR_CLASS_GENERIC_ERROR }, { "xen-event-list", ERROR_CLASS_GENERIC_ERROR }, + /* requires firmware with memory buffer logging support */ + { "query-firmware-log", ERROR_CLASS_GENERIC_ERROR }, { NULL, -1 } }; int i; diff --git a/tests/qtest/qom-test.c b/tests/qtest/qom-test.c index 4ade1c728c014..2da9918e164eb 100644 --- a/tests/qtest/qom-test.c +++ b/tests/qtest/qom-test.c @@ -180,7 +180,7 @@ static void test_properties(QTestState *qts, const char *path, bool recurse) links = g_slist_delete_link(links, links); } while (children) { - test_properties(qts, children->data, true); + test_properties(qts, children->data, g_test_slow()); g_free(children->data); children = g_slist_delete_link(children, children); } @@ -211,14 +211,10 @@ static void test_machine(gconstpointer data) test_properties(qts, "/machine", true); - qlist_append_str(paths, "/machine"); + qlist_append_str(paths, "/"); test_list_get(qts, paths); test_list_get_value(qts); - response = qtest_qmp(qts, "{ 'execute': 'quit' }"); - g_assert(qdict_haskey(response, "return")); - qobject_unref(response); - qtest_quit(qts); g_free((void *)machine); } diff --git a/tests/qtest/qos-test.c b/tests/qtest/qos-test.c index abfd4b9512d5a..00f39f33f65d7 100644 --- a/tests/qtest/qos-test.c +++ b/tests/qtest/qos-test.c @@ -328,11 +328,6 @@ static void walk_path(QOSGraphNode *orig_path, int len) int main(int argc, char **argv, char** envp) { g_test_init(&argc, &argv, NULL); - - if (g_test_subprocess()) { - qos_printf("qos_test running single test in subprocess\n"); - } - if (g_test_verbose()) { qos_printf("ENVIRONMENT VARIABLES: {\n"); for (char **env = envp; *env != 0; env++) { 
diff --git a/tests/qtest/riscv-csr-test.c b/tests/qtest/riscv-csr-test.c index ff5c29e6c6f29..bb1b0ffed3080 100644 --- a/tests/qtest/riscv-csr-test.c +++ b/tests/qtest/riscv-csr-test.c @@ -50,7 +50,9 @@ int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); - qtest_add_func("/cpu/csr", run_test_csr); + if (qtest_has_machine("virt")) { + qtest_add_func("/cpu/csr", run_test_csr); + } return g_test_run(); } diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c index 75cb3e44b217c..609ff24059cad 100644 --- a/tests/qtest/vhost-user-test.c +++ b/tests/qtest/vhost-user-test.c @@ -26,7 +26,6 @@ #include "libqos/virtio-pci.h" #include "libqos/malloc-pc.h" -#include "libqos/qgraph_internal.h" #include "hw/virtio/virtio-net.h" #include "standard-headers/linux/vhost_types.h" @@ -331,7 +330,6 @@ static int chr_can_read(void *opaque) static void chr_read(void *opaque, const uint8_t *buf, int size) { - g_autoptr(GError) err = NULL; TestServer *s = opaque; CharBackend *chr = &s->chr; VhostUserMsg msg; @@ -345,7 +343,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) } if (size != VHOST_USER_HDR_SIZE) { - qos_printf("%s: Wrong message size received %d\n", __func__, size); + g_test_message("Wrong message size received %d", size); return; } @@ -356,8 +354,8 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) p += VHOST_USER_HDR_SIZE; size = qemu_chr_fe_read_all(chr, p, msg.size); if (size != msg.size) { - qos_printf("%s: Wrong message size received %d != %d\n", - __func__, size, msg.size); + g_test_message("Wrong message size received %d != %d", + size, msg.size); goto out; } } @@ -393,7 +391,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) * We don't need to do anything here, the remote is just * letting us know it is in charge. Just log it. 
*/ - qos_printf("set_owner: start of session\n"); + g_test_message("set_owner: start of session"); break; case VHOST_USER_GET_PROTOCOL_FEATURES: @@ -419,7 +417,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) * the remote end to send this. There is no handshake reply so * just log the details for debugging. */ - qos_printf("set_protocol_features: 0x%"PRIx64 "\n", msg.payload.u64); + g_test_message("set_protocol_features: 0x%"PRIx64, msg.payload.u64); break; /* @@ -427,11 +425,11 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) * address of the vrings but we can simply report them. */ case VHOST_USER_SET_VRING_NUM: - qos_printf("set_vring_num: %d/%d\n", + g_test_message("set_vring_num: %d/%d", msg.payload.state.index, msg.payload.state.num); break; case VHOST_USER_SET_VRING_ADDR: - qos_printf("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64"\n", + g_test_message("set_vring_addr: 0x%"PRIx64"/0x%"PRIx64"/0x%"PRIx64, msg.payload.addr.avail_user_addr, msg.payload.addr.desc_user_addr, msg.payload.addr.used_user_addr); @@ -464,7 +462,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) case VHOST_USER_SET_VRING_CALL: /* consume the fd */ if (!qemu_chr_fe_get_msgfds(chr, &fd, 1) && fd < 0) { - qos_printf("call fd: %d, do not set non-blocking\n", fd); + g_test_message("call fd: %d, do not set non-blocking", fd); break; } /* @@ -472,8 +470,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) * The receive function forces it to be blocking, * so revert it back to non-blocking. */ - g_unix_set_fd_nonblocking(fd, true, &err); - g_assert_no_error(err); + qemu_set_blocking(fd, false, &error_abort); break; case VHOST_USER_SET_LOG_BASE: @@ -510,12 +507,12 @@ static void chr_read(void *opaque, const uint8_t *buf, int size) * fully functioning vhost-user we would enable/disable the * vring monitoring. 
*/ - qos_printf("set_vring(%d)=%s\n", msg.payload.state.index, + g_test_message("set_vring(%d)=%s", msg.payload.state.index, msg.payload.state.num ? "enabled" : "disabled"); break; default: - qos_printf("vhost-user: un-handled message: %d\n", msg.request); + g_test_message("vhost-user: un-handled message: %d", msg.request); break; } @@ -539,7 +536,7 @@ static const char *init_hugepagefs(void) } if (access(path, R_OK | W_OK | X_OK)) { - qos_printf("access on path (%s): %s", path, strerror(errno)); + g_test_message("access on path (%s): %s", path, strerror(errno)); g_test_fail(); return NULL; } @@ -549,13 +546,13 @@ static const char *init_hugepagefs(void) } while (ret != 0 && errno == EINTR); if (ret != 0) { - qos_printf("statfs on path (%s): %s", path, strerror(errno)); + g_test_message("statfs on path (%s): %s", path, strerror(errno)); g_test_fail(); return NULL; } if (fs.f_type != HUGETLBFS_MAGIC) { - qos_printf("Warning: path not on HugeTLBFS: %s", path); + g_test_message("Warning: path not on HugeTLBFS: %s", path); g_test_fail(); return NULL; } diff --git a/tests/qtest/virtio-scsi-test.c b/tests/qtest/virtio-scsi-test.c index db10d572d0fcf..e2350c52f6fc1 100644 --- a/tests/qtest/virtio-scsi-test.c +++ b/tests/qtest/virtio-scsi-test.c @@ -311,6 +311,31 @@ static void test_iothread_attach_node(void *obj, void *data, unlink(tmp_path); } +static void test_iothread_virtio_error(void *obj, void *data, + QGuestAllocator *t_alloc) +{ + QVirtioSCSIPCI *scsi_pci = obj; + QVirtioSCSI *scsi = &scsi_pci->scsi; + QVirtioSCSIQueues *vs; + QVirtQueue *vq; + + alloc = t_alloc; + vs = qvirtio_scsi_init(scsi->vdev); + vq = vs->vq[2]; + + /* Move avail.idx out of bounds to trigger virtio_error() */ + qvirtqueue_set_avail_idx(global_qtest, scsi->vdev, vq, vq->size * 2); + scsi->vdev->bus->virtqueue_kick(scsi->vdev, vq); + + /* + * Reset the device out of the error state. If QEMU hangs or crashes then + * this will fail. 
+ */ + qvirtio_reset(scsi->vdev); + + qvirtio_scsi_pci_free(vs); +} + static void *virtio_scsi_hotplug_setup(GString *cmd_line, void *arg) { g_string_append(cmd_line, @@ -383,6 +408,13 @@ static void register_virtio_scsi_test(void) }; qos_add_test("iothread-attach-node", "virtio-scsi-pci", test_iothread_attach_node, &opts); + + opts.before = virtio_scsi_setup_iothread; + opts.edge = (QOSGraphEdgeOptions) { + .extra_device_opts = "iothread=thread0", + }; + qos_add_test("iothread-virtio-error", "virtio-scsi-pci", + test_iothread_virtio_error, &opts); } libqos_init(register_virtio_scsi_test); diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target index af68f11664fb0..af72903f89861 100644 --- a/tests/tcg/Makefile.target +++ b/tests/tcg/Makefile.target @@ -127,8 +127,14 @@ else # build options for bare programs are usually pretty different. They # are expected to provide their own build recipes. EXTRA_CFLAGS += -ffreestanding -fno-stack-protector + +# We skip the multiarch tests if the target hasn't provided a boot.S +MULTIARCH_SOFTMMU_TARGETS = i386 alpha aarch64 arm loongarch64 s390x x86_64 + +ifneq ($(filter $(TARGET_NAME),$(MULTIARCH_SOFTMMU_TARGETS)),) -include $(SRC_PATH)/tests/tcg/minilib/Makefile.target -include $(SRC_PATH)/tests/tcg/multiarch/system/Makefile.softmmu-target +endif -include $(SRC_PATH)/tests/tcg/$(TARGET_NAME)/Makefile.softmmu-target endif @@ -155,28 +161,58 @@ VPATH+=$(PLUGIN_LIB) # For example, libpatch.so only needs to run against the arch-specific patch # target test, so we explicitly run it in the arch-specific Makefile. 
DISABLE_PLUGINS=libpatch.so + +# Likewise don't bother with the syscall plugin for softmmu +ifneq ($(filter %-softmmu, $(TARGET)),) +DISABLE_PLUGINS += libsyscall.so +endif + PLUGINS=$(filter-out $(DISABLE_PLUGINS), \ $(patsubst %.c, lib%.so, $(notdir $(wildcard $(PLUGIN_SRC)/*.c)))) +strip-plugin = $(wordlist 1, 1, $(subst -with-, ,$1)) +extract-plugin = $(wordlist 2, 2, $(subst -with-, ,$1)) +extract-test = $(subst run-plugin-,,$(wordlist 1, 1, $(subst -with-, ,$1))) + # We need to ensure expand the run-plugin-TEST-with-PLUGIN # pre-requistes manually here as we can't use stems to handle it. We # only expand MULTIARCH_TESTS which are common on most of our targets -# to avoid an exponential explosion as new tests are added. We also -# add some special helpers the run-plugin- rules can use below. -# In more, extra tests can be added using ADDITIONAL_PLUGINS_TESTS variable. +# and rotate the plugins so we don't grow too out of control as new +# tests are added. Plugins that need to run with a specific test +# should ensure they add their combination to EXTRA_RUNS. ifneq ($(MULTIARCH_TESTS),) -$(foreach p,$(PLUGINS), \ - $(foreach t,$(MULTIARCH_TESTS) $(ADDITIONAL_PLUGINS_TESTS),\ - $(eval run-plugin-$(t)-with-$(p): $t $p) \ - $(eval RUN_TESTS+=run-plugin-$(t)-with-$(p)))) + +# Extract extra tests from the extra test+plugin combination. +EXTRA_TESTS_WITH_PLUGIN=$(foreach test, \ + $(EXTRA_RUNS_WITH_PLUGIN),$(call extract-test,$(test))) +# Exclude tests that were specified to run with specific plugins from the tests +# which can run with any plugin combination, so we don't run it twice. +MULTIARCH_TESTS:=$(filter-out $(EXTRA_TESTS_WITH_PLUGIN), $(MULTIARCH_TESTS)) + +NUM_PLUGINS := $(words $(PLUGINS)) +NUM_TESTS := $(words $(MULTIARCH_TESTS)) + +define mod_plus_one + $(shell $(PYTHON) -c "print( ($(1) % $(2)) + 1 )") +endef + +# Rules for running tests with any plugin combination, i.e., no specific plugin. 
+$(foreach _idx, $(shell seq 1 $(NUM_TESTS)), \ + $(eval _test := $(word $(_idx), $(MULTIARCH_TESTS))) \ + $(eval _plugin := $(word $(call mod_plus_one, $(_idx), $(NUM_PLUGINS)), $(PLUGINS))) \ + $(eval run-plugin-$(_test)-with-$(_plugin): $(_test) $(_plugin)) \ + $(eval RUN_TESTS+=run-plugin-$(_test)-with-$(_plugin))) + +# Rules for running extra tests with specific plugins. +$(foreach f,$(EXTRA_RUNS_WITH_PLUGIN), \ + $(eval $(f): $(call extract-test,$(f)) $(call extract-plugin,$(f)))) + endif # MULTIARCH_TESTS endif # CONFIG_PLUGIN -strip-plugin = $(wordlist 1, 1, $(subst -with-, ,$1)) -extract-plugin = $(wordlist 2, 2, $(subst -with-, ,$1)) - RUN_TESTS+=$(EXTRA_RUNS) +RUN_TESTS+=$(EXTRA_RUNS_WITH_PLUGIN) # Some plugins need additional arguments above the default to fully # exercise things. We can define them on a per-test basis here. diff --git a/tests/tcg/aarch64/Makefile.target b/tests/tcg/aarch64/Makefile.target index 16ddcf4f8831d..55ce34e45eeca 100644 --- a/tests/tcg/aarch64/Makefile.target +++ b/tests/tcg/aarch64/Makefile.target @@ -75,6 +75,11 @@ AARCH64_TESTS += $(SME_TESTS) $(SME_TESTS): CFLAGS += $(CROSS_AS_HAS_ARMV9_SME) endif +# GCS Tests +GCS_TESTS += gcsstr gcspushm gcsss +AARCH64_TESTS += $(GCS_TESTS) +$(GCS_TESTS): gcs.h + # System Registers Tests AARCH64_TESTS += sysregs @@ -134,6 +139,35 @@ run-gdbstub-sve-ioctls: sve-ioctls EXTRA_RUNS += run-gdbstub-sysregs run-gdbstub-sve-ioctls +ifneq ($(CROSS_AS_HAS_ARMV9_SME),) +# SME gdbstub tests + +run-gdbstub-sysregs-sme: sysregs + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(AARCH64_SRC)/gdbstub/test-sme.py \ + -- test_sme --gdb_basic_za_test, \ + basic gdbstub SME support) + +ifeq ($(GDB_HAS_SME_TILES),y) +run-gdbstub-sysregs-sme-tile-slice: sysregs + $(call run-test, $@, $(GDB_SCRIPT) \ + --gdb $(GDB) \ + --qemu $(QEMU) --qargs "$(QEMU_OPTS)" \ + --bin $< --test $(AARCH64_SRC)/gdbstub/test-sme.py \ + -- test_sme 
--gdb_tile_slice_test, \ + gdbstub SME ZA tile slice support) +else +run-gdbstub-sysregs-sme-tile-slice: sysregs + $(call skip-test,"gdbstub SME ZA tile slice support", \ + "selected gdb ($(GDB)) does not support SME ZA tile slices") +endif + +EXTRA_RUNS += run-gdbstub-sysregs-sme run-gdbstub-sysregs-sme-tile-slice + +endif + ifeq ($(GDB_HAS_MTE),y) run-gdbstub-mte: mte-8 $(call run-test, $@, $(GDB_SCRIPT) \ diff --git a/tests/tcg/aarch64/gcs.h b/tests/tcg/aarch64/gcs.h new file mode 100644 index 0000000000000..6f013d0f1e0d5 --- /dev/null +++ b/tests/tcg/aarch64/gcs.h @@ -0,0 +1,80 @@ +/* + * Linux kernel fallback API definitions for GCS and test helpers. + * + * Copyright (c) 2025 Linaro Ltd + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef PR_GET_SHADOW_STACK_STATUS +#define PR_GET_SHADOW_STACK_STATUS 74 +#endif +#ifndef PR_SET_SHADOW_STACK_STATUS +#define PR_SET_SHADOW_STACK_STATUS 75 +#endif +#ifndef PR_LOCK_SHADOW_STACK_STATUS +#define PR_LOCK_SHADOW_STACK_STATUS 76 +#endif +#ifndef PR_SHADOW_STACK_ENABLE +# define PR_SHADOW_STACK_ENABLE (1 << 0) +# define PR_SHADOW_STACK_WRITE (1 << 1) +# define PR_SHADOW_STACK_PUSH (1 << 2) +#endif +#ifndef SHADOW_STACK_SET_TOKEN +#define SHADOW_STACK_SET_TOKEN (1 << 0) +#endif +#ifndef SHADOW_STACK_SET_MARKER +#define SHADOW_STACK_SET_MARKER (1 << 1) +#endif +#ifndef SEGV_CPERR +#define SEGV_CPERR 10 +#endif +#ifndef __NR_map_shadow_stack +#define __NR_map_shadow_stack 453 +#endif + +/* + * Macros, and implement the syscall inline, lest we fail + * the checked return from any function call. 
+ */ +#define enable_gcs(flags) \ + do { \ + register long num __asm__ ("x8") = __NR_prctl; \ + register long arg1 __asm__ ("x0") = PR_SET_SHADOW_STACK_STATUS; \ + register long arg2 __asm__ ("x1") = PR_SHADOW_STACK_ENABLE | flags; \ + register long arg3 __asm__ ("x2") = 0; \ + register long arg4 __asm__ ("x3") = 0; \ + register long arg5 __asm__ ("x4") = 0; \ + asm volatile("svc #0" \ + : "+r"(arg1) \ + : "r"(arg2), "r"(arg3), "r"(arg4), "r"(arg5), "r"(num) \ + : "memory", "cc"); \ + if (arg1) { \ + errno = -arg1; \ + perror("PR_SET_SHADOW_STACK_STATUS"); \ + exit(2); \ + } \ + } while (0) + +#define gcspr() \ + ({ uint64_t *r; asm volatile("mrs %0, s3_3_c2_c5_1" : "=r"(r)); r; }) + +#define gcsss1(val) \ + do { \ + asm volatile("sys #3, c7, c7, #2, %0" : : "r"(val) : "memory"); \ + } while (0) + +#define gcsss2() \ + ({ uint64_t *r; \ + asm volatile("sysl %0, #3, c7, c7, #3" : "=r"(r) : : "memory"); r; }) diff --git a/tests/tcg/aarch64/gcspushm.c b/tests/tcg/aarch64/gcspushm.c new file mode 100644 index 0000000000000..c330417a2fa37 --- /dev/null +++ b/tests/tcg/aarch64/gcspushm.c @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gcs.h" + + +#define GCSPUSHM "sys #3, c7, c7, #0, %[push]" +#define GCSPOPM "sysl %[pop], #3, c7, c7, #1" + +static void test_sigsegv(int sig, siginfo_t *info, void *vuc) +{ + ucontext_t *uc = vuc; + uint64_t inst_sigsegv; + + __asm__("adr %0, inst_sigsegv" : "=r"(inst_sigsegv)); + assert(uc->uc_mcontext.pc == inst_sigsegv); + assert(info->si_code == SEGV_CPERR); + /* TODO: Dig for ESR and verify syndrome. 
*/ + uc->uc_mcontext.pc += 4; +} + +static void test_sigill(int sig, siginfo_t *info, void *vuc) +{ + ucontext_t *uc = vuc; + uint64_t inst_sigill; + + __asm__("adr %0, inst_sigill" : "=r"(inst_sigill)); + assert(uc->uc_mcontext.pc == inst_sigill); + assert(info->si_code == ILL_ILLOPC); + uc->uc_mcontext.pc += 4; +} + +int main() +{ + struct sigaction sa = { .sa_flags = SA_SIGINFO }; + uint64_t old, new; + + sa.sa_sigaction = test_sigsegv; + if (sigaction(SIGSEGV, &sa, NULL) < 0) { + perror("sigaction"); + exit(1); + } + + sa.sa_sigaction = test_sigill; + if (sigaction(SIGILL, &sa, NULL) < 0) { + perror("sigaction"); + exit(1); + } + + /* Pushm is disabled -- SIGILL via EC_SYSTEMREGISTERTRAP */ + asm volatile("inst_sigill:\t" GCSPUSHM + : : [push] "r" (1)); + + enable_gcs(PR_SHADOW_STACK_PUSH); + + /* Valid value -- low 2 bits clear */ + old = 0xdeadbeeffeedcaec; + asm volatile(GCSPUSHM "\n\t" GCSPOPM + : [pop] "=r" (new) + : [push] "r" (old) + : "memory"); + assert(old == new); + + /* Invalid value -- SIGSEGV via EC_GCS */ + asm volatile(GCSPUSHM "\n" + "inst_sigsegv:\t" GCSPOPM + : [pop] "=r" (new) + : [push] "r" (1) + : "memory"); + + exit(0); +} diff --git a/tests/tcg/aarch64/gcsss.c b/tests/tcg/aarch64/gcsss.c new file mode 100644 index 0000000000000..9550c68e7e745 --- /dev/null +++ b/tests/tcg/aarch64/gcsss.c @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gcs.h" + +#define IN_PROGRESS(X) ((uint64_t)(X) | 5) +#define CAP(X) (((uint64_t)(X) & ~0xfff) + 1) + +static uint64_t * __attribute__((noinline)) recurse(size_t index) +{ + if (index == 0) { + return gcspr(); + } + return recurse(index - 1); +} + +int main() +{ + void *tmp; + uint64_t *alt_stack, *alt_cap; + uint64_t *orig_pr, *orig_cap; + uint64_t *bottom; + size_t pagesize = getpagesize(); + size_t words; + + enable_gcs(0); + orig_pr = gcspr(); + + /* Allocate a guard page before and after. 
*/ + tmp = mmap(0, 3 * pagesize, PROT_NONE, MAP_ANON | MAP_PRIVATE, -1, 0); + assert(tmp != MAP_FAILED); + + /* map_shadow_stack won't replace existing mappings */ + munmap(tmp + pagesize, pagesize); + + /* Allocate a new stack between the guards. */ + alt_stack = (uint64_t *) + syscall(__NR_map_shadow_stack, tmp + pagesize, pagesize, + SHADOW_STACK_SET_TOKEN); + assert(alt_stack == tmp + pagesize); + + words = pagesize / 8; + alt_cap = alt_stack + words - 1; + + /* SHADOW_STACK_SET_TOKEN set the cap. */ + assert(*alt_cap == CAP(alt_cap)); + + /* Swap to the alt stack, one step at a time. */ + gcsss1(alt_cap); + + assert(gcspr() == alt_cap); + assert(*alt_cap == IN_PROGRESS(orig_pr)); + + orig_cap = gcsss2(); + + assert(orig_cap == orig_pr - 1); + assert(*orig_cap == CAP(orig_cap)); + assert(gcspr() == alt_stack + words); + + /* We should be able to use the whole stack. */ + bottom = recurse(words - 1); + assert(bottom == alt_stack); + + /* We should be back where we started. */ + assert(gcspr() == alt_stack + words); + + /* Swap back to the original stack. */ + gcsss1(orig_cap); + tmp = gcsss2(); + + assert(gcspr() == orig_pr); + assert(tmp == alt_cap); + + exit(0); +} diff --git a/tests/tcg/aarch64/gcsstr.c b/tests/tcg/aarch64/gcsstr.c new file mode 100644 index 0000000000000..b045aee925156 --- /dev/null +++ b/tests/tcg/aarch64/gcsstr.c @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "gcs.h" + +/* + * A single garbage store to the gcs stack. + * The asm inside must be unique, so disallow inlining. 
+ */ +void __attribute__((noinline)) +test_gcsstr(void) +{ + register uint64_t *ptr __asm__("x0") = gcspr(); + /* GCSSTR x1, x0 */ + __asm__("inst_gcsstr: .inst 0xd91f1c01" : : "r"(--ptr)); +} + +static void test_sigsegv(int sig, siginfo_t *info, void *vuc) +{ + ucontext_t *uc = vuc; + uint64_t inst_gcsstr; + + __asm__("adr %0, inst_gcsstr" : "=r"(inst_gcsstr)); + assert(uc->uc_mcontext.pc == inst_gcsstr); + assert(info->si_code == SEGV_CPERR); + /* TODO: Dig for ESR and verify syndrome. */ + exit(0); +} + +int main() +{ + struct sigaction sa = { + .sa_sigaction = test_sigsegv, + .sa_flags = SA_SIGINFO, + }; + + /* Enable GCSSTR and test the store succeeds. */ + enable_gcs(PR_SHADOW_STACK_WRITE); + test_gcsstr(); + + /* Disable GCSSTR and test the resulting sigsegv. */ + enable_gcs(0); + if (sigaction(SIGSEGV, &sa, NULL) < 0) { + perror("sigaction"); + exit(1); + } + test_gcsstr(); + abort(); +} diff --git a/tests/tcg/aarch64/gdbstub/test-mte.py b/tests/tcg/aarch64/gdbstub/test-mte.py index 9ad98e7a54c84..f4a7d7b446501 100644 --- a/tests/tcg/aarch64/gdbstub/test-mte.py +++ b/tests/tcg/aarch64/gdbstub/test-mte.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test GDB memory-tag commands that exercise the stubs for the qIsAddressTagged, # qMemTag, and QMemTag packets, which are used for manipulating allocation tags. diff --git a/tests/tcg/aarch64/gdbstub/test-sme.py b/tests/tcg/aarch64/gdbstub/test-sme.py new file mode 100644 index 0000000000000..ec03189642756 --- /dev/null +++ b/tests/tcg/aarch64/gdbstub/test-sme.py @@ -0,0 +1,117 @@ +# +# Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries. 
+# +# SPDX-License-Identifier: GPL-2.0-or-later + +# +# Test the SME registers are visible and changeable via gdbstub +# +# This is launched via tests/guest-debug/run-test.py +# + +import argparse +import gdb +from test_gdbstub import main, report + +MAGIC = 0x01020304 +BASIC_ZA_TEST = 0 +TILE_SLICE_TEST = 0 + + +def run_test(): + """Run the requested test(s) for SME ZA gdbstub support""" + + if BASIC_ZA_TEST: + run_basic_sme_za_gdbstub_support_test() + if TILE_SLICE_TEST: + run_basic_sme_za_tile_slice_gdbstub_support_test() + + +def run_basic_sme_za_gdbstub_support_test(): + """Test reads and writes to the SME ZA register at the byte level""" + + frame = gdb.selected_frame() + rname = "za" + za = frame.read_register(rname) + report(True, "Reading %s" % rname) + + # Writing to the ZA register, byte by byte. + for i in range(0, 16): + for j in range(0, 16): + cmd = "set $za[%d][%d] = 0x01" % (i, j) + gdb.execute(cmd) + report(True, "%s" % cmd) + + # Reading from the ZA register, byte by byte. + for i in range(0, 16): + for j in range(0, 16): + reg = "$za[%d][%d]" % (i, j) + v = gdb.parse_and_eval(reg) + report(str(v.type) == "uint8_t", "size of %s" % (reg)) + report(v == 0x1, "%s is 0x%x" % (reg, 0x1)) + + +def run_basic_sme_za_tile_slice_gdbstub_support_test(): + """Test reads and writes of SME ZA horizontal and vertical tile slices + + Test if SME ZA tile slices, both horizontal and vertical, + can be correctly read and written to. The sizes to test + are quadwords and doublewords. 
+ """ + + sizes = {} + sizes["q"] = "uint128_t" + sizes["d"] = "uint64_t" + + # Accessing requested sizes of elements of ZA + for size in sizes: + + # Accessing various ZA tiles + for i in range(0, 4): + + # Accessing various horizontal slices for each ZA tile + for j in range(0, 4): + # Writing to various elements in each tile slice + for k in range(0, 4): + cmd = "set $za%dh%c%d[%d] = 0x%x" % (i, size, j, k, MAGIC) + gdb.execute(cmd) + report(True, "%s" % cmd) + + # Reading from the written elements in each tile slice + for k in range(0, 4): + reg = "$za%dh%c%d[%d]" % (i, size, j, k) + v = gdb.parse_and_eval(reg) + report(str(v.type) == sizes[size], "size of %s" % (reg)) + report(v == MAGIC, "%s is 0x%x" % (reg, MAGIC)) + + # Accessing various vertical slices for each ZA tile + for j in range(0, 4): + # Writing to various elements in each tile slice + for k in range(0, 4): + cmd = "set $za%dv%c%d[%d] = 0x%x" % (i, size, j, k, MAGIC) + gdb.execute(cmd) + report(True, "%s" % cmd) + + # Reading from the written elements in each tile slice + for k in range(0, 4): + reg = "$za%dv%c%d[%d]" % (i, size, j, k) + v = gdb.parse_and_eval(reg) + report(str(v.type) == sizes[size], "size of %s" % (reg)) + report(v == MAGIC, "%s is 0x%x" % (reg, MAGIC)) + + +parser = argparse.ArgumentParser(description="A gdbstub test for SME support") +parser.add_argument("--gdb_basic_za_test", + help="Enable test for basic SME ZA support", + action="store_true") +parser.add_argument("--gdb_tile_slice_test", + help="Enable test for ZA tile slice support", + action="store_true") +args = parser.parse_args() + +if args.gdb_basic_za_test: + BASIC_ZA_TEST = 1 +if args.gdb_tile_slice_test: + TILE_SLICE_TEST = 1 + +main(run_test, expected_arch="aarch64") diff --git a/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py b/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py index a78a3a2514da2..2c5c2180319f4 100644 --- a/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py +++ b/tests/tcg/aarch64/gdbstub/test-sve-ioctl.py @@ 
-1,4 +1,3 @@ -from __future__ import print_function # # Test the SVE ZReg reports the right amount of data. It uses the # sve-ioctl test and examines the register data each time the diff --git a/tests/tcg/aarch64/gdbstub/test-sve.py b/tests/tcg/aarch64/gdbstub/test-sve.py index 84cdcd4a32ea1..7b0489a622b8f 100644 --- a/tests/tcg/aarch64/gdbstub/test-sve.py +++ b/tests/tcg/aarch64/gdbstub/test-sve.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test the SVE registers are visible and changeable via gdbstub # diff --git a/tests/tcg/hexagon/signal_context.c b/tests/tcg/hexagon/signal_context.c index 7202fa64b67d7..9de7f6be4fd14 100644 --- a/tests/tcg/hexagon/signal_context.c +++ b/tests/tcg/hexagon/signal_context.c @@ -26,7 +26,11 @@ void sig_user(int sig, siginfo_t *info, void *puc) "p1 = r7\n\t" "p2 = r7\n\t" "p3 = r7\n\t" - : : : "r7", "p0", "p1", "p2", "p3"); + "r6 = #0x12345678\n\t" + "cs0 = r6\n\t" + "r6 = #0x87654321\n\t" + "cs1 = r6\n\t" + : : : "r6", "r7", "p0", "p1", "p2", "p3", "cs0", "cs1"); } int main() @@ -53,7 +57,11 @@ int main() timer_settime(tid, 0, &it, NULL); asm("loop0(1f, %1)\n\t" - "1: r8 = #0xff\n\t" + "1: r9 = #0xdeadbeef\n\t" + " cs0 = r9\n\t" + " r9 = #0xbadc0fee\n\t" + " cs1 = r9\n\t" + " r8 = #0xff\n\t" " p0 = r8\n\t" " p1 = r8\n\t" " p2 = r8\n\t" @@ -74,10 +82,19 @@ int main() " r8 = p3\n\t" " p0 = cmp.eq(r8, #0xff)\n\t" " if (!p0) jump 2b\n\t" + " r8 = cs0\n\t" + " r9 = #0xdeadbeef\n\t" + " p0 = cmp.eq(r8, r9)\n\t" + " if (!p0) jump 2b\n\t" + " r8 = cs1\n\t" + " r9 = #0xbadc0fee\n\t" + " p0 = cmp.eq(r8, r9)\n\t" + " if (!p0) jump 2b\n\t" "4: {}: endloop0\n\t" : : "r"(&err), "r"(i) - : "memory", "r8", "p0", "p1", "p2", "p3"); + : "memory", "r8", "r9", "p0", "p1", "p2", "p3", "cs0", "cs1", "lc0", + "sa0"); puts(err ? 
"FAIL" : "PASS"); return err; diff --git a/tests/tcg/multiarch/Makefile.target b/tests/tcg/multiarch/Makefile.target index bfdf7197a7b6f..f5b4d2b81381c 100644 --- a/tests/tcg/multiarch/Makefile.target +++ b/tests/tcg/multiarch/Makefile.target @@ -46,6 +46,8 @@ vma-pthread: LDFLAGS+=-pthread sigreturn-sigmask: CFLAGS+=-pthread sigreturn-sigmask: LDFLAGS+=-pthread +tb-link: LDFLAGS+=-lpthread + # GCC versions 12/13/14/15 at least incorrectly complain about # "'SHA1Transform' reading 64 bytes from a region of size 0"; see the gcc bug # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=106709 @@ -189,6 +191,10 @@ run-plugin-semiconsole-with-%: TESTS += semihosting semiconsole endif +test-plugin-mem-access: CFLAGS+=-pthread -O0 +test-plugin-mem-access: LDFLAGS+=-pthread -O0 + +ifeq ($(CONFIG_PLUGIN),y) # Test plugin memory access instrumentation run-plugin-test-plugin-mem-access-with-libmem.so: \ PLUGIN_ARGS=$(COMMA)print-accesses=true @@ -197,8 +203,8 @@ run-plugin-test-plugin-mem-access-with-libmem.so: \ $(SRC_PATH)/tests/tcg/multiarch/check-plugin-output.sh \ $(QEMU) $< -test-plugin-mem-access: CFLAGS+=-pthread -O0 -test-plugin-mem-access: LDFLAGS+=-pthread -O0 +EXTRA_RUNS_WITH_PLUGIN += run-plugin-test-plugin-mem-access-with-libmem.so +endif # Update TESTS TESTS += $(MULTIARCH_TESTS) diff --git a/tests/tcg/multiarch/gdbstub/interrupt.py b/tests/tcg/multiarch/gdbstub/interrupt.py index 2d5654d154025..4eccdb41b9744 100644 --- a/tests/tcg/multiarch/gdbstub/interrupt.py +++ b/tests/tcg/multiarch/gdbstub/interrupt.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test some of the system debug features with the multiarch memory # test. 
It is a port of the original vmlinux focused test case but diff --git a/tests/tcg/multiarch/gdbstub/memory.py b/tests/tcg/multiarch/gdbstub/memory.py index 532b92e7fb31e..76d75e52512ed 100644 --- a/tests/tcg/multiarch/gdbstub/memory.py +++ b/tests/tcg/multiarch/gdbstub/memory.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test some of the system debug features with the multiarch memory # test. It is a port of the original vmlinux focused test case but diff --git a/tests/tcg/multiarch/gdbstub/sha1.py b/tests/tcg/multiarch/gdbstub/sha1.py index 1ce711a402ced..3403b82fd4a77 100644 --- a/tests/tcg/multiarch/gdbstub/sha1.py +++ b/tests/tcg/multiarch/gdbstub/sha1.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # A very simple smoke test for debugging the SHA1 userspace test on # each target. diff --git a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py index 6eb6ebf7b170b..796dca75f0cb8 100644 --- a/tests/tcg/multiarch/gdbstub/test-proc-mappings.py +++ b/tests/tcg/multiarch/gdbstub/test-proc-mappings.py @@ -1,7 +1,6 @@ """Test that gdbstub has access to proc mappings. 
This runs as a sourced script (via -x, via run-test.py).""" -from __future__ import print_function import gdb from test_gdbstub import gdb_exit, main, report diff --git a/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py b/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py index 00c26ab4a9567..fa36c943d6681 100644 --- a/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py +++ b/tests/tcg/multiarch/gdbstub/test-qxfer-auxv-read.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test auxiliary vector is loaded via gdbstub # diff --git a/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py b/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py index 862596b07a765..b18fa1234fb54 100644 --- a/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py +++ b/tests/tcg/multiarch/gdbstub/test-qxfer-siginfo-read.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test gdbstub Xfer:siginfo:read stub. # diff --git a/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py index 4d6b6b9fbe7b5..49cbc3548f607 100644 --- a/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py +++ b/tests/tcg/multiarch/gdbstub/test-thread-breakpoint.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test auxiliary vector is loaded via gdbstub # diff --git a/tests/tcg/multiarch/linux/linux-test.c b/tests/tcg/multiarch/linux/linux-test.c index 64f57cb287eb2..bf6e0fda2626e 100644 --- a/tests/tcg/multiarch/linux/linux-test.c +++ b/tests/tcg/multiarch/linux/linux-test.c @@ -155,9 +155,14 @@ static void test_file(void) error("stat mode"); if ((st.st_mode & 0777) != 0600) error("stat mode2"); - if (st.st_atime != 1001 || - st.st_mtime != 1000) + /* + * Only check mtime, not atime: other processes such as + * virus scanners might race with this test program and get + * in and update the atime, causing random failures. 
+ */ + if (st.st_mtime != 1000) { error("stat time"); + } chk_error(stat(tmpdir, &st)); if (!S_ISDIR(st.st_mode)) diff --git a/tests/tcg/multiarch/system/Makefile.softmmu-target b/tests/tcg/multiarch/system/Makefile.softmmu-target index 07be001102bd5..98c4eda5e0000 100644 --- a/tests/tcg/multiarch/system/Makefile.softmmu-target +++ b/tests/tcg/multiarch/system/Makefile.softmmu-target @@ -6,6 +6,11 @@ # architecture to add to the test dependencies and deal with the # complications of building. # +# To support the multiarch guests the target arch needs to provide a +# boot.S that jumps to main and provides a __sys_outc functions. +# Remember to update MULTIARCH_SOFTMMU_TARGETS in the tcg test +# Makefile.target when this is done. +# MULTIARCH_SRC=$(SRC_PATH)/tests/tcg/multiarch MULTIARCH_SYSTEM_SRC=$(MULTIARCH_SRC)/system @@ -66,8 +71,11 @@ endif MULTIARCH_RUNS += run-gdbstub-memory run-gdbstub-interrupt \ run-gdbstub-untimely-packet run-gdbstub-registers +ifeq ($(CONFIG_PLUGIN),y) # Test plugin memory access instrumentation -run-plugin-memory-with-libmem.so: \ - PLUGIN_ARGS=$(COMMA)region-summary=true -run-plugin-memory-with-libmem.so: \ - CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out +run-plugin-memory-with-libmem.so: memory libmem.so +run-plugin-memory-with-libmem.so: PLUGIN_ARGS=$(COMMA)region-summary=true +run-plugin-memory-with-libmem.so: CHECK_PLUGIN_OUTPUT_COMMAND=$(MULTIARCH_SYSTEM_SRC)/validate-memory-counts.py $@.out + +EXTRA_RUNS_WITH_PLUGIN += run-plugin-memory-with-libmem.so +endif diff --git a/tests/tcg/multiarch/tb-link.c b/tests/tcg/multiarch/tb-link.c new file mode 100644 index 0000000000000..4e40306fa18c3 --- /dev/null +++ b/tests/tcg/multiarch/tb-link.c @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Verify that a single TB spin-loop is properly invalidated, + * releasing the thread from the spin-loop. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + + +#ifdef __x86_64__ +#define READY 0x000047c6 /* movb $0,0(%rdi) */ +#define LOOP 0xfceb9090 /* 1: nop*2; jmp 1b */ +#define RETURN 0x909090c3 /* ret; nop*3 */ +#define NOP 0x90909090 /* nop*4 */ +#elif defined(__aarch64__) +#define READY 0x3900001f /* strb wzr,[x0] */ +#define LOOP 0x14000000 /* b . */ +#define RETURN 0xd65f03c0 /* ret */ +#define NOP 0xd503201f /* nop */ +#elif defined(__riscv) +#define READY 0x00050023 /* sb zero, (a0) */ +#define LOOP 0x0000006f /* jal zero, #0 */ +#define RETURN 0x00008067 /* jalr zero, ra, 0 */ +#define NOP 0x00000013 /* nop */ +#endif + + +int main() +{ +#ifdef READY + int tmp; + pthread_t thread_id; + bool hold = true; + uint32_t *buf; + + buf = mmap(NULL, 3 * sizeof(uint32_t), + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + assert(buf != MAP_FAILED); + + buf[0] = READY; + buf[1] = LOOP; + buf[2] = RETURN; + + alarm(2); + + tmp = pthread_create(&thread_id, NULL, (void *(*)(void *))buf, &hold); + assert(tmp == 0); + + while (hold) { + sched_yield(); + } + + buf[1] = NOP; + __builtin___clear_cache(&buf[1], &buf[2]); + + tmp = pthread_join(thread_id, NULL); + assert(tmp == 0); +#endif + return 0; +} diff --git a/tests/tcg/s390x/gdbstub/test-signals-s390x.py b/tests/tcg/s390x/gdbstub/test-signals-s390x.py index b6b7b39fc46dc..398ad534ebf6c 100644 --- a/tests/tcg/s390x/gdbstub/test-signals-s390x.py +++ b/tests/tcg/s390x/gdbstub/test-signals-s390x.py @@ -1,4 +1,3 @@ -from __future__ import print_function # # Test that signals and debugging mix well together on s390x. diff --git a/tests/tcg/s390x/gdbstub/test-svc.py b/tests/tcg/s390x/gdbstub/test-svc.py index 17210b4e02083..29a0aa0ede453 100644 --- a/tests/tcg/s390x/gdbstub/test-svc.py +++ b/tests/tcg/s390x/gdbstub/test-svc.py @@ -1,7 +1,6 @@ """Test single-stepping SVC. 
This runs as a sourced script (via -x, via run-test.py).""" -from __future__ import print_function import gdb from test_gdbstub import main, report diff --git a/tests/tcg/x86_64/Makefile.softmmu-target b/tests/tcg/x86_64/Makefile.softmmu-target index 3e30ca930749b..4e65f58b570c1 100644 --- a/tests/tcg/x86_64/Makefile.softmmu-target +++ b/tests/tcg/x86_64/Makefile.softmmu-target @@ -40,5 +40,5 @@ run-plugin-patch-target-with-libpatch.so: \ run-plugin-patch-target-with-libpatch.so: \ CHECK_PLUGIN_OUTPUT_COMMAND=$(X64_SYSTEM_SRC)/validate-patch.py $@.out run-plugin-patch-target-with-libpatch.so: patch-target libpatch.so -EXTRA_RUNS+=run-plugin-patch-target-with-libpatch.so +EXTRA_RUNS_WITH_PLUGIN+=run-plugin-patch-target-with-libpatch.so endif diff --git a/tests/tracetool/dtrace.c b/tests/tracetool/dtrace.c new file mode 100644 index 0000000000000..9f862fa14d299 --- /dev/null +++ b/tests/tracetool/dtrace.c @@ -0,0 +1,32 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "trace-testsuite.h" + +uint16_t _TRACE_TEST_BLAH_DSTATE; +uint16_t _TRACE_TEST_WIBBLE_DSTATE; +TraceEvent _TRACE_TEST_BLAH_EVENT = { + .id = 0, + .name = "test_blah", + .sstate = TRACE_TEST_BLAH_ENABLED, + .dstate = &_TRACE_TEST_BLAH_DSTATE +}; +TraceEvent _TRACE_TEST_WIBBLE_EVENT = { + .id = 0, + .name = "test_wibble", + .sstate = TRACE_TEST_WIBBLE_ENABLED, + .dstate = &_TRACE_TEST_WIBBLE_DSTATE +}; +TraceEvent *testsuite_trace_events[] = { + &_TRACE_TEST_BLAH_EVENT, + &_TRACE_TEST_WIBBLE_EVENT, + NULL, +}; + +static void trace_testsuite_register_events(void) +{ + trace_event_register_group(testsuite_trace_events); +} +trace_init(trace_testsuite_register_events) diff --git a/tests/tracetool/dtrace.d b/tests/tracetool/dtrace.d new file mode 100644 index 0000000000000..5cc06f9f4f510 --- /dev/null +++ b/tests/tracetool/dtrace.d @@ -0,0 +1,10 @@ +/* This file is autogenerated by 
tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +provider qemu { + +probe test_blah(void * context,const char * filename); + +probe test_wibble(void * context,int value); + +}; diff --git a/tests/tracetool/dtrace.h b/tests/tracetool/dtrace.h new file mode 100644 index 0000000000000..c8931a8d7b3d6 --- /dev/null +++ b/tests/tracetool/dtrace.h @@ -0,0 +1,45 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef TRACE_TESTSUITE_GENERATED_TRACERS_H +#define TRACE_TESTSUITE_GENERATED_TRACERS_H + +#include "trace/control.h" + +extern TraceEvent _TRACE_TEST_BLAH_EVENT; +extern TraceEvent _TRACE_TEST_WIBBLE_EVENT; +extern uint16_t _TRACE_TEST_BLAH_DSTATE; +extern uint16_t _TRACE_TEST_WIBBLE_DSTATE; +#define TRACE_TEST_BLAH_ENABLED 1 +#define TRACE_TEST_WIBBLE_ENABLED 1 +#ifndef SDT_USE_VARIADIC +#define SDT_USE_VARIADIC 1 +#endif +#include "trace-dtrace-testsuite.h" + +#undef SDT_USE_VARIADIC +#ifndef QEMU_TEST_BLAH_ENABLED +#define QEMU_TEST_BLAH_ENABLED() true +#endif +#ifndef QEMU_TEST_WIBBLE_ENABLED +#define QEMU_TEST_WIBBLE_ENABLED() true +#endif + +#define TRACE_TEST_BLAH_BACKEND_DSTATE() ( \ + QEMU_TEST_BLAH_ENABLED() || \ + false) + +static inline void trace_test_blah(void *context, const char *filename) +{ + QEMU_TEST_BLAH(context, filename); +} + +#define TRACE_TEST_WIBBLE_BACKEND_DSTATE() ( \ + QEMU_TEST_WIBBLE_ENABLED() || \ + false) + +static inline void trace_test_wibble(void *context, int value) +{ + QEMU_TEST_WIBBLE(context, value); +} +#endif /* TRACE_TESTSUITE_GENERATED_TRACERS_H */ diff --git a/tests/tracetool/dtrace.log-stap b/tests/tracetool/dtrace.log-stap new file mode 100644 index 0000000000000..092986e0b61e5 --- /dev/null +++ b/tests/tracetool/dtrace.log-stap @@ -0,0 +1,15 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +probe qemu.log.test_blah = qemu.test_blah ? 
+{ + try { + argfilename_str = filename ? user_string_n(filename, 512) : "" + } catch {} + printf("%d@%d test_blah Blah context=%p filename=%s\n", pid(), gettimeofday_ns(), context, argfilename_str) +} +probe qemu.log.test_wibble = qemu.test_wibble ? +{ + printf("%d@%d test_wibble Wibble context=%p value=%d\n", pid(), gettimeofday_ns(), context, value) +} + diff --git a/tests/tracetool/dtrace.simpletrace-stap b/tests/tracetool/dtrace.simpletrace-stap new file mode 100644 index 0000000000000..d064e3e286a8a --- /dev/null +++ b/tests/tracetool/dtrace.simpletrace-stap @@ -0,0 +1,16 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +probe qemu.simpletrace.test_blah = qemu.test_blah ? +{ + try { + argfilename_str = filename ? user_string_n(filename, 512) : "" + } catch {} + argfilename_len = strlen(argfilename_str) + printf("%8b%8b%8b%4b%4b%8b%4b%.*s", 1, 0, gettimeofday_ns(), 24 + 8 + 4 + argfilename_len, pid(), context, argfilename_len, argfilename_len, argfilename_str) +} +probe qemu.simpletrace.test_wibble = qemu.test_wibble ? +{ + printf("%8b%8b%8b%4b%4b%8b%8b", 1, 1, gettimeofday_ns(), 24 + 8 + 8, pid(), context, value) +} + diff --git a/tests/tracetool/dtrace.stap b/tests/tracetool/dtrace.stap new file mode 100644 index 0000000000000..9c5d8a527ce4c --- /dev/null +++ b/tests/tracetool/dtrace.stap @@ -0,0 +1,14 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +probe qemu.test_blah = process("qemu").mark("test_blah") +{ + context = $arg1; + filename = $arg2; +} +probe qemu.test_wibble = process("qemu").mark("test_wibble") +{ + context = $arg1; + value = $arg2; +} + diff --git a/tests/tracetool/ftrace.c b/tests/tracetool/ftrace.c new file mode 100644 index 0000000000000..9f862fa14d299 --- /dev/null +++ b/tests/tracetool/ftrace.c @@ -0,0 +1,32 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "trace-testsuite.h" + +uint16_t _TRACE_TEST_BLAH_DSTATE; +uint16_t _TRACE_TEST_WIBBLE_DSTATE; +TraceEvent _TRACE_TEST_BLAH_EVENT = { + .id = 0, + .name = "test_blah", + .sstate = TRACE_TEST_BLAH_ENABLED, + .dstate = &_TRACE_TEST_BLAH_DSTATE +}; +TraceEvent _TRACE_TEST_WIBBLE_EVENT = { + .id = 0, + .name = "test_wibble", + .sstate = TRACE_TEST_WIBBLE_ENABLED, + .dstate = &_TRACE_TEST_WIBBLE_DSTATE +}; +TraceEvent *testsuite_trace_events[] = { + &_TRACE_TEST_BLAH_EVENT, + &_TRACE_TEST_WIBBLE_EVENT, + NULL, +}; + +static void trace_testsuite_register_events(void) +{ + trace_event_register_group(testsuite_trace_events); +} +trace_init(trace_testsuite_register_events) diff --git a/tests/tracetool/ftrace.h b/tests/tracetool/ftrace.h new file mode 100644 index 0000000000000..1dfe42394136c --- /dev/null +++ b/tests/tracetool/ftrace.h @@ -0,0 +1,43 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef TRACE_TESTSUITE_GENERATED_TRACERS_H +#define TRACE_TESTSUITE_GENERATED_TRACERS_H + +#include "trace/control.h" + +extern TraceEvent _TRACE_TEST_BLAH_EVENT; +extern TraceEvent _TRACE_TEST_WIBBLE_EVENT; +extern uint16_t _TRACE_TEST_BLAH_DSTATE; +extern uint16_t _TRACE_TEST_WIBBLE_DSTATE; +#define TRACE_TEST_BLAH_ENABLED 1 +#define TRACE_TEST_WIBBLE_ENABLED 1 +#include "trace/ftrace.h" + + +#define TRACE_TEST_BLAH_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_BLAH) || \ + false) + +static inline void trace_test_blah(void *context, const char *filename) +{ + if (trace_event_get_state(TRACE_TEST_BLAH)) { +#line 4 "trace-events" + ftrace_write("test_blah " "Blah context=%p filename=%s" "\n" , context, filename); +#line 28 "ftrace.h" + } +} + +#define TRACE_TEST_WIBBLE_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_WIBBLE) || \ + false) + +static inline void trace_test_wibble(void *context, int value) +{ + if (trace_event_get_state(TRACE_TEST_WIBBLE)) { +#line 5 "trace-events" + ftrace_write("test_wibble " "Wibble context=%p value=%d" "\n" , context, value); +#line 41 "ftrace.h" + } +} +#endif /* TRACE_TESTSUITE_GENERATED_TRACERS_H */ diff --git a/tests/tracetool/ftrace.rs b/tests/tracetool/ftrace.rs new file mode 100644 index 0000000000000..07b9259cf29f3 --- /dev/null +++ b/tests/tracetool/ftrace.rs @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// This file is @generated by tracetool, do not edit. 
+ +#[allow(unused_imports)] +use std::ffi::c_char; +#[allow(unused_imports)] +use util::bindings; + +#[inline(always)] +fn trace_event_state_is_enabled(dstate: u16) -> bool { + (unsafe { trace_events_enabled_count }) != 0 && dstate != 0 +} + +extern "C" { + static mut trace_events_enabled_count: u32; +} +extern "C" { + static mut _TRACE_TEST_BLAH_DSTATE: u16; + static mut _TRACE_TEST_WIBBLE_DSTATE: u16; +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_blah(_context: *mut (), _filename: &std::ffi::CStr) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_BLAH_DSTATE}) { + let format_string = c"Blah context=%p filename=%s"; + unsafe {bindings::ftrace_write(format_string.as_ptr() as *const c_char, _context /* as *mut () */, _filename.as_ptr());} + } +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_wibble(_context: *mut (), _value: std::ffi::c_int) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_WIBBLE_DSTATE}) { + let format_string = c"Wibble context=%p value=%d"; + unsafe {bindings::ftrace_write(format_string.as_ptr() as *const c_char, _context /* as *mut () */, _value /* as std::ffi::c_int */);} + } +} diff --git a/tests/tracetool/log.c b/tests/tracetool/log.c new file mode 100644 index 0000000000000..9f862fa14d299 --- /dev/null +++ b/tests/tracetool/log.c @@ -0,0 +1,32 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "trace-testsuite.h" + +uint16_t _TRACE_TEST_BLAH_DSTATE; +uint16_t _TRACE_TEST_WIBBLE_DSTATE; +TraceEvent _TRACE_TEST_BLAH_EVENT = { + .id = 0, + .name = "test_blah", + .sstate = TRACE_TEST_BLAH_ENABLED, + .dstate = &_TRACE_TEST_BLAH_DSTATE +}; +TraceEvent _TRACE_TEST_WIBBLE_EVENT = { + .id = 0, + .name = "test_wibble", + .sstate = TRACE_TEST_WIBBLE_ENABLED, + .dstate = &_TRACE_TEST_WIBBLE_DSTATE +}; +TraceEvent *testsuite_trace_events[] = { + &_TRACE_TEST_BLAH_EVENT, + &_TRACE_TEST_WIBBLE_EVENT, + NULL, +}; + +static void trace_testsuite_register_events(void) +{ + trace_event_register_group(testsuite_trace_events); +} +trace_init(trace_testsuite_register_events) diff --git a/tests/tracetool/log.h b/tests/tracetool/log.h new file mode 100644 index 0000000000000..c7795871f8514 --- /dev/null +++ b/tests/tracetool/log.h @@ -0,0 +1,47 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef TRACE_TESTSUITE_GENERATED_TRACERS_H +#define TRACE_TESTSUITE_GENERATED_TRACERS_H + +#include "trace/control.h" + +extern TraceEvent _TRACE_TEST_BLAH_EVENT; +extern TraceEvent _TRACE_TEST_WIBBLE_EVENT; +extern uint16_t _TRACE_TEST_BLAH_DSTATE; +extern uint16_t _TRACE_TEST_WIBBLE_DSTATE; +#define TRACE_TEST_BLAH_ENABLED 1 +#define TRACE_TEST_WIBBLE_ENABLED 1 +#include "qemu/log-for-trace.h" + + +#define TRACE_TEST_BLAH_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_BLAH) || \ + false) + +static inline void trace_test_blah(void *context, const char *filename) +{ + if (trace_event_get_state(TRACE_TEST_BLAH)) { + if (qemu_loglevel_mask(LOG_TRACE)) { +#line 4 "trace-events" + qemu_log("test_blah " "Blah context=%p filename=%s" "\n", context, filename); +#line 29 "log.h" + } + } +} + +#define TRACE_TEST_WIBBLE_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_WIBBLE) || \ + false) + +static inline void trace_test_wibble(void *context, int value) +{ + if (trace_event_get_state(TRACE_TEST_WIBBLE)) { + if (qemu_loglevel_mask(LOG_TRACE)) { +#line 5 "trace-events" + qemu_log("test_wibble " "Wibble context=%p value=%d" "\n", context, value); +#line 44 "log.h" + } + } +} +#endif /* TRACE_TESTSUITE_GENERATED_TRACERS_H */ diff --git a/tests/tracetool/log.rs b/tests/tracetool/log.rs new file mode 100644 index 0000000000000..c191895c8f87e --- /dev/null +++ b/tests/tracetool/log.rs @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// This file is @generated by tracetool, do not edit. 
+ +#[allow(unused_imports)] +use std::ffi::c_char; +#[allow(unused_imports)] +use util::bindings; + +#[inline(always)] +fn trace_event_state_is_enabled(dstate: u16) -> bool { + (unsafe { trace_events_enabled_count }) != 0 && dstate != 0 +} + +extern "C" { + static mut trace_events_enabled_count: u32; +} +extern "C" { + static mut _TRACE_TEST_BLAH_DSTATE: u16; + static mut _TRACE_TEST_WIBBLE_DSTATE: u16; +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_blah(_context: *mut (), _filename: &std::ffi::CStr) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_BLAH_DSTATE}) { + let format_string = c"test_blah Blah context=%p filename=%s\n"; + if (unsafe { bindings::qemu_loglevel } & bindings::LOG_TRACE) != 0 { + unsafe { bindings::qemu_log(format_string.as_ptr() as *const c_char, _context /* as *mut () */, _filename.as_ptr());} + } + } +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_wibble(_context: *mut (), _value: std::ffi::c_int) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_WIBBLE_DSTATE}) { + let format_string = c"test_wibble Wibble context=%p value=%d\n"; + if (unsafe { bindings::qemu_loglevel } & bindings::LOG_TRACE) != 0 { + unsafe { bindings::qemu_log(format_string.as_ptr() as *const c_char, _context /* as *mut () */, _value /* as std::ffi::c_int */);} + } + } +} diff --git a/tests/tracetool/meson.build b/tests/tracetool/meson.build new file mode 100644 index 0000000000000..09bbaaa86bf49 --- /dev/null +++ b/tests/tracetool/meson.build @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0-or-later + +test_env = environment() +test_env.set('PYTHONPATH', meson.project_source_root() / 'scripts') +test_env.set('PYTHONIOENCODING', 'utf-8') + +backends = [ + 'dtrace', + 'ftrace', + 'log', + 'simple', + 'syslog', + 'ust' +] + +# The tracetool-test.py program has portability problems on Windows. 
+if host_machine.system() != 'windows' + foreach backend: backends + test(backend, + python, + args: [meson.current_source_dir() / 'tracetool-test.py', + meson.project_source_root() / 'scripts' / 'tracetool.py', + backend, + meson.current_source_dir(), + meson.current_build_dir()], + suite: ['tracetool']) + endforeach +endif diff --git a/tests/tracetool/simple.c b/tests/tracetool/simple.c new file mode 100644 index 0000000000000..0484177481ce3 --- /dev/null +++ b/tests/tracetool/simple.c @@ -0,0 +1,61 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "trace-testsuite.h" + +uint16_t _TRACE_TEST_BLAH_DSTATE; +uint16_t _TRACE_TEST_WIBBLE_DSTATE; +TraceEvent _TRACE_TEST_BLAH_EVENT = { + .id = 0, + .name = "test_blah", + .sstate = TRACE_TEST_BLAH_ENABLED, + .dstate = &_TRACE_TEST_BLAH_DSTATE +}; +TraceEvent _TRACE_TEST_WIBBLE_EVENT = { + .id = 0, + .name = "test_wibble", + .sstate = TRACE_TEST_WIBBLE_ENABLED, + .dstate = &_TRACE_TEST_WIBBLE_DSTATE +}; +TraceEvent *testsuite_trace_events[] = { + &_TRACE_TEST_BLAH_EVENT, + &_TRACE_TEST_WIBBLE_EVENT, + NULL, +}; + +static void trace_testsuite_register_events(void) +{ + trace_event_register_group(testsuite_trace_events); +} +trace_init(trace_testsuite_register_events) +#include "qemu/osdep.h" +#include "trace/control.h" +#include "trace/simple.h" + +void _simple_trace_test_blah(void *context, const char *filename) +{ + TraceBufferRecord rec; + size_t argfilename_len = filename ? MIN(strlen(filename), MAX_TRACE_STRLEN) : 0; + + if (trace_record_start(&rec, _TRACE_TEST_BLAH_EVENT.id, 8 + 4 + argfilename_len)) { + return; /* Trace Buffer Full, Event Dropped ! 
*/ + } + trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)context); + trace_record_write_str(&rec, filename, argfilename_len); + trace_record_finish(&rec); +} + +void _simple_trace_test_wibble(void *context, int value) +{ + TraceBufferRecord rec; + + if (trace_record_start(&rec, _TRACE_TEST_WIBBLE_EVENT.id, 8 + 8)) { + return; /* Trace Buffer Full, Event Dropped ! */ + } + trace_record_write_u64(&rec, (uintptr_t)(uint64_t *)context); + trace_record_write_u64(&rec, (uint64_t)value); + trace_record_finish(&rec); +} + diff --git a/tests/tracetool/simple.h b/tests/tracetool/simple.h new file mode 100644 index 0000000000000..ec6fcb22c3c8d --- /dev/null +++ b/tests/tracetool/simple.h @@ -0,0 +1,40 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef TRACE_TESTSUITE_GENERATED_TRACERS_H +#define TRACE_TESTSUITE_GENERATED_TRACERS_H + +#include "trace/control.h" + +extern TraceEvent _TRACE_TEST_BLAH_EVENT; +extern TraceEvent _TRACE_TEST_WIBBLE_EVENT; +extern uint16_t _TRACE_TEST_BLAH_DSTATE; +extern uint16_t _TRACE_TEST_WIBBLE_DSTATE; +#define TRACE_TEST_BLAH_ENABLED 1 +#define TRACE_TEST_WIBBLE_ENABLED 1 +void _simple_trace_test_blah(void *context, const char *filename); +void _simple_trace_test_wibble(void *context, int value); + + +#define TRACE_TEST_BLAH_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_BLAH) || \ + false) + +static inline void trace_test_blah(void *context, const char *filename) +{ + if (trace_event_get_state(TRACE_TEST_BLAH)) { + _simple_trace_test_blah(context, filename); + } +} + +#define TRACE_TEST_WIBBLE_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_WIBBLE) || \ + false) + +static inline void trace_test_wibble(void *context, int value) +{ + if (trace_event_get_state(TRACE_TEST_WIBBLE)) { + _simple_trace_test_wibble(context, value); + } +} +#endif /* TRACE_TESTSUITE_GENERATED_TRACERS_H */ diff --git a/tests/tracetool/simple.rs 
b/tests/tracetool/simple.rs new file mode 100644 index 0000000000000..9ee39495e387d --- /dev/null +++ b/tests/tracetool/simple.rs @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// This file is @generated by tracetool, do not edit. + +#[allow(unused_imports)] +use std::ffi::c_char; +#[allow(unused_imports)] +use util::bindings; + +#[inline(always)] +fn trace_event_state_is_enabled(dstate: u16) -> bool { + (unsafe { trace_events_enabled_count }) != 0 && dstate != 0 +} + +extern "C" { + static mut trace_events_enabled_count: u32; +} +extern "C" { + static mut _TRACE_TEST_BLAH_DSTATE: u16; + static mut _TRACE_TEST_WIBBLE_DSTATE: u16; +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_blah(_context: *mut (), _filename: &std::ffi::CStr) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_BLAH_DSTATE}) { + extern "C" { fn _simple_trace_test_blah(_context: *mut (), _filename: *const std::ffi::c_char); } + unsafe { _simple_trace_test_blah(_context, _filename.as_ptr()); } + } +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_wibble(_context: *mut (), _value: std::ffi::c_int) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_WIBBLE_DSTATE}) { + extern "C" { fn _simple_trace_test_wibble(_context: *mut (), _value: std::ffi::c_int); } + unsafe { _simple_trace_test_wibble(_context, _value); } + } +} diff --git a/tests/tracetool/syslog.c b/tests/tracetool/syslog.c new file mode 100644 index 0000000000000..9f862fa14d299 --- /dev/null +++ b/tests/tracetool/syslog.c @@ -0,0 +1,32 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "trace-testsuite.h" + +uint16_t _TRACE_TEST_BLAH_DSTATE; +uint16_t _TRACE_TEST_WIBBLE_DSTATE; +TraceEvent _TRACE_TEST_BLAH_EVENT = { + .id = 0, + .name = "test_blah", + .sstate = TRACE_TEST_BLAH_ENABLED, + .dstate = &_TRACE_TEST_BLAH_DSTATE +}; +TraceEvent _TRACE_TEST_WIBBLE_EVENT = { + .id = 0, + .name = "test_wibble", + .sstate = TRACE_TEST_WIBBLE_ENABLED, + .dstate = &_TRACE_TEST_WIBBLE_DSTATE +}; +TraceEvent *testsuite_trace_events[] = { + &_TRACE_TEST_BLAH_EVENT, + &_TRACE_TEST_WIBBLE_EVENT, + NULL, +}; + +static void trace_testsuite_register_events(void) +{ + trace_event_register_group(testsuite_trace_events); +} +trace_init(trace_testsuite_register_events) diff --git a/tests/tracetool/syslog.h b/tests/tracetool/syslog.h new file mode 100644 index 0000000000000..ed4305554c174 --- /dev/null +++ b/tests/tracetool/syslog.h @@ -0,0 +1,43 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef TRACE_TESTSUITE_GENERATED_TRACERS_H +#define TRACE_TESTSUITE_GENERATED_TRACERS_H + +#include "trace/control.h" + +extern TraceEvent _TRACE_TEST_BLAH_EVENT; +extern TraceEvent _TRACE_TEST_WIBBLE_EVENT; +extern uint16_t _TRACE_TEST_BLAH_DSTATE; +extern uint16_t _TRACE_TEST_WIBBLE_DSTATE; +#define TRACE_TEST_BLAH_ENABLED 1 +#define TRACE_TEST_WIBBLE_ENABLED 1 +#include + + +#define TRACE_TEST_BLAH_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_BLAH) || \ + false) + +static inline void trace_test_blah(void *context, const char *filename) +{ + if (trace_event_get_state(TRACE_TEST_BLAH)) { +#line 4 "trace-events" + syslog(LOG_INFO, "test_blah " "Blah context=%p filename=%s" , context, filename); +#line 28 "syslog.h" + } +} + +#define TRACE_TEST_WIBBLE_BACKEND_DSTATE() ( \ + trace_event_get_state_dynamic_by_id(TRACE_TEST_WIBBLE) || \ + false) + +static inline void trace_test_wibble(void *context, int value) +{ + if (trace_event_get_state(TRACE_TEST_WIBBLE)) { +#line 5 "trace-events" + syslog(LOG_INFO, "test_wibble " "Wibble context=%p value=%d" , context, value); +#line 41 "syslog.h" + } +} +#endif /* TRACE_TESTSUITE_GENERATED_TRACERS_H */ diff --git a/tests/tracetool/syslog.rs b/tests/tracetool/syslog.rs new file mode 100644 index 0000000000000..9d3675a0b5767 --- /dev/null +++ b/tests/tracetool/syslog.rs @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +// This file is @generated by tracetool, do not edit. 
+ +#[allow(unused_imports)] +use std::ffi::c_char; +#[allow(unused_imports)] +use util::bindings; + +#[inline(always)] +fn trace_event_state_is_enabled(dstate: u16) -> bool { + (unsafe { trace_events_enabled_count }) != 0 && dstate != 0 +} + +extern "C" { + static mut trace_events_enabled_count: u32; +} +extern "C" { + static mut _TRACE_TEST_BLAH_DSTATE: u16; + static mut _TRACE_TEST_WIBBLE_DSTATE: u16; +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_blah(_context: *mut (), _filename: &std::ffi::CStr) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_BLAH_DSTATE}) { + let format_string = c"Blah context=%p filename=%s"; + unsafe {::trace::syslog(::trace::LOG_INFO, format_string.as_ptr() as *const c_char, _context /* as *mut () */, _filename.as_ptr());} + } +} + +#[inline(always)] +#[allow(dead_code)] +pub fn trace_test_wibble(_context: *mut (), _value: std::ffi::c_int) +{ + if trace_event_state_is_enabled(unsafe { _TRACE_TEST_WIBBLE_DSTATE}) { + let format_string = c"Wibble context=%p value=%d"; + unsafe {::trace::syslog(::trace::LOG_INFO, format_string.as_ptr() as *const c_char, _context /* as *mut () */, _value /* as std::ffi::c_int */);} + } +} diff --git a/tests/tracetool/trace-events b/tests/tracetool/trace-events new file mode 100644 index 0000000000000..72cf4d6f70d18 --- /dev/null +++ b/tests/tracetool/trace-events @@ -0,0 +1,5 @@ +# See docs/devel/tracing.rst for syntax documentation. 
+# SPDX-License-Identifier: GPL-2.0-or-later + +test_blah(void *context, const char *filename) "Blah context=%p filename=%s" +test_wibble(void *context, int value) "Wibble context=%p value=%d" diff --git a/tests/tracetool/tracetool-test.py b/tests/tracetool/tracetool-test.py new file mode 100755 index 0000000000000..786083ad7fb7a --- /dev/null +++ b/tests/tracetool/tracetool-test.py @@ -0,0 +1,109 @@ +#!/usr/bin/python3 +# SPDX-License-Identifier: GPL-2.0-or-later + +import os +from pathlib import Path +from shutil import copyfile +from subprocess import check_call +import sys +import tempfile + + +def get_formats(backend): + formats = [ + "c", + "h", + ] + if backend in {"ftrace", "log", "simple", "syslog"}: + formats += ["rs"] + if backend == "dtrace": + formats += [ + "d", + "log-stap", + "simpletrace-stap", + "stap", + ] + if backend == "ust": + formats += [ + "ust-events-c", + "ust-events-h", + ] + return formats + + +def test_tracetool_one(tracetool, backend, fmt, src_dir, build_dir): + rel_filename = backend + "." 
+ fmt + actual_file = Path(build_dir, rel_filename) + expect_file = Path(src_dir, rel_filename) + + args = [tracetool, f"--format={fmt}", f"--backends={backend}", "--group=testsuite"] + + if fmt.find("stap") != -1: + args += ["--binary=qemu", "--probe-prefix=qemu"] + + # Use relative files for both, as these filenames end + # up in '#line' statements in the output + args += ["trace-events", rel_filename] + + try: + check_call(args, cwd=build_dir) + actual = actual_file.read_text() + finally: + actual_file.unlink() + + if os.getenv("QEMU_TEST_REGENERATE", False): + print(f"# regenerate {expect_file}") + expect_file.write_text(actual) + + expect = expect_file.read_text() + + assert expect == actual + + +def test_tracetool(tracetool, backend, source_dir, build_dir): + fail = False + scenarios = len(get_formats(backend)) + + print(f"1..{scenarios}") + + src_events = Path(source_dir, "trace-events") + build_events = Path(build_dir, "trace-events") + + try: + # We need a stable relative filename under build dir + # for the '#line' statements, so copy over the input + copyfile(src_events, build_events) + + num = 1 + for fmt in get_formats(backend): + status = "not ok" + hint = "" + try: + test_tracetool_one(tracetool, backend, fmt, source_dir, build_dir) + status = "ok" + except Exception as e: + print(f"# {e}") + fail = True + hint = ( + " (set QEMU_TEST_REGENERATE=1 to recreate reference " + + "output if tracetool generator was intentionally changed)" + ) + finally: + print(f"{status} {num} - {backend}.{fmt}{hint}") + finally: + build_events.unlink() + + return fail + + +if __name__ == "__main__": + if len(sys.argv) != 5: + argv0 = sys.argv[0] + print("syntax: {argv0} TRACE-TOOL BACKEND SRC-DIR BUILD-DIR", file=sys.stderr) + sys.exit(1) + + with tempfile.TemporaryDirectory(prefix=sys.argv[4]) as tmpdir: + fail = test_tracetool(sys.argv[1], sys.argv[2], sys.argv[3], tmpdir) + if fail: + sys.exit(1) + sys.exit(0) diff --git a/tests/tracetool/ust.c b/tests/tracetool/ust.c 
new file mode 100644 index 0000000000000..9f862fa14d299 --- /dev/null +++ b/tests/tracetool/ust.c @@ -0,0 +1,32 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" +#include "qemu/module.h" +#include "trace-testsuite.h" + +uint16_t _TRACE_TEST_BLAH_DSTATE; +uint16_t _TRACE_TEST_WIBBLE_DSTATE; +TraceEvent _TRACE_TEST_BLAH_EVENT = { + .id = 0, + .name = "test_blah", + .sstate = TRACE_TEST_BLAH_ENABLED, + .dstate = &_TRACE_TEST_BLAH_DSTATE +}; +TraceEvent _TRACE_TEST_WIBBLE_EVENT = { + .id = 0, + .name = "test_wibble", + .sstate = TRACE_TEST_WIBBLE_ENABLED, + .dstate = &_TRACE_TEST_WIBBLE_DSTATE +}; +TraceEvent *testsuite_trace_events[] = { + &_TRACE_TEST_BLAH_EVENT, + &_TRACE_TEST_WIBBLE_EVENT, + NULL, +}; + +static void trace_testsuite_register_events(void) +{ + trace_event_register_group(testsuite_trace_events); +} +trace_init(trace_testsuite_register_events) diff --git a/tests/tracetool/ust.h b/tests/tracetool/ust.h new file mode 100644 index 0000000000000..b7acd0c39b155 --- /dev/null +++ b/tests/tracetool/ust.h @@ -0,0 +1,41 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#ifndef TRACE_TESTSUITE_GENERATED_TRACERS_H +#define TRACE_TESTSUITE_GENERATED_TRACERS_H + +#include "trace/control.h" + +extern TraceEvent _TRACE_TEST_BLAH_EVENT; +extern TraceEvent _TRACE_TEST_WIBBLE_EVENT; +extern uint16_t _TRACE_TEST_BLAH_DSTATE; +extern uint16_t _TRACE_TEST_WIBBLE_DSTATE; +#define TRACE_TEST_BLAH_ENABLED 1 +#define TRACE_TEST_WIBBLE_ENABLED 1 +#include +#include "trace-ust-testsuite.h" + +/* tracepoint_enabled() was introduced in LTTng UST 2.7 */ +#ifndef tracepoint_enabled +#define tracepoint_enabled(a, b) true +#endif + + +#define TRACE_TEST_BLAH_BACKEND_DSTATE() ( \ + tracepoint_enabled(qemu, test_blah) || \ + false) + +static inline void trace_test_blah(void *context, const char *filename) +{ + tracepoint(qemu, test_blah, context, filename); +} + +#define TRACE_TEST_WIBBLE_BACKEND_DSTATE() ( \ + tracepoint_enabled(qemu, test_wibble) || \ + false) + +static inline void trace_test_wibble(void *context, int value) +{ + tracepoint(qemu, test_wibble, context, value); +} +#endif /* TRACE_TESTSUITE_GENERATED_TRACERS_H */ diff --git a/tests/tracetool/ust.ust-events-c b/tests/tracetool/ust.ust-events-c new file mode 100644 index 0000000000000..db232240568b3 --- /dev/null +++ b/tests/tracetool/ust.ust-events-c @@ -0,0 +1,14 @@ +/* This file is autogenerated by tracetool, do not edit. */ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#include "qemu/osdep.h" + +#define TRACEPOINT_DEFINE +#define TRACEPOINT_CREATE_PROBES + +/* If gcc version 4.7 or older is used, LTTng ust gives a warning when compiling with + -Wredundant-decls. + */ +#pragma GCC diagnostic ignored "-Wredundant-decls" + +#include "trace-ust-all.h" diff --git a/tests/tracetool/ust.ust-events-h b/tests/tracetool/ust.ust-events-h new file mode 100644 index 0000000000000..4621a995fc1a2 --- /dev/null +++ b/tests/tracetool/ust.ust-events-h @@ -0,0 +1,56 @@ +/* This file is autogenerated by tracetool, do not edit. 
*/ +/* SPDX-License-Identifier: GPL-2.0-or-later */ + +#undef TRACEPOINT_PROVIDER +#define TRACEPOINT_PROVIDER qemu + +#undef TRACEPOINT_INCLUDE +#define TRACEPOINT_INCLUDE "./trace-ust.h" + +#if !defined (TRACE_TESTSUITE_GENERATED_UST_H) || \ + defined(TRACEPOINT_HEADER_MULTI_READ) +#define TRACE_TESTSUITE_GENERATED_UST_H + +#include + +/* + * LTTng ust 2.0 does not allow you to use TP_ARGS(void) for tracepoints + * requiring no arguments. We define these macros introduced in more recent * versions of LTTng ust as a workaround + */ +#ifndef _TP_EXPROTO1 +#define _TP_EXPROTO1(a) void +#endif +#ifndef _TP_EXDATA_PROTO1 +#define _TP_EXDATA_PROTO1(a) void *__tp_data +#endif +#ifndef _TP_EXDATA_VAR1 +#define _TP_EXDATA_VAR1(a) __tp_data +#endif +#ifndef _TP_EXVAR1 +#define _TP_EXVAR1(a) +#endif + +TRACEPOINT_EVENT( + qemu, + test_blah, + TP_ARGS(void *, context, const char *, filename), + TP_FIELDS( + ctf_integer_hex(void *, context, context) + ctf_string(filename, filename) + ) +) + +TRACEPOINT_EVENT( + qemu, + test_wibble, + TP_ARGS(void *, context, int, value), + TP_FIELDS( + ctf_integer_hex(void *, context, context) + ctf_integer(int, value, value) + ) +) + +#endif /* TRACE_TESTSUITE_GENERATED_UST_H */ + +/* This part must be outside ifdef protection */ +#include diff --git a/tests/tsan/ignore.tsan b/tests/tsan/ignore.tsan index 423e482d2f9ec..8fa00a2c49baf 100644 --- a/tests/tsan/ignore.tsan +++ b/tests/tsan/ignore.tsan @@ -4,7 +4,7 @@ # The eventual goal would be to fix these warnings. # TSan is not happy about setting/getting of dirty bits, -# for example, cpu_physical_memory_set_dirty_range, -# and cpu_physical_memory_get_dirty. +# for example, physical_memory_set_dirty_range, +# and physical_memory_get_dirty. 
src:bitops.c src:bitmap.c diff --git a/tests/unit/io-channel-helpers.c b/tests/unit/io-channel-helpers.c index c0799c21c233c..22b42d14cdd7e 100644 --- a/tests/unit/io-channel-helpers.c +++ b/tests/unit/io-channel-helpers.c @@ -20,6 +20,7 @@ #include "qemu/osdep.h" #include "io-channel-helpers.h" +#include "qapi/error.h" #include "qemu/iov.h" struct QIOChannelTest { @@ -109,8 +110,8 @@ void qio_channel_test_run_threads(QIOChannelTest *test, test->src = src; test->dst = dst; - qio_channel_set_blocking(test->dst, blocking, NULL); - qio_channel_set_blocking(test->src, blocking, NULL); + qio_channel_set_blocking(test->dst, blocking, &error_abort); + qio_channel_set_blocking(test->src, blocking, &error_abort); reader = g_thread_new("reader", test_io_thread_reader, diff --git a/tests/unit/socket-helpers.c b/tests/unit/socket-helpers.c index 37db24f72a1e3..46d2ff101c9ae 100644 --- a/tests/unit/socket-helpers.c +++ b/tests/unit/socket-helpers.c @@ -19,6 +19,7 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "qemu/sockets.h" #include "socket-helpers.h" @@ -88,7 +89,8 @@ static int socket_can_bind_connect(const char *hostname, int family) goto cleanup; } - qemu_socket_set_nonblock(cfd); + qemu_set_blocking(cfd, false, &error_abort); + if (connect(cfd, (struct sockaddr *)&ss, sslen) < 0) { if (errno == EINPROGRESS) { check_soerr = true; diff --git a/tests/unit/test-crypto-tlssession.c b/tests/unit/test-crypto-tlssession.c index 554054e9344c8..61311cbe6efe1 100644 --- a/tests/unit/test-crypto-tlssession.c +++ b/tests/unit/test-crypto-tlssession.c @@ -112,8 +112,8 @@ static void test_crypto_tls_session_psk(void) * thread, so we need these non-blocking to avoid deadlock * of ourselves */ - qemu_socket_set_nonblock(channel[0]); - qemu_socket_set_nonblock(channel[1]); + qemu_set_blocking(channel[0], false, &error_abort); + qemu_set_blocking(channel[1], false, &error_abort); clientCreds = test_tls_creds_psk_create( QCRYPTO_TLS_CREDS_ENDPOINT_CLIENT, @@ -264,8 +264,8 
@@ static void test_crypto_tls_session_x509(const void *opaque) * thread, so we need these non-blocking to avoid deadlock * of ourselves */ - qemu_socket_set_nonblock(channel[0]); - qemu_socket_set_nonblock(channel[1]); + qemu_set_blocking(channel[0], false, &error_abort); + qemu_set_blocking(channel[1], false, &error_abort); #define CLIENT_CERT_DIR "tests/test-crypto-tlssession-client/" #define SERVER_CERT_DIR "tests/test-crypto-tlssession-server/" diff --git a/tests/unit/test-error-report.c b/tests/unit/test-error-report.c index 54319c86c927f..0cbde3c4cf5f4 100644 --- a/tests/unit/test-error-report.c +++ b/tests/unit/test-error-report.c @@ -104,22 +104,6 @@ test_error_report_timestamp(void) "); } -static void -test_error_warn(void) -{ - if (g_test_subprocess()) { - error_setg(&error_warn, "Testing &error_warn"); - return; - } - - g_test_trap_subprocess(NULL, 0, 0); - g_test_trap_assert_passed(); - g_test_trap_assert_stderr("\ -test-error-report: warning: Testing &error_warn*\ -"); -} - - int main(int argc, char *argv[]) { @@ -133,7 +117,6 @@ main(int argc, char *argv[]) g_test_add_func("/error-report/glog", test_error_report_glog); g_test_add_func("/error-report/once", test_error_report_once); g_test_add_func("/error-report/timestamp", test_error_report_timestamp); - g_test_add_func("/error-report/warn", test_error_warn); return g_test_run(); } diff --git a/tests/unit/test-io-channel-tls.c b/tests/unit/test-io-channel-tls.c index e036ac5df4c29..6f282ad45d0cb 100644 --- a/tests/unit/test-io-channel-tls.c +++ b/tests/unit/test-io-channel-tls.c @@ -184,8 +184,8 @@ static void test_io_channel_tls(const void *opaque) * thread, so we need these non-blocking to avoid deadlock * of ourselves */ - qio_channel_set_blocking(QIO_CHANNEL(clientChanSock), false, NULL); - qio_channel_set_blocking(QIO_CHANNEL(serverChanSock), false, NULL); + qio_channel_set_blocking(QIO_CHANNEL(clientChanSock), false, &error_abort); + qio_channel_set_blocking(QIO_CHANNEL(serverChanSock), false, 
&error_abort); /* Now the real part of the test, setup the sessions */ clientChanTLS = qio_channel_tls_new_client( diff --git a/tests/unit/test-iov.c b/tests/unit/test-iov.c index 75bc3be00578e..63e2b1583cb66 100644 --- a/tests/unit/test-iov.c +++ b/tests/unit/test-iov.c @@ -1,4 +1,5 @@ #include "qemu/osdep.h" +#include "qapi/error.h" #include "qemu/iov.h" #include "qemu/sockets.h" @@ -186,7 +187,7 @@ static void test_io(void) close(sv[0]); FD_SET(sv[1], &fds); - g_unix_set_fd_nonblocking(sv[1], true, NULL); + qemu_set_blocking(sv[1], false, &error_abort); r = g_test_rand_int_range(sz / 2, sz); setsockopt(sv[1], SOL_SOCKET, SO_SNDBUF, &r, sizeof(r)); @@ -222,7 +223,7 @@ static void test_io(void) close(sv[1]); FD_SET(sv[0], &fds); - g_unix_set_fd_nonblocking(sv[0], true, NULL); + qemu_set_blocking(sv[0], false, &error_abort); r = g_test_rand_int_range(sz / 2, sz); setsockopt(sv[0], SOL_SOCKET, SO_RCVBUF, &r, sizeof(r)); usleep(500000); diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c index 63f28f26f4569..cadbab3c5e260 100644 --- a/tests/unit/test-vmstate.c +++ b/tests/unit/test-vmstate.c @@ -30,6 +30,7 @@ #include "../migration/savevm.h" #include "qemu/module.h" #include "io/channel-file.h" +#include "qapi/error.h" static int temp_fd; @@ -66,9 +67,13 @@ static QEMUFile *open_test_file(bool write) static void save_vmstate(const VMStateDescription *desc, void *obj) { QEMUFile *f = open_test_file(true); + Error *local_err = NULL; /* Save file with vmstate */ - int ret = vmstate_save_state(f, desc, obj, NULL); + int ret = vmstate_save_state(f, desc, obj, NULL, &local_err); + if (ret) { + error_report_err(local_err); + } g_assert(!ret); qemu_put_byte(f, QEMU_VM_EOF); g_assert(!qemu_file_get_error(f)); @@ -108,14 +113,16 @@ static int load_vmstate_one(const VMStateDescription *desc, void *obj, { QEMUFile *f; int ret; + Error *local_err = NULL; f = open_test_file(true); qemu_put_buffer(f, wire, size); qemu_fclose(f); f = open_test_file(false); - ret = 
vmstate_load_state(f, desc, obj, version); + ret = vmstate_load_state(f, desc, obj, version, &local_err); if (ret) { + error_report_err(local_err); g_assert(qemu_file_get_error(f)); } else{ g_assert(!qemu_file_get_error(f)); @@ -355,6 +362,8 @@ static const VMStateDescription vmstate_versioned = { static void test_load_v1(void) { + Error *local_err = NULL; + int ret; uint8_t buf[] = { 0, 0, 0, 10, /* a */ 0, 0, 0, 30, /* c */ @@ -365,7 +374,10 @@ static void test_load_v1(void) QEMUFile *loading = open_test_file(false); TestStruct obj = { .b = 200, .e = 500, .f = 600 }; - vmstate_load_state(loading, &vmstate_versioned, &obj, 1); + ret = vmstate_load_state(loading, &vmstate_versioned, &obj, 1, &local_err); + if (ret < 0) { + error_report_err(local_err); + } g_assert(!qemu_file_get_error(loading)); g_assert_cmpint(obj.a, ==, 10); g_assert_cmpint(obj.b, ==, 200); @@ -378,6 +390,8 @@ static void test_load_v1(void) static void test_load_v2(void) { + Error *local_err = NULL; + int ret; uint8_t buf[] = { 0, 0, 0, 10, /* a */ 0, 0, 0, 20, /* b */ @@ -391,7 +405,10 @@ static void test_load_v2(void) QEMUFile *loading = open_test_file(false); TestStruct obj; - vmstate_load_state(loading, &vmstate_versioned, &obj, 2); + ret = vmstate_load_state(loading, &vmstate_versioned, &obj, 2, &local_err); + if (ret < 0) { + error_report_err(local_err); + } g_assert_cmpint(obj.a, ==, 10); g_assert_cmpint(obj.b, ==, 20); g_assert_cmpint(obj.c, ==, 30); @@ -425,10 +442,15 @@ static const VMStateDescription vmstate_skipping = { static void test_save_noskip(void) { + Error *local_err = NULL; QEMUFile *fsave = open_test_file(true); TestStruct obj = { .a = 1, .b = 2, .c = 3, .d = 4, .e = 5, .f = 6, .skip_c_e = false }; - int ret = vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL); + int ret = vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL, + &local_err); + if (ret) { + error_report_err(local_err); + } g_assert(!ret); g_assert(!qemu_file_get_error(fsave)); @@ -447,10 +469,15 @@ 
static void test_save_noskip(void) static void test_save_skip(void) { + Error *local_err = NULL; QEMUFile *fsave = open_test_file(true); TestStruct obj = { .a = 1, .b = 2, .c = 3, .d = 4, .e = 5, .f = 6, .skip_c_e = true }; - int ret = vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL); + int ret = vmstate_save_state(fsave, &vmstate_skipping, &obj, NULL, + &local_err); + if (ret) { + error_report_err(local_err); + } g_assert(!ret); g_assert(!qemu_file_get_error(fsave)); @@ -467,6 +494,8 @@ static void test_save_skip(void) static void test_load_noskip(void) { + Error *local_err = NULL; + int ret; uint8_t buf[] = { 0, 0, 0, 10, /* a */ 0, 0, 0, 20, /* b */ @@ -480,7 +509,10 @@ static void test_load_noskip(void) QEMUFile *loading = open_test_file(false); TestStruct obj = { .skip_c_e = false }; - vmstate_load_state(loading, &vmstate_skipping, &obj, 2); + ret = vmstate_load_state(loading, &vmstate_skipping, &obj, 2, &local_err); + if (ret < 0) { + error_report_err(local_err); + } g_assert(!qemu_file_get_error(loading)); g_assert_cmpint(obj.a, ==, 10); g_assert_cmpint(obj.b, ==, 20); @@ -493,6 +525,8 @@ static void test_load_noskip(void) static void test_load_skip(void) { + Error *local_err = NULL; + int ret; uint8_t buf[] = { 0, 0, 0, 10, /* a */ 0, 0, 0, 20, /* b */ @@ -504,7 +538,10 @@ static void test_load_skip(void) QEMUFile *loading = open_test_file(false); TestStruct obj = { .skip_c_e = true, .c = 300, .e = 500 }; - vmstate_load_state(loading, &vmstate_skipping, &obj, 2); + ret = vmstate_load_state(loading, &vmstate_skipping, &obj, 2, &local_err); + if (ret < 0) { + error_report_err(local_err); + } g_assert(!qemu_file_get_error(loading)); g_assert_cmpint(obj.a, ==, 10); g_assert_cmpint(obj.b, ==, 20); @@ -744,6 +781,8 @@ static void test_save_q(void) static void test_load_q(void) { + int ret; + Error *local_err = NULL; TestQtailq obj_q = { .i16 = -512, .i32 = 70000, @@ -773,7 +812,10 @@ static void test_load_q(void) TestQtailq tgt; QTAILQ_INIT(&tgt.q); - 
vmstate_load_state(fload, &vmstate_q, &tgt, 1); + ret = vmstate_load_state(fload, &vmstate_q, &tgt, 1, &local_err); + if (ret < 0) { + error_report_err(local_err); + } char eof = qemu_get_byte(fload); g_assert(!qemu_file_get_error(fload)); g_assert_cmpint(tgt.i16, ==, obj_q.i16); @@ -1115,6 +1157,8 @@ static void diff_iommu(TestGTreeIOMMU *iommu1, TestGTreeIOMMU *iommu2) static void test_gtree_load_domain(void) { + Error *local_err = NULL; + int ret; TestGTreeDomain *dest_domain = g_new0(TestGTreeDomain, 1); TestGTreeDomain *orig_domain = create_first_domain(); QEMUFile *fload, *fsave; @@ -1127,7 +1171,11 @@ static void test_gtree_load_domain(void) fload = open_test_file(false); - vmstate_load_state(fload, &vmstate_domain, dest_domain, 1); + ret = vmstate_load_state(fload, &vmstate_domain, dest_domain, 1, + &local_err); + if (ret < 0) { + error_report_err(local_err); + } eof = qemu_get_byte(fload); g_assert(!qemu_file_get_error(fload)); g_assert_cmpint(orig_domain->id, ==, dest_domain->id); @@ -1230,6 +1278,8 @@ static void test_gtree_save_iommu(void) static void test_gtree_load_iommu(void) { + Error *local_err = NULL; + int ret; TestGTreeIOMMU *dest_iommu = g_new0(TestGTreeIOMMU, 1); TestGTreeIOMMU *orig_iommu = create_iommu(); QEMUFile *fsave, *fload; @@ -1241,7 +1291,10 @@ static void test_gtree_load_iommu(void) qemu_fclose(fsave); fload = open_test_file(false); - vmstate_load_state(fload, &vmstate_iommu, dest_iommu, 1); + ret = vmstate_load_state(fload, &vmstate_iommu, dest_iommu, 1, &local_err); + if (ret < 0) { + error_report_err(local_err); + } eof = qemu_get_byte(fload); g_assert(!qemu_file_get_error(fload)); g_assert_cmpint(orig_iommu->id, ==, dest_iommu->id); @@ -1363,6 +1416,8 @@ static void test_save_qlist(void) static void test_load_qlist(void) { + Error *local_err = NULL; + int ret; QEMUFile *fsave, *fload; TestQListContainer *orig_container = alloc_container(); TestQListContainer *dest_container = g_new0(TestQListContainer, 1); @@ -1376,7 +1431,11 @@ 
static void test_load_qlist(void) qemu_fclose(fsave); fload = open_test_file(false); - vmstate_load_state(fload, &vmstate_container, dest_container, 1); + ret = vmstate_load_state(fload, &vmstate_container, dest_container, 1, + &local_err); + if (ret < 0) { + error_report_err(local_err); + } eof = qemu_get_byte(fload); g_assert(!qemu_file_get_error(fload)); g_assert_cmpint(eof, ==, QEMU_VM_EOF); diff --git a/tests/vm/freebsd b/tests/vm/freebsd index 2e96c9eba52d6..ea09b21fbc1dc 100755 --- a/tests/vm/freebsd +++ b/tests/vm/freebsd @@ -40,7 +40,9 @@ class FreeBSDVM(basevm.BaseVM): tar -xf /dev/vtbd1; cd ../build; ../src/configure --extra-ldflags=-L/usr/local/lib \ - --extra-cflags=-I/usr/local/include {configure_opts}; + --extra-cflags=-I/usr/local/include \ + --enable-rust \ + {configure_opts}; gmake --output-sync -j{jobs} {target} {verbose}; """ diff --git a/tools/i386/qemu-vmsr-helper.c b/tools/i386/qemu-vmsr-helper.c index 5f19a48cbd275..6c0f4fe870e96 100644 --- a/tools/i386/qemu-vmsr-helper.c +++ b/tools/i386/qemu-vmsr-helper.c @@ -213,8 +213,10 @@ static void coroutine_fn vh_co_entry(void *opaque) uint64_t vmsr; int r; - qio_channel_set_blocking(QIO_CHANNEL(client->ioc), - false, NULL); + if (!qio_channel_set_blocking(QIO_CHANNEL(client->ioc), + false, &local_err)) { + goto out; + } qio_channel_set_follow_coroutine_ctx(QIO_CHANNEL(client->ioc), true); diff --git a/trace/ftrace.c b/trace/ftrace.c index 9749543d9b242..6875faedb9c1d 100644 --- a/trace/ftrace.c +++ b/trace/ftrace.c @@ -38,6 +38,21 @@ static int find_mount(char *mount_point, const char *fstype) return ret; } +void ftrace_write(const char *fmt, ...) 
+{ + char ftrace_buf[MAX_TRACE_STRLEN]; + int unused __attribute__ ((unused)); + int trlen; + va_list ap; + + va_start(ap, fmt); + trlen = vsnprintf(ftrace_buf, MAX_TRACE_STRLEN, fmt, ap); + va_end(ap); + + trlen = MIN(trlen, MAX_TRACE_STRLEN - 1); + unused = write(trace_marker_fd, ftrace_buf, trlen); +} + bool ftrace_init(void) { char mount_point[PATH_MAX]; diff --git a/trace/ftrace.h b/trace/ftrace.h index cb5e35d2171d7..16c122816d178 100644 --- a/trace/ftrace.h +++ b/trace/ftrace.h @@ -8,5 +8,6 @@ extern int trace_marker_fd; bool ftrace_init(void); +G_GNUC_PRINTF(1, 2) void ftrace_write(const char *fmt, ...); #endif /* TRACE_FTRACE_H */ diff --git a/trace/meson.build b/trace/meson.build index 9c42a57a05337..d89a0db82a143 100644 --- a/trace/meson.build +++ b/trace/meson.build @@ -1,5 +1,5 @@ system_ss.add(files('control-target.c', 'trace-hmp-cmds.c')) - +trace_rs_targets = [] trace_events_files = [] foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events if item in qapi_trace_events @@ -24,6 +24,11 @@ foreach item : [ '.' ] + trace_events_subdirs + qapi_trace_events input: trace_events_file, command: [ tracetool, group, '--format=c', '@INPUT@', '@OUTPUT@' ], depend_files: tracetool_depends) + trace_rs = custom_target(fmt.format('trace', 'rs'), + output: fmt.format('trace', 'rs'), + input: trace_events_file, + command: [ tracetool, group, '--format=rs', '@INPUT@', '@OUTPUT@' ], + depend_files: tracetool_depends) if 'ust' in get_option('trace_backends') trace_ust_h = custom_target(fmt.format('trace-ust', 'h'), output: fmt.format('trace-ust', 'h'), @@ -34,6 +39,7 @@ foreach item : [ '.' 
] + trace_events_subdirs + qapi_trace_events genh += trace_ust_h endif trace_ss.add(trace_h, trace_c) + trace_rs_targets += trace_rs if 'dtrace' in get_option('trace_backends') trace_dtrace = custom_target(fmt.format('trace-dtrace', 'dtrace'), output: fmt.format('trace-dtrace', 'dtrace'), diff --git a/ui/curses.c b/ui/curses.c index a39aee8762344..161f78c35c32f 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -265,7 +265,8 @@ static int curses2foo(const int _curses2foo[], const int _curseskey2foo[], static void curses_refresh(DisplayChangeListener *dcl) { - int chr, keysym, keycode, keycode_alt; + wint_t chr = 0; + int keysym, keycode, keycode_alt; enum maybe_keycode maybe_keycode = CURSES_KEYCODE; curses_winch_check(); @@ -284,8 +285,9 @@ static void curses_refresh(DisplayChangeListener *dcl) /* while there are any pending key strokes to process */ chr = console_getch(&maybe_keycode); - if (chr == -1) + if (chr == WEOF) { break; + } #ifdef KEY_RESIZE /* this shouldn't occur when we use a custom SIGWINCH handler */ @@ -304,9 +306,9 @@ static void curses_refresh(DisplayChangeListener *dcl) /* alt or esc key */ if (keycode == 1) { enum maybe_keycode next_maybe_keycode = CURSES_KEYCODE; - int nextchr = console_getch(&next_maybe_keycode); + wint_t nextchr = console_getch(&next_maybe_keycode); - if (nextchr != -1) { + if (nextchr != WEOF) { chr = nextchr; maybe_keycode = next_maybe_keycode; keycode_alt = ALT; diff --git a/ui/dbus-listener.c b/ui/dbus-listener.c index 42875b8eed8f1..52e041edb0f72 100644 --- a/ui/dbus-listener.c +++ b/ui/dbus-listener.c @@ -214,24 +214,32 @@ static void dbus_update_gl_cb(GObject *source_object, GAsyncResult *res, gpointer user_data) { - g_autoptr(GError) err = NULL; + g_autoptr(GError) gerr = NULL; +#ifdef WIN32 + Error *err = NULL; +#endif DBusDisplayListener *ddl = user_data; bool success; #ifdef CONFIG_GBM success = qemu_dbus_display1_listener_call_update_dmabuf_finish( - ddl->proxy, res, &err); + ddl->proxy, res, &gerr); + if (!success) 
{ + error_report("Failed to call update: %s", gerr->message); + } #endif #ifdef WIN32 success = qemu_dbus_display1_listener_win32_d3d11_call_update_texture2d_finish( - ddl->d3d11_proxy, res, &err); - d3d_texture2d_acquire0(ddl->d3d_texture, &error_warn); -#endif - + ddl->d3d11_proxy, res, &gerr); if (!success) { - error_report("Failed to call update: %s", err->message); + error_report("Failed to call update: %s", gerr->message); + } + + if (!d3d_texture2d_acquire0(ddl->d3d_texture, &err)) { + error_report_err(err); } +#endif graphic_hw_gl_block(ddl->dcl.con, false); g_object_unref(ddl); diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index 0b787bea25eee..ae9239999cdb6 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -72,7 +72,7 @@ void gd_egl_draw(VirtualConsole *vc) #endif int ww, wh, pw, ph, gs; - if (!vc->gfx.gls) { + if (!vc->gfx.gls || !vc->gfx.ds) { return; } @@ -112,9 +112,6 @@ void gd_egl_draw(VirtualConsole *vc) } #endif } else { - if (!vc->gfx.ds) { - return; - } eglMakeCurrent(qemu_egl_display, vc->gfx.esurface, vc->gfx.esurface, vc->gfx.ectx); diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c index 8151cc413cf7f..cd86022d264a2 100644 --- a/ui/gtk-gl-area.c +++ b/ui/gtk-gl-area.c @@ -48,7 +48,7 @@ void gd_gl_area_draw(VirtualConsole *vc) int fbw, fbh; int wx_offset, wy_offset; - if (!vc->gfx.gls) { + if (!vc->gfx.gls || !vc->gfx.ds) { return; } @@ -135,9 +135,6 @@ void gd_gl_area_draw(VirtualConsole *vc) } #endif } else { - if (!vc->gfx.ds) { - return; - } gtk_gl_area_make_current(GTK_GL_AREA(vc->gfx.drawing_area)); surface_gl_setup_viewport(vc->gfx.gls, vc->gfx.ds, pw, ph); @@ -168,7 +165,22 @@ void gd_gl_area_refresh(DisplayChangeListener *dcl) if (vc->gfx.guest_fb.dmabuf && qemu_dmabuf_get_draw_submitted(vc->gfx.guest_fb.dmabuf)) { - gd_gl_area_draw(vc); + /* + * gd_egl_refresh() calls gd_egl_draw() if a DMA-BUF draw has already + * been submitted, but this function does not call gd_gl_area_draw() in + * such a case due to display corruption. 
+ * + * Calling gd_gl_area_draw() is necessary to prevent a situation where + * there is a scheduled draw event but it won't happen because the window + * is currently in inactive state (minimized or tabified). If draw is not + * done for a long time, gl_block timeout and/or fence timeout (on the + * guest) will happen eventually. + * + * However, it is found that calling gd_gl_area_draw() here causes guest + * display corruption on a Wayland Compositor. The display corruption is + * more serious than the possible fence timeout so gd_gl_area_draw() is + * omitted for now. + */ return; } diff --git a/ui/gtk.c b/ui/gtk.c index e91d093a49e5d..48571bedbf5a7 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -766,9 +766,9 @@ static gboolean gd_render_event(GtkGLArea *area, GdkGLContext *context, } static void gd_resize_event(GtkGLArea *area, - gint width, gint height, gpointer *opaque) + gint width, gint height, gpointer opaque) { - VirtualConsole *vc = (void *)opaque; + VirtualConsole *vc = opaque; double pw = width, ph = height; double sx = vc->gfx.scale_x, sy = vc->gfx.scale_y; GdkWindow *window = gtk_widget_get_window(GTK_WIDGET(area)); @@ -1181,6 +1181,7 @@ static gboolean gd_touch_event(GtkWidget *widget, GdkEventTouch *touch, void *opaque) { VirtualConsole *vc = opaque; + Error *err = NULL; uint64_t num_slot = GPOINTER_TO_UINT(touch->sequence); int type = -1; @@ -1203,7 +1204,10 @@ static gboolean gd_touch_event(GtkWidget *widget, GdkEventTouch *touch, console_handle_touch_event(vc->gfx.dcl.con, touch_slots, num_slot, surface_width(vc->gfx.ds), surface_height(vc->gfx.ds), touch->x, - touch->y, type, &error_warn); + touch->y, type, &err); + if (err) { + warn_report_err(err); + } return TRUE; } diff --git a/ui/icons/qemu.svg b/ui/icons/qemu.svg index 24ca23a1e95dd..f2500de339170 100644 --- a/ui/icons/qemu.svg +++ b/ui/icons/qemu.svg @@ -918,7 +918,26 @@ image/svg+xml - + Kew the Angry Emu + + + Benoît Canet + + + + + CC BY 3.0 + + + + + QEMU Community + + + 2012-02-15 + 
https://lists.gnu.org/archive/html/qemu-devel/2012-02/msg02865.html diff --git a/ui/input-barrier.c b/ui/input-barrier.c index 9793258aac1d7..0a2198ca50031 100644 --- a/ui/input-barrier.c +++ b/ui/input-barrier.c @@ -490,7 +490,6 @@ static gboolean input_barrier_event(QIOChannel *ioc G_GNUC_UNUSED, static void input_barrier_complete(UserCreatable *uc, Error **errp) { InputBarrier *ib = INPUT_BARRIER(uc); - Error *local_err = NULL; if (!ib->name) { error_setg(errp, QERR_MISSING_PARAMETER, "name"); @@ -506,9 +505,7 @@ static void input_barrier_complete(UserCreatable *uc, Error **errp) ib->sioc = qio_channel_socket_new(); qio_channel_set_name(QIO_CHANNEL(ib->sioc), "barrier-client"); - qio_channel_socket_connect_sync(ib->sioc, &ib->saddr, &local_err); - if (local_err) { - error_propagate(errp, local_err); + if (qio_channel_socket_connect_sync(ib->sioc, &ib->saddr, errp) < 0) { return; } diff --git a/ui/input-linux.c b/ui/input-linux.c index 92e1a1aa64ad1..44d0c15a9b724 100644 --- a/ui/input-linux.c +++ b/ui/input-linux.c @@ -316,8 +316,7 @@ static void input_linux_complete(UserCreatable *uc, Error **errp) error_setg_file_open(errp, errno, il->evdev); return; } - if (!g_unix_set_fd_nonblocking(il->fd, true, NULL)) { - error_setg_errno(errp, errno, "Failed to set FD nonblocking"); + if (!qemu_set_blocking(il->fd, false, errp)) { return; } diff --git a/ui/keymaps.c b/ui/keymaps.c index 6ceaa97085abf..2359dbfe7e6d6 100644 --- a/ui/keymaps.c +++ b/ui/keymaps.c @@ -86,19 +86,25 @@ static int parse_keyboard_layout(kbd_layout_t *k, const name2keysym_t *table, const char *language, Error **errp) { + g_autofree char *filename = NULL; int ret; FILE *f; - char * filename; char line[1024]; char keyname[64]; int len; filename = qemu_find_file(QEMU_FILE_TYPE_KEYMAP, language); + if (!filename) { + error_setg(errp, "could not find keymap file for language '%s'", + language); + return -1; + } + trace_keymap_parse(filename); - f = filename ? 
fopen(filename, "r") : NULL; - g_free(filename); + + f = fopen(filename, "r"); if (!f) { - error_setg(errp, "could not read keymap file: '%s'", language); + error_setg_file_open(errp, errno, filename); return -1; } diff --git a/ui/qemu-pixman.c b/ui/qemu-pixman.c index ef4e71da111d3..aea09755b9495 100644 --- a/ui/qemu-pixman.c +++ b/ui/qemu-pixman.c @@ -288,7 +288,12 @@ qemu_pixman_shareable_free(qemu_pixman_shareable handle, void *ptr, size_t size) { #ifdef WIN32 - qemu_win32_map_free(ptr, handle, &error_warn); + Error *err = NULL; + + qemu_win32_map_free(ptr, handle, &err); + if (err) { + error_report_err(err); + } #else qemu_memfd_free(ptr, size, handle); #endif diff --git a/ui/sdl2.c b/ui/sdl2.c index b00e421f7f869..032dc14bc3988 100644 --- a/ui/sdl2.c +++ b/ui/sdl2.c @@ -421,7 +421,7 @@ static void handle_keydown(SDL_Event *ev) sdl_grab_end(scon); } break; - case SDL_SCANCODE_U: + case SDL_SCANCODE_0: sdl2_window_resize(scon); if (!scon->opengl) { /* re-create scon->texture */ diff --git a/ui/spice-core.c b/ui/spice-core.c index 5992f9daecc25..8a6050f4ae236 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -50,8 +50,6 @@ static int spice_migration_completed; static int spice_display_is_running; static int spice_have_target_host; -static QemuThread me; - struct SpiceTimer { QEMUTimer *timer; }; @@ -128,11 +126,13 @@ static void watch_update_mask(SpiceWatch *watch, int event_mask) static SpiceWatch *watch_add(int fd, int event_mask, SpiceWatchFunc func, void *opaque) { SpiceWatch *watch; - #ifdef WIN32 + g_autofree char *msg = NULL; + fd = _open_osfhandle(fd, _O_BINARY); if (fd < 0) { - error_setg_win32(&error_warn, WSAGetLastError(), "Couldn't associate a FD with the SOCKET"); + msg = g_win32_error_message(WSAGetLastError()); + warn_report("Couldn't associate a FD with the SOCKET: %s", msg); return NULL; } #endif @@ -222,7 +222,7 @@ static void channel_event(int event, SpiceChannelEventInfo *info) * thread and grab the BQL if so before calling qemu * 
functions. */ - bool need_lock = !qemu_thread_is_self(&me); + bool need_lock = !bql_locked(); if (need_lock) { bql_lock(); } @@ -675,8 +675,6 @@ static void qemu_spice_init(void) spice_wan_compression_t wan_compr; bool seamless_migration; - qemu_thread_get_self(&me); - if (!opts) { return; } diff --git a/ui/spice-display.c b/ui/spice-display.c index 9ce622cefce23..db71e866f89ff 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -980,7 +980,9 @@ static void spice_server_gl_scanout(QXLInstance *qxl, spice_qxl_gl_scanout2(qxl, fd, width, height, offset, stride, num_planes, format, modifier, y_0_top); #else - if (num_planes <= 1) { + if (fd == NULL) { + spice_qxl_gl_scanout(qxl, -1, 0, 0, 0, 0, false); + } else if (num_planes <= 1) { spice_qxl_gl_scanout(qxl, fd[0], width, height, stride[0], format, y_0_top); } else { error_report("SPICE server does not support multi plane GL scanout"); @@ -1183,20 +1185,20 @@ static void qemu_spice_gl_release_dmabuf(DisplayChangeListener *dcl, egl_dmabuf_release_texture(dmabuf); } -static bool spice_gl_blit_scanout_texture(SimpleSpiceDisplay *ssd, - egl_fb *scanout_tex_fb) +static bool spice_gl_blit_scanout_texture(SimpleSpiceDisplay *ssd) { uint32_t offsets[DMABUF_MAX_PLANES], strides[DMABUF_MAX_PLANES]; int fds[DMABUF_MAX_PLANES], num_planes, fourcc; + egl_fb scanout_tex_fb = {}; uint64_t modifier; bool ret; - egl_fb_destroy(scanout_tex_fb); - egl_fb_setup_for_tex(scanout_tex_fb, + egl_fb_setup_for_tex(&scanout_tex_fb, surface_width(ssd->ds), surface_height(ssd->ds), ssd->ds->texture, false); - egl_fb_blit(scanout_tex_fb, &ssd->guest_fb, false); + egl_fb_blit(&scanout_tex_fb, &ssd->guest_fb, false); glFlush(); + egl_fb_destroy(&scanout_tex_fb); if (!ssd->new_scanout_texture) { return true; @@ -1330,9 +1332,7 @@ static void qemu_spice_gl_update(DisplayChangeListener *dcl, } if (spice_remote_client && ssd->blit_scanout_texture) { - egl_fb scanout_tex_fb; - - ret = spice_gl_blit_scanout_texture(ssd, &scanout_tex_fb); + ret = 
spice_gl_blit_scanout_texture(ssd); if (!ret) { return; } diff --git a/ui/trace-events b/ui/trace-events index 3da0d5e2800db..3eba9ca3a825e 100644 --- a/ui/trace-events +++ b/ui/trace-events @@ -48,13 +48,27 @@ vnc_msg_server_ext_desktop_resize(void *state, void *ioc, int width, int height, vnc_msg_client_audio_enable(void *state, void *ioc) "VNC client msg audio enable state=%p ioc=%p" vnc_msg_client_audio_disable(void *state, void *ioc) "VNC client msg audio disable state=%p ioc=%p" vnc_msg_client_audio_format(void *state, void *ioc, int fmt, int channels, int freq) "VNC client msg audio format state=%p ioc=%p fmt=%d channels=%d freq=%d" +vnc_msg_client_cut_text(void *state, void *ioc, int len) "VNC client msg cut text state=%p ioc=%p len=%u" +vnc_msg_client_cut_text_ext(void *state, void *ioc, int len, int flags) "VNC client msg cut text state=%p ioc=%p len=%u flags=%u" +vnc_msg_client_ext_key_event(void *state, void *ioc, int down, int sym, int keycode) "VNC client msg ext key event state=%p ioc=%p down=%u sym=%u keycode=%u" +vnc_msg_client_framebuffer_update_request(void *state, void *ioc, int incremental, int x, int y, int w, int h) "VNC client msg framebuffer update request state=%p ioc=%p incremental=%u x=%u y=%u w=%u h=%u" +vnc_msg_client_key_event(void *state, void *ioc, int down, int sym) "VNC client msg key event state=%p ioc=%p down=%u sym=%u" +vnc_msg_client_pointer_event(void *state, void *ioc, int button_mask, int x, int y) "VNC client msg pointer event state=%p ioc=%p button_mask=%u x=%u y=%u" vnc_msg_client_set_desktop_size(void *state, void *ioc, int width, int height, int screens) "VNC client msg set desktop size state=%p ioc=%p size=%dx%d screens=%d" +vnc_msg_client_set_encodings(void *state, void *ioc, int limit) "VNC client msg set encodings state=%p ioc=%p limit=%u" +vnc_msg_client_set_pixel_format(void *state, void *ioc, int bpp, int big_endian, int true_color) "VNC client msg set pixel format state=%p ioc=%p bpp=%u big_endian=%u 
true_color=%u" +vnc_msg_client_set_pixel_format_rgb(void *state, void *ioc, int red_max, int green_max, int blue_max, int red_shift, int green_shift, int blue_shift) "VNC client msg set pixel format RGB state=%p ioc=%p red_max=%u green_max=%u blue_max=%u red_shift=%u green_shift=%u blue_shift=%u" +vnc_msg_client_xvp(void *state, void *ioc, int version, int action) "VNC client msg XVP state=%p ioc=%p version=%u action=%u" vnc_client_eof(void *state, void *ioc) "VNC client EOF state=%p ioc=%p" vnc_client_io_error(void *state, void *ioc, const char *msg) "VNC client I/O error state=%p ioc=%p errmsg=%s" vnc_client_connect(void *state, void *ioc) "VNC client connect state=%p ioc=%p" vnc_client_disconnect_start(void *state, void *ioc) "VNC client disconnect start state=%p ioc=%p" vnc_client_disconnect_finish(void *state, void *ioc) "VNC client disconnect finish state=%p ioc=%p" vnc_client_io_wrap(void *state, void *ioc, const char *type) "VNC client I/O wrap state=%p ioc=%p type=%s" +vnc_client_pixel_format(void *state, void *ioc, int bpp, int depth, int endian) "VNC client pixel format state=%p ioc=%p bpp=%u depth=%u endian=%u" +vnc_client_pixel_format_red(void *state, void *ioc, int max, int bits, int shift, int mask) "VNC client pixel format red state=%p ioc=%p max=%u bits=%u shift=%u mask=%u" +vnc_client_pixel_format_green(void *state, void *ioc, int max, int bits, int shift, int mask) "VNC client pixel format green state=%p ioc=%p max=%u bits=%u shift=%u mask=%u" +vnc_client_pixel_format_blue(void *state, void *ioc, int max, int bits, int shift, int mask) "VNC client pixel format blue state=%p ioc=%p max=%u bits=%u shift=%u mask=%u" vnc_client_throttle_threshold(void *state, void *ioc, size_t oldoffset, size_t offset, int client_width, int client_height, int bytes_per_pixel, void *audio_cap) "VNC client throttle threshold state=%p ioc=%p oldoffset=%zu newoffset=%zu width=%d height=%d bpp=%d audio=%p" vnc_client_throttle_incremental(void *state, void *ioc, int 
job_update, size_t offset) "VNC client throttle incremental state=%p ioc=%p job-update=%d offset=%zu" vnc_client_throttle_forced(void *state, void *ioc, int job_update, size_t offset) "VNC client throttle forced state=%p ioc=%p job-update=%d offset=%zu" diff --git a/ui/vdagent.c b/ui/vdagent.c index c0746fe5b168f..ddb91e75c64a2 100644 --- a/ui/vdagent.c +++ b/ui/vdagent.c @@ -992,7 +992,8 @@ static int put_cbinfo(QEMUFile *f, void *pv, size_t size, } } - return vmstate_save_state(f, &vmstate_cbinfo_array, &cbinfo, vmdesc); + return vmstate_save_state(f, &vmstate_cbinfo_array, &cbinfo, vmdesc, + &error_fatal); } static int get_cbinfo(QEMUFile *f, void *pv, size_t size, @@ -1001,6 +1002,7 @@ static int get_cbinfo(QEMUFile *f, void *pv, size_t size, VDAgentChardev *vd = QEMU_VDAGENT_CHARDEV(pv); struct CBInfoArray cbinfo = {}; int i, ret; + Error *local_err = NULL; if (!have_clipboard(vd)) { return 0; @@ -1008,8 +1010,10 @@ static int get_cbinfo(QEMUFile *f, void *pv, size_t size, vdagent_clipboard_peer_register(vd); - ret = vmstate_load_state(f, &vmstate_cbinfo_array, &cbinfo, 0); + ret = vmstate_load_state(f, &vmstate_cbinfo_array, &cbinfo, 0, + &local_err); if (ret) { + error_report_err(local_err); return ret; } diff --git a/ui/vnc.c b/ui/vnc.c index 1df35832d5411..77c823bf2e85f 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -2309,6 +2309,25 @@ static void set_pixel_format(VncState *vs, int bits_per_pixel, vs->client_pf.bytes_per_pixel = bits_per_pixel / 8; vs->client_pf.depth = bits_per_pixel == 32 ? 24 : bits_per_pixel; vs->client_endian = big_endian_flag ? 
G_BIG_ENDIAN : G_LITTLE_ENDIAN; + trace_vnc_client_pixel_format(vs, vs->ioc, + vs->client_pf.bits_per_pixel, + vs->client_pf.depth, + vs->client_endian); + trace_vnc_client_pixel_format_red(vs, vs->ioc, + vs->client_pf.rmax, + vs->client_pf.rbits, + vs->client_pf.rshift, + vs->client_pf.rmask); + trace_vnc_client_pixel_format_green(vs, vs->ioc, + vs->client_pf.gmax, + vs->client_pf.gbits, + vs->client_pf.gshift, + vs->client_pf.gmask); + trace_vnc_client_pixel_format_blue(vs, vs->ioc, + vs->client_pf.bmax, + vs->client_pf.bbits, + vs->client_pf.bshift, + vs->client_pf.bmask); if (!true_color_flag) { send_color_map(vs); @@ -2324,6 +2343,7 @@ static void pixel_format_message (VncState *vs) { char pad[3] = { 0, 0, 0 }; vs->client_pf = qemu_default_pixelformat(32); + vs->client_endian = G_BYTE_ORDER; vnc_write_u8(vs, vs->client_pf.bits_per_pixel); /* bits-per-pixel */ vnc_write_u8(vs, vs->client_pf.depth); /* depth */ @@ -2382,6 +2402,17 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) if (len == 1) return 20; + trace_vnc_msg_client_set_pixel_format(vs, vs->ioc, + read_u8(data, 4), + read_u8(data, 6), + read_u8(data, 7)); + trace_vnc_msg_client_set_pixel_format_rgb(vs, vs->ioc, + read_u16(data, 8), + read_u16(data, 10), + read_u16(data, 12), + read_u8(data, 14), + read_u8(data, 15), + read_u8(data, 16)); set_pixel_format(vs, read_u8(data, 4), read_u8(data, 6), read_u8(data, 7), read_u16(data, 8), read_u16(data, 10), @@ -2404,12 +2435,19 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) memcpy(data + 4 + (i * 4), &val, sizeof(val)); } + trace_vnc_msg_client_set_encodings(vs, vs->ioc, limit); set_encodings(vs, (int32_t *)(data + 4), limit); break; case VNC_MSG_CLIENT_FRAMEBUFFER_UPDATE_REQUEST: if (len == 1) return 10; + trace_vnc_msg_client_framebuffer_update_request(vs, vs->ioc, + read_u8(data, 1), + read_u16(data, 2), + read_u16(data, 4), + read_u16(data, 6), + read_u16(data, 8)); framebuffer_update_request(vs, 
read_u8(data, 1), read_u16(data, 2), read_u16(data, 4), read_u16(data, 6), read_u16(data, 8)); @@ -2418,12 +2456,19 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) if (len == 1) return 8; + trace_vnc_msg_client_key_event(vs, vs->ioc, + read_u8(data, 1), + read_u32(data, 4)); key_event(vs, read_u8(data, 1), read_u32(data, 4)); break; case VNC_MSG_CLIENT_POINTER_EVENT: if (len == 1) return 6; + trace_vnc_msg_client_pointer_event(vs, vs->ioc, + read_u8(data, 1), + read_u16(data, 2), + read_u16(data, 4)); pointer_event(vs, read_u8(data, 1), read_u16(data, 2), read_u16(data, 4)); break; case VNC_MSG_CLIENT_CUT_TEXT: @@ -2455,9 +2500,12 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) vnc_client_error(vs); break; } + trace_vnc_msg_client_cut_text_ext(vs, vs->ioc, + dlen, read_u32(data, 8)); vnc_client_cut_text_ext(vs, dlen, read_u32(data, 8), data + 12); break; } + trace_vnc_msg_client_cut_text(vs, vs->ioc, read_u32(data, 4)); vnc_client_cut_text(vs, read_u32(data, 4), data + 8); break; case VNC_MSG_CLIENT_XVP: @@ -2472,6 +2520,7 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) if (len == 4) { uint8_t version = read_u8(data, 2); uint8_t action = read_u8(data, 3); + trace_vnc_msg_client_xvp(vs, vs->ioc, version, action); if (version != 1) { error_report("vnc: xvp client message version %d != 1", @@ -2505,6 +2554,10 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) if (len == 2) return 12; + trace_vnc_msg_client_ext_key_event(vs, vs->ioc, + read_u16(data, 2), + read_u32(data, 4), + read_u32(data, 8)); ext_key_event(vs, read_u16(data, 2), read_u32(data, 4), read_u32(data, 8)); break; @@ -3284,7 +3337,7 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc, VNC_DEBUG("New client on socket %p\n", vs->sioc); update_displaychangelistener(&vd->dcl, VNC_REFRESH_INTERVAL_BASE); - qio_channel_set_blocking(vs->ioc, false, NULL); + qio_channel_set_blocking(vs->ioc, 
false, &error_abort); if (vs->ioc_tag) { g_source_remove(vs->ioc_tag); } @@ -4256,8 +4309,9 @@ void vnc_display_add_client(const char *id, int csock, bool skipauth) } } -static void vnc_auto_assign_id(QemuOptsList *olist, QemuOpts *opts) +static char *vnc_auto_assign_id(QemuOpts *opts) { + QemuOptsList *olist = qemu_find_opts("vnc"); int i = 2; char *id; @@ -4267,23 +4321,18 @@ static void vnc_auto_assign_id(QemuOptsList *olist, QemuOpts *opts) id = g_strdup_printf("vnc%d", i++); } qemu_opts_set_id(opts, id); + + return id; } void vnc_parse(const char *str) { QemuOptsList *olist = qemu_find_opts("vnc"); QemuOpts *opts = qemu_opts_parse_noisily(olist, str, !is_help_option(str)); - const char *id; if (!opts) { exit(1); } - - id = qemu_opts_id(opts); - if (!id) { - /* auto-assign id if not present */ - vnc_auto_assign_id(olist, opts); - } } int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp) @@ -4291,7 +4340,11 @@ int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp) Error *local_err = NULL; char *id = (char *)qemu_opts_id(opts); - assert(id); + if (!id) { + /* auto-assign id if not present */ + id = vnc_auto_assign_id(opts); + } + vnc_display_init(id, &local_err); if (local_err) { error_propagate(errp, local_err); diff --git a/util/aio-win32.c b/util/aio-win32.c index 6583d5c5f31a3..c6fbce64c2cb8 100644 --- a/util/aio-win32.c +++ b/util/aio-win32.c @@ -121,7 +121,7 @@ void aio_set_fd_handler(AioContext *ctx, QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node); event = event_notifier_get_handle(&ctx->notifier); - qemu_socket_select(fd, event, bitmask, NULL); + qemu_socket_select_nofail(fd, event, bitmask); } if (old_node) { aio_remove_fd_handler(ctx, old_node); diff --git a/util/async.c b/util/async.c index 2719c629ae976..a736d2cd0d089 100644 --- a/util/async.c +++ b/util/async.c @@ -256,8 +256,9 @@ static int64_t aio_compute_bh_timeout(BHList *head, int timeout) QEMUBH *bh; QSLIST_FOREACH_RCU(bh, head, next) { - if ((bh->flags & (BH_SCHEDULED | 
BH_DELETED)) == BH_SCHEDULED) { - if (bh->flags & BH_IDLE) { + int flags = qatomic_load_acquire(&bh->flags); + if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) { + if (flags & BH_IDLE) { /* idle bottom halves will be polled at least * every 10ms */ timeout = 10000000; @@ -335,14 +336,16 @@ aio_ctx_check(GSource *source) aio_notify_accept(ctx); QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) { - if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) { + int flags = qatomic_load_acquire(&bh->flags); + if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) { return true; } } QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) { QSLIST_FOREACH_RCU(bh, &s->bh_list, next) { - if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) { + int flags = qatomic_load_acquire(&bh->flags); + if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) { return true; } } diff --git a/util/cacheflush.c b/util/cacheflush.c index 17c58918de60d..69c9614e2c9bb 100644 --- a/util/cacheflush.c +++ b/util/cacheflush.c @@ -153,7 +153,7 @@ static void arch_cache_info(int *isize, int *dsize) } } -#elif defined(_ARCH_PPC) && defined(__linux__) +#elif defined(_ARCH_PPC64) && defined(__linux__) # include "elf.h" static void arch_cache_info(int *isize, int *dsize) @@ -187,7 +187,7 @@ static void fallback_cache_info(int *isize, int *dsize) } else if (*dsize) { *isize = *dsize; } else { -#if defined(_ARCH_PPC) +#if defined(_ARCH_PPC64) /* * For PPC, we're going to use the cache sizes computed for * flush_idcache_range. Which means that we must use the diff --git a/util/cpuinfo-i386.c b/util/cpuinfo-i386.c index c8c8a1b3705eb..f4c5b6ff407c9 100644 --- a/util/cpuinfo-i386.c +++ b/util/cpuinfo-i386.c @@ -50,6 +50,7 @@ unsigned __attribute__((constructor)) cpuinfo_init(void) if ((bv & 6) == 6) { info |= CPUINFO_AVX1; info |= (b7 & bit_AVX2 ? CPUINFO_AVX2 : 0); + info |= (c7 & bit_GFNI ? CPUINFO_GFNI : 0); if ((bv & 0xe0) == 0xe0) { info |= (b7 & bit_AVX512F ? 
CPUINFO_AVX512F : 0); diff --git a/util/error.c b/util/error.c index daea2142f3012..0ae08225c095a 100644 --- a/util/error.c +++ b/util/error.c @@ -19,7 +19,6 @@ Error *error_abort; Error *error_fatal; -Error *error_warn; static void error_handle(Error **errp, Error *err) { @@ -41,9 +40,7 @@ static void error_handle(Error **errp, Error *err) error_report_err(err); exit(1); } - if (errp == &error_warn) { - warn_report_err(err); - } else if (errp && !*errp) { + if (errp && !*errp) { *errp = err; } else { error_free(err); diff --git a/util/event_notifier-posix.c b/util/event_notifier-posix.c index 76420c5b560c1..83fdbb96bbcfc 100644 --- a/util/event_notifier-posix.c +++ b/util/event_notifier-posix.c @@ -11,6 +11,7 @@ */ #include "qemu/osdep.h" +#include "qapi/error.h" #include "qemu/cutils.h" #include "qemu/event_notifier.h" #include "qemu/main-loop.h" @@ -36,6 +37,7 @@ int event_notifier_init(EventNotifier *e, int active) { int fds[2]; int ret; + Error *local_err = NULL; #ifdef CONFIG_EVENTFD ret = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC); @@ -52,11 +54,11 @@ int event_notifier_init(EventNotifier *e, int active) if (!g_unix_open_pipe(fds, FD_CLOEXEC, NULL)) { return -errno; } - if (!g_unix_set_fd_nonblocking(fds[0], true, NULL)) { + if (!qemu_set_blocking(fds[0], false, &local_err)) { ret = -errno; goto fail; } - if (!g_unix_set_fd_nonblocking(fds[1], true, NULL)) { + if (!qemu_set_blocking(fds[1], false, &local_err)) { ret = -errno; goto fail; } @@ -70,6 +72,7 @@ int event_notifier_init(EventNotifier *e, int active) return 0; fail: + error_report_err(local_err); close(fds[0]); close(fds[1]); return ret; diff --git a/util/log.c b/util/log.c index 58d24de48a01a..41f78ce86b252 100644 --- a/util/log.c +++ b/util/log.c @@ -44,7 +44,7 @@ static FILE *global_file; static __thread FILE *thread_file; static __thread Notifier qemu_log_thread_cleanup_notifier; -int qemu_loglevel; +unsigned qemu_loglevel; static bool log_per_thread; static GArray *debug_regions; @@ -145,10 +145,28 
@@ void qemu_log_unlock(FILE *logfile) void qemu_log(const char *fmt, ...) { - FILE *f = qemu_log_trylock(); + FILE *f; + g_autofree const char *timestr = NULL; + + /* + * Prepare the timestamp *outside* the logging + * lock so it better reflects when the message + * was emitted if we are delayed acquiring the + * mutex + */ + if (message_with_timestamp) { + g_autoptr(GDateTime) dt = g_date_time_new_now_utc(); + timestr = g_date_time_format_iso8601(dt); + } + + f = qemu_log_trylock(); if (f) { va_list ap; + if (timestr) { + fprintf(f, "%s ", timestr); + } + va_start(ap, fmt); vfprintf(f, fmt, ap); va_end(ap); diff --git a/util/main-loop.c b/util/main-loop.c index 51aeb2432e77e..b8ddda8f5eecf 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -114,7 +114,10 @@ static int qemu_signal_init(Error **errp) return -errno; } - g_unix_set_fd_nonblocking(sigfd, true, NULL); + if (!qemu_set_blocking(sigfd, false, errp)) { + close(sigfd); + return -EINVAL; + } qemu_set_fd_handler(sigfd, sigfd_handler, NULL, (void *)(intptr_t)sigfd); diff --git a/util/oslib-posix.c b/util/oslib-posix.c index 4ff577e5de666..3c14b726659f4 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -250,21 +250,19 @@ void qemu_anon_ram_free(void *ptr, size_t size) #endif } -void qemu_socket_set_block(int fd) +bool qemu_set_blocking(int fd, bool block, Error **errp) { - g_unix_set_fd_nonblocking(fd, false, NULL); -} + g_autoptr(GError) err = NULL; -int qemu_socket_try_set_nonblock(int fd) -{ - return g_unix_set_fd_nonblocking(fd, true, NULL) ? 0 : -errno; -} + if (!g_unix_set_fd_nonblocking(fd, !block, &err)) { + error_setg_errno(errp, errno, + "Can't set file descriptor %d %s: %s", fd, + block ? 
"blocking" : "non-blocking", + err->message); + return false; + } -void qemu_socket_set_nonblock(int fd) -{ - int f; - f = qemu_socket_try_set_nonblock(fd); - assert(f == 0); + return true; } int socket_set_fast_reuse(int fd) @@ -307,6 +305,15 @@ int qemu_socketpair(int domain, int type, int protocol, int sv[2]) return ret; } +void qemu_clear_cloexec(int fd) +{ + int f; + f = fcntl(fd, F_GETFD); + assert(f != -1); + f = fcntl(fd, F_SETFD, f & ~FD_CLOEXEC); + assert(f != -1); +} + char * qemu_get_local_state_dir(void) { diff --git a/util/oslib-win32.c b/util/oslib-win32.c index b7351634ece12..839b8a4170e44 100644 --- a/util/oslib-win32.c +++ b/util/oslib-win32.c @@ -177,25 +177,22 @@ static int socket_error(void) } } -void qemu_socket_set_block(int fd) +bool qemu_set_blocking(int fd, bool block, Error **errp) { - unsigned long opt = 0; - qemu_socket_unselect(fd, NULL); - ioctlsocket(fd, FIONBIO, &opt); -} + unsigned long opt = block ? 0 : 1; + + if (block) { + qemu_socket_unselect_nofail(fd); + } -int qemu_socket_try_set_nonblock(int fd) -{ - unsigned long opt = 1; if (ioctlsocket(fd, FIONBIO, &opt) != NO_ERROR) { - return -socket_error(); + error_setg_errno(errp, socket_error(), + "Can't set file descriptor %d %s", fd, + block ? 
"blocking" : "non-blocking"); + return false; } - return 0; -} -void qemu_socket_set_nonblock(int fd) -{ - (void)qemu_socket_try_set_nonblock(fd); + return true; } int socket_set_fast_reuse(int fd) @@ -222,6 +219,10 @@ void qemu_set_cloexec(int fd) { } +void qemu_clear_cloexec(int fd) +{ +} + int qemu_get_thread_id(void) { return GetCurrentThreadId(); @@ -296,10 +297,6 @@ bool qemu_socket_select(int sockfd, WSAEVENT hEventObject, { SOCKET s = _get_osfhandle(sockfd); - if (errp == NULL) { - errp = &error_warn; - } - if (s == INVALID_SOCKET) { error_setg(errp, "invalid socket fd=%d", sockfd); return false; @@ -318,6 +315,25 @@ bool qemu_socket_unselect(int sockfd, Error **errp) return qemu_socket_select(sockfd, NULL, 0, errp); } +void qemu_socket_select_nofail(int sockfd, WSAEVENT hEventObject, + long lNetworkEvents) +{ + Error *err = NULL; + + if (!qemu_socket_select(sockfd, hEventObject, lNetworkEvents, &err)) { + warn_report_err(err); + } +} + +void qemu_socket_unselect_nofail(int sockfd) +{ + Error *err = NULL; + + if (!qemu_socket_unselect(sockfd, &err)) { + warn_report_err(err); + } +} + int qemu_socketpair(int domain, int type, int protocol, int sv[2]) { struct sockaddr_un addr = { diff --git a/util/qemu-timer.c b/util/qemu-timer.c index 1fb48be281a75..56f11b6a641f6 100644 --- a/util/qemu-timer.c +++ b/util/qemu-timer.c @@ -89,7 +89,7 @@ static inline QEMUClock *qemu_clock_ptr(QEMUClockType type) return &qemu_clocks[type]; } -static bool timer_expired_ns(QEMUTimer *timer_head, int64_t current_time) +static bool timer_expired_ns(const QEMUTimer *timer_head, int64_t current_time) { return timer_head && (timer_head->expire_time <= current_time); } @@ -475,12 +475,12 @@ void timer_mod_anticipate(QEMUTimer *ts, int64_t expire_time) timer_mod_anticipate_ns(ts, expire_time * ts->scale); } -bool timer_pending(QEMUTimer *ts) +bool timer_pending(const QEMUTimer *ts) { return ts->expire_time >= 0; } -bool timer_expired(QEMUTimer *timer_head, int64_t current_time) +bool 
timer_expired(const QEMUTimer *timer_head, int64_t current_time) { return timer_expired_ns(timer_head, current_time * timer_head->scale); } @@ -649,7 +649,7 @@ void init_clocks(QEMUTimerListNotifyCB *notify_cb) #endif } -uint64_t timer_expire_time_ns(QEMUTimer *ts) +uint64_t timer_expire_time_ns(const QEMUTimer *ts) { return timer_pending(ts) ? ts->expire_time : -1; } diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index b19229074ad98..1dbe409f82938 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -62,7 +62,7 @@ static void vmsg_close_fds(VhostUserMsg *vmsg) } } -static void vmsg_unblock_fds(VhostUserMsg *vmsg) +static bool vmsg_unblock_fds(VhostUserMsg *vmsg, Error **errp) { int i; @@ -74,12 +74,16 @@ static void vmsg_unblock_fds(VhostUserMsg *vmsg) */ if (vmsg->request == VHOST_USER_ADD_MEM_REG || vmsg->request == VHOST_USER_SET_MEM_TABLE) { - return; + return true; } for (i = 0; i < vmsg->fd_num; i++) { - qemu_socket_set_nonblock(vmsg->fds[i]); + if (!qemu_set_blocking(vmsg->fds[i], false, errp)) { + return false; + } } + + return true; } static void panic_cb(VuDev *vu_dev, const char *buf) @@ -122,7 +126,6 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg) vmsg->fd_num = 0; if (!ioc) { - error_report_err(local_err); goto fail; } @@ -176,7 +179,10 @@ vu_message_read(VuDev *vu_dev, int conn_fd, VhostUserMsg *vmsg) } while (read_bytes != VHOST_USER_HDR_SIZE); /* qio_channel_readv_full will make socket fds blocking, unblock them */ - vmsg_unblock_fds(vmsg); + if (!vmsg_unblock_fds(vmsg, &local_err)) { + error_report_err(local_err); + goto fail; + } if (vmsg->size > sizeof(vmsg->payload)) { error_report("Error: too big message request: %d, " "size: vmsg->size: %u, " @@ -303,7 +309,8 @@ set_watch(VuDev *vu_dev, int fd, int vu_evt, vu_fd_watch->fd = fd; vu_fd_watch->cb = cb; - qemu_socket_set_nonblock(fd); + /* TODO: handle error more gracefully than aborting */ + qemu_set_blocking(fd, false, &error_abort); 
aio_set_fd_handler(server->ctx, fd, kick_handler, NULL, NULL, NULL, vu_fd_watch); vu_fd_watch->vu_dev = vu_dev; @@ -336,6 +343,7 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc, gpointer opaque) { VuServer *server = opaque; + Error *local_err = NULL; if (server->sioc) { warn_report("Only one vhost-user client is allowed to " @@ -368,7 +376,11 @@ static void vu_accept(QIONetListener *listener, QIOChannelSocket *sioc, object_ref(OBJECT(server->ioc)); /* TODO vu_message_write() spins if non-blocking! */ - qio_channel_set_blocking(server->ioc, false, NULL); + if (!qio_channel_set_blocking(server->ioc, false, &local_err)) { + error_report_err(local_err); + vu_deinit(&server->vu_dev); + return; + } qio_channel_set_follow_coroutine_ctx(server->ioc, true);